From 7956af05aa8a092a3e546db65f8f0ad6a013e889 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 13:31:21 +0000 Subject: [PATCH 001/242] Move `lightning-transaction-sync` back into the main workspace Now that it has the same MSRV as everything else in the workspace, it doesn't need to live on its own. --- .github/workflows/build.yml | 27 +++++-------------------- Cargo.toml | 2 +- ci/ci-tests.sh | 21 +++++++++++++++++++- ci/ci-tx-sync-tests.sh | 39 ------------------------------------- 4 files changed, 26 insertions(+), 63 deletions(-) delete mode 100755 ci/ci-tx-sync-tests.sh diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3c50b2a0041..2658ff454e9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -62,52 +62,35 @@ jobs: - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == '1.75.0'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - - name: Run CI script - shell: bash # Default on Winblows is powershell - run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh - - build-tx-sync: - strategy: - fail-fast: false - matrix: - platform: [ ubuntu-latest, macos-latest ] - toolchain: [ stable, beta, 1.75.0 ] - runs-on: ${{ matrix.platform }} - steps: - - name: Checkout source code - uses: actions/checkout@v4 - - name: Install Rust ${{ matrix.toolchain }} toolchain - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ matrix.toolchain }} - - name: Set RUSTFLAGS to deny warnings - if: "matrix.toolchain == '1.75.0'" - run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - name: Enable caching for bitcoind + if: matrix.platform != 'windows-latest' id: cache-bitcoind uses: actions/cache@v4 with: path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} key: bitcoind-${{ runner.os }}-${{ runner.arch }} - name: Enable caching for electrs + if: matrix.platform != 'windows-latest' id: cache-electrs uses: actions/cache@v4 with: path: bin/electrs-${{ runner.os }}-${{ runner.arch }} key: electrs-${{ runner.os }}-${{ runner.arch }} - name: Download bitcoind/electrs - if: "steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true'" + if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | source ./contrib/download_bitcoind_electrs.sh mkdir bin mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} - name: Set bitcoind/electrs environment variables + if: matrix.platform != 'windows-latest' run: | echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" - name: Run CI script shell: bash # Default on Winblows is powershell - run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tx-sync-tests.sh + run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh coverage: needs: fuzz diff --git a/Cargo.toml b/Cargo.toml index f9f7406339e..a0895fe1641 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,11 +16,11 @@ members = [ "lightning-macros", "lightning-dns-resolver", "lightning-liquidity", + "lightning-transaction-sync", "possiblyrandom", ] exclude = [ - "lightning-transaction-sync", "lightning-tests", "ext-functional-test-demo", "no-std-check", diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 91ead9903cb..488c5ac4826 100755 --- 
a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -2,7 +2,6 @@ #shellcheck disable=SC2002,SC2207 set -eox pipefail -# Currently unused as we don't have to pin anything for MSRV: RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }') # Some crates require pinning to meet our MSRV even for our downstream users, @@ -20,6 +19,9 @@ PIN_RELEASE_DEPS # pin the release dependencies in our main workspace # proptest 1.9.0 requires rustc 1.82.0 [ "$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p proptest --precise "1.8.0" --verbose +# Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0. +[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --verbose + export RUST_BACKTRACE=1 echo -e "\n\nChecking the workspace, except lightning-transaction-sync." @@ -57,6 +59,23 @@ cargo check -p lightning-block-sync --verbose --color always --features rpc-clie cargo test -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio cargo check -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio +echo -e "\n\nChecking Transaction Sync Clients with features." +cargo check -p lightning-transaction-sync --verbose --color always --features esplora-blocking +cargo check -p lightning-transaction-sync --verbose --color always --features esplora-async +cargo check -p lightning-transaction-sync --verbose --color always --features esplora-async-https +cargo check -p lightning-transaction-sync --verbose --color always --features electrum + +if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then + echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." + cargo check -p lightning-transaction-sync --tests +else + echo -e "\n\nTesting Transaction Sync Clients with features." + cargo test -p lightning-transaction-sync --verbose --color always --features esplora-blocking + cargo test -p lightning-transaction-sync --verbose --color always --features esplora-async + cargo test -p lightning-transaction-sync --verbose --color always --features esplora-async-https + cargo test -p lightning-transaction-sync --verbose --color always --features electrum +fi + echo -e "\n\nChecking and testing lightning-persister with features" cargo test -p lightning-persister --verbose --color always --features tokio cargo check -p lightning-persister --verbose --color always --features tokio diff --git a/ci/ci-tx-sync-tests.sh b/ci/ci-tx-sync-tests.sh deleted file mode 100755 index 0839e2ced3d..00000000000 --- a/ci/ci-tx-sync-tests.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -set -eox pipefail - -RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }') - -pushd lightning-transaction-sync - -# Some crates require pinning to meet our MSRV even for our downstream users, -# which we do here. -# Further crates which appear only as dev-dependencies are pinned further down. -function PIN_RELEASE_DEPS { - return 0 # Don't fail the script if our rustc is higher than the last check -} - -PIN_RELEASE_DEPS # pin the release dependencies - -# Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0. -[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --verbose - -export RUST_BACKTRACE=1 - -echo -e "\n\nChecking Transaction Sync Clients with features." 
-cargo check --verbose --color always --features esplora-blocking -cargo check --verbose --color always --features esplora-async -cargo check --verbose --color always --features esplora-async-https -cargo check --verbose --color always --features electrum - -if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then - echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." - cargo check --tests -else - echo -e "\n\nTesting Transaction Sync Clients with features." - cargo test --verbose --color always --features esplora-blocking - cargo test --verbose --color always --features esplora-async - cargo test --verbose --color always --features esplora-async-https - cargo test --verbose --color always --features electrum -fi - -popd From 210528475e876b96b59d2844727b09c28f427e4b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 13:41:47 +0000 Subject: [PATCH 002/242] Trivially replace `Box::pin` with `pin!` in a few places Now that our MSRV is above 1.68 we can use the `pin!` macro to avoid having to `Box` various futures, avoiding some allocations, especially in `lightning-net-tokio`, where the allocation happened in a tight loop. --- lightning-liquidity/src/lsps2/service.rs | 17 ++++++++--------- lightning-liquidity/src/manager.rs | 8 ++++---- lightning-net-tokio/src/lib.rs | 13 ++++++------- lightning/src/events/bump_transaction/sync.rs | 3 ++- lightning/src/util/persist.rs | 5 ++--- lightning/src/util/sweep.rs | 5 +++-- 6 files changed, 25 insertions(+), 26 deletions(-) diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index a6736e63eef..5c4bd63bc48 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -9,7 +9,6 @@ //! Contains the main bLIP-52 / LSPS2 server-side object, [`LSPS2ServiceHandler`].
-use alloc::boxed::Box; use alloc::string::{String, ToString}; use alloc::vec::Vec; use lightning::util::persist::KVStore; @@ -17,6 +16,7 @@ use lightning::util::persist::KVStore; use core::cmp::Ordering as CmpOrdering; use core::future::Future as StdFuture; use core::ops::Deref; +use core::pin::pin; use core::sync::atomic::{AtomicUsize, Ordering}; use core::task; @@ -2173,7 +2173,7 @@ where &self, counterparty_node_id: &PublicKey, request_id: LSPSRequestId, intercept_scid: u64, cltv_expiry_delta: u32, client_trusts_lsp: bool, user_channel_id: u128, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.invoice_parameters_generated( + let mut fut = pin!(self.inner.invoice_parameters_generated( counterparty_node_id, request_id, intercept_scid, @@ -2202,7 +2202,7 @@ where &self, intercept_scid: u64, intercept_id: InterceptId, expected_outbound_amount_msat: u64, payment_hash: PaymentHash, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.htlc_intercepted( + let mut fut = pin!(self.inner.htlc_intercepted( intercept_scid, intercept_id, expected_outbound_amount_msat, @@ -2228,7 +2228,7 @@ where pub fn htlc_handling_failed( &self, failure_type: HTLCHandlingFailureType, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.htlc_handling_failed(failure_type)); + let mut fut = pin!(self.inner.htlc_handling_failed(failure_type)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2249,7 +2249,7 @@ where pub fn payment_forwarded( &self, next_channel_id: ChannelId, skimmed_fee_msat: u64, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.payment_forwarded(next_channel_id, skimmed_fee_msat)); + let mut fut = pin!(self.inner.payment_forwarded(next_channel_id, skimmed_fee_msat)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2290,7 +2290,7 @@ where &self, counterparty_node_id: &PublicKey, user_channel_id: u128, ) -> Result<(), APIError> { let mut fut = - Box::pin(self.inner.channel_open_abandoned(counterparty_node_id, user_channel_id)); + pin!(self.inner.channel_open_abandoned(counterparty_node_id, user_channel_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2309,8 +2309,7 @@ where pub fn channel_open_failed( &self, counterparty_node_id: &PublicKey, user_channel_id: u128, ) -> Result<(), APIError> { - let mut fut = - Box::pin(self.inner.channel_open_failed(counterparty_node_id, user_channel_id)); + let mut fut = pin!(self.inner.channel_open_failed(counterparty_node_id, user_channel_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2332,7 +2331,7 @@ where &self, user_channel_id: u128, channel_id: &ChannelId, counterparty_node_id: &PublicKey, ) -> Result<(), APIError> { let mut fut = - Box::pin(self.inner.channel_ready(user_channel_id, channel_id, counterparty_node_id)); + pin!(self.inner.channel_ready(user_channel_id, channel_id, counterparty_node_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index f0143fc624f..d3822715b8d 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -7,7 +7,6 @@ // You may not use this file except in accordance with one or both of these // licenses. 
-use alloc::boxed::Box; use alloc::string::ToString; use alloc::vec::Vec; @@ -61,6 +60,7 @@ use bitcoin::secp256k1::PublicKey; use core::future::Future as StdFuture; use core::ops::Deref; +use core::pin::pin; use core::task; const LSPS_FEATURE_BIT: usize = 729; @@ -1106,7 +1106,7 @@ where ) -> Result { let kv_store = KVStoreSyncWrapper(kv_store_sync); - let mut fut = Box::pin(LiquidityManager::new( + let mut fut = pin!(LiquidityManager::new( entropy_source, node_signer, channel_manager, @@ -1159,7 +1159,7 @@ where client_config: Option, time_provider: TP, ) -> Result { let kv_store = KVStoreSyncWrapper(kv_store_sync); - let mut fut = Box::pin(LiquidityManager::new_with_custom_time_provider( + let mut fut = pin!(LiquidityManager::new_with_custom_time_provider( entropy_source, node_signer, channel_manager, @@ -1289,7 +1289,7 @@ where pub fn persist(&self) -> Result<(), lightning::io::Error> { let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match Box::pin(self.inner.persist()).as_mut().poll(&mut ctx) { + match pin!(self.inner.persist()).as_mut().poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { // In a sync context, we can't wait for the future to complete. diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 068f77a84bb..c6fbd3dc3c5 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -43,7 +43,7 @@ use std::hash::Hash; use std::net::SocketAddr; use std::net::TcpStream as StdTcpStream; use std::ops::Deref; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::task::{self, Poll}; @@ -205,18 +205,17 @@ impl Connection { } us_lock.read_paused }; - // TODO: Drop the Box'ing of the futures once Rust has pin-on-stack support. let select_result = if read_paused { TwoSelector { - a: Box::pin(write_avail_receiver.recv()), - b: Box::pin(read_wake_receiver.recv()), + a: pin!(write_avail_receiver.recv()), + b: pin!(read_wake_receiver.recv()), } .await } else { ThreeSelector { - a: Box::pin(write_avail_receiver.recv()), - b: Box::pin(read_wake_receiver.recv()), - c: Box::pin(reader.readable()), + a: pin!(write_avail_receiver.recv()), + b: pin!(read_wake_receiver.recv()), + c: pin!(reader.readable()), } .await }; diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index 653710a3358..cbc686ed8fe 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -11,6 +11,7 @@ use core::future::Future; use core::ops::Deref; +use core::pin::pin; use core::task; use crate::chain::chaininterface::BroadcasterInterface; @@ -289,7 +290,7 @@ where /// Handles all variants of [`BumpTransactionEvent`]. 
pub fn handle_event(&self, event: &BumpTransactionEvent) { - let mut fut = Box::pin(self.bump_transaction_event_handler.handle_event(event)); + let mut fut = pin!(self.bump_transaction_event_handler.handle_event(event)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); match fut.as_mut().poll(&mut ctx) { diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index d00e29e686a..3ad9b4270c5 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -19,7 +19,7 @@ use bitcoin::{BlockHash, Txid}; use core::future::Future; use core::mem; use core::ops::Deref; -use core::pin::Pin; +use core::pin::{pin, Pin}; use core::str::FromStr; use core::task; @@ -490,8 +490,7 @@ impl FutureSpawner for PanicingSpawner { fn poll_sync_future<F: Future>(future: F) -> F::Output { let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - // TODO A future MSRV bump to 1.68 should allow for the pin macro - match Pin::new(&mut Box::pin(future)).poll(&mut ctx) { + match pin!(future).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { // In a sync context, we can't wait for the future to complete. diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 5a1ffad3e04..a3ded6f32b8 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -35,6 +35,7 @@ use bitcoin::{BlockHash, ScriptBuf, Transaction, Txid}; use core::future::Future; use core::ops::Deref; +use core::pin::pin; use core::sync::atomic::{AtomicBool, Ordering}; use core::task; @@ -970,7 +971,7 @@ where &self, output_descriptors: Vec<SpendableOutputDescriptor>, channel_id: Option<ChannelId>, exclude_static_outputs: bool, delay_until_height: Option<u32>, ) -> Result<(), ()> { - let mut fut = Box::pin(self.sweeper.track_spendable_outputs( + let mut fut = pin!(self.sweeper.track_spendable_outputs( output_descriptors, channel_id, exclude_static_outputs, @@ -1005,7 +1006,7 @@ where /// /// Wraps [`OutputSweeper::regenerate_and_broadcast_spend_if_necessary`]. pub fn regenerate_and_broadcast_spend_if_necessary(&self) -> Result<(), ()> { - let mut fut = Box::pin(self.sweeper.regenerate_and_broadcast_spend_if_necessary()); + let mut fut = pin!(self.sweeper.regenerate_and_broadcast_spend_if_necessary()); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); match fut.as_mut().poll(&mut ctx) { From 0b4e1b5c58a864a2ca7dda7f3b0737df2ec2b8fa Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 31 Oct 2025 14:37:38 +0000 Subject: [PATCH 003/242] Drop required `Box`ing of `KVStore` `Future`s Now that our MSRV is 1.75, we can return `impl Trait` from trait methods. Here we use this to clean up `KVStore` methods, dropping the `Pin<Box<dyn Future>>` we had to use to have trait methods return a concrete type. Sadly, there are two places where we can't drop a `Box::pin` until we switch to edition 2024.
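For illustration, the shape of this change is roughly as follows. This is a minimal sketch with an invented `Store` trait, not the actual `KVStore` definition:

    use core::future::Future;
    use core::pin::Pin;

    // Before: a trait method returning a future had to name a concrete type,
    // which in practice meant a boxed, type-erased future (one allocation per call).
    trait StoreOld {
        fn read(&self, key: &str) -> Pin<Box<dyn Future<Output = Vec<u8>> + Send + 'static>>;
    }

    // After (MSRV 1.75): return-position `impl Trait` in traits drops the `Box`.
    trait StoreNew {
        fn read(&self, key: &str) -> impl Future<Output = Vec<u8>> + Send + 'static;
    }

    struct MemStore;
    impl StoreNew for MemStore {
        fn read(&self, key: &str) -> impl Future<Output = Vec<u8>> + Send + 'static {
            // Capture by value so the returned future doesn't borrow `key`.
            let bytes = key.as_bytes().to_vec();
            async move { bytes }
        }
    }

Writing the method as `fn read(...) -> impl Future<...>` rather than `async fn read(...)` keeps the `Send` bound and the `'static` capture set explicit, which appears to be why the clippy `manual-async-fn` lint is allowed in `ci/check-lint.sh` below.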
--- ci/check-lint.sh | 3 +- lightning-background-processor/src/lib.rs | 43 +++++++- lightning-persister/src/fs_store.rs | 113 ++++++++++------------ lightning/src/util/persist.rs | 71 ++++++++------ lightning/src/util/sweep.rs | 27 +++++- lightning/src/util/test_utils.rs | 18 ++-- 6 files changed, 165 insertions(+), 110 deletions(-) diff --git a/ci/check-lint.sh b/ci/check-lint.sh index 39c10692310..c1f1b08a1e1 100755 --- a/ci/check-lint.sh +++ b/ci/check-lint.sh @@ -107,7 +107,8 @@ CLIPPY() { -A clippy::useless_conversion \ -A clippy::manual_repeat_n `# to be removed once we hit MSRV 1.86` \ -A clippy::manual_is_multiple_of `# to be removed once we hit MSRV 1.87` \ - -A clippy::uninlined-format-args + -A clippy::uninlined-format-args \ + -A clippy::manual-async-fn # Not really sure why this is even a warning when there's a Send bound } CLIPPY diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 19333c5823a..bc0d42ac191 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -41,6 +41,8 @@ use lightning::events::ReplayEvent; use lightning::events::{Event, PathFailure}; use lightning::util::ser::Writeable; +#[cfg(not(c_bindings))] +use lightning::io::Error; use lightning::ln::channelmanager::AChannelManager; use lightning::ln::msgs::OnionMessageHandler; use lightning::ln::peer_handler::APeerManager; @@ -51,6 +53,8 @@ use lightning::routing::utxo::UtxoLookup; use lightning::sign::{ ChangeDestinationSource, ChangeDestinationSourceSync, EntropySource, OutputSpender, }; +#[cfg(not(c_bindings))] +use lightning::util::async_poll::MaybeSend; use lightning::util::logger::Logger; use lightning::util::persist::{ KVStore, KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_PERSISTENCE_KEY, @@ -83,7 +87,11 @@ use std::time::Instant; #[cfg(not(feature = "std"))] use alloc::boxed::Box; #[cfg(all(not(c_bindings), not(feature = "std")))] +use alloc::string::String; +#[cfg(all(not(c_bindings), not(feature = "std")))] use alloc::sync::Arc; +#[cfg(all(not(c_bindings), not(feature = "std")))] +use alloc::vec::Vec; /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its @@ -416,6 +424,37 @@ pub const NO_ONION_MESSENGER: Option< >, > = None; +#[cfg(not(c_bindings))] +/// A panicking implementation of [`KVStore`] that is used in [`NO_LIQUIDITY_MANAGER`]. +pub struct DummyKVStore; + +#[cfg(not(c_bindings))] +impl KVStore for DummyKVStore { + fn read( + &self, _: &str, _: &str, _: &str, + ) -> impl core::future::Future<Output = Result<Vec<u8>, Error>> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn write( + &self, _: &str, _: &str, _: &str, _: Vec<u8>, + ) -> impl core::future::Future<Output = Result<(), Error>> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn remove( + &self, _: &str, _: &str, _: &str, _: bool, + ) -> impl core::future::Future<Output = Result<(), Error>> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn list( + &self, _: &str, _: &str, + ) -> impl core::future::Future<Output = Result<Vec<String>, Error>> + MaybeSend + 'static { + async { unimplemented!() } + } +} + /// When initializing a background processor without a liquidity manager, this can be used to avoid /// specifying a concrete `LiquidityManager` type.
#[cfg(not(c_bindings))] @@ -430,8 +469,8 @@ pub const NO_LIQUIDITY_MANAGER: Option< CM = &DynChannelManager, Filter = dyn chain::Filter + Send + Sync, C = &(dyn chain::Filter + Send + Sync), - KVStore = dyn lightning::util::persist::KVStore + Send + Sync, - K = &(dyn lightning::util::persist::KVStore + Send + Sync), + KVStore = DummyKVStore, + K = &DummyKVStore, TimeProvider = dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync, TP = &(dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync), BroadcasterInterface = dyn lightning::chain::chaininterface::BroadcasterInterface diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs index 9b15398d4d1..b2d327f6bc1 100644 --- a/lightning-persister/src/fs_store.rs +++ b/lightning-persister/src/fs_store.rs @@ -14,8 +14,6 @@ use std::sync::{Arc, Mutex, RwLock}; #[cfg(feature = "tokio")] use core::future::Future; #[cfg(feature = "tokio")] -use core::pin::Pin; -#[cfg(feature = "tokio")] use lightning::util::persist::KVStore; #[cfg(target_os = "windows")] @@ -464,93 +462,85 @@ impl FilesystemStoreInner { impl KVStore for FilesystemStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> Pin, lightning::io::Error>> + 'static + Send>> { + ) -> impl Future, lightning::io::Error>> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( + let path = this.get_checked_dest_file_path( primary_namespace, secondary_namespace, Some(key), "read", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; + ); - Box::pin(async move { + async move { + let path = match path { + Ok(path) => path, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || this.read(path)).await.unwrap_or_else(|e| { Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e)) }) - }) + } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> Pin> + 'static + Send>> { + ) -> impl Future> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - Some(key), - "write", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; - - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone()); - Box::pin(async move { + let path = this + .get_checked_dest_file_path(primary_namespace, secondary_namespace, Some(key), "write") + .map(|path| (self.get_new_version_and_lock_ref(path.clone()), path)); + + async move { + let ((inner_lock_ref, version), path) = match path { + Ok(res) => res, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || { this.write_version(inner_lock_ref, path, buf, version) }) .await .unwrap_or_else(|e| Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e))) - }) + } } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> Pin> + 'static + Send>> { + ) -> impl Future> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - Some(key), - "remove", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; - - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone()); - Box::pin(async move { + let path = this + .get_checked_dest_file_path(primary_namespace, secondary_namespace, Some(key), "remove") + .map(|path| 
(self.get_new_version_and_lock_ref(path.clone()), path)); + + async move { + let ((inner_lock_ref, version), path) = match path { + Ok(res) => res, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || { this.remove_version(inner_lock_ref, path, lazy, version) }) .await .unwrap_or_else(|e| Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e))) - }) + } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> Pin, lightning::io::Error>> + 'static + Send>> { + ) -> impl Future, lightning::io::Error>> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - None, - "list", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; + let path = + this.get_checked_dest_file_path(primary_namespace, secondary_namespace, None, "list"); - Box::pin(async move { + async move { + let path = match path { + Ok(path) => path, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || this.list(path)).await.unwrap_or_else(|e| { Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e)) }) - }) + } } } @@ -758,24 +748,24 @@ mod tests { let fs_store = Arc::new(FilesystemStore::new(temp_path)); assert_eq!(fs_store.state_size(), 0); - let async_fs_store: Arc = fs_store.clone(); + let async_fs_store = Arc::clone(&fs_store); let data1 = vec![42u8; 32]; let data2 = vec![43u8; 32]; - let primary_namespace = "testspace"; - let secondary_namespace = "testsubspace"; + let primary = "testspace"; + let secondary = "testsubspace"; let key = "testkey"; // Test writing the same key twice with different data. Execute the asynchronous part out of order to ensure // that eventual consistency works. - let fut1 = async_fs_store.write(primary_namespace, secondary_namespace, key, data1); + let fut1 = KVStore::write(&*async_fs_store, primary, secondary, key, data1); assert_eq!(fs_store.state_size(), 1); - let fut2 = async_fs_store.remove(primary_namespace, secondary_namespace, key, false); + let fut2 = KVStore::remove(&*async_fs_store, primary, secondary, key, false); assert_eq!(fs_store.state_size(), 1); - let fut3 = async_fs_store.write(primary_namespace, secondary_namespace, key, data2.clone()); + let fut3 = KVStore::write(&*async_fs_store, primary, secondary, key, data2.clone()); assert_eq!(fs_store.state_size(), 1); fut3.await.unwrap(); @@ -788,21 +778,18 @@ mod tests { assert_eq!(fs_store.state_size(), 0); // Test list. - let listed_keys = - async_fs_store.list(primary_namespace, secondary_namespace).await.unwrap(); + let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap(); assert_eq!(listed_keys.len(), 1); assert_eq!(listed_keys[0], key); // Test read. We expect to read data2, as the write call was initiated later. - let read_data = - async_fs_store.read(primary_namespace, secondary_namespace, key).await.unwrap(); + let read_data = KVStore::read(&*async_fs_store, primary, secondary, key).await.unwrap(); assert_eq!(data2, &*read_data); // Test remove. 
- async_fs_store.remove(primary_namespace, secondary_namespace, key, false).await.unwrap(); + KVStore::remove(&*async_fs_store, primary, secondary, key, false).await.unwrap(); - let listed_keys = - async_fs_store.list(primary_namespace, secondary_namespace).await.unwrap(); + let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap(); assert_eq!(listed_keys.len(), 0); } diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 3ad9b4270c5..7feb781a57a 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -34,7 +34,7 @@ use crate::chain::transaction::OutPoint; use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::sync::Mutex; -use crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{dummy_waker, MaybeSend, MaybeSync}; use crate::util::logger::Logger; use crate::util::native_async::FutureSpawner; use crate::util::ser::{Readable, ReadableArgs, Writeable}; @@ -216,34 +216,34 @@ where { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.0.read(primary_namespace, secondary_namespace, key); - Box::pin(async move { res }) + async move { res } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.0.write(primary_namespace, secondary_namespace, key, buf); - Box::pin(async move { res }) + async move { res } } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.0.remove(primary_namespace, secondary_namespace, key, lazy); - Box::pin(async move { res }) + async move { res } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.0.list(primary_namespace, secondary_namespace); - Box::pin(async move { res }) + async move { res } } } @@ -283,16 +283,18 @@ pub trait KVStore { /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error>; + ) -> impl Future, io::Error>> + 'static + MaybeSend; /// Persists the given data under the given `key`. /// - /// The order of multiple writes to the same key needs to be retained while persisting - /// asynchronously. In other words, if two writes to the same key occur, the state (as seen by - /// [`Self::read`]) must either see the first write then the second, or only ever the second, - /// no matter when the futures complete (and must always contain the second write once the - /// second future completes). The state should never contain the first write after the second - /// write's future completes, nor should it contain the second write, then contain the first - /// write at any point thereafter (even if the second write's future hasn't yet completed). + /// Note that this is *not* an `async fn`. Rather, the order of multiple writes to the same key + /// (as defined by the order of the synchronous function calls) needs to be retained while + /// persisting asynchronously. 
In other words, if two writes to the same key occur, the state + /// (as seen by [`Self::read`]) must either see the first write then the second, or only ever + /// the second, no matter when the futures complete (and must always contain the second write + /// once the second future completes). The state should never contain the first write after the + /// second write's future completes, nor should it contain the second write, then contain the + /// first write at any point thereafter (even if the second write's future hasn't yet + /// completed). /// /// One way to ensure this requirement is met is by assigning a version number to each write /// before returning the future, and then during asynchronous execution, ensuring that the @@ -303,7 +305,7 @@ pub trait KVStore { /// Will create the given `primary_namespace` and `secondary_namespace` if not already present in the store. fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>, - ) -> AsyncResult<'static, (), io::Error>; + ) -> impl Future<Output = Result<(), io::Error>> + 'static + MaybeSend; /// Removes any data that had previously been persisted under the given `key`. /// /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily /// remove the given `key` and data on a best-effort basis, e.g., to allow and benefit from /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to /// [`KVStoreSync::list`] might include the removed key until the changes are actually persisted. /// + /// Note that similar to [`Self::write`] this is *not* an `async fn`, but rather a sync fn + /// which defines the order of writes to a given key, but which may complete its operation + /// asynchronously. + /// /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could /// potentially get lost on crash after the method returns. Therefore, this flag should only be /// to the same key which occur before a removal completes must cancel/overwrite the pending /// removal. /// + /// /// Returns successfully if no data will be stored for the given `primary_namespace`, /// `secondary_namespace`, and `key`, independently of whether it was present before its /// invocation or not. fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error>; + ) -> impl Future<Output = Result<(), io::Error>> + 'static + MaybeSend; /// Returns a list of keys that are stored under the given `secondary_namespace` in /// `primary_namespace`. /// /// Returns the keys in an arbitrary order, so users requiring a particular order need to sort the /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown.
fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error>; + ) -> impl Future, io::Error>> + 'static + MaybeSend; } /// Provides additional interface methods that are required for [`KVStore`]-to-[`KVStore`] @@ -1005,6 +1012,9 @@ where } } +trait MaybeSendableFuture: Future> + MaybeSend {} +impl> + MaybeSend> MaybeSendableFuture for F {} + impl MonitorUpdatingPersisterAsyncInner where @@ -1178,9 +1188,9 @@ where Ok(()) } - fn persist_new_channel( - &self, monitor_name: MonitorName, monitor: &ChannelMonitor, - ) -> impl Future> { + fn persist_new_channel<'a, ChannelSigner: EcdsaChannelSigner>( + &'a self, monitor_name: MonitorName, monitor: &'a ChannelMonitor, + ) -> Pin> + 'static>> { // Determine the proper key for this monitor let monitor_key = monitor_name.to_string(); // Serialize and write the new monitor @@ -1199,7 +1209,10 @@ where // completion of the write. This ensures monitor persistence ordering is preserved. let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; - self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes) + // There's no real reason why this needs to be boxed, but dropping it rams into the "hidden + // type for impl... captures lifetime that does not appear in bounds" issue. This can + // trivially be dropped once we upgrade to edition 2024/MSRV 1.85. + Box::pin(self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes)) } fn update_persisted_channel<'a, ChannelSigner: EcdsaChannelSigner + 'a>( @@ -1225,12 +1238,10 @@ where // write method, allowing it to do its queueing immediately, and then return a // future for the completion of the write. This ensures monitor persistence // ordering is preserved. - res_a = Some(self.kv_store.write( - primary, - &monitor_key, - update_name.as_str(), - update.encode(), - )); + let encoded = update.encode(); + res_a = Some(async move { + self.kv_store.write(primary, &monitor_key, update_name.as_str(), encoded).await + }); } else { // We could write this update, but it meets criteria of our design that calls for a full monitor write. // Note that this is NOT an async function, but rather calls the *sync* KVStore diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index a3ded6f32b8..bf048efdae1 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -35,11 +35,11 @@ use bitcoin::{BlockHash, ScriptBuf, Transaction, Txid}; use core::future::Future; use core::ops::Deref; -use core::pin::pin; +use core::pin::{pin, Pin}; use core::sync::atomic::{AtomicBool, Ordering}; use core::task; -use super::async_poll::{dummy_waker, AsyncResult}; +use super::async_poll::dummy_waker; /// The number of blocks we wait before we prune the tracked spendable outputs. 
pub const PRUNE_DELAY_BLOCKS: u32 = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY; @@ -610,15 +610,32 @@ where sweeper_state.dirty = true; } - fn persist_state<'a>(&self, sweeper_state: &SweeperState) -> AsyncResult<'a, (), io::Error> { + #[cfg(feature = "std")] + fn persist_state<'a>( + &'a self, sweeper_state: &SweeperState, + ) -> Pin> + Send + 'static>> { let encoded = sweeper_state.encode(); - self.kv_store.write( + Box::pin(self.kv_store.write( OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_KEY, encoded, - ) + )) + } + + #[cfg(not(feature = "std"))] + fn persist_state<'a>( + &'a self, sweeper_state: &SweeperState, + ) -> Pin> + 'static>> { + let encoded = sweeper_state.encode(); + + Box::pin(self.kv_store.write( + OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_KEY, + encoded, + )) } /// Updates the sweeper state by executing the given callback. Persists the state afterwards if it is marked dirty, diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index ad8ea224205..b4db17bee20 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -50,7 +50,7 @@ use crate::sign::{self, ReceiveAuthKey}; use crate::sign::{ChannelSigner, PeerStorageKey}; use crate::sync::RwLock; use crate::types::features::{ChannelFeatures, InitFeatures, NodeFeatures}; -use crate::util::async_poll::AsyncResult; +use crate::util::async_poll::MaybeSend; use crate::util::config::UserConfig; use crate::util::dyn_signer::{ DynKeysInterface, DynKeysInterfaceTrait, DynPhantomKeysInterface, DynSigner, @@ -1012,13 +1012,13 @@ impl TestStore { impl KVStore for TestStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.read_internal(&primary_namespace, &secondary_namespace, &key); - Box::pin(async move { res }) + async move { res } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let path = format!("{primary_namespace}/{secondary_namespace}/{key}"); let future = Arc::new(Mutex::new((None, None))); @@ -1027,19 +1027,19 @@ impl KVStore for TestStore { let new_id = pending_writes.last().map(|(id, _, _)| id + 1).unwrap_or(0); pending_writes.push((new_id, Arc::clone(&future), buf)); - Box::pin(OneShotChannel(future)) + OneShotChannel(future) } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.remove_internal(&primary_namespace, &secondary_namespace, &key, lazy); - Box::pin(async move { res }) + async move { res } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.list_internal(primary_namespace, secondary_namespace); - Box::pin(async move { res }) + async move { res } } } From a435228ce6a9a668a0cf065012d42e7abd6f83b8 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 18:11:18 +0000 Subject: [PATCH 004/242] Drop required `Box`ing of `lightning-block-sync` `Future`s Now that our MSRV is 1.75, we can return `impl Trait` from trait methods. 
Here we use this to clean up `lightning-block-sync` trait methods, dropping the `Pin<Box<dyn Future>>` we had to use to have trait methods return a concrete type. --- lightning-block-sync/src/gossip.rs | 49 +++++++++++++++----------- lightning-block-sync/src/lib.rs | 18 ++++------ lightning-block-sync/src/poll.rs | 30 +++++++--------- lightning-block-sync/src/rest.rs | 37 ++++++++++--------- lightning-block-sync/src/rpc.rs | 35 ++++++++++-------- lightning-block-sync/src/test_utils.rs | 24 +++++++------ 6 files changed, 101 insertions(+), 92 deletions(-) diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs index 0fe221b9231..596098350c7 100644 --- a/lightning-block-sync/src/gossip.rs +++ b/lightning-block-sync/src/gossip.rs @@ -2,7 +2,7 @@ //! current UTXO set. This module defines an implementation of the LDK API required to do so //! against a [`BlockSource`] which implements a few additional methods for accessing the UTXO set. -use crate::{AsyncBlockSourceResult, BlockData, BlockSource, BlockSourceError}; +use crate::{BlockData, BlockSource, BlockSourceError, BlockSourceResult}; use bitcoin::block::Block; use bitcoin::constants::ChainHash; @@ -18,7 +18,7 @@ use lightning::util::native_async::FutureSpawner; use std::collections::VecDeque; use std::future::Future; use std::ops::Deref; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::{Arc, Mutex}; use std::task::Poll; @@ -35,11 +35,13 @@ pub trait UtxoSource: BlockSource + 'static { /// for gossip validation. fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash>; + ) -> impl Future<Output = BlockSourceResult<BlockHash>> + Send + 'a; /// Returns true if the given output has *not* been spent, i.e. is a member of the current UTXO /// set. - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool>; + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future<Output = BlockSourceResult<bool>> + Send + 'a; } #[cfg(feature = "tokio")] @@ -55,34 +57,37 @@ impl FutureSpawner for TokioSpawner { /// A trivial future which joins two other futures and polls them at the same time, returning only /// once both complete.
pub(crate) struct Joiner< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, + 'a, + A: Future), BlockSourceError>>, + B: Future>, > { - pub a: A, - pub b: B, + pub a: Pin<&'a mut A>, + pub b: Pin<&'a mut B>, a_res: Option<(BlockHash, Option)>, b_res: Option, } impl< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, - > Joiner + 'a, + A: Future), BlockSourceError>>, + B: Future>, + > Joiner<'a, A, B> { - fn new(a: A, b: B) -> Self { + fn new(a: Pin<&'a mut A>, b: Pin<&'a mut B>) -> Self { Self { a, b, a_res: None, b_res: None } } } impl< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, - > Future for Joiner + 'a, + A: Future), BlockSourceError>>, + B: Future>, + > Future for Joiner<'a, A, B> { type Output = Result<((BlockHash, Option), BlockHash), BlockSourceError>; fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll { if self.a_res.is_none() { - match Pin::new(&mut self.a).poll(ctx) { + match self.a.as_mut().poll(ctx) { Poll::Ready(res) => { if let Ok(ok) = res { self.a_res = Some(ok); @@ -94,7 +99,7 @@ impl< } } if self.b_res.is_none() { - match Pin::new(&mut self.b).poll(ctx) { + match self.b.as_mut().poll(ctx) { Poll::Ready(res) => { if let Ok(ok) = res { self.b_res = Some(ok); @@ -200,10 +205,12 @@ where } } - let ((_, tip_height_opt), block_hash) = - Joiner::new(source.get_best_block(), source.get_block_hash_by_height(block_height)) - .await - .map_err(|_| UtxoLookupError::UnknownTx)?; + let ((_, tip_height_opt), block_hash) = Joiner::new( + pin!(source.get_best_block()), + pin!(source.get_block_hash_by_height(block_height)), + ) + .await + .map_err(|_| UtxoLookupError::UnknownTx)?; if let Some(tip_height) = tip_height_opt { // If the block doesn't yet have five confirmations, error out. // diff --git a/lightning-block-sync/src/lib.rs b/lightning-block-sync/src/lib.rs index 8656ba6ec6b..02593047658 100644 --- a/lightning-block-sync/src/lib.rs +++ b/lightning-block-sync/src/lib.rs @@ -53,7 +53,6 @@ use lightning::chain::{BestBlock, Listen}; use std::future::Future; use std::ops::Deref; -use std::pin::Pin; /// Abstract type for retrieving block headers and data. pub trait BlockSource: Sync + Send { @@ -65,12 +64,13 @@ pub trait BlockSource: Sync + Send { /// when `height_hint` is `None`. fn get_header<'a>( &'a self, header_hash: &'a BlockHash, height_hint: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData>; + ) -> impl Future> + Send + 'a; /// Returns the block for a given hash. A headers-only block source should return a `Transient` /// error. - fn get_block<'a>(&'a self, header_hash: &'a BlockHash) - -> AsyncBlockSourceResult<'a, BlockData>; + fn get_block<'a>( + &'a self, header_hash: &'a BlockHash, + ) -> impl Future> + Send + 'a; /// Returns the hash of the best block and, optionally, its height. /// @@ -78,18 +78,14 @@ pub trait BlockSource: Sync + Send { /// to allow for a more efficient lookup. /// /// [`get_header`]: Self::get_header - fn get_best_block(&self) -> AsyncBlockSourceResult<'_, (BlockHash, Option)>; + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a; } /// Result type for `BlockSource` requests. pub type BlockSourceResult = Result; -// TODO: Replace with BlockSourceResult once `async` trait functions are supported. For details, -// see: https://areweasyncyet.rs. -/// Result type for asynchronous `BlockSource` requests. -pub type AsyncBlockSourceResult<'a, T> = - Pin> + 'a + Send>>; - /// Error type for `BlockSource` requests. 
/// /// Transient errors may be resolved when re-polling, but no attempt will be made to re-poll on diff --git a/lightning-block-sync/src/poll.rs b/lightning-block-sync/src/poll.rs index 843cc961899..13e0403c3b6 100644 --- a/lightning-block-sync/src/poll.rs +++ b/lightning-block-sync/src/poll.rs @@ -1,14 +1,12 @@ //! Adapters that make one or more [`BlockSource`]s simpler to poll for new chain tip transitions. -use crate::{ - AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, BlockSourceError, - BlockSourceResult, -}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use lightning::chain::BestBlock; +use std::future::Future; use std::ops::Deref; /// The `Poll` trait defines behavior for polling block sources for a chain tip and retrieving @@ -22,17 +20,17 @@ pub trait Poll { /// Returns a chain tip in terms of its relationship to the provided chain tip. fn poll_chain_tip<'a>( &'a self, best_known_chain_tip: ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ChainTip>; + ) -> impl Future> + Send + 'a; /// Returns the header that preceded the given header in the chain. fn look_up_previous_header<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlockHeader>; + ) -> impl Future> + Send + 'a; /// Returns the block associated with the given header. fn fetch_block<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlock>; + ) -> impl Future> + Send + 'a; } /// A chain tip relative to another chain tip in terms of block hash and chainwork. @@ -217,8 +215,8 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll { fn poll_chain_tip<'a>( &'a self, best_known_chain_tip: ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ChainTip> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let (block_hash, height) = self.block_source.get_best_block().await?; if block_hash == best_known_chain_tip.header.block_hash() { return Ok(ChainTip::Common); @@ -231,13 +229,13 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll } else { Ok(ChainTip::Worse(chain_tip)) } - }) + } } fn look_up_previous_header<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlockHeader> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { if header.height == 0 { return Err(BlockSourceError::persistent("genesis block reached")); } @@ -252,15 +250,13 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll header.check_builds_on(&previous_header, self.network)?; Ok(previous_header) - }) + } } fn fetch_block<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlock> { - Box::pin(async move { - self.block_source.get_block(&header.block_hash).await?.validate(header.block_hash) - }) + ) -> impl Future> + Send + 'a { + async move { self.block_source.get_block(&header.block_hash).await?.validate(header.block_hash) } } } diff --git a/lightning-block-sync/src/rest.rs b/lightning-block-sync/src/rest.rs index 1f79ab4a0b0..619981bb4d0 100644 --- a/lightning-block-sync/src/rest.rs +++ b/lightning-block-sync/src/rest.rs @@ -4,13 +4,14 @@ use crate::convert::GetUtxosResponse; use crate::gossip::UtxoSource; use crate::http::{BinaryResponse, HttpClient, HttpEndpoint, JsonResponse}; -use crate::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource}; +use crate::{BlockData, BlockHeaderData, 
BlockSource, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::OutPoint; use std::convert::TryFrom; use std::convert::TryInto; +use std::future::Future; use std::sync::Mutex; /// A simple REST client for requesting resources using HTTP `GET`. @@ -49,49 +50,51 @@ impl RestClient { impl BlockSource for RestClient { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("headers/1/{}.json", header_hash.to_string()); Ok(self.request_resource::(&resource_path).await?) - }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("block/{}.bin", header_hash.to_string()); Ok(BlockData::FullBlock( self.request_resource::(&resource_path).await?, )) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin( - async move { Ok(self.request_resource::("chaininfo.json").await?) }, - ) + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { Ok(self.request_resource::("chaininfo.json").await?) } } } impl UtxoSource for RestClient { fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("blockhashbyheight/{}.bin", block_height); Ok(self.request_resource::(&resource_path).await?) - }) + } } - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool> { - Box::pin(async move { + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("getutxos/{}-{}.json", outpoint.txid.to_string(), outpoint.vout); let utxo_result = self.request_resource::(&resource_path).await?; Ok(utxo_result.hit_bitmap_nonempty) - }) + } } } diff --git a/lightning-block-sync/src/rpc.rs b/lightning-block-sync/src/rpc.rs index 3df50a2267b..d851ba2ccf0 100644 --- a/lightning-block-sync/src/rpc.rs +++ b/lightning-block-sync/src/rpc.rs @@ -3,7 +3,7 @@ use crate::gossip::UtxoSource; use crate::http::{HttpClient, HttpEndpoint, HttpError, JsonResponse}; -use crate::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::OutPoint; @@ -16,6 +16,7 @@ use std::convert::TryFrom; use std::convert::TryInto; use std::error::Error; use std::fmt; +use std::future::Future; use std::sync::atomic::{AtomicUsize, Ordering}; /// An error returned by the RPC server. @@ -135,47 +136,51 @@ impl RpcClient { impl BlockSource for RpcClient { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let header_hash = serde_json::json!(header_hash.to_string()); Ok(self.call_method("getblockheader", &[header_hash]).await?) 
- }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let header_hash = serde_json::json!(header_hash.to_string()); let verbosity = serde_json::json!(0); Ok(BlockData::FullBlock(self.call_method("getblock", &[header_hash, verbosity]).await?)) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin(async move { Ok(self.call_method("getblockchaininfo", &[]).await?) }) + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { Ok(self.call_method("getblockchaininfo", &[]).await?) } } } impl UtxoSource for RpcClient { fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let height_param = serde_json::json!(block_height); Ok(self.call_method("getblockhash", &[height_param]).await?) - }) + } } - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool> { - Box::pin(async move { + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a { + async move { let txid_param = serde_json::json!(outpoint.txid.to_string()); let vout_param = serde_json::json!(outpoint.vout); let include_mempool = serde_json::json!(false); let utxo_opt: serde_json::Value = self.call_method("gettxout", &[txid_param, vout_param, include_mempool]).await?; Ok(!utxo_opt.is_null()) - }) + } } } diff --git a/lightning-block-sync/src/test_utils.rs b/lightning-block-sync/src/test_utils.rs index d307c4506eb..40788e4d08c 100644 --- a/lightning-block-sync/src/test_utils.rs +++ b/lightning-block-sync/src/test_utils.rs @@ -1,7 +1,6 @@ use crate::poll::{Validate, ValidatedBlockHeader}; use crate::{ - AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, BlockSourceError, - UnboundedCache, + BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceResult, UnboundedCache, }; use bitcoin::block::{Block, Header, Version}; @@ -17,6 +16,7 @@ use lightning::chain::BestBlock; use std::cell::RefCell; use std::collections::VecDeque; +use std::future::Future; #[derive(Default)] pub struct Blockchain { @@ -141,8 +141,8 @@ impl Blockchain { impl BlockSource for Blockchain { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height_hint: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { if self.without_headers { return Err(BlockSourceError::persistent("header not found")); } @@ -158,13 +158,13 @@ impl BlockSource for Blockchain { } } Err(BlockSourceError::transient("header not found")) - }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { for (height, block) in self.blocks.iter().enumerate() { if block.header.block_hash() == *header_hash { if let Some(without_blocks) = &self.without_blocks { @@ -181,11 +181,13 @@ impl BlockSource for Blockchain { } } Err(BlockSourceError::transient("block not found")) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin(async move { + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { match self.blocks.last() { None => Err(BlockSourceError::transient("empty chain")), Some(block) => { 
@@ -193,7 +195,7 @@ impl BlockSource for Blockchain { Ok((block.block_hash(), Some(height))) }, } - }) + } } } From 3da5f583e503d742a185ebf7b0207b2f6cd6c0d6 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 18:24:51 +0000 Subject: [PATCH 005/242] Drop required `Box`ing of `lightning` trait `Future`s Now that our MSRV is 1.75, we can return `impl Trait` from trait methods. Here we use this to clean up `lightning` crate trait methods, dropping the `Pin>`/`AsyncResult` we had to use to have trait methods return a concrete type. --- lightning/src/events/bump_transaction/mod.rs | 31 ++++++++++----- lightning/src/events/bump_transaction/sync.rs | 38 +++++++++++-------- lightning/src/sign/mod.rs | 14 +++++-- lightning/src/util/async_poll.rs | 12 ------ 4 files changed, 54 insertions(+), 41 deletions(-) diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 3d9beb82c07..e141d9b8abc 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -14,6 +14,7 @@ pub mod sync; use alloc::collections::BTreeMap; +use core::future::Future; use core::ops::Deref; use crate::chain::chaininterface::{ @@ -36,7 +37,7 @@ use crate::sign::{ ChannelDerivationParameters, HTLCDescriptor, SignerProvider, P2WPKH_WITNESS_WEIGHT, }; use crate::sync::Mutex; -use crate::util::async_poll::{AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{MaybeSend, MaybeSync}; use crate::util::logger::Logger; use bitcoin::amount::Amount; @@ -394,13 +395,15 @@ pub trait CoinSelectionSource { fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()>; + ) -> impl Future> + MaybeSend + 'a; /// Signs and provides the full witness for all inputs within the transaction known to the /// trait (i.e., any provided via [`CoinSelectionSource::select_confirmed_utxos`]). /// /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the /// unsigned transaction and then sign it with your wallet. - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a; } /// An alternative to [`CoinSelectionSource`] that can be implemented and used along [`Wallet`] to @@ -412,17 +415,23 @@ pub trait CoinSelectionSource { // Note that updates to documentation on this trait should be copied to the synchronous version. pub trait WalletSource { /// Returns all UTXOs, with at least 1 confirmation each, that are available to spend. - fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()>; + fn list_confirmed_utxos<'a>( + &'a self, + ) -> impl Future, ()>> + MaybeSend + 'a; /// Returns a script to use for change above dust resulting from a successful coin selection /// attempt. - fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()>; + fn get_change_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a; /// Signs and provides the full [`TxIn::script_sig`] and [`TxIn::witness`] for all inputs within /// the transaction known to the wallet (i.e., any provided via /// [`WalletSource::list_confirmed_utxos`]). /// /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the /// unsigned transaction and then sign it with your wallet. 
- fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a; } /// A wrapper over [`WalletSource`] that implements [`CoinSelectionSource`] by preferring UTXOs @@ -617,8 +626,8 @@ where fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()> { - Box::pin(async move { + ) -> impl Future> + MaybeSend + 'a { + async move { let utxos = self.source.list_confirmed_utxos().await?; // TODO: Use fee estimation utils when we upgrade to bitcoin v0.30.0. let total_output_size: u64 = must_pay_to @@ -665,10 +674,12 @@ where } } Err(()) - }) + } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { self.source.sign_psbt(psbt) } } diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index cbc686ed8fe..1328c2c1b3a 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -18,7 +18,7 @@ use crate::chain::chaininterface::BroadcasterInterface; use crate::chain::ClaimId; use crate::prelude::*; use crate::sign::SignerProvider; -use crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{dummy_waker, MaybeSend, MaybeSync}; use crate::util::logger::Logger; use bitcoin::{Psbt, ScriptBuf, Transaction, TxOut}; @@ -72,19 +72,25 @@ impl WalletSource for WalletSourceSyncWrapper where T::Target: WalletSourceSync, { - fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()> { + fn list_confirmed_utxos<'a>( + &'a self, + ) -> impl Future, ()>> + MaybeSend + 'a { let utxos = self.0.list_confirmed_utxos(); - Box::pin(async move { utxos }) + async move { utxos } } - fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()> { + fn get_change_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a { let script = self.0.get_change_script(); - Box::pin(async move { script }) + async move { script } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { let signed_psbt = self.0.sign_psbt(psbt); - Box::pin(async move { signed_psbt }) + async move { signed_psbt } } } @@ -123,7 +129,7 @@ where &self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &[TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, ) -> Result { - let mut fut = self.wallet.select_confirmed_utxos( + let fut = self.wallet.select_confirmed_utxos( claim_id, must_spend, must_pay_to, @@ -132,7 +138,7 @@ where ); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match fut.as_mut().poll(&mut ctx) { + match pin!(fut).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { unreachable!( @@ -143,10 +149,10 @@ where } fn sign_psbt(&self, psbt: Psbt) -> Result { - let mut fut = self.wallet.sign_psbt(psbt); + let fut = self.wallet.sign_psbt(psbt); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match fut.as_mut().poll(&mut ctx) { + match pin!(fut).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { unreachable!("Wallet::sign_psbt should not be pending in a sync context"); @@ -234,7 +240,7 @@ where 
fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()> { + ) -> impl Future> + MaybeSend + 'a { let coins = self.0.select_confirmed_utxos( claim_id, must_spend, @@ -242,12 +248,14 @@ where target_feerate_sat_per_1000_weight, max_tx_weight, ); - Box::pin(async move { coins }) + async move { coins } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { let psbt = self.0.sign_psbt(psbt); - Box::pin(async move { psbt }) + async move { psbt } } } diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 1d771d22783..6d0d5bf405a 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -58,7 +58,7 @@ use crate::ln::script::ShutdownScript; use crate::offers::invoice::UnsignedBolt12Invoice; use crate::types::features::ChannelTypeFeatures; use crate::types::payment::PaymentPreimage; -use crate::util::async_poll::AsyncResult; +use crate::util::async_poll::MaybeSend; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::transaction_utils; @@ -68,7 +68,9 @@ use crate::sign::ecdsa::EcdsaChannelSigner; #[cfg(taproot)] use crate::sign::taproot::TaprootChannelSigner; use crate::util::atomic_counter::AtomicCounter; + use core::convert::TryInto; +use core::future::Future; use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; #[cfg(taproot)] @@ -1066,7 +1068,9 @@ pub trait ChangeDestinationSource { /// /// This method should return a different value each time it is called, to avoid linking /// on-chain funds controlled to the same user. - fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()>; + fn get_change_destination_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a; } /// A synchronous helper trait that describes an on-chain wallet capable of returning a (change) destination script. @@ -1101,9 +1105,11 @@ impl ChangeDestinationSource for ChangeDestinationSourceSyncWrapper where T::Target: ChangeDestinationSourceSync, { - fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()> { + fn get_change_destination_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a { let script = self.0.get_change_destination_script(); - Box::pin(async move { script }) + async move { script } } } diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index eefa40d1055..9c2ca4c247f 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -9,7 +9,6 @@ //! Some utilities to make working with the standard library's [`Future`]s easier -use alloc::boxed::Box; use alloc::vec::Vec; use core::future::Future; use core::marker::Unpin; @@ -92,17 +91,6 @@ pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } } -#[cfg(feature = "std")] -/// A type alias for a future that returns a result of type `T` or error `E`. -/// -/// This is not exported to bindings users as async is only supported in Rust. -pub type AsyncResult<'a, T, E> = Pin> + 'a + Send>>; -#[cfg(not(feature = "std"))] -/// A type alias for a future that returns a result of type `T` or error `E`. -/// -/// This is not exported to bindings users as async is only supported in Rust. 
-pub type AsyncResult<'a, T, E> = Pin<Box<dyn Future<Output = Result<T, E>> + 'a>>; - /// Marker trait to optionally implement `Sync` under std. /// /// This is not exported to bindings users as async is only supported in Rust. From b1f1ee2a1d36f611c187b21b7a0dbbb2efb52036 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 14:41:09 +0000 Subject: [PATCH 006/242] Drop `Box`ing of iterators during BOLT 11 invoice serialization Now that we have an MSRV that supports returning `impl Trait` in trait methods, we can use it to avoid the `Box` we had spewed all over our BOLT 11 invoice serialization. --- lightning-invoice/src/lib.rs | 5 +- lightning-invoice/src/ser.rs | 136 ++++++++++++----------- 2 files changed, 71 insertions(+), 70 deletions(-) diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index 47f929377de..60d413cf76a 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -40,7 +40,6 @@ use bitcoin::secp256k1::ecdsa::RecoverableSignature; use bitcoin::secp256k1::PublicKey; use bitcoin::secp256k1::{Message, Secp256k1}; -use alloc::boxed::Box; use alloc::string; use core::cmp::Ordering; use core::fmt::{self, Display, Formatter}; @@ -1081,8 +1080,8 @@ macro_rules! find_all_extract { #[allow(missing_docs)] impl RawBolt11Invoice { /// Hash the HRP (as bytes) and signatureless data part (as Fe32 iterator) - fn hash_from_parts<'s>( - hrp_bytes: &[u8], data_without_signature: Box<dyn Iterator<Item = Fe32> + 's>, + fn hash_from_parts<'s, I: Iterator<Item = Fe32> + 's>( + hrp_bytes: &[u8], data_without_signature: I, ) -> [u8; 32] { use crate::bech32::Fe32IterExt; use bitcoin::hashes::HashEngine; diff --git a/lightning-invoice/src/ser.rs b/lightning-invoice/src/ser.rs index 5c93fa84ae0..853accdd3ca 100644 --- a/lightning-invoice/src/ser.rs +++ b/lightning-invoice/src/ser.rs @@ -1,4 +1,3 @@ -use alloc::boxed::Box; use core::fmt; use core::fmt::{Display, Formatter}; use core::{array, iter}; @@ -13,14 +12,28 @@ use super::{ SignedRawBolt11Invoice, TaggedField, }; +macro_rules! define_iterator_enum { + ($name: ident, $($n: ident),*) => { + enum $name<$($n: Iterator<Item = Fe32>,)*> { + $($n($n),)* + } + impl<$($n: Iterator<Item = Fe32>,)*> Iterator for $name<$($n,)*> { + type Item = Fe32; + fn next(&mut self) -> Option<Fe32> { + match self { + $(Self::$n(iter) => iter.next(),)* + } + } + } + } +} + /// Objects that can be encoded to base32 (bech32). /// -/// Private to this crate to avoid polluting the API. +/// Private to this crate (except in fuzzing) to avoid polluting the API. pub trait Base32Iterable { - /// apoelstra: In future we want to replace this Box with an explicit - /// associated type, to avoid the allocation. But we cannot do this until - /// Rust 1.65 and GATs since the iterator may contain a reference to self. - fn fe_iter<'s>(&'s self) -> Box<dyn Iterator<Item = Fe32> + 's>; + /// Serialize this object, returning an iterator over bech32 field elements. + fn fe_iter<'s>(&'s self) -> impl Iterator<Item = Fe32> + 's; } /// Interface to calculate the length of the base32 representation before actually serializing @@ -32,7 +45,7 @@ pub(crate) trait Base32Len: Base32Iterable { // Base32Iterable & Base32Len implementations are here, because the traits are in this module.
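// As an illustration (not part of the patch itself), an invocation like
// `define_iterator_enum!(TwoIters, A, B)` of the macro above would expand to
// roughly the following: an enum over two concrete iterator types that is
// itself an `Fe32` iterator, letting a function return either branch of a
// `match` without reaching for `Box<dyn Iterator>`:
//
// enum TwoIters<A: Iterator<Item = Fe32>, B: Iterator<Item = Fe32>> {
//     A(A),
//     B(B),
// }
// impl<A: Iterator<Item = Fe32>, B: Iterator<Item = Fe32>> Iterator for TwoIters<A, B> {
//     type Item = Fe32;
//     fn next(&mut self) -> Option<Fe32> {
//         match self {
//             Self::A(iter) => iter.next(),
//             Self::B(iter) => iter.next(),
//         }
//     }
// }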
impl Base32Iterable for [u8; N] { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { self[..].fe_iter() } } @@ -45,8 +58,8 @@ impl Base32Len for [u8; N] { } impl Base32Iterable for [u8] { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.iter().copied().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.iter().copied().bytes_to_fes() } } @@ -58,8 +71,8 @@ impl Base32Len for [u8] { } impl Base32Iterable for Vec { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.iter().copied().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.iter().copied().bytes_to_fes() } } @@ -71,8 +84,8 @@ impl Base32Len for Vec { } impl Base32Iterable for PaymentSecret { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0[..].fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0[..].fe_iter() } } @@ -88,7 +101,7 @@ impl Base32Iterable for Bolt11InvoiceFeatures { /// starting from the rightmost bit, /// and taking the resulting 5-bit values in reverse (left-to-right), /// with the leading 0's skipped. - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { // Fe32 conversion cannot be used, because this packs from right, right-to-left let mut input_iter = self.le_flags().iter(); // Carry bits, 0..7 bits @@ -126,7 +139,7 @@ impl Base32Iterable for Bolt11InvoiceFeatures { output.push(Fe32::try_from(next_out8 & 31u8).expect("<32")) } // Take result in reverse order, and skip leading 0s - Box::new(output.into_iter().rev().skip_while(|e| *e == Fe32::Q)) + output.into_iter().rev().skip_while(|e| *e == Fe32::Q) } } @@ -241,36 +254,35 @@ fn encoded_int_be_base32_size(int: u64) -> usize { } impl Base32Iterable for RawDataPart { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let ts_iter = self.timestamp.fe_iter(); let fields_iter = self.tagged_fields.iter().map(RawTaggedField::fe_iter).flatten(); - Box::new(ts_iter.chain(fields_iter)) + ts_iter.chain(fields_iter) } } impl Base32Iterable for PositiveTimestamp { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let fes = encode_int_be_base32(self.as_unix_timestamp()); debug_assert!(fes.len() <= 7, "Invalid timestamp length"); let to_pad = 7 - fes.len(); - Box::new(core::iter::repeat(Fe32::Q).take(to_pad).chain(fes)) + core::iter::repeat(Fe32::Q).take(to_pad).chain(fes) } } impl Base32Iterable for RawTaggedField { - fn fe_iter<'s>(&'s self) -> Box + 's> { - // Annoyingly, when we move to explicit types, we will need an - // explicit enum holding the two iterator variants. 
+ fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + define_iterator_enum!(TwoIters, A, B); match *self { - RawTaggedField::UnknownSemantics(ref content) => Box::new(content.iter().copied()), - RawTaggedField::KnownSemantics(ref tagged_field) => tagged_field.fe_iter(), + RawTaggedField::UnknownSemantics(ref content) => TwoIters::A(content.iter().copied()), + RawTaggedField::KnownSemantics(ref tagged_field) => TwoIters::B(tagged_field.fe_iter()), } } } impl Base32Iterable for Sha256 { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0[..].fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0[..].fe_iter() } } @@ -281,8 +293,8 @@ impl Base32Len for Sha256 { } impl Base32Iterable for Description { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0 .0.as_bytes().fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0 .0.as_bytes().fe_iter() } } @@ -293,8 +305,8 @@ impl Base32Len for Description { } impl Base32Iterable for PayeePubKey { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.serialize().into_iter().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.serialize().into_iter().bytes_to_fes() } } @@ -305,8 +317,8 @@ impl Base32Len for PayeePubKey { } impl Base32Iterable for ExpiryTime { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(encode_int_be_base32(self.as_seconds())) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + encode_int_be_base32(self.as_seconds()) } } @@ -317,8 +329,8 @@ impl Base32Len for ExpiryTime { } impl Base32Iterable for MinFinalCltvExpiryDelta { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(encode_int_be_base32(self.0)) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + encode_int_be_base32(self.0) } } @@ -329,8 +341,8 @@ impl Base32Len for MinFinalCltvExpiryDelta { } impl Base32Iterable for Fallback { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(match *self { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + match *self { Fallback::SegWitProgram { version: v, program: ref p } => { let v = Fe32::try_from(v.to_num()).expect("valid version"); core::iter::once(v).chain(p[..].fe_iter()) @@ -343,7 +355,7 @@ impl Base32Iterable for Fallback { // 18 'J' core::iter::once(Fe32::J).chain(hash[..].fe_iter()) }, - }) + } } } @@ -371,7 +383,7 @@ type RouteHintHopIter = iter::Chain< >; impl Base32Iterable for PrivateRoute { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { fn serialize_to_iter(hop: &RouteHintHop) -> RouteHintHopIter { let i1 = hop.src_node_id.serialize().into_iter(); let i2 = u64::to_be_bytes(hop.short_channel_id).into_iter(); @@ -381,7 +393,7 @@ impl Base32Iterable for PrivateRoute { i1.chain(i2).chain(i3).chain(i4).chain(i5) } - Box::new(self.0 .0.iter().map(serialize_to_iter).flatten().bytes_to_fes()) + self.0 .0.iter().map(serialize_to_iter).flatten().bytes_to_fes() } } @@ -391,16 +403,11 @@ impl Base32Len for PrivateRoute { } } -// Shorthand type -type TaggedFieldIter = core::iter::Chain, I>; - impl Base32Iterable for TaggedField { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { /// Writes a tagged field: tag, length and data. `tag` should be in `0..32` otherwise the /// function will panic. 
- fn write_tagged_field<'s, P>( - tag: u8, payload: &'s P, - ) -> TaggedFieldIter + 's>> + fn write_tagged_field<'s, P>(tag: u8, payload: &'s P) -> impl Iterator + 's where P: Base32Iterable + Base32Len + ?Sized, { @@ -416,54 +423,49 @@ impl Base32Iterable for TaggedField { .chain(payload.fe_iter()) } - // we will also need a giant enum for this - Box::new(match *self { + define_iterator_enum!(ManyIters, A, B, C, D, E, F, G, H, I, J, K); + match *self { TaggedField::PaymentHash(ref hash) => { - write_tagged_field(constants::TAG_PAYMENT_HASH, hash) + ManyIters::A(write_tagged_field(constants::TAG_PAYMENT_HASH, hash)) }, TaggedField::Description(ref description) => { - write_tagged_field(constants::TAG_DESCRIPTION, description) + ManyIters::B(write_tagged_field(constants::TAG_DESCRIPTION, description)) }, TaggedField::PayeePubKey(ref pub_key) => { - write_tagged_field(constants::TAG_PAYEE_PUB_KEY, pub_key) + ManyIters::C(write_tagged_field(constants::TAG_PAYEE_PUB_KEY, pub_key)) }, TaggedField::DescriptionHash(ref hash) => { - write_tagged_field(constants::TAG_DESCRIPTION_HASH, hash) + ManyIters::D(write_tagged_field(constants::TAG_DESCRIPTION_HASH, hash)) }, TaggedField::ExpiryTime(ref duration) => { - write_tagged_field(constants::TAG_EXPIRY_TIME, duration) + ManyIters::E(write_tagged_field(constants::TAG_EXPIRY_TIME, duration)) }, TaggedField::MinFinalCltvExpiryDelta(ref expiry) => { - write_tagged_field(constants::TAG_MIN_FINAL_CLTV_EXPIRY_DELTA, expiry) + ManyIters::F(write_tagged_field(constants::TAG_MIN_FINAL_CLTV_EXPIRY_DELTA, expiry)) }, TaggedField::Fallback(ref fallback_address) => { - write_tagged_field(constants::TAG_FALLBACK, fallback_address) + ManyIters::G(write_tagged_field(constants::TAG_FALLBACK, fallback_address)) }, TaggedField::PrivateRoute(ref route_hops) => { - write_tagged_field(constants::TAG_PRIVATE_ROUTE, route_hops) + ManyIters::H(write_tagged_field(constants::TAG_PRIVATE_ROUTE, route_hops)) }, TaggedField::PaymentSecret(ref payment_secret) => { - write_tagged_field(constants::TAG_PAYMENT_SECRET, payment_secret) + ManyIters::I(write_tagged_field(constants::TAG_PAYMENT_SECRET, payment_secret)) }, TaggedField::PaymentMetadata(ref payment_metadata) => { - write_tagged_field(constants::TAG_PAYMENT_METADATA, payment_metadata) + ManyIters::J(write_tagged_field(constants::TAG_PAYMENT_METADATA, payment_metadata)) }, TaggedField::Features(ref features) => { - write_tagged_field(constants::TAG_FEATURES, features) + ManyIters::K(write_tagged_field(constants::TAG_FEATURES, features)) }, - }) + } } } impl Base32Iterable for Bolt11InvoiceSignature { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let (recovery_id, signature) = self.0.serialize_compact(); - Box::new( - signature - .into_iter() - .chain(core::iter::once(recovery_id.to_i32() as u8)) - .bytes_to_fes(), - ) + signature.into_iter().chain(core::iter::once(recovery_id.to_i32() as u8)).bytes_to_fes() } } From 4561bc5bf3887897f077bddb96330cccf3ccff0d Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 1 Dec 2025 16:12:17 -0800 Subject: [PATCH 007/242] Git-ignore lightning-tests/target Similar to the other /target directories we ignore where a bunch of files are generated during testing. 
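For readers unfamiliar with the language feature the two patches above lean on, here is a minimal, self-contained sketch of returning `impl Trait` from a trait method, stable since rustc 1.75. The `Fetch` trait and `DummyFetcher` type are hypothetical illustrations, not LDK APIs:

use core::future::Future;

trait Fetch {
    // Before rustc 1.75 a trait method could not return `impl Trait`, so an
    // async trait method had to box: `Pin<Box<dyn Future<...> + Send + 'a>>`.
    fn fetch<'a>(&'a self, key: &'a str) -> impl Future<Output = Result<Vec<u8>, ()>> + Send + 'a;
}

struct DummyFetcher;

impl Fetch for DummyFetcher {
    fn fetch<'a>(&'a self, _key: &'a str) -> impl Future<Output = Result<Vec<u8>, ()>> + Send + 'a {
        // An `async` block is a concrete (if unnameable) type implementing
        // `Future`, so it can now be returned directly with no allocation.
        async move { Ok(Vec::new()) }
    }
}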
--- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index ed10eb14387..56e94616eeb 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ lightning-dns-resolver/target ext-functional-test-demo/target no-std-check/target msrv-no-dev-deps-check/target +lightning-tests/target From 4de6b5c8bf43a746320e3abf4a3d83ef779bf62c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 14 Nov 2025 11:36:18 +0100 Subject: [PATCH 008/242] Channel logging improvements Additional trace logs to help with debugging. --- lightning/src/ln/channel.rs | 4 ++-- lightning/src/ln/channelmanager.rs | 14 ++++++++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 2068a254f45..b1c2458014c 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8905,8 +8905,8 @@ where ); return_with_htlcs_to_fail!(htlcs_to_fail); } else { - log_debug!(logger, "Received a valid revoke_and_ack with no reply necessary. {} monitor update.", - release_state_str); + log_debug!(logger, "Received a valid revoke_and_ack with no reply necessary. {} monitor update {}.", + release_state_str, monitor_update.update_id); self.monitor_updating_paused( false, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 399c51b9d9a..f938939f279 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9451,6 +9451,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ for action in actions.into_iter() { match action { MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => { + let (peer_id, chan_id) = pending_mpp_claim.as_ref().map(|c| (Some(c.0), Some(c.1))).unwrap_or_default(); + let logger = WithContext::from(&self.logger, peer_id, chan_id, Some(payment_hash)); + log_trace!(logger, "Handling PaymentClaimed monitor update completion action"); + if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim { let per_peer_state = self.per_peer_state.read().unwrap(); per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| { @@ -9526,6 +9530,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // `payment_id` should suffice to ensure we never spuriously drop a second // event for a duplicate payment. 
if !pending_events.contains(&event_action) { + log_trace!(logger, "Queuing PaymentClaimed event with event completion action {:?}", event_action.1); pending_events.push_back(event_action); } } @@ -17109,10 +17114,6 @@ where let logger = WithChannelMonitor::from(&args.logger, monitor, None); let channel_id = monitor.channel_id(); - log_info!( - logger, - "Queueing monitor update to ensure missing channel is force closed", - ); let monitor_update = ChannelMonitorUpdate { update_id: monitor.get_latest_update_id().saturating_add(1), updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { @@ -17120,6 +17121,11 @@ where }], channel_id: Some(monitor.channel_id()), }; + log_info!( + logger, + "Queueing monitor update {} to ensure missing channel is force closed", + monitor_update.update_id + ); let funding_txo = monitor.get_funding_txo(); let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, From f312c24f5375b47be6488f7af03ec1448adf73c8 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 3 Dec 2025 09:05:34 +0100 Subject: [PATCH 009/242] Rustfmt handle_monitor_update_completion_actions --- lightning/src/ln/channelmanager.rs | 109 +++++++++++++++++++++-------- 1 file changed, 78 insertions(+), 31 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f938939f279..ea6409d0e1e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9440,8 +9440,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } - #[rustfmt::skip] - fn handle_monitor_update_completion_actions>(&self, actions: I) { + fn handle_monitor_update_completion_actions< + I: IntoIterator, + >( + &self, actions: I, + ) { debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread); debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); @@ -9450,40 +9453,71 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ for action in actions.into_iter() { match action { - MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => { - let (peer_id, chan_id) = pending_mpp_claim.as_ref().map(|c| (Some(c.0), Some(c.1))).unwrap_or_default(); - let logger = WithContext::from(&self.logger, peer_id, chan_id, Some(payment_hash)); + MonitorUpdateCompletionAction::PaymentClaimed { + payment_hash, + pending_mpp_claim, + } => { + let (peer_id, chan_id) = pending_mpp_claim + .as_ref() + .map(|c| (Some(c.0), Some(c.1))) + .unwrap_or_default(); + let logger = + WithContext::from(&self.logger, peer_id, chan_id, Some(payment_hash)); log_trace!(logger, "Handling PaymentClaimed monitor update completion action"); if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim { let per_peer_state = self.per_peer_state.read().unwrap(); per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| { let mut peer_state = peer_state_mutex.lock().unwrap(); - let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id); + let blockers_entry = + peer_state.actions_blocking_raa_monitor_updates.entry(chan_id); if let btree_map::Entry::Occupied(mut blockers) = blockers_entry { - blockers.get_mut().retain(|blocker| - if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim } = &blocker { + blockers.get_mut().retain(|blocker| { + if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { + pending_claim, + } = &blocker + { if *pending_claim == claim_ptr { - let mut pending_claim_state_lock = pending_claim.0.lock().unwrap(); - let pending_claim_state = &mut *pending_claim_state_lock; - pending_claim_state.channels_without_preimage.retain(|(cp, cid)| { - let this_claim = - *cp == counterparty_node_id && *cid == chan_id; - if this_claim { - pending_claim_state.channels_with_preimage.push((*cp, *cid)); - false - } else { true } - }); - if pending_claim_state.channels_without_preimage.is_empty() { - for (cp, cid) in pending_claim_state.channels_with_preimage.iter() { + let mut pending_claim_state_lock = + pending_claim.0.lock().unwrap(); + let pending_claim_state = + &mut *pending_claim_state_lock; + pending_claim_state.channels_without_preimage.retain( + |(cp, cid)| { + let this_claim = *cp == counterparty_node_id + && *cid == chan_id; + if this_claim { + pending_claim_state + .channels_with_preimage + .push((*cp, *cid)); + false + } else { + true + } + }, + ); + if pending_claim_state + .channels_without_preimage + .is_empty() + { + for (cp, cid) in pending_claim_state + .channels_with_preimage + .iter() + { let freed_chan = (*cp, *cid, blocker.clone()); freed_channels.push(freed_chan); } } - !pending_claim_state.channels_without_preimage.is_empty() - } else { true } - } else { true } - ); + !pending_claim_state + .channels_without_preimage + .is_empty() + } else { + true + } + } else { + true + } + }); if blockers.get().is_empty() { blockers.remove(); } @@ -9491,7 +9525,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }); } - let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); + let payment = self + .claimable_payments + .lock() + .unwrap() + .pending_claiming_payments + .remove(&payment_hash); if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, @@ -9501,7 +9540,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ onion_fields, payment_id, durable_preimage_channel, - }) = payment { + }) = payment + { let event = events::Event::PaymentClaimed { payment_hash, purpose, @@ -9512,8 +9552,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ onion_fields, payment_id, }; - let action = if let Some((outpoint, counterparty_node_id, channel_id)) - = durable_preimage_channel + let action = if let Some((outpoint, counterparty_node_id, channel_id)) = + durable_preimage_channel { Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { channel_funding_outpoint: Some(outpoint), @@ -9530,13 +9570,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // `payment_id` should suffice to ensure we never spuriously drop a second // event for a duplicate payment. if !pending_events.contains(&event_action) { - log_trace!(logger, "Queuing PaymentClaimed event with event completion action {:?}", event_action.1); + log_trace!( + logger, + "Queuing PaymentClaimed event with event completion action {:?}", + event_action.1 + ); pending_events.push_back(event_action); } } }, MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { - event, downstream_counterparty_and_funding_outpoint + event, + downstream_counterparty_and_funding_outpoint, } => { self.pending_events.lock().unwrap().push_back((event, None)); if let Some(unblocked) = downstream_counterparty_and_funding_outpoint { @@ -9548,7 +9593,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, MonitorUpdateCompletionAction::FreeOtherChannelImmediately { - downstream_counterparty_node_id, downstream_channel_id, blocking_action, + downstream_counterparty_node_id, + downstream_channel_id, + blocking_action, } => { self.handle_monitor_update_release( downstream_counterparty_node_id, From cdba25fa8797ee39e69e0276b28fe7473dac5f19 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Fri, 5 Dec 2025 22:55:26 +0000 Subject: [PATCH 010/242] Assert peer supports splicing before splicing channel --- lightning/src/ln/channelmanager.rs | 13 +++++- lightning/src/ln/splicing_tests.rs | 67 +++++++++++++++++++++++++++++- 2 files changed, 77 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 72585d69f80..8595b23bee7 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4774,8 +4774,17 @@ where Err(e) => return Err(e), }; - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; + let mut peer_state = peer_state_mutex.lock().unwrap(); + if !peer_state.latest_features.supports_splicing() { + return Err(APIError::ChannelUnavailable { + err: "Peer does not support splicing".to_owned(), + }); + } + if !peer_state.latest_features.supports_quiescence() { + return Err(APIError::ChannelUnavailable { + err: "Peer does not support quiescence, a splicing prerequisite".to_owned(), + }); + } // Look for the channel match peer_state.channel_by_id.entry(*channel_id) { diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index a96af7bbc5d..a05c0bd92d8 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -17,7 +17,9 @@ use crate::events::bump_transaction::sync::WalletSourceSync; use crate::events::{ClosureReason, Event, FundingInfo, HTLCHandlingFailureType}; use crate::ln::chan_utils; use 
crate::ln::channel::CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY; -use crate::ln::channelmanager::{PaymentId, RecipientOnionFields, BREAKDOWN_TIMEOUT}; +use crate::ln::channelmanager::{ + provided_init_features, PaymentId, RecipientOnionFields, BREAKDOWN_TIMEOUT, +}; use crate::ln::functional_test_utils::*; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; @@ -30,6 +32,69 @@ use crate::util::test_channel_signer::SignerOp; use bitcoin::secp256k1::PublicKey; use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut}; +#[test] +fn test_splicing_not_supported_api_error() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut features = provided_init_features(&test_default_channel_config()); + features.clear_splicing(); + *node_cfgs[0].override_init_features.borrow_mut() = Some(features); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_id_0 = nodes[0].node.get_our_node_id(); + let node_id_1 = nodes[1].node.get_our_node_id(); + + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + let bs_contribution = SpliceContribution::SpliceIn { + value: Amount::ZERO, + inputs: Vec::new(), + change_script: None, + }; + + let res = nodes[1].node.splice_channel( + &channel_id, + &node_id_0, + bs_contribution.clone(), + 0, // funding_feerate_per_kw, + None, // locktime + ); + match res { + Err(APIError::ChannelUnavailable { err }) => { + assert!(err.contains("Peer does not support splicing")) + }, + _ => panic!("Wrong error {:?}", res.err().unwrap()), + } + + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + + let mut features = nodes[0].node.init_features(); + features.set_splicing_optional(); + features.clear_quiescence(); + *nodes[0].override_init_features.borrow_mut() = Some(features); + + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_channel_ready = (true, true); + reconnect_args.send_announcement_sigs = (true, true); + reconnect_nodes(reconnect_args); + + let res = nodes[1].node.splice_channel( + &channel_id, + &node_id_0, + bs_contribution, + 0, // funding_feerate_per_kw, + None, // locktime + ); + match res { + Err(APIError::ChannelUnavailable { err }) => { + assert!(err.contains("Peer does not support quiescence, a splicing prerequisite")) + }, + _ => panic!("Wrong error {:?}", res.err().unwrap()), + } +} + #[test] fn test_v1_splice_in_negative_insufficient_inputs() { let chanmon_cfgs = create_chanmon_cfgs(2); From 173481f6e77cd1c91e9bc6fa3ff2771d31413a4d Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 14 Nov 2025 23:20:41 +0000 Subject: [PATCH 011/242] Avoid force-closing 0-conf channels when funding is reorg'd When we see a funding transaction for one of our channels reorg'd out, we worry that it's possible we've been double-spent and immediately force-close the channel to avoid accepting any more HTLCs on it. This isn't ideal, but is mostly fine as most nodes require 6 confirmations and 6 block reorgs are exceedingly rare. However, this isn't so okay for 0-conf channels - in that case we elected to trust the funder anyway, so reorgs shouldn't worry us. Still, to handle this correctly we needed to track the old SCID and ensure our logic is safe across an SCID change.
Luckily, we did that work for splices, and can now take advantage of it here. Fixes #3836. --- lightning/src/ln/channel.rs | 41 +++- lightning/src/ln/functional_test_utils.rs | 43 ++-- lightning/src/ln/priv_short_conf_tests.rs | 236 ++++++++++++++++++---- lightning/src/ln/reorg_tests.rs | 4 +- 4 files changed, 261 insertions(+), 63 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index e55e4144ef2..ddaa729f50c 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -11344,9 +11344,13 @@ where } // Check if the funding transaction was unconfirmed + let original_scid = self.funding.short_channel_id; + let was_confirmed = self.funding.funding_tx_confirmed_in.is_some(); let funding_tx_confirmations = self.funding.get_funding_tx_confirmations(height); if funding_tx_confirmations == 0 { self.funding.funding_tx_confirmation_height = 0; + self.funding.short_channel_id = None; + self.funding.funding_tx_confirmed_in = None; } if let Some(channel_ready) = self.check_get_channel_ready(height, logger) { @@ -11361,18 +11365,33 @@ where self.context.channel_state.is_our_channel_ready() { // If we've sent channel_ready (or have both sent and received channel_ready), and - // the funding transaction has become unconfirmed, - // close the channel and hope we can get the latest state on chain (because presumably - // the funding transaction is at least still in the mempool of most nodes). + // the funding transaction has become unconfirmed, we'll probably get a new SCID when + // it re-confirms. // - // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or - // 0-conf channel, but not doing so may lead to the - // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have - // to. - if funding_tx_confirmations == 0 && self.funding.funding_tx_confirmed_in.is_some() { - let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.", - self.context.minimum_depth.unwrap(), funding_tx_confirmations); - return Err(ClosureReason::ProcessingError { err: err_reason }); + // Worse, if the funding has un-confirmed we could have accepted some HTLC(s) over it + // and are now at risk of double-spend. While it's possible, even likely, that this is + // just a trivial reorg and we should wait to see the new block connected in the next + // call, it's also possible we've been double-spent. To avoid further loss of funds, we + // need some kind of method to freeze the channel and avoid accepting further HTLCs, + // but absent such a method, we just force-close. + // + // The one exception we make is for 0-conf channels, which we decided to trust anyway, + // in which case we simply track the previous SCID in `historical_scids`, the same as + // after a channel is spliced. + if funding_tx_confirmations == 0 && was_confirmed { + if let Some(scid) = original_scid { + self.context.historical_scids.push(scid); + } else { + debug_assert!(false); + } + if self.context.minimum_depth(&self.funding).expect("set for a ready channel") > 0 { + // Reset the original short_channel_id so that we'll generate a closure + // `channel_update` broadcast event.
+ self.funding.short_channel_id = original_scid; + let err_reason = format!("Funding transaction was un-confirmed, originally locked at {} confs.", + self.context.minimum_depth.unwrap()); + return Err(ClosureReason::ProcessingError { err: err_reason }); + } } } else if !self.funding.is_outbound() && self.funding.funding_tx_confirmed_in.is_none() && height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e31630a4926..be32e1fd23a 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -3224,12 +3224,13 @@ pub fn expect_probe_successful_events( } pub struct PaymentFailedConditions<'a> { - pub(crate) expected_htlc_error_data: Option<(LocalHTLCFailureReason, &'a [u8])>, - pub(crate) expected_blamed_scid: Option, - pub(crate) expected_blamed_chan_closed: Option, - pub(crate) expected_mpp_parts_remain: bool, - pub(crate) retry_expected: bool, - pub(crate) from_mon_update: bool, + pub expected_htlc_error_data: Option<(LocalHTLCFailureReason, &'a [u8])>, + pub expected_blamed_scid: Option, + pub expected_blamed_chan_closed: Option, + pub expected_mpp_parts_remain: bool, + pub retry_expected: bool, + pub from_mon_update: bool, + pub reason: Option, } impl<'a> PaymentFailedConditions<'a> { @@ -3241,6 +3242,7 @@ impl<'a> PaymentFailedConditions<'a> { expected_mpp_parts_remain: false, retry_expected: false, from_mon_update: false, + reason: None, } } pub fn mpp_parts_remain(mut self) -> Self { @@ -3321,14 +3323,21 @@ pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>( *payment_failed_permanently, expected_payment_failed_permanently, "unexpected payment_failed_permanently value" ); - { - assert!(error_code.is_some(), "expected error_code.is_some() = true"); - assert!(error_data.is_some(), "expected error_data.is_some() = true"); - let reason: LocalHTLCFailureReason = error_code.unwrap().into(); - if let Some((code, data)) = conditions.expected_htlc_error_data { - assert_eq!(reason, code, "unexpected error code"); - assert_eq!(&error_data.as_ref().unwrap()[..], data, "unexpected error data"); - } + match failure { + PathFailure::OnPath { .. } => { + assert!(error_code.is_some(), "expected error_code.is_some() = true"); + assert!(error_data.is_some(), "expected error_data.is_some() = true"); + let reason: LocalHTLCFailureReason = error_code.unwrap().into(); + if let Some((code, data)) = conditions.expected_htlc_error_data { + assert_eq!(reason, code, "unexpected error code"); + assert_eq!(&error_data.as_ref().unwrap()[..], data); + } + }, + PathFailure::InitialSend { .. 
} => { + assert!(error_code.is_none()); + assert!(error_data.is_none()); + assert!(conditions.expected_htlc_error_data.is_none()); + }, } if let Some(chan_closed) = conditions.expected_blamed_chan_closed { @@ -3362,7 +3371,9 @@ pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>( assert_eq!(*payment_id, expected_payment_id); assert_eq!( reason.unwrap(), - if expected_payment_failed_permanently { + if let Some(expected_reason) = conditions.reason { + expected_reason + } else if expected_payment_failed_permanently { PaymentFailureReason::RecipientRejected } else { PaymentFailureReason::RetriesExhausted @@ -3414,7 +3425,7 @@ pub fn send_along_route_with_secret<'a, 'b, 'c>( payment_id } -fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { +pub fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { let origin_node_id = expected_path[0].node.get_our_node_id(); // iterate from the receiving node to the origin node and handle update fail htlc. diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index ea34e88f619..ab7cad9be44 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -12,7 +12,8 @@ //! LSP). use crate::chain::ChannelMonitorUpdateStatus; -use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; +use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentFailureReason}; +use crate::ln::channel::CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY; use crate::ln::channelmanager::{PaymentId, RecipientOnionFields, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::msgs; use crate::ln::msgs::{ @@ -1078,66 +1079,233 @@ fn test_public_0conf_channel() { #[test] fn test_0conf_channel_reorg() { // If we accept a 0conf channel, which is then confirmed, but then changes SCID in a reorg, we - // have to make sure we handle this correctly (or, currently, just force-close the channel). + // have to ensure we still accept relays to the previous SCID, at least for some time, as well + // as send a fresh channel announcement. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); chan_config.manually_accept_inbound_channels = true; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let node_a_id = nodes[0].node.get_our_node_id(); + let node_chanmgrs = + create_node_chanmgrs(3, &node_cfgs, &[None, None, Some(chan_config.clone())]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + create_chan_between_nodes(&nodes[0], &nodes[1]); + + // Make sure all nodes are at the same starting height + connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); // This is the default but we force it on anyway chan_config.channel_handshake_config.announce_for_forwarding = true; - let (tx, ..) = open_zero_conf_channel(&nodes[0], &nodes[1], Some(chan_config)); + let (tx, ..) 
= open_zero_conf_channel(&nodes[1], &nodes[2], Some(chan_config)); // We can use the channel immediately, but we can't announce it until we get 6+ confirmations - send_payment(&nodes[0], &[&nodes[1]], 100_000); + send_payment(&nodes[1], &[&nodes[2]], 100_000); - mine_transaction(&nodes[0], &tx); mine_transaction(&nodes[1], &tx); + mine_transaction(&nodes[2], &tx); // Send a payment using the channel's real SCID, which will be public in a few blocks once we // can generate a channel_announcement. - let real_scid = nodes[0].node.list_usable_channels()[0].short_channel_id.unwrap(); - assert_eq!(nodes[1].node.list_usable_channels()[0].short_channel_id.unwrap(), real_scid); + let bs_chans = nodes[1].node.list_usable_channels(); + let bs_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + let original_scid = bs_chan.short_channel_id.unwrap(); + assert_eq!(nodes[2].node.list_usable_channels()[0].short_channel_id.unwrap(), original_scid); let (mut route, payment_hash, payment_preimage, payment_secret) = - get_route_and_payment_hash!(nodes[0], nodes[1], 10_000); - assert_eq!(route.paths[0].hops[0].short_channel_id, real_scid); + get_route_and_payment_hash!(nodes[1], nodes[2], 10_000); + assert_eq!(route.paths[0].hops[0].short_channel_id, original_scid); + send_along_route_with_secret( + &nodes[1], + route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + // Check that we can forward a payment over the channel's SCID as well (i.e. as if node C + // generated an invoice with a route hint through the 0-conf channel). + let mut forwarded_route = route.clone(); + let (ab_route, ..) = get_route_and_payment_hash!(nodes[0], nodes[1], 10_000); + forwarded_route.paths[0].hops.insert(0, ab_route.paths[0].hops[0].clone()); + forwarded_route.paths[0].hops[0].fee_msat = 1000; + forwarded_route.paths[0].hops[0].cltv_expiry_delta = MIN_CLTV_EXPIRY_DELTA.into(); send_along_route_with_secret( &nodes[0], - route, - &[&[&nodes[1]]], + forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], 10_000, payment_hash, payment_secret, ); - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); - disconnect_blocks(&nodes[0], 1); + // Now disconnect blocks, checking that the SCID was wiped but that it still works both for a + // forwarded HTLC and a directly-sent one. disconnect_blocks(&nodes[1], 1); + disconnect_blocks(&nodes[2], 1); - // At this point the channel no longer has an SCID again. In the future we should likely - // support simply un-setting the SCID and waiting until the channel gets re-confirmed, but for - // now we force-close the channel here. - let reason = ClosureReason::ProcessingError { - err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." 
- .to_owned(), - }; - check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); - check_closed_broadcast!(nodes[0], true); + let bs_chans = nodes[1].node.list_usable_channels(); + let bs_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert!(bs_chan.short_channel_id.is_none()); + assert!(nodes[2].node.list_usable_channels()[0].short_channel_id.is_none()); + + send_along_route_with_secret( + &nodes[1], + route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + send_along_route_with_secret( + &nodes[0], + forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + // Finally, connect an extra block then re-mine the funding tx, giving the channel a new SCID. + connect_blocks(&nodes[1], 1); + connect_blocks(&nodes[2], 1); + + mine_transaction(&nodes[1], &tx); + mine_transaction(&nodes[2], &tx); + + let bs_chans = nodes[1].node.list_usable_channels(); + let bs_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + let new_scid = bs_chan.short_channel_id.unwrap(); + assert_ne!(original_scid, new_scid); + assert_eq!(nodes[2].node.list_usable_channels()[0].short_channel_id.unwrap(), new_scid); + + // At this point, the channel should happily forward or send payments with either the old SCID + // or the new SCID... + send_along_route_with_secret( + &nodes[1], + route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + send_along_route_with_secret( + &nodes[0], + forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + let mut new_scid_route = route.clone(); + new_scid_route.paths[0].hops[0].short_channel_id = new_scid; + send_along_route_with_secret( + &nodes[1], + new_scid_route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + let mut new_scid_forwarded_route = forwarded_route.clone(); + new_scid_forwarded_route.paths[0].hops[1].short_channel_id = new_scid; + send_along_route_with_secret( + &nodes[0], + new_scid_forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + // However after CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY blocks, the old SCID should be removed + // and will no longer work for sent or forwarded payments (but the new one still will). + connect_blocks(&nodes[1], 5); + let bs_announcement_sigs = + get_event_msg!(nodes[1], MessageSendEvent::SendAnnouncementSignatures, node_c_id); + + connect_blocks(&nodes[2], 5); + let cs_announcement_sigs = + get_event_msg!(nodes[2], MessageSendEvent::SendAnnouncementSignatures, node_b_id); + + nodes[2].node.handle_announcement_signatures(node_b_id, &bs_announcement_sigs); + let cs_broadcast = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(cs_broadcast.len(), 1); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. 
} = cs_broadcast[0] { + } else { + panic!("Expected broadcast"); + } + + nodes[1].node.handle_announcement_signatures(node_c_id, &cs_announcement_sigs); + let bs_broadcast = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(bs_broadcast.len(), 1); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = bs_broadcast[0] { + } else { + panic!("Expected broadcast"); + } + + connect_blocks(&nodes[0], CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY); + connect_blocks(&nodes[1], CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY - 5); + connect_blocks(&nodes[2], CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY - 5); + + send_along_route_with_secret( + &nodes[1], + new_scid_route, + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + send_along_route_with_secret( + &nodes[0], + new_scid_forwarded_route, + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId([0; 32]); + nodes[1].node.send_payment_with_route(route, payment_hash, onion.clone(), id).unwrap(); + let mut conditions = PaymentFailedConditions::new(); + conditions.reason = Some(PaymentFailureReason::RouteNotFound); + expect_payment_failed_conditions(&nodes[1], payment_hash, false, conditions); + + nodes[0].node.send_payment_with_route(forwarded_route, payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let reason = ClosureReason::ProcessingError { - err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." - .to_owned(), - }; - check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); - check_closed_broadcast!(nodes[1], true); - check_added_monitors(&nodes[1], 1); + let mut ev = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(ev.len(), 1); + let ev = ev.pop().unwrap(); + let path = &[&nodes[1]]; + let failure = HTLCHandlingFailureType::InvalidForward { requested_forward_scid: original_scid }; + let args = + PassAlongPathArgs::new(&nodes[0], path, 10_000, payment_hash, ev).expect_failure(failure); + do_pass_along_path(args); + fail_payment_along_path(&[&nodes[0], &nodes[1]]); + expect_payment_failed!(nodes[0], payment_hash, false); } #[test] diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 97e4429fbd6..043862fea90 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -386,9 +386,9 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ assert_eq!(txn.len(), 1); } - let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs."; + let expected_err = "Funding transaction was un-confirmed, originally locked at 6 confs."; if reorg_after_reload || !reload_node { - handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. 
Locked at 6 confs, now have 0 confs."); + handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed, originally locked at 6 confs."); check_added_monitors!(nodes[1], 1); let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) }; check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000); From 4289db5ccee70a658371f99181b2b269f84dd479 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 1 Dec 2025 13:39:45 +0100 Subject: [PATCH 012/242] Consistently use `wire::Message` for encoding network messages Previously, `enqueue_message` took an `M: Type + Writeable` reference, which didn't make use of our `wire::Message` type, which turned out to be rather confusing. Here, we use `Message` consistently in `PeerManager`'s `enqueue_message`, but also in `encrypt_message`, etc. While at it we also switch to move semantics, which is a nice cleanup. --- lightning/src/ln/channelmanager.rs | 2 + lightning/src/ln/functional_test_utils.rs | 2 + lightning/src/ln/msgs.rs | 2 + lightning/src/ln/peer_channel_encryptor.rs | 4 +- lightning/src/ln/peer_handler.rs | 371 ++++++++++++--------- 5 files changed, 226 insertions(+), 155 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 399c51b9d9a..d52d8535114 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -13872,7 +13872,9 @@ where &MessageSendEvent::UpdateHTLCs { .. } => false, &MessageSendEvent::SendRevokeAndACK { .. } => false, &MessageSendEvent::SendClosingSigned { .. } => false, + #[cfg(simple_close)] &MessageSendEvent::SendClosingComplete { .. } => false, + #[cfg(simple_close)] &MessageSendEvent::SendClosingSig { .. } => false, &MessageSendEvent::SendShutdown { .. } => false, &MessageSendEvent::SendChannelReestablish { .. } => false, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e31630a4926..e4e9c583f32 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1124,7 +1124,9 @@ pub fn remove_first_msg_event_to_node( MessageSendEvent::UpdateHTLCs { node_id, .. } => node_id == msg_node_id, MessageSendEvent::SendRevokeAndACK { node_id, .. } => node_id == msg_node_id, MessageSendEvent::SendClosingSigned { node_id, .. } => node_id == msg_node_id, + #[cfg(simple_close)] MessageSendEvent::SendClosingComplete { node_id, .. } => node_id == msg_node_id, + #[cfg(simple_close)] MessageSendEvent::SendClosingSig { node_id, .. } => node_id == msg_node_id, MessageSendEvent::SendShutdown { node_id, .. } => node_id == msg_node_id, MessageSendEvent::SendChannelReestablish { node_id, .. } => node_id == msg_node_id, diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 8e230fab1d9..0484ebe7530 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -1857,6 +1857,7 @@ pub enum MessageSendEvent { msg: ClosingSigned, }, /// Used to indicate that a `closing_complete` message should be sent to the peer with the given `node_id`. + #[cfg(simple_close)] SendClosingComplete { /// The node_id of the node which should receive this message node_id: PublicKey, @@ -1864,6 +1865,7 @@ pub enum MessageSendEvent { msg: ClosingComplete, }, /// Used to indicate that a `closing_sig` message should be sent to the peer with the given `node_id`. 
+ #[cfg(simple_close)] SendClosingSig { /// The node_id of the node which should receive this message node_id: PublicKey, diff --git a/lightning/src/ln/peer_channel_encryptor.rs b/lightning/src/ln/peer_channel_encryptor.rs index 09b970a9ab2..1d34d9a8674 100644 --- a/lightning/src/ln/peer_channel_encryptor.rs +++ b/lightning/src/ln/peer_channel_encryptor.rs @@ -565,12 +565,12 @@ impl PeerChannelEncryptor { /// Encrypts the given message, returning the encrypted version. /// panics if the length of `message`, once encoded, is greater than 65535 or if the Noise /// handshake has not finished. - pub fn encrypt_message(&mut self, message: &M) -> Vec { + pub fn encrypt_message(&mut self, message: wire::Message) -> Vec { // Allocate a buffer with 2KB, fitting most common messages. Reserve the first 16+2 bytes // for the 2-byte message type prefix and its MAC. let mut res = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); res.0.resize(16 + 2, 0); - wire::write(message, &mut res).expect("In-memory messages must never fail to serialize"); + wire::write(&message, &mut res).expect("In-memory messages must never fail to serialize"); self.encrypt_message_with_header_0s(&mut res.0); res.0 diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index c3b490ef31a..8a6c6a786b1 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -29,7 +29,7 @@ use crate::ln::peer_channel_encryptor::{ }; use crate::ln::types::ChannelId; use crate::ln::wire; -use crate::ln::wire::{Encode, Type}; +use crate::ln::wire::{Encode, Message, Type}; use crate::onion_message::async_payments::{ AsyncPaymentsMessageHandler, HeldHtlcAvailable, OfferPaths, OfferPathsRequest, ReleaseHeldHtlc, ServeStaticInvoice, StaticInvoicePersisted, @@ -53,12 +53,14 @@ use crate::util::ser::{VecWriter, Writeable, Writer}; #[allow(unused_imports)] use crate::prelude::*; +use super::wire::CustomMessageReader; use crate::io; use crate::sync::{FairRwLock, Mutex, MutexGuard}; use core::convert::Infallible; use core::ops::Deref; use core::sync::atomic::{AtomicBool, AtomicI32, AtomicU32, Ordering}; use core::{cmp, fmt, hash, mem}; + #[cfg(not(c_bindings))] use { crate::chain::chainmonitor::ChainMonitor, @@ -1121,7 +1123,7 @@ pub struct PeerManager< } enum LogicalMessage { - FromWire(wire::Message), + FromWire(Message), CommitmentSignedBatch(ChannelId, Vec), } @@ -1572,7 +1574,8 @@ where if let Some(next_onion_message) = handler.next_onion_message_for_peer(peer_node_id) { - self.enqueue_message(peer, &next_onion_message); + let msg = Message::OnionMessage(next_onion_message); + self.enqueue_message(peer, msg); } } } @@ -1590,16 +1593,20 @@ where if let Some((announce, update_a_option, update_b_option)) = self.message_handler.route_handler.get_next_channel_announcement(c) { - self.enqueue_message(peer, &announce); + peer.sync_status = InitSyncTracker::ChannelsSyncing( + announce.contents.short_channel_id + 1, + ); + let msg = Message::ChannelAnnouncement(announce); + self.enqueue_message(peer, msg); + if let Some(update_a) = update_a_option { - self.enqueue_message(peer, &update_a); + let msg = Message::ChannelUpdate(update_a); + self.enqueue_message(peer, msg); } if let Some(update_b) = update_b_option { - self.enqueue_message(peer, &update_b); + let msg = Message::ChannelUpdate(update_b); + self.enqueue_message(peer, msg); } - peer.sync_status = InitSyncTracker::ChannelsSyncing( - announce.contents.short_channel_id + 1, - ); } else { peer.sync_status = 
InitSyncTracker::ChannelsSyncing(0xffff_ffff_ffff_ffff); @@ -1608,8 +1615,9 @@ where InitSyncTracker::ChannelsSyncing(c) if c == 0xffff_ffff_ffff_ffff => { let handler = &self.message_handler.route_handler; if let Some(msg) = handler.get_next_node_announcement(None) { - self.enqueue_message(peer, &msg); peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id); + let msg = Message::NodeAnnouncement(msg); + self.enqueue_message(peer, msg); } else { peer.sync_status = InitSyncTracker::NoSyncRequested; } @@ -1618,8 +1626,9 @@ where InitSyncTracker::NodesSyncing(sync_node_id) => { let handler = &self.message_handler.route_handler; if let Some(msg) = handler.get_next_node_announcement(Some(&sync_node_id)) { - self.enqueue_message(peer, &msg); peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id); + let msg = Message::NodeAnnouncement(msg); + self.enqueue_message(peer, msg); } else { peer.sync_status = InitSyncTracker::NoSyncRequested; } @@ -1727,7 +1736,10 @@ where } /// Append a message to a peer's pending outbound/write buffer - fn enqueue_message(&self, peer: &mut Peer, message: &M) { + fn enqueue_message( + &self, peer: &mut Peer, + message: Message<::CustomMessage>, + ) { let their_node_id = peer.their_node_id.map(|p| p.0); if their_node_id.is_some() { let logger = WithContext::from(&self.logger, their_node_id, None, None); @@ -1792,12 +1804,14 @@ where }, msgs::ErrorAction::SendErrorMessage { msg } => { log_debug!(logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err); - self.enqueue_message($peer, &msg); + let msg = Message::Error(msg); + self.enqueue_message($peer, msg); continue; }, msgs::ErrorAction::SendWarningMessage { msg, log_level } => { log_given_level!(logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err); - self.enqueue_message($peer, &msg); + let msg = Message::Warning(msg); + self.enqueue_message($peer, msg); continue; }, } @@ -1892,7 +1906,8 @@ where peer.their_socket_address.clone(), ), }; - self.enqueue_message(peer, &resp); + let msg = Message::Init(resp); + self.enqueue_message(peer, msg); }, NextNoiseStep::ActThree => { let res = peer @@ -1912,7 +1927,8 @@ where peer.their_socket_address.clone(), ), }; - self.enqueue_message(peer, &resp); + let msg = Message::Init(resp); + self.enqueue_message(peer, msg); }, NextNoiseStep::NoiseComplete => { if peer.pending_read_is_header { @@ -1972,8 +1988,11 @@ where let channel_id = ChannelId::new_zero(); let data = "Unsupported message compression: zlib" .to_owned(); - let msg = msgs::WarningMessage { channel_id, data }; - self.enqueue_message(peer, &msg); + let msg = Message::Warning(msgs::WarningMessage { + channel_id, + data, + }); + self.enqueue_message(peer, msg); continue; }, (_, Some(ty)) if is_gossip_msg(ty) => { @@ -1983,8 +2002,11 @@ where "Unreadable/bogus gossip message of type {}", ty ); - let msg = msgs::WarningMessage { channel_id, data }; - self.enqueue_message(peer, &msg); + let msg = Message::Warning(msgs::WarningMessage { + channel_id, + data, + }); + self.enqueue_message(peer, msg); continue; }, (msgs::DecodeError::UnknownRequiredFeature, _) => { @@ -2060,9 +2082,7 @@ where /// Returns the message back if it needs to be broadcasted to all other peers. 
fn handle_message( &self, peer_mutex: &Mutex, peer_lock: MutexGuard, - message: wire::Message< - <::Target as wire::CustomMessageReader>::CustomMessage, - >, + message: Message<<::Target as wire::CustomMessageReader>::CustomMessage>, ) -> Result, MessageHandlingError> { let their_node_id = peer_lock .their_node_id @@ -2103,9 +2123,7 @@ where // allow it to be subsequently processed by `do_handle_message_without_peer_lock`. fn do_handle_message_holding_peer_lock<'a>( &self, mut peer_lock: MutexGuard, - message: wire::Message< - <::Target as wire::CustomMessageReader>::CustomMessage, - >, + message: Message<<::Target as wire::CustomMessageReader>::CustomMessage>, their_node_id: PublicKey, logger: &WithContext<'a, L>, ) -> Result< Option< @@ -2116,7 +2134,7 @@ where peer_lock.received_message_since_timer_tick = true; // Need an Init as first message - if let wire::Message::Init(msg) = message { + if let Message::Init(msg) = message { // Check if we have any compatible chains if the `networks` field is specified. if let Some(networks) = &msg.networks { let chan_handler = &self.message_handler.chan_handler; @@ -2225,7 +2243,7 @@ where // During splicing, commitment_signed messages need to be collected into a single batch // before they are handled. - if let wire::Message::StartBatch(msg) = message { + if let Message::StartBatch(msg) = message { if peer_lock.message_batch.is_some() { let error = format!( "Peer {} sent start_batch for channel {} before previous batch completed", @@ -2296,7 +2314,7 @@ where return Ok(None); } - if let wire::Message::CommitmentSigned(msg) = message { + if let Message::CommitmentSigned(msg) = message { if let Some(message_batch) = &mut peer_lock.message_batch { let MessageBatchImpl::CommitmentSigned(ref mut messages) = &mut message_batch.messages; @@ -2325,7 +2343,7 @@ where return Ok(None); } } else { - return Ok(Some(LogicalMessage::FromWire(wire::Message::CommitmentSigned(msg)))); + return Ok(Some(LogicalMessage::FromWire(Message::CommitmentSigned(msg)))); } } else if let Some(message_batch) = &peer_lock.message_batch { match message_batch.messages { @@ -2341,7 +2359,7 @@ where return Err(PeerHandleError {}.into()); } - if let wire::Message::GossipTimestampFilter(_msg) = message { + if let Message::GossipTimestampFilter(_msg) = message { // When supporting gossip messages, start initial gossip sync only after we receive // a GossipTimestampFilter if peer_lock.their_features.as_ref().unwrap().supports_gossip_queries() @@ -2373,7 +2391,7 @@ where return Ok(None); } - if let wire::Message::ChannelAnnouncement(ref _msg) = message { + if let Message::ChannelAnnouncement(ref _msg) = message { peer_lock.received_channel_announce_since_backlogged = true; } @@ -2385,9 +2403,7 @@ where // Returns the message back if it needs to be broadcasted to all other peers. 
fn do_handle_message_without_peer_lock<'a>( &self, peer_mutex: &Mutex, - message: wire::Message< - <::Target as wire::CustomMessageReader>::CustomMessage, - >, + message: Message<<::Target as wire::CustomMessageReader>::CustomMessage>, their_node_id: PublicKey, logger: &WithContext<'a, L>, ) -> Result, MessageHandlingError> { if is_gossip_msg(message.type_id()) { @@ -2400,13 +2416,13 @@ where match message { // Setup and Control messages: - wire::Message::Init(_) => { + Message::Init(_) => { // Handled above }, - wire::Message::GossipTimestampFilter(_) => { + Message::GossipTimestampFilter(_) => { // Handled above }, - wire::Message::Error(msg) => { + Message::Error(msg) => { log_debug!( logger, "Got Err message from {}: {}", @@ -2418,149 +2434,150 @@ where return Err(PeerHandleError {}.into()); } }, - wire::Message::Warning(msg) => { + Message::Warning(msg) => { log_debug!(logger, "Got warning message: {}", PrintableString(&msg.data)); }, - wire::Message::Ping(msg) => { + Message::Ping(msg) => { if msg.ponglen < 65532 { let resp = msgs::Pong { byteslen: msg.ponglen }; - self.enqueue_message(&mut *peer_mutex.lock().unwrap(), &resp); + let msg = Message::Pong(resp); + self.enqueue_message(&mut *peer_mutex.lock().unwrap(), msg); } }, - wire::Message::Pong(_msg) => { + Message::Pong(_msg) => { let mut peer_lock = peer_mutex.lock().unwrap(); peer_lock.awaiting_pong_timer_tick_intervals = 0; peer_lock.msgs_sent_since_pong = 0; }, // Channel messages: - wire::Message::StartBatch(_msg) => { + Message::StartBatch(_msg) => { debug_assert!(false); }, - wire::Message::OpenChannel(msg) => { + Message::OpenChannel(msg) => { self.message_handler.chan_handler.handle_open_channel(their_node_id, &msg); }, - wire::Message::OpenChannelV2(_msg) => { + Message::OpenChannelV2(_msg) => { self.message_handler.chan_handler.handle_open_channel_v2(their_node_id, &_msg); }, - wire::Message::AcceptChannel(msg) => { + Message::AcceptChannel(msg) => { self.message_handler.chan_handler.handle_accept_channel(their_node_id, &msg); }, - wire::Message::AcceptChannelV2(msg) => { + Message::AcceptChannelV2(msg) => { self.message_handler.chan_handler.handle_accept_channel_v2(their_node_id, &msg); }, - wire::Message::FundingCreated(msg) => { + Message::FundingCreated(msg) => { self.message_handler.chan_handler.handle_funding_created(their_node_id, &msg); }, - wire::Message::FundingSigned(msg) => { + Message::FundingSigned(msg) => { self.message_handler.chan_handler.handle_funding_signed(their_node_id, &msg); }, - wire::Message::ChannelReady(msg) => { + Message::ChannelReady(msg) => { self.message_handler.chan_handler.handle_channel_ready(their_node_id, &msg); }, - wire::Message::PeerStorage(msg) => { + Message::PeerStorage(msg) => { self.message_handler.chan_handler.handle_peer_storage(their_node_id, msg); }, - wire::Message::PeerStorageRetrieval(msg) => { + Message::PeerStorageRetrieval(msg) => { self.message_handler.chan_handler.handle_peer_storage_retrieval(their_node_id, msg); }, // Quiescence messages: - wire::Message::Stfu(msg) => { + Message::Stfu(msg) => { self.message_handler.chan_handler.handle_stfu(their_node_id, &msg); }, // Splicing messages: - wire::Message::SpliceInit(msg) => { + Message::SpliceInit(msg) => { self.message_handler.chan_handler.handle_splice_init(their_node_id, &msg); }, - wire::Message::SpliceAck(msg) => { + Message::SpliceAck(msg) => { self.message_handler.chan_handler.handle_splice_ack(their_node_id, &msg); }, - wire::Message::SpliceLocked(msg) => { + Message::SpliceLocked(msg) => { 
self.message_handler.chan_handler.handle_splice_locked(their_node_id, &msg); }, // Interactive transaction construction messages: - wire::Message::TxAddInput(msg) => { + Message::TxAddInput(msg) => { self.message_handler.chan_handler.handle_tx_add_input(their_node_id, &msg); }, - wire::Message::TxAddOutput(msg) => { + Message::TxAddOutput(msg) => { self.message_handler.chan_handler.handle_tx_add_output(their_node_id, &msg); }, - wire::Message::TxRemoveInput(msg) => { + Message::TxRemoveInput(msg) => { self.message_handler.chan_handler.handle_tx_remove_input(their_node_id, &msg); }, - wire::Message::TxRemoveOutput(msg) => { + Message::TxRemoveOutput(msg) => { self.message_handler.chan_handler.handle_tx_remove_output(their_node_id, &msg); }, - wire::Message::TxComplete(msg) => { + Message::TxComplete(msg) => { self.message_handler.chan_handler.handle_tx_complete(their_node_id, &msg); }, - wire::Message::TxSignatures(msg) => { + Message::TxSignatures(msg) => { self.message_handler.chan_handler.handle_tx_signatures(their_node_id, &msg); }, - wire::Message::TxInitRbf(msg) => { + Message::TxInitRbf(msg) => { self.message_handler.chan_handler.handle_tx_init_rbf(their_node_id, &msg); }, - wire::Message::TxAckRbf(msg) => { + Message::TxAckRbf(msg) => { self.message_handler.chan_handler.handle_tx_ack_rbf(their_node_id, &msg); }, - wire::Message::TxAbort(msg) => { + Message::TxAbort(msg) => { self.message_handler.chan_handler.handle_tx_abort(their_node_id, &msg); }, - wire::Message::Shutdown(msg) => { + Message::Shutdown(msg) => { self.message_handler.chan_handler.handle_shutdown(their_node_id, &msg); }, - wire::Message::ClosingSigned(msg) => { + Message::ClosingSigned(msg) => { self.message_handler.chan_handler.handle_closing_signed(their_node_id, &msg); }, #[cfg(simple_close)] - wire::Message::ClosingComplete(msg) => { + Message::ClosingComplete(msg) => { self.message_handler.chan_handler.handle_closing_complete(their_node_id, msg); }, #[cfg(simple_close)] - wire::Message::ClosingSig(msg) => { + Message::ClosingSig(msg) => { self.message_handler.chan_handler.handle_closing_sig(their_node_id, msg); }, // Commitment messages: - wire::Message::UpdateAddHTLC(msg) => { + Message::UpdateAddHTLC(msg) => { self.message_handler.chan_handler.handle_update_add_htlc(their_node_id, &msg); }, - wire::Message::UpdateFulfillHTLC(msg) => { + Message::UpdateFulfillHTLC(msg) => { self.message_handler.chan_handler.handle_update_fulfill_htlc(their_node_id, msg); }, - wire::Message::UpdateFailHTLC(msg) => { + Message::UpdateFailHTLC(msg) => { self.message_handler.chan_handler.handle_update_fail_htlc(their_node_id, &msg); }, - wire::Message::UpdateFailMalformedHTLC(msg) => { + Message::UpdateFailMalformedHTLC(msg) => { let chan_handler = &self.message_handler.chan_handler; chan_handler.handle_update_fail_malformed_htlc(their_node_id, &msg); }, - wire::Message::CommitmentSigned(msg) => { + Message::CommitmentSigned(msg) => { self.message_handler.chan_handler.handle_commitment_signed(their_node_id, &msg); }, - wire::Message::RevokeAndACK(msg) => { + Message::RevokeAndACK(msg) => { self.message_handler.chan_handler.handle_revoke_and_ack(their_node_id, &msg); }, - wire::Message::UpdateFee(msg) => { + Message::UpdateFee(msg) => { self.message_handler.chan_handler.handle_update_fee(their_node_id, &msg); }, - wire::Message::ChannelReestablish(msg) => { + Message::ChannelReestablish(msg) => { self.message_handler.chan_handler.handle_channel_reestablish(their_node_id, &msg); }, // Routing messages: - 
wire::Message::AnnouncementSignatures(msg) => { + Message::AnnouncementSignatures(msg) => { let chan_handler = &self.message_handler.chan_handler; chan_handler.handle_announcement_signatures(their_node_id, &msg); }, - wire::Message::ChannelAnnouncement(msg) => { + Message::ChannelAnnouncement(msg) => { let route_handler = &self.message_handler.route_handler; if route_handler .handle_channel_announcement(Some(their_node_id), &msg) @@ -2570,7 +2587,7 @@ where } self.update_gossip_backlogged(); }, - wire::Message::NodeAnnouncement(msg) => { + Message::NodeAnnouncement(msg) => { let route_handler = &self.message_handler.route_handler; if route_handler .handle_node_announcement(Some(their_node_id), &msg) @@ -2580,7 +2597,7 @@ where } self.update_gossip_backlogged(); }, - wire::Message::ChannelUpdate(msg) => { + Message::ChannelUpdate(msg) => { let chan_handler = &self.message_handler.chan_handler; chan_handler.handle_channel_update(their_node_id, &msg); @@ -2594,31 +2611,31 @@ where } self.update_gossip_backlogged(); }, - wire::Message::QueryShortChannelIds(msg) => { + Message::QueryShortChannelIds(msg) => { let route_handler = &self.message_handler.route_handler; route_handler.handle_query_short_channel_ids(their_node_id, msg)?; }, - wire::Message::ReplyShortChannelIdsEnd(msg) => { + Message::ReplyShortChannelIdsEnd(msg) => { let route_handler = &self.message_handler.route_handler; route_handler.handle_reply_short_channel_ids_end(their_node_id, msg)?; }, - wire::Message::QueryChannelRange(msg) => { + Message::QueryChannelRange(msg) => { let route_handler = &self.message_handler.route_handler; route_handler.handle_query_channel_range(their_node_id, msg)?; }, - wire::Message::ReplyChannelRange(msg) => { + Message::ReplyChannelRange(msg) => { let route_handler = &self.message_handler.route_handler; route_handler.handle_reply_channel_range(their_node_id, msg)?; }, // Onion message: - wire::Message::OnionMessage(msg) => { + Message::OnionMessage(msg) => { let onion_message_handler = &self.message_handler.onion_message_handler; onion_message_handler.handle_onion_message(their_node_id, &msg); }, // Unknown messages: - wire::Message::Unknown(type_id) if message.is_even() => { + Message::Unknown(type_id) if message.is_even() => { log_debug!( logger, "Received unknown even message of type {}, disconnecting peer!", @@ -2626,10 +2643,10 @@ where ); return Err(PeerHandleError {}.into()); }, - wire::Message::Unknown(type_id) => { + Message::Unknown(type_id) => { log_trace!(logger, "Received unknown odd message of type {}, ignoring", type_id); }, - wire::Message::Custom(custom) => { + Message::Custom(custom) => { let custom_message_handler = &self.message_handler.custom_message_handler; custom_message_handler.handle_custom_message(custom, their_node_id)?; }, @@ -2858,68 +2875,77 @@ where // robustly gossip broadcast events even if a peer's message buffer is full. 
let mut handle_event = |event, from_chan_handler| { match event { - MessageSendEvent::SendPeerStorage { ref node_id, ref msg } => { + MessageSendEvent::SendPeerStorage { ref node_id, msg } => { log_debug!( WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendPeerStorage event in peer_handler for {}", node_id, ); + let msg = Message::PeerStorage(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendPeerStorageRetrieval { ref node_id, ref msg } => { + MessageSendEvent::SendPeerStorageRetrieval { ref node_id, msg } => { log_debug!( WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendPeerStorageRetrieval event in peer_handler for {}", node_id, ); + let msg = Message::PeerStorageRetrieval(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => { + MessageSendEvent::SendAcceptChannel { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendAcceptChannel event in peer_handler for node {} for channel {}", node_id, &msg.common_fields.temporary_channel_id); + let msg = Message::AcceptChannel(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => { + MessageSendEvent::SendAcceptChannelV2 { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}", node_id, &msg.common_fields.temporary_channel_id); + let msg = Message::AcceptChannelV2(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => { + MessageSendEvent::SendOpenChannel { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendOpenChannel event in peer_handler for node {} for channel {}", node_id, &msg.common_fields.temporary_channel_id); + let msg = Message::OpenChannel(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => { + MessageSendEvent::SendOpenChannelV2 { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}", node_id, &msg.common_fields.temporary_channel_id); + let msg = Message::OpenChannelV2(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => { + MessageSendEvent::SendFundingCreated { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id), None), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})", node_id, &msg.temporary_channel_id, ChannelId::v1_from_funding_txid(msg.funding_txid.as_byte_array(), msg.funding_output_index)); // TODO: If the peer is gone we should generate a DiscardFunding event // indicating to the wallet that they should just throw away this funding transaction + let msg = Message::FundingCreated(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - 
MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => { + MessageSendEvent::SendFundingSigned { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendFundingSigned event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::FundingSigned(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendChannelReady { ref node_id, ref msg } => { + MessageSendEvent::SendChannelReady { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendChannelReady event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ChannelReady(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendStfu { ref node_id, ref msg } => { + MessageSendEvent::SendStfu { ref node_id, msg } => { let logger = WithContext::from( &self.logger, Some(*node_id), @@ -2929,9 +2955,10 @@ where log_debug!(logger, "Handling SendStfu event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::Stfu(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { + MessageSendEvent::SendSpliceInit { ref node_id, msg } => { let logger = WithContext::from( &self.logger, Some(*node_id), @@ -2941,9 +2968,10 @@ where log_debug!(logger, "Handling SendSpliceInit event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::SpliceInit(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { + MessageSendEvent::SendSpliceAck { ref node_id, msg } => { let logger = WithContext::from( &self.logger, Some(*node_id), @@ -2953,9 +2981,10 @@ where log_debug!(logger, "Handling SendSpliceAck event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::SpliceAck(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { + MessageSendEvent::SendSpliceLocked { ref node_id, msg } => { let logger = WithContext::from( &self.logger, Some(*node_id), @@ -2965,66 +2994,77 @@ where log_debug!(logger, "Handling SendSpliceLocked event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::SpliceLocked(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { + MessageSendEvent::SendTxAddInput { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAddInput event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxAddInput(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { + MessageSendEvent::SendTxAddOutput { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAddOutput event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxAddOutput(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxRemoveInput { ref 
node_id, ref msg } => { + MessageSendEvent::SendTxRemoveInput { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxRemoveInput(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => { + MessageSendEvent::SendTxRemoveOutput { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxRemoveOutput(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { + MessageSendEvent::SendTxComplete { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxComplete event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxComplete(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { + MessageSendEvent::SendTxSignatures { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxSignatures event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxSignatures(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => { + MessageSendEvent::SendTxInitRbf { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxInitRbf event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxInitRbf(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => { + MessageSendEvent::SendTxAckRbf { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAckRbf event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxAckRbf(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { + MessageSendEvent::SendTxAbort { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAbort event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxAbort(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => { + MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})", node_id, &msg.channel_id); + let msg = Message::AnnouncementSignatures(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::UpdateHTLCs { @@ -3032,12 +3072,12 @@ where ref channel_id, updates: msgs::CommitmentUpdate { - ref 
update_add_htlcs, - ref update_fulfill_htlcs, - ref update_fail_htlcs, - ref update_fail_malformed_htlcs, - ref update_fee, - ref commitment_signed, + update_add_htlcs, + update_fulfill_htlcs, + update_fail_htlcs, + update_fail_malformed_htlcs, + update_fee, + commitment_signed, }, } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(*channel_id), None), "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails, {} commits for channel {}", @@ -3049,18 +3089,23 @@ where channel_id); let mut peer = get_peer_for_forwarding!(node_id)?; for msg in update_fulfill_htlcs { + let msg = Message::UpdateFulfillHTLC(msg); self.enqueue_message(&mut *peer, msg); } for msg in update_fail_htlcs { + let msg = Message::UpdateFailHTLC(msg); self.enqueue_message(&mut *peer, msg); } for msg in update_fail_malformed_htlcs { + let msg = Message::UpdateFailMalformedHTLC(msg); self.enqueue_message(&mut *peer, msg); } for msg in update_add_htlcs { + let msg = Message::UpdateAddHTLC(msg); self.enqueue_message(&mut *peer, msg); } - if let &Some(ref msg) = update_fee { + if let Some(msg) = update_fee { + let msg = Message::UpdateFee(msg); self.enqueue_message(&mut *peer, msg); } if commitment_signed.len() > 1 { @@ -3069,37 +3114,45 @@ where batch_size: commitment_signed.len() as u16, message_type: Some(msgs::CommitmentSigned::TYPE), }; - self.enqueue_message(&mut *peer, &msg); + let msg = Message::StartBatch(msg); + self.enqueue_message(&mut *peer, msg); } for msg in commitment_signed { + let msg = Message::CommitmentSigned(msg); self.enqueue_message(&mut *peer, msg); } }, - MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + MessageSendEvent::SendRevokeAndACK { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::RevokeAndACK(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { + MessageSendEvent::SendClosingSigned { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendClosingSigned event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ClosingSigned(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendClosingComplete { ref node_id, ref msg } => { + #[cfg(simple_close)] + MessageSendEvent::SendClosingComplete { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendClosingComplete event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ClosingComplete(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendClosingSig { ref node_id, ref msg } => { + #[cfg(simple_close)] + MessageSendEvent::SendClosingSig { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendClosingSig event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ClosingSig(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendShutdown { ref node_id, ref msg } => { + MessageSendEvent::SendShutdown { ref node_id, msg } => { log_debug!( 
WithContext::from( &self.logger, @@ -3109,23 +3162,27 @@ where ), "Handling Shutdown event in peer_handler", ); + let msg = Message::Shutdown(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { + MessageSendEvent::SendChannelReestablish { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendChannelReestablish event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ChannelReestablish(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::SendChannelAnnouncement { ref node_id, - ref msg, - ref update_msg, + msg, + update_msg, } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}", node_id, msg.contents.short_channel_id); + let msg = Message::ChannelAnnouncement(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); + let update_msg = Message::ChannelUpdate(update_msg); self.enqueue_message( &mut *get_peer_for_forwarding!(node_id)?, update_msg, @@ -3216,12 +3273,13 @@ where _ => {}, } }, - MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { + MessageSendEvent::SendChannelUpdate { ref node_id, msg } => { log_trace!( WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelUpdate event in peer_handler for channel {}", msg.contents.short_channel_id ); + let msg = Message::ChannelUpdate(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::HandleError { node_id, action } => { @@ -3239,7 +3297,7 @@ where // about to disconnect the peer and do it after we finish // processing most messages. let msg = msg.map(|msg| { - wire::Message::<<::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg) + Message::<<::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg) }); peers_to_disconnect.insert(node_id, msg); }, @@ -3250,7 +3308,7 @@ where // about to disconnect the peer and do it after we finish // processing most messages. 
peers_to_disconnect - .insert(node_id, Some(wire::Message::Warning(msg))); + .insert(node_id, Some(Message::Warning(msg))); }, msgs::ErrorAction::IgnoreAndLog(level) => { log_given_level!( @@ -3266,22 +3324,21 @@ where "Received a HandleError event to be ignored", ); }, - msgs::ErrorAction::SendErrorMessage { ref msg } => { + msgs::ErrorAction::SendErrorMessage { msg } => { log_trace!(logger, "Handling SendErrorMessage HandleError event in peer_handler with message {}", msg.data); + let msg = Message::Error(msg); self.enqueue_message( &mut *get_peer_for_forwarding!(&node_id)?, msg, ); }, - msgs::ErrorAction::SendWarningMessage { - ref msg, - ref log_level, - } => { + msgs::ErrorAction::SendWarningMessage { msg, ref log_level } => { log_given_level!(logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler with message {}", msg.data); + let msg = Message::Warning(msg); self.enqueue_message( &mut *get_peer_for_forwarding!(&node_id)?, msg, @@ -3289,33 +3346,37 @@ where }, } }, - MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => { + MessageSendEvent::SendChannelRangeQuery { ref node_id, msg } => { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelRangeQuery event in peer_handler with first_blocknum={}, number_of_blocks={}", msg.first_blocknum, msg.number_of_blocks); + let msg = Message::QueryChannelRange(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => { + MessageSendEvent::SendShortIdsQuery { ref node_id, msg } => { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendShortIdsQuery event in peer_handler with num_scids={}", msg.short_channel_ids.len()); + let msg = Message::QueryShortChannelIds(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => { + MessageSendEvent::SendReplyChannelRange { ref node_id, msg } => { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendReplyChannelRange event in peer_handler with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}", msg.short_channel_ids.len(), msg.first_blocknum, msg.number_of_blocks, msg.sync_complete); + let msg = Message::ReplyChannelRange(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => { + MessageSendEvent::SendGossipTimestampFilter { ref node_id, msg } => { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendGossipTimestampFilter event in peer_handler with first_timestamp={}, timestamp_range={}", msg.first_timestamp, msg.timestamp_range); + let msg = Message::GossipTimestampFilter(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, } @@ -3351,7 +3412,8 @@ where } else { continue; }; - self.enqueue_message(&mut peer, &msg); + let msg = Message::Custom(msg); + self.enqueue_message(&mut peer, msg); } for (descriptor, peer_mutex) in peers.iter() { @@ -3381,7 +3443,7 @@ where if let Some(peer_mutex) = peers.remove(&descriptor) { let mut peer = peer_mutex.lock().unwrap(); if let Some(msg) = msg { - self.enqueue_message(&mut *peer, &msg); + self.enqueue_message(&mut *peer, msg); // This isn't guaranteed to work, but if there is enough free // room in the send buffer, put the error message there... 
self.do_attempt_write_data(&mut descriptor, &mut *peer, false);
@@ -3506,7 +3568,9 @@ where
 			if peer.awaiting_pong_timer_tick_intervals == 0 {
 				peer.awaiting_pong_timer_tick_intervals = -1;
 				let ping = msgs::Ping { ponglen: 0, byteslen: 64 };
-				self.enqueue_message(peer, &ping);
+				let msg: Message<::CustomMessage> =
+					Message::Ping(ping);
+				self.enqueue_message(peer, msg);
 			}
 		}
 
@@ -3577,7 +3641,8 @@ where
 				peer.awaiting_pong_timer_tick_intervals = 1;
 				let ping = msgs::Ping { ponglen: 0, byteslen: 64 };
-				self.enqueue_message(&mut *peer, &ping);
+				let msg = Message::Ping(ping);
+				self.enqueue_message(&mut *peer, msg);
 				break;
 			}
 			self.do_attempt_write_data(
@@ -4226,7 +4291,7 @@ mod tests {
 			.push(MessageSendEvent::SendShutdown { node_id: their_id, msg: msg.clone() });
 		peers[0].message_handler.chan_handler = &a_chan_handler;
 
-		b_chan_handler.expect_receive_msg(wire::Message::Shutdown(msg));
+		b_chan_handler.expect_receive_msg(Message::Shutdown(msg));
 		peers[1].message_handler.chan_handler = &b_chan_handler;
 
 		peers[0].process_events();
@@ -4261,7 +4326,8 @@ mod tests {
 		peers[0].read_event(&mut fd_dup, &act_three).unwrap();
 
 		let not_init_msg = msgs::Ping { ponglen: 4, byteslen: 0 };
-		let msg_bytes = dup_encryptor.encrypt_message(&not_init_msg);
+		let msg: Message<()> = Message::Ping(not_init_msg);
+		let msg_bytes = dup_encryptor.encrypt_message(msg);
 		assert!(peers[0].read_event(&mut fd_dup, &msg_bytes).is_err());
 	}
 
@@ -4639,13 +4705,12 @@ mod tests {
 		{
 			let peers = peer_a.peers.read().unwrap();
 			let mut peer_b = peers.get(&fd_a).unwrap().lock().unwrap();
-			peer_a.enqueue_message(
-				&mut peer_b,
-				&msgs::WarningMessage {
-					channel_id: ChannelId([0; 32]),
-					data: "no disconnect plz".to_string(),
-				},
-			);
+			let warning = msgs::WarningMessage {
+				channel_id: ChannelId([0; 32]),
+				data: "no disconnect plz".to_string(),
+			};
+			let msg = Message::Warning(warning);
+			peer_a.enqueue_message(&mut peer_b, msg);
 		}
 		peer_a.process_events();
 		let msg = fd_a.outbound_data.lock().unwrap().split_off(0);

From dfa12d8807fd109be504d249853e6b2a3dad7fda Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Mon, 1 Dec 2025 14:08:17 +0100
Subject: [PATCH 013/242] Drop `wire::write` and replace `encode_msg!` macro

Now that we consistently use `wire::Message` everywhere, it's easier to
simply use `Message::write`/`Type::write` instead of having yet another
`wire::write` around.

Here we drop `wire::write`, replace the `encode_msg!` macro with a
function that takes `wire::Message`, and convert a bunch of additional
places to move semantics.
---
 lightning/src/ln/peer_channel_encryptor.rs |  6 +-
 lightning/src/ln/peer_handler.rs           | 64 ++++++++++++----------
 lightning/src/ln/wire.rs                   | 41 --------------
 3 files changed, 39 insertions(+), 72 deletions(-)

diff --git a/lightning/src/ln/peer_channel_encryptor.rs b/lightning/src/ln/peer_channel_encryptor.rs
index 1d34d9a8674..894de045b14 100644
--- a/lightning/src/ln/peer_channel_encryptor.rs
+++ b/lightning/src/ln/peer_channel_encryptor.rs
@@ -12,7 +12,9 @@ use crate::prelude::*;
 use crate::ln::msgs;
 use crate::ln::msgs::LightningError;
 use crate::ln::wire;
+use crate::ln::wire::Type;
 use crate::sign::{NodeSigner, Recipient};
+use crate::util::ser::Writeable;
 
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::{Hash, HashEngine};
@@ -570,7 +572,9 @@ impl PeerChannelEncryptor {
 		// for the 2-byte message type prefix and its MAC.
let mut res = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); res.0.resize(16 + 2, 0); - wire::write(&message, &mut res).expect("In-memory messages must never fail to serialize"); + + message.type_id().write(&mut res).expect("In-memory messages must never fail to serialize"); + message.write(&mut res).expect("In-memory messages must never fail to serialize"); self.encrypt_message_with_header_0s(&mut res.0); res.0 diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 8a6c6a786b1..4d1dff9cd52 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -1144,12 +1144,11 @@ impl From for MessageHandlingError { } } -macro_rules! encode_msg { - ($msg: expr) => {{ - let mut buffer = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); - wire::write($msg, &mut buffer).unwrap(); - buffer.0 - }}; +fn encode_message(message: wire::Message) -> Vec { + let mut buffer = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); + message.type_id().write(&mut buffer).expect("In-memory messages must never fail to serialize"); + message.write(&mut buffer).expect("In-memory messages must never fail to serialize"); + buffer.0 } impl @@ -2068,7 +2067,7 @@ where for msg in msgs_to_forward.drain(..) { self.forward_broadcast_msg( &*peers, - &msg, + msg, peer_node_id.as_ref().map(|(pk, _)| pk), false, ); @@ -2661,22 +2660,25 @@ where /// unless `allow_large_buffer` is set, in which case the message will be treated as critical /// and delivered no matter the available buffer space. fn forward_broadcast_msg( - &self, peers: &HashMap>, msg: &BroadcastGossipMessage, + &self, peers: &HashMap>, msg: BroadcastGossipMessage, except_node: Option<&PublicKey>, allow_large_buffer: bool, ) { match msg { - BroadcastGossipMessage::ChannelAnnouncement(ref msg) => { + BroadcastGossipMessage::ChannelAnnouncement(msg) => { log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg); - let encoded_msg = encode_msg!(msg); let our_channel = self.our_node_id == msg.contents.node_id_1 || self.our_node_id == msg.contents.node_id_2; - + let scid = msg.contents.short_channel_id; + let node_id_1 = msg.contents.node_id_1; + let node_id_2 = msg.contents.node_id_2; + let msg: Message<::CustomMessage> = + Message::ChannelAnnouncement(msg); + let encoded_msg = encode_message(msg); for (_, peer_mutex) in peers.iter() { let mut peer = peer_mutex.lock().unwrap(); if !peer.handshake_complete() { continue; } - let scid = msg.contents.short_channel_id; if !our_channel && !peer.should_forward_channel_announcement(scid) { continue; } @@ -2693,9 +2695,7 @@ where continue; } if let Some((_, their_node_id)) = peer.their_node_id { - if their_node_id == msg.contents.node_id_1 - || their_node_id == msg.contents.node_id_2 - { + if their_node_id == node_id_1 || their_node_id == node_id_2 { continue; } } @@ -2708,23 +2708,25 @@ where peer.gossip_broadcast_buffer.push_back(encoded_message); } }, - BroadcastGossipMessage::NodeAnnouncement(ref msg) => { + BroadcastGossipMessage::NodeAnnouncement(msg) => { log_gossip!( self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg ); - let encoded_msg = encode_msg!(msg); let our_announcement = self.our_node_id == msg.contents.node_id; + let msg_node_id = msg.contents.node_id; + let msg: Message<::CustomMessage> = + Message::NodeAnnouncement(msg); + let encoded_msg = encode_message(msg); for (_, peer_mutex) in peers.iter() { let mut peer = 
peer_mutex.lock().unwrap(); if !peer.handshake_complete() { continue; } - let node_id = msg.contents.node_id; - if !our_announcement && !peer.should_forward_node_announcement(node_id) { + if !our_announcement && !peer.should_forward_node_announcement(msg_node_id) { continue; } debug_assert!(peer.their_node_id.is_some()); @@ -2740,7 +2742,7 @@ where continue; } if let Some((_, their_node_id)) = peer.their_node_id { - if their_node_id == msg.contents.node_id { + if their_node_id == msg_node_id { continue; } } @@ -2760,15 +2762,16 @@ where except_node, msg ); - let encoded_msg = encode_msg!(msg); - let our_channel = self.our_node_id == *node_id_1 || self.our_node_id == *node_id_2; - + let our_channel = self.our_node_id == node_id_1 || self.our_node_id == node_id_2; + let scid = msg.contents.short_channel_id; + let msg: Message<::CustomMessage> = + Message::ChannelUpdate(msg); + let encoded_msg = encode_message(msg); for (_, peer_mutex) in peers.iter() { let mut peer = peer_mutex.lock().unwrap(); if !peer.handshake_complete() { continue; } - let scid = msg.contents.short_channel_id; if !our_channel && !peer.should_forward_channel_announcement(scid) { continue; } @@ -3201,7 +3204,7 @@ where let forward = BroadcastGossipMessage::ChannelAnnouncement(msg); self.forward_broadcast_msg( peers, - &forward, + forward, None, from_chan_handler, ); @@ -3222,7 +3225,7 @@ where }; self.forward_broadcast_msg( peers, - &forward, + forward, None, from_chan_handler, ); @@ -3246,7 +3249,7 @@ where }; self.forward_broadcast_msg( peers, - &forward, + forward, None, from_chan_handler, ); @@ -3265,7 +3268,7 @@ where let forward = BroadcastGossipMessage::NodeAnnouncement(msg); self.forward_broadcast_msg( peers, - &forward, + forward, None, from_chan_handler, ); @@ -3742,7 +3745,7 @@ where let _ = self.message_handler.route_handler.handle_node_announcement(None, &msg); self.forward_broadcast_msg( &*self.peers.read().unwrap(), - &BroadcastGossipMessage::NodeAnnouncement(msg), + BroadcastGossipMessage::NodeAnnouncement(msg), None, true, ); @@ -4557,7 +4560,8 @@ mod tests { assert_eq!(peer.gossip_broadcast_buffer.len(), 1); let pending_msg = &peer.gossip_broadcast_buffer[0]; - let expected = encode_msg!(&msg_100); + let msg: Message<()> = Message::ChannelUpdate(msg_100); + let expected = encode_message(msg); assert_eq!(expected, pending_msg.fetch_encoded_msg_with_type_pfx()); } } diff --git a/lightning/src/ln/wire.rs b/lightning/src/ln/wire.rs index bc1d83adb68..9065c49c676 100644 --- a/lightning/src/ln/wire.rs +++ b/lightning/src/ln/wire.rs @@ -425,19 +425,6 @@ where } } -/// Writes a message to the data buffer encoded as a 2-byte big-endian type and a variable-length -/// payload. -/// -/// # Errors -/// -/// Returns an I/O error if the write could not be completed. -pub(crate) fn write( - message: &M, buffer: &mut W, -) -> Result<(), io::Error> { - message.type_id().write(buffer)?; - message.write(buffer) -} - mod encode { /// Defines a constant type identifier for reading messages from the wire. 
pub trait Encode {
@@ -737,34 +724,6 @@ mod tests {
 		}
 	}
 
-	#[test]
-	fn write_message_with_type() {
-		let message = msgs::Pong { byteslen: 2u16 };
-		let mut buffer = Vec::new();
-		assert!(write(&message, &mut buffer).is_ok());
-
-		let type_length = ::core::mem::size_of::<u16>();
-		let (type_bytes, payload_bytes) = buffer.split_at(type_length);
-		assert_eq!(u16::from_be_bytes(type_bytes.try_into().unwrap()), msgs::Pong::TYPE);
-		assert_eq!(payload_bytes, &ENCODED_PONG[type_length..]);
-	}
-
-	#[test]
-	fn read_message_encoded_with_write() {
-		let message = msgs::Pong { byteslen: 2u16 };
-		let mut buffer = Vec::new();
-		assert!(write(&message, &mut buffer).is_ok());
-
-		let decoded_message = read(&mut &buffer[..], &IgnoringMessageHandler {}).unwrap();
-		match decoded_message {
-			Message::Pong(msgs::Pong { byteslen: 2u16 }) => (),
-			Message::Pong(msgs::Pong { byteslen }) => {
-				panic!("Expected byteslen {}; found: {}", message.byteslen, byteslen);
-			},
-			_ => panic!("Expected pong message; found message type: {}", decoded_message.type_id()),
-		}
-	}
-
 	#[test]
 	fn is_even_message_type() {
 		let message = Message::<()>::Unknown(42);

From bd578235fbe5ed8ec18eb0ccd5e2e8fe10732ce4 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 9 Dec 2025 01:15:56 +0000
Subject: [PATCH 014/242] Make `AttributionData` actually pub since it's used
 in the public API

`AttributionData` is a part of the public `UpdateFulfillHTLC` and
`UpdateFailHTLC` messages, but it's not actually `pub`. Yet again
re-exports bite us and leave us with a broken public API - we ended up
accidentally sealing `AttributionData`.

Instead, here, we just make `onion_utils` `pub` so that we avoid making
the same mistake in the future.

Note that this still leaves us with a rather useless public
`AttributionData` API - it can't be created, updated, or decoded; it can
only be serialized and deserialized, but at least it exists.
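As an aside for downstream users, the "sealing" described above is easy to
reproduce: a `pub` type inside a crate-private module may appear in public
signatures, but no external crate can name it. A minimal sketch of the
pattern (hypothetical, simplified layout, not LDK's actual code):

	// The module is private, so external crates have no path by which to
	// name `onion_utils::AttributionData`, even though the type is `pub`.
	mod onion_utils {
		pub struct AttributionData(pub(crate) [u8; 4]);
	}

	// This compiles: the field's type is `pub`; it is merely unnameable
	// from outside the crate until the module is made `pub` (as this
	// patch does) or the type is re-exported.
	pub struct UpdateFailHTLC {
		pub attribution_data: Option<onion_utils::AttributionData>,
	}

With `onion_utils` public, the type is nameable as
`lightning::ln::onion_utils::AttributionData`, though as noted it can only
round-trip through `Readable`/`Writeable`, which the updated fuzz target
below demonstrates.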
--- fuzz/src/process_onion_failure.rs | 13 +-- lightning/src/events/mod.rs | 3 +- lightning/src/ln/mod.rs | 11 +-- lightning/src/ln/msgs.rs | 9 +- lightning/src/ln/onion_utils.rs | 157 ++++++++++++++++-------------- 5 files changed, 97 insertions(+), 96 deletions(-) diff --git a/fuzz/src/process_onion_failure.rs b/fuzz/src/process_onion_failure.rs index 1bc9900718a..ac70562c006 100644 --- a/fuzz/src/process_onion_failure.rs +++ b/fuzz/src/process_onion_failure.rs @@ -9,10 +9,12 @@ use lightning::{ ln::{ channelmanager::{HTLCSource, PaymentId}, msgs::OnionErrorPacket, + onion_utils, }, routing::router::{BlindedTail, Path, RouteHop, TrampolineHop}, types::features::{ChannelFeatures, NodeFeatures}, util::logger::Logger, + util::ser::Readable, }; // Imports that need to be added manually @@ -126,19 +128,18 @@ fn do_test(data: &[u8], out: Out) { let failure_data = get_slice!(failure_len); let attribution_data = if get_bool!() { - Some(lightning::ln::AttributionData { - hold_times: get_slice!(80).try_into().unwrap(), - hmacs: get_slice!(840).try_into().unwrap(), - }) + let mut bytes = get_slice!(80 + 840); + let data: onion_utils::AttributionData = Readable::read(&mut bytes).unwrap(); + Some(data) } else { None }; let encrypted_packet = OnionErrorPacket { data: failure_data.into(), attribution_data: attribution_data.clone() }; - lightning::ln::process_onion_failure(&secp_ctx, &logger, &htlc_source, encrypted_packet); + onion_utils::process_onion_failure(&secp_ctx, &logger, &htlc_source, encrypted_packet); if let Some(attribution_data) = attribution_data { - lightning::ln::decode_fulfill_attribution_data( + onion_utils::decode_fulfill_attribution_data( &secp_ctx, &logger, &path, diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index b9c4b1ca1ef..d97ae6097b6 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -25,8 +25,9 @@ use crate::blinded_path::payment::{ use crate::chain::transaction; use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS; use crate::ln::channelmanager::{InterceptId, PaymentId, RecipientOnionFields}; +use crate::ln::msgs; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::ln::types::ChannelId; -use crate::ln::{msgs, LocalHTLCFailureReason}; use crate::offers::invoice::Bolt12Invoice; use crate::offers::invoice_request::InvoiceRequest; use crate::offers::static_invoice::StaticInvoice; diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index 9473142cfed..04aa8181b92 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -41,26 +41,17 @@ pub mod channel; #[cfg(not(fuzzing))] pub(crate) mod channel; -pub(crate) mod onion_utils; +pub mod onion_utils; mod outbound_payment; pub mod wire; #[allow(dead_code)] // TODO(dual_funding): Remove once contribution to V2 channels is enabled. pub(crate) mod interactivetxs; -pub use onion_utils::{create_payment_onion, LocalHTLCFailureReason}; // Older rustc (which we support) refuses to let us call the get_payment_preimage_hash!() macro // without the node parameter being mut. This is incorrect, and thus newer rustcs will complain // about an unnecessary mut. Thus, we silence the unused_mut warning in two test modules below. 
-#[cfg(fuzzing)] -pub use onion_utils::decode_fulfill_attribution_data; -#[cfg(fuzzing)] -pub use onion_utils::process_onion_failure; - -#[cfg(fuzzing)] -pub use onion_utils::AttributionData; - #[cfg(test)] #[allow(unused_mut)] mod async_payments_tests; diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 0484ebe7530..f237d73e533 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -4366,7 +4366,7 @@ mod tests { InboundOnionForwardPayload, InboundOnionReceivePayload, OutboundTrampolinePayload, TrampolineOnionPacket, }; - use crate::ln::onion_utils::{AttributionData, HMAC_COUNT, HMAC_LEN, HOLD_TIME_LEN, MAX_HOPS}; + use crate::ln::onion_utils::AttributionData; use crate::ln::types::ChannelId; use crate::routing::gossip::{NodeAlias, NodeId}; use crate::types::features::{ @@ -5899,13 +5899,10 @@ mod tests { channel_id: ChannelId::from_bytes([2; 32]), htlc_id: 2316138423780173, reason: [1; 32].to_vec(), - attribution_data: Some(AttributionData { - hold_times: [3; MAX_HOPS * HOLD_TIME_LEN], - hmacs: [3; HMAC_LEN * HMAC_COUNT], - }), + attribution_data: Some(AttributionData::new()), }; let encoded_value = update_fail_htlc.encode(); - let target_value = >::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d0020010101010101010101010101010101010101010101010101010101010101010101fd03980303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303").unwrap(); + let target_value = 
>::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d0020010101010101010101010101010101010101010101010101010101010101010101fd03980000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(); assert_eq!(encoded_value, target_value); } diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 18aa43e27c6..dbc2ebc9d48 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -7,6 +7,8 @@ // You may not use this file except in accordance with one or both of these // licenses. +//! Low-level onion manipulation logic and fields + use super::msgs::OnionErrorPacket; use crate::blinded_path::BlindedHop; use crate::crypto::chacha20::ChaCha20; @@ -979,27 +981,79 @@ mod fuzzy_onion_utils { #[cfg(test)] pub(crate) attribution_failed_channel: Option, } + + pub fn process_onion_failure( + secp_ctx: &Secp256k1, logger: &L, htlc_source: &HTLCSource, + encrypted_packet: OnionErrorPacket, + ) -> DecodedOnionFailure + where + L::Target: Logger, + { + let (path, session_priv) = match htlc_source { + HTLCSource::OutboundRoute { ref path, ref session_priv, .. } => (path, session_priv), + _ => unreachable!(), + }; + + process_onion_failure_inner(secp_ctx, logger, path, &session_priv, None, encrypted_packet) + } + + /// Decodes the attribution data that we got back from upstream on a payment we sent. + pub fn decode_fulfill_attribution_data( + secp_ctx: &Secp256k1, logger: &L, path: &Path, outer_session_priv: &SecretKey, + mut attribution_data: AttributionData, + ) -> Vec + where + L::Target: Logger, + { + let mut hold_times = Vec::new(); + + // Only consider hops in the regular path for attribution data. Blinded path attribution data isn't accessible. 
+ let shared_secrets = + construct_onion_keys_generic(secp_ctx, &path.hops, None, outer_session_priv) + .map(|(shared_secret, _, _, _, _)| shared_secret); + + // Path length can reach 27 hops, but attribution data can only be conveyed back to the sender from the first 20 + // hops. Determine the number of hops to be used for attribution data. + let attributable_hop_count = usize::min(path.hops.len(), MAX_HOPS); + + for (route_hop_idx, shared_secret) in + shared_secrets.enumerate().take(attributable_hop_count) + { + attribution_data.crypt(shared_secret.as_ref()); + + // Calculate position relative to the last attributable hop. The last attributable hop is at position 0. We need + // to look at the chain of HMACs that does include all data up to the last attributable hop. Hold times beyond + // the last attributable hop will not be available. + let position = attributable_hop_count - route_hop_idx - 1; + let res = attribution_data.verify(&Vec::new(), shared_secret.as_ref(), position); + match res { + Ok(hold_time) => { + hold_times.push(hold_time); + + // Shift attribution data to prepare for processing the next hop. + attribution_data.shift_left(); + }, + Err(()) => { + // We will hit this if there is a node on the path that does not support fulfill attribution data. + log_debug!( + logger, + "Invalid fulfill HMAC in attribution data for node at pos {}", + route_hop_idx + ); + + break; + }, + } + } + + hold_times + } } #[cfg(fuzzing)] pub use self::fuzzy_onion_utils::*; #[cfg(not(fuzzing))] pub(crate) use self::fuzzy_onion_utils::*; -pub fn process_onion_failure( - secp_ctx: &Secp256k1, logger: &L, htlc_source: &HTLCSource, - encrypted_packet: OnionErrorPacket, -) -> DecodedOnionFailure -where - L::Target: Logger, -{ - let (path, session_priv) = match htlc_source { - HTLCSource::OutboundRoute { ref path, ref session_priv, .. } => (path, session_priv), - _ => unreachable!(), - }; - - process_onion_failure_inner(secp_ctx, logger, path, &session_priv, None, encrypted_packet) -} - /// Process failure we got back from upstream on a payment we sent (implying htlc_source is an /// OutboundRoute). fn process_onion_failure_inner( @@ -1449,56 +1503,6 @@ where } } -/// Decodes the attribution data that we got back from upstream on a payment we sent. -pub fn decode_fulfill_attribution_data( - secp_ctx: &Secp256k1, logger: &L, path: &Path, outer_session_priv: &SecretKey, - mut attribution_data: AttributionData, -) -> Vec -where - L::Target: Logger, -{ - let mut hold_times = Vec::new(); - - // Only consider hops in the regular path for attribution data. Blinded path attribution data isn't accessible. - let shared_secrets = - construct_onion_keys_generic(secp_ctx, &path.hops, None, outer_session_priv) - .map(|(shared_secret, _, _, _, _)| shared_secret); - - // Path length can reach 27 hops, but attribution data can only be conveyed back to the sender from the first 20 - // hops. Determine the number of hops to be used for attribution data. - let attributable_hop_count = usize::min(path.hops.len(), MAX_HOPS); - - for (route_hop_idx, shared_secret) in shared_secrets.enumerate().take(attributable_hop_count) { - attribution_data.crypt(shared_secret.as_ref()); - - // Calculate position relative to the last attributable hop. The last attributable hop is at position 0. We need - // to look at the chain of HMACs that does include all data up to the last attributable hop. Hold times beyond - // the last attributable hop will not be available. 
- let position = attributable_hop_count - route_hop_idx - 1; - let res = attribution_data.verify(&Vec::new(), shared_secret.as_ref(), position); - match res { - Ok(hold_time) => { - hold_times.push(hold_time); - - // Shift attribution data to prepare for processing the next hop. - attribution_data.shift_left(); - }, - Err(()) => { - // We will hit this if there is a node on the path that does not support fulfill attribution data. - log_debug!( - logger, - "Invalid fulfill HMAC in attribution data for node at pos {}", - route_hop_idx - ); - - break; - }, - } - } - - hold_times -} - const BADONION: u16 = 0x8000; const PERM: u16 = 0x4000; const NODE: u16 = 0x2000; @@ -2522,6 +2526,7 @@ where } /// Build a payment onion, returning the first hop msat and cltv values as well. +/// /// `cur_block_height` should be set to the best known block height + 1. pub fn create_payment_onion( secp_ctx: &Secp256k1, path: &Path, session_priv: &SecretKey, total_msat: u64, @@ -2711,22 +2716,28 @@ fn decode_next_hop, N: NextPacketBytes>( } } -pub const HOLD_TIME_LEN: usize = 4; -pub const MAX_HOPS: usize = 20; -pub const HMAC_LEN: usize = 4; +pub(crate) const HOLD_TIME_LEN: usize = 4; +pub(crate) const MAX_HOPS: usize = 20; +pub(crate) const HMAC_LEN: usize = 4; // Define the number of HMACs in the attributable data block. For the first node, there are 20 HMACs, and then for every // subsequent node, the number of HMACs decreases by 1. 20 + 19 + 18 + ... + 1 = 20 * 21 / 2 = 210. -pub const HMAC_COUNT: usize = MAX_HOPS * (MAX_HOPS + 1) / 2; +pub(crate) const HMAC_COUNT: usize = MAX_HOPS * (MAX_HOPS + 1) / 2; #[derive(Clone, Debug, Hash, PartialEq, Eq)] +/// Attribution data allows the sender of an HTLC to identify which hop failed an HTLC robustly, +/// preventing earlier hops from corrupting the HTLC failure information (or at least allowing the +/// sender to identify the earliest hop which corrupted HTLC failure information). +/// +/// Additionally, it allows a sender to identify how long each hop along a path held an HTLC, with +/// 100ms granularity. pub struct AttributionData { - pub hold_times: [u8; MAX_HOPS * HOLD_TIME_LEN], - pub hmacs: [u8; HMAC_LEN * HMAC_COUNT], + hold_times: [u8; MAX_HOPS * HOLD_TIME_LEN], + hmacs: [u8; HMAC_LEN * HMAC_COUNT], } impl AttributionData { - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self { hold_times: [0; MAX_HOPS * HOLD_TIME_LEN], hmacs: [0; HMAC_LEN * HMAC_COUNT] } } } @@ -2775,7 +2786,7 @@ impl AttributionData { /// Writes the HMACs corresponding to the given position that have been added already by downstream hops. Position is /// relative to the final node. The final node is at position 0. - pub fn write_downstream_hmacs(&self, position: usize, w: &mut HmacEngine) { + pub(crate) fn write_downstream_hmacs(&self, position: usize, w: &mut HmacEngine) { // Set the index to the first downstream HMAC that we need to include. Note that we skip the first MAX_HOPS HMACs // because this is space reserved for the HMACs that we are producing for the current node. let mut hmac_idx = MAX_HOPS + MAX_HOPS - position - 1; From 926523d878305bd7700c8277a5b003bde2481621 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 9 Dec 2025 13:02:48 +0100 Subject: [PATCH 015/242] Bump `lightning-block-sync` version number to fix SemVer CI PR #4175 made the first breaking API change in `lightning-block-sync` since v0.2 without bumping the version number. Here we bump the version number, allowing our SemVer CI check to be happy again. 
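
For reference, Cargo applies SemVer to pre-1.0 crates by treating the minor
version as the breaking-change indicator, so a downstream requirement pinned
to the 0.2 series will never resolve to the 0.3.0 released here. A minimal
sketch of a downstream `Cargo.toml` entry (the dependency declaration below
is illustrative, not taken from any real downstream project):

    [dependencies]
    # "0.2" is shorthand for ^0.2, which matches >=0.2.0 and <0.3.0, so
    # this requirement keeps resolving to the 0.2.x series; opting into
    # the breaking API change requires an explicit bump to "0.3".
    lightning-block-sync = "0.2"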
--- lightning-block-sync/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning-block-sync/Cargo.toml b/lightning-block-sync/Cargo.toml index 51b19e3901e..97f199963ac 100644 --- a/lightning-block-sync/Cargo.toml +++ b/lightning-block-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-block-sync" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Jeffrey Czyz", "Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" From 9b2d57cafd2e46a59a9c276e0b41d3c33f1871f2 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 9 Dec 2025 13:31:49 +0100 Subject: [PATCH 016/242] Clean up handle_monitor_update_completion_actions after rustfmt --- lightning/src/ln/channelmanager.rs | 67 +++++++++++------------------- 1 file changed, 25 insertions(+), 42 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ea6409d0e1e..cd0adaaef28 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9465,58 +9465,41 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ WithContext::from(&self.logger, peer_id, chan_id, Some(payment_hash)); log_trace!(logger, "Handling PaymentClaimed monitor update completion action"); - if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim { + if let Some((cp_node_id, chan_id, claim_ptr)) = pending_mpp_claim { let per_peer_state = self.per_peer_state.read().unwrap(); - per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| { + per_peer_state.get(&cp_node_id).map(|peer_state_mutex| { let mut peer_state = peer_state_mutex.lock().unwrap(); let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id); if let btree_map::Entry::Occupied(mut blockers) = blockers_entry { blockers.get_mut().retain(|blocker| { - if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { - pending_claim, - } = &blocker - { - if *pending_claim == claim_ptr { - let mut pending_claim_state_lock = - pending_claim.0.lock().unwrap(); - let pending_claim_state = - &mut *pending_claim_state_lock; - pending_claim_state.channels_without_preimage.retain( - |(cp, cid)| { - let this_claim = *cp == counterparty_node_id - && *cid == chan_id; - if this_claim { - pending_claim_state - .channels_with_preimage - .push((*cp, *cid)); - false - } else { - true - } - }, - ); - if pending_claim_state - .channels_without_preimage - .is_empty() - { - for (cp, cid) in pending_claim_state - .channels_with_preimage - .iter() - { - let freed_chan = (*cp, *cid, blocker.clone()); - freed_channels.push(freed_chan); - } - } - !pending_claim_state - .channels_without_preimage - .is_empty() + let pending_claim = match &blocker { + RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { + pending_claim, + } => pending_claim, + _ => return true, + }; + if *pending_claim != claim_ptr { + return true; + } + let mut claim_state_lock = pending_claim.0.lock().unwrap(); + let claim_state = &mut *claim_state_lock; + claim_state.channels_without_preimage.retain(|(cp, cid)| { + let this_claim = *cp == cp_node_id && *cid == chan_id; + if this_claim { + claim_state.channels_with_preimage.push((*cp, *cid)); + false } else { true } - } else { - true + }); + if claim_state.channels_without_preimage.is_empty() { + for (cp, cid) in claim_state.channels_with_preimage.iter() { + let freed_chan = (*cp, *cid, blocker.clone()); + freed_channels.push(freed_chan); + } } + 
!claim_state.channels_without_preimage.is_empty()
 });
 if blockers.get().is_empty() {
 blockers.remove();

From e5528eadbf4d98ad7c8c7b435dbd3e1c5c7962c3 Mon Sep 17 00:00:00 2001
From: elnosh
Date: Wed, 12 Nov 2025 10:21:48 -0500
Subject: [PATCH 017/242] Add docs to `commitment_signed_dance_return_raa`

---
 lightning/src/ln/functional_test_utils.rs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 3460d300b3a..7fbf72a357c 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -2670,6 +2670,10 @@ pub fn do_main_commitment_signed_dance(
 (extra_msg_option, bs_revoke_and_ack)
 }
 
+/// Runs the commitment_signed dance by delivering the commitment_signed and handling the
+/// responding `revoke_and_ack` and `commitment_signed`.
+///
+/// Returns the recipient's `revoke_and_ack`.
 pub fn commitment_signed_dance_return_raa(
 node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>,
 commitment_signed: &Vec<msgs::CommitmentSigned>, fail_backwards: bool,

From 8423ffadbd260909941dc0061208394bdc69a7fc Mon Sep 17 00:00:00 2001
From: elnosh
Date: Fri, 14 Nov 2025 09:01:36 -0500
Subject: [PATCH 018/242] Remove `check_added_monitors` macro

Replace calls to the `check_added_monitors` macro with calls to the
identically-named function.
---
 .../tests/lsps2_integration_tests.rs          |  22 +-
 lightning-persister/src/test_utils.rs         |  11 +-
 lightning/src/chain/chainmonitor.rs           |  21 +-
 lightning/src/chain/channelmonitor.rs         |   7 +-
 lightning/src/ln/async_payments_tests.rs      |  30 +-
 lightning/src/ln/async_signer_tests.rs        |  30 +-
 lightning/src/ln/blinded_payment_tests.rs     |  66 +--
 lightning/src/ln/chanmon_update_fail_tests.rs | 408 +++++++++---------
 lightning/src/ln/channelmanager.rs            |  60 +--
 lightning/src/ln/functional_test_utils.rs     |  94 ++--
 .../src/ln/max_payment_path_len_tests.rs      |   4 +-
 lightning/src/ln/monitor_tests.rs             |  66 +--
 lightning/src/ln/offers_tests.rs              |   6 +-
 lightning/src/ln/onion_route_tests.rs         |  48 +--
 lightning/src/ln/payment_tests.rs             | 228 +++++-----
 lightning/src/ln/priv_short_conf_tests.rs     |  40 +-
 lightning/src/ln/quiescence_tests.rs          |   6 +-
 lightning/src/ln/reload_tests.rs              |  34 +-
 lightning/src/ln/reorg_tests.rs               |  30 +-
 lightning/src/ln/shutdown_tests.rs            |  42 +-
 lightning/src/ln/update_fee_tests.rs          |   2 +-
 lightning/src/ln/zero_fee_commitment_tests.rs |   6 +-
 lightning/src/util/persist.rs                 |   6 +-
 23 files changed, 621 insertions(+), 646 deletions(-)

diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs
index 82f93b5990c..e4ace27b715 100644
--- a/lightning-liquidity/tests/lsps2_integration_tests.rs
+++ b/lightning-liquidity/tests/lsps2_integration_tests.rs
@@ -7,19 +7,11 @@ use common::{
 get_lsps_message, LSPSNodes, LSPSNodesWithPayer, LiquidityNode,
 };
 
-use lightning::check_added_monitors;
 use lightning::events::{ClosureReason, Event};
 use lightning::get_event_msg;
 use lightning::ln::channelmanager::PaymentId;
 use lightning::ln::channelmanager::Retry;
-use lightning::ln::functional_test_utils::create_funding_transaction;
-use lightning::ln::functional_test_utils::do_commitment_signed_dance;
-use lightning::ln::functional_test_utils::expect_channel_pending_event;
-use lightning::ln::functional_test_utils::expect_channel_ready_event;
-use lightning::ln::functional_test_utils::expect_payment_sent;
-use lightning::ln::functional_test_utils::test_default_channel_config;
-use lightning::ln::functional_test_utils::SendEvent;
-use 
lightning::ln::functional_test_utils::{connect_blocks, create_chan_between_nodes_with_value}; +use lightning::ln::functional_test_utils::*; use lightning::ln::msgs::BaseMessageHandler; use lightning::ln::msgs::ChannelMessageHandler; use lightning::ln::msgs::MessageSendEvent; @@ -1226,7 +1218,7 @@ fn client_trusts_lsp_end_to_end_test() { ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); @@ -1566,7 +1558,7 @@ fn create_channel_with_manual_broadcast( let funding_created = get_event_msg!(service_node, MessageSendEvent::SendFundingCreated, *client_node_id); client_node.node.handle_funding_created(*service_node_id, &funding_created); - check_added_monitors!(client_node.inner, 1); + check_added_monitors(&client_node.inner, 1); let bs_signed_locked = client_node.node.get_and_clear_pending_msg_events(); assert_eq!(bs_signed_locked.len(), 2); @@ -1602,7 +1594,7 @@ fn create_channel_with_manual_broadcast( _ => panic!("Unexpected event"), } expect_channel_pending_event(&client_node, &service_node_id); - check_added_monitors!(service_node.inner, 1); + check_added_monitors(&service_node.inner, 1); as_channel_ready = get_event_msg!(service_node, MessageSendEvent::SendChannelReady, *client_node_id); @@ -1699,7 +1691,7 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); @@ -1890,7 +1882,7 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); @@ -2227,7 +2219,7 @@ fn client_trusts_lsp_partial_fee_does_not_trigger_broadcast() { ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index 1de51f44cb2..55208c61491 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -1,14 +1,11 @@ +use lightning::check_closed_broadcast; use lightning::events::ClosureReason; -use lightning::ln::functional_test_utils::{ - check_closed_event, connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, - create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs, send_payment, -}; +use lightning::ln::functional_test_utils::*; use lightning::util::persist::{ migrate_kv_store_data, read_channel_monitors, KVStoreSync, MigratableKVStore, KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, }; use lightning::util::test_utils; -use lightning::{check_added_monitors, check_closed_broadcast}; use std::panic::RefUnwindSafe; @@ -190,7 +187,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { let reason = 
ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); @@ -206,7 +203,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { check_closed_broadcast!(nodes[1], true); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Make sure everything is persisted as expected after close. check_persisted_data!(11); diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index f4a1edff038..9fd6383cf7e 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -1568,7 +1568,6 @@ where mod tests { use crate::chain::channelmonitor::ANTI_REORG_DELAY; use crate::chain::{ChannelMonitorUpdateStatus, Watch}; - use crate::check_added_monitors; use crate::events::{ClosureReason, Event}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; @@ -1601,9 +1600,9 @@ mod tests { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.claim_funds(payment_preimage_2); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone(); @@ -1666,14 +1665,14 @@ mod tests { nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_first_raa, as_first_update) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut bs_2nd_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_first_update); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0] @@ -1683,21 +1682,21 @@ mod tests { nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &bs_2nd_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); expect_payment_path_successful!(nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_second_raa, as_second_update) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_second_update); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], 
MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); expect_payment_path_successful!(nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } #[test] diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 10e5049682e..515a3dc5f1d 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -6911,10 +6911,7 @@ mod tests { use crate::util::logger::Logger; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::test_utils::{TestBroadcaster, TestFeeEstimator, TestLogger}; - use crate::{ - check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, - get_route_and_payment_hash, - }; + use crate::{check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash}; #[allow(unused_imports)] use crate::prelude::*; @@ -6973,7 +6970,7 @@ mod tests { nodes[1].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Build a new ChannelMonitorUpdate which contains both the failing commitment tx update // and provides the claim preimages for the two pending HTLCs. The first update generates diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 8e7fbdf94fd..1f1bb70714d 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -981,7 +981,7 @@ fn ignore_duplicate_invoice() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&always_online_node_id, &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[always_online_node, async_recipient]]; let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); @@ -1060,7 +1060,7 @@ fn ignore_duplicate_invoice() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&always_online_node_id, &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) .without_clearing_recipient_events(); @@ -1129,7 +1129,7 @@ fn async_receive_flow_success() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Receiving a duplicate release_htlc message doesn't result in duplicate payment. nodes[0] @@ -1519,7 +1519,7 @@ fn amount_doesnt_match_invreq() { let mut ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); assert!(matches!( ev, MessageSendEvent::UpdateHTLCs { ref updates, .. 
} if updates.update_add_htlcs.len() == 1)); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[2], &nodes[3]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); let claimable_ev = do_pass_along_path(args).unwrap(); @@ -1723,7 +1723,7 @@ fn invalid_async_receive_with_retry( &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[2].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], true); // Trigger a retry and make sure it fails after calling the closure that induces recipient @@ -1735,7 +1735,7 @@ fn invalid_async_receive_with_retry( let mut ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); assert!(matches!( ev, MessageSendEvent::UpdateHTLCs { ref updates, .. } if updates.update_add_htlcs.len() == 1)); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .without_claimable_event() @@ -1749,7 +1749,7 @@ fn invalid_async_receive_with_retry( let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); let claimable_ev = do_pass_along_path(args).unwrap(); @@ -1915,7 +1915,7 @@ fn expired_static_invoice_payment_path() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) @@ -2360,7 +2360,7 @@ fn refresh_static_invoices_for_used_offers() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&server.node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[server, recipient]]; let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); @@ -2694,7 +2694,7 @@ fn invoice_server_is_not_channel_peer() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&forwarding_node.node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[forwarding_node, recipient]]; let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); @@ -2933,7 +2933,7 @@ fn async_payment_e2e() { let mut events = sender_lsp.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&invoice_server.node.get_our_node_id(), &mut events); - check_added_monitors!(sender_lsp, 1); + check_added_monitors(&sender_lsp, 1); let path: &[&Node] = &[invoice_server, recipient]; let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); @@ -3170,7 +3170,7 @@ fn 
intercepted_hold_htlc() { let mut events = lsp.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); - check_added_monitors!(lsp, 1); + check_added_monitors(&lsp, 1); let path: &[&Node] = &[recipient]; let args = PassAlongPathArgs::new(lsp, path, amt_msat, payment_hash, ev); @@ -3271,7 +3271,7 @@ fn async_payment_mpp() { let expected_path: &[&Node] = &[recipient]; lsp_a.node.process_pending_htlc_forwards(); - check_added_monitors!(lsp_a, 1); + check_added_monitors(&lsp_a, 1); let mut events = lsp_a.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); @@ -3280,7 +3280,7 @@ fn async_payment_mpp() { do_pass_along_path(args); lsp_b.node.process_pending_htlc_forwards(); - check_added_monitors!(lsp_b, 1); + check_added_monitors(&lsp_b, 1); let mut events = lsp_b.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); @@ -3417,7 +3417,7 @@ fn release_htlc_races_htlc_onion_decode() { let mut events = sender_lsp.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&invoice_server.node.get_our_node_id(), &mut events); - check_added_monitors!(sender_lsp, 1); + check_added_monitors(&sender_lsp, 1); let path: &[&Node] = &[invoice_server, recipient]; let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 0c7a467fde7..f38afc41fcc 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -301,7 +301,7 @@ fn do_test_async_commitment_signature_for_commitment_signed_revoke_and_ack( src.node .send_payment_with_route(route, our_payment_hash, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(src, 1); + check_added_monitors(&src, 1); // Pass the payment along the route. let payment_event = { @@ -528,7 +528,7 @@ fn do_test_async_raa_peer_disconnect( src.node .send_payment_with_route(route, our_payment_hash, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(src, 1); + check_added_monitors(&src, 1); // Pass the payment along the route. let payment_event = { @@ -593,7 +593,7 @@ fn do_test_async_raa_peer_disconnect( (latest_update, _) = channel_map.get(&chan_id).unwrap().clone(); } dst.chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); - check_added_monitors!(dst, 0); + check_added_monitors(&dst, 0); } // Expect the RAA @@ -677,7 +677,7 @@ fn do_test_async_commitment_signature_peer_disconnect( src.node .send_payment_with_route(route, our_payment_hash, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(src, 1); + check_added_monitors(&src, 1); // Pass the payment along the route. 
let payment_event = { @@ -743,7 +743,7 @@ fn do_test_async_commitment_signature_peer_disconnect( (latest_update, _) = channel_map.get(&chan_id).unwrap().clone(); } dst.chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); - check_added_monitors!(dst, 0); + check_added_monitors(&dst, 0); } // Expect the RAA @@ -813,14 +813,14 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { .node .send_payment_with_route(route, payment_hash_2, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); get_htlc_update_msgs(&nodes[0], &node_b_id); // Send back update_fulfill_htlc + commitment_signed for the first payment. nodes[1].node.claim_funds(payment_preimage_1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Handle the update_fulfill_htlc, but fail to persist the monitor update when handling the // commitment_signed. @@ -844,7 +844,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); } // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -893,7 +893,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { (latest_update, _) = channel_map.get(&chan_id).unwrap().clone(); } nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); } // Make sure that on signer_unblocked we have the same behavior (even though RAA is ready, @@ -946,18 +946,18 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { nodes[1].node.handle_revoke_and_ack(node_a_id, as_resp.1.as_ref().unwrap()); let (bs_revoke_and_ack, bs_second_commitment_signed) = get_revoke_commit_msgs(&nodes[1], &node_a_id); - check_added_monitors!(nodes[1], 2); + check_added_monitors(&nodes[1], 2); // The rest of this is boilerplate for resolving the previous state. 
nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); let as_commitment_signed = get_htlc_update_msgs(&nodes[0], &node_b_id); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment_signed); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1] .node @@ -965,15 +965,15 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_and_process_pending_htlcs(&nodes[1], false); diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index a902cfebd12..7941a81f61e 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -438,11 +438,11 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { } nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if intro_fails { let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -476,7 +476,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { cause_error!(2, 3, update_add); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); expect_and_process_pending_htlcs(&nodes[2], false); @@ -488,7 +488,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), core::slice::from_ref(&failed_destination) ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; @@ -535,10 +535,10 @@ fn failed_backwards_to_intro_node() { let mut payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - 
check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -548,7 +548,7 @@ fn failed_backwards_to_intro_node() { // Ensure the final node fails to handle the HTLC. payment_event.msgs[0].onion_routing_packet.hop_data[0] ^= 1; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); @@ -621,7 +621,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, let mut payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); macro_rules! cause_error { @@ -645,7 +645,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, _ => panic!("Unexpected event {:?}", events), } check_closed_broadcast(&$curr_node, 1, true); - check_added_monitors!($curr_node, 1); + check_added_monitors(&$curr_node, 1); $curr_node.node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), @@ -657,22 +657,22 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, if intro_fails { cause_error!(nodes[0], nodes[1], nodes[2], chan_id_1_2, chan_upd_1_2.short_channel_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1]], false); return } expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_1_2 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); let mut update_add = &mut updates_1_2.update_add_htlcs[0]; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); cause_error!(nodes[1], nodes[2], nodes[3], chan_id_2_3, chan_upd_2_3.short_channel_id); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; @@ -751,7 +751,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1]], false); return } @@ -860,7 +860,7 @@ fn three_hop_blinded_path_fail() { nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); 
fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2], &nodes[3]], false); } @@ -962,10 +962,10 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { SendEvent::from_event(ev) }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut payment_event_1_2 = { let mut events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -977,7 +977,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { match check { ReceiveCheckFail::RecipientFail => { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); check_payment_claimable( @@ -989,7 +989,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[2].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::OnionDecodeFail => { let session_priv = SecretKey::from_slice(&session_priv).unwrap(); @@ -1013,7 +1013,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { &payment_hash ).unwrap(); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); @@ -1023,7 +1023,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { let update_add = &mut payment_event_1_2.msgs[0]; update_add.amount_msat -= 1; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); @@ -1037,7 +1037,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event_1_2.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); nodes[2].node.handle_shutdown(nodes[1].node.get_our_node_id(), &node_1_shutdown); assert!(commitment_signed_dance_through_cp_raa(&nodes[2], &nodes[1], false, false).is_none()); @@ -1048,15 +1048,15 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { ReceiveCheckFail::ProcessPendingHTLCsCheck => { assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as 
u32 + TEST_FINAL_CLTV); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], true); expect_htlc_failure_conditions(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::PaymentConstraints => { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); @@ -1152,7 +1152,7 @@ fn blinded_path_retries() { nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); let updates = get_htlc_update_msgs(&nodes[3], &$intro_node.node.get_our_node_id()); assert_eq!(updates.update_fail_malformed_htlcs.len(), 1); @@ -1183,7 +1183,7 @@ fn blinded_path_retries() { fail_payment_back!(nodes[1]); // Pass the retry along. - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], amt_msat, payment_hash, Some(payment_secret), msg_events.pop().unwrap(), true, None); @@ -1263,7 +1263,7 @@ fn min_htlc() { SendEvent::from_event(ev) }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[1], false); expect_htlc_handling_failed_destinations!( @@ -1461,7 +1461,7 @@ fn fails_receive_tlvs_authentication() { do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[1], false); nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); let mut update_fail = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -2098,7 +2098,7 @@ fn test_trampoline_forward_payload_encoded_as_receive() { }; nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let replacement_onion = { // create a substitute onion where the last Trampoline hop is a forward @@ -2263,7 +2263,7 @@ fn do_test_trampoline_single_hop_receive(success: bool) { }; nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); - 
check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], amt_msat, payment_hash, payment_secret); if success { @@ -2586,7 +2586,7 @@ fn do_test_trampoline_relay(blinded: bool, test_case: TrampolineTestCase) { ) .unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2765,7 +2765,7 @@ fn test_trampoline_forward_rejection() { nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index e79e8becc66..57f0ca87d45 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -123,7 +123,7 @@ fn test_monitor_and_persister_update_fail() { // Try to update ChannelMonitor nodes[1].node.claim_funds(preimage); expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -169,7 +169,7 @@ fn test_monitor_and_persister_update_fail() { } } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_sent(&nodes[0], preimage, None, false, false); } @@ -195,7 +195,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -213,7 +213,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); @@ -262,7 +262,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -281,7 +281,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { message: message.clone(), }; nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, message).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); // TODO: Once we hit the chain with the 
failure transaction we should check that we get a @@ -338,7 +338,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -347,7 +347,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1] // but nodes[0] won't respond since it is frozen. nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -387,7 +387,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } nodes[0].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -405,7 +405,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); macro_rules! disconnect_reconnect_peers { () => {{ @@ -454,10 +454,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert_eq!(reestablish_2.len(), 1); nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); assert!(as_resp.0.is_none()); @@ -501,7 +501,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); as_resp.1 = Some(as_resp_raa); bs_resp.2 = None; @@ -544,7 +544,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if disconnect_count & !disconnect_flags > 2 { let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); @@ -568,7 +568,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(as_commitment_update.update_fail_htlcs.is_empty()); assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty()); 
assert!(as_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }; } @@ -581,7 +581,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }; } @@ -645,7 +645,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { ); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1] .node @@ -653,15 +653,15 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); expect_and_process_pending_htlcs(&nodes[1], false); @@ -743,7 +743,7 @@ fn test_monitor_update_fail_cs() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -752,13 +752,13 @@ fn test_monitor_update_fail_cs() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(responses.len(), 2); @@ -766,7 +766,7 @@ fn test_monitor_update_fail_cs() { MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => { assert_eq!(*node_id, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -784,7 +784,7 @@ fn test_monitor_update_fail_cs() { .node .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - 
check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); }, _ => panic!("Unexpected event"), @@ -793,11 +793,11 @@ fn test_monitor_update_fail_cs() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &final_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); @@ -851,7 +851,7 @@ fn test_monitor_update_fail_no_rebroadcast() { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -864,13 +864,13 @@ fn test_monitor_update_fail_no_rebroadcast() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); expect_and_process_pending_htlcs(&nodes[1], false); let events = nodes[1].node.get_and_clear_pending_events(); @@ -906,7 +906,7 @@ fn test_monitor_update_raa_while_paused() { let id = PaymentId(our_payment_hash_1.0); nodes[0].node.send_payment_with_route(route, our_payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -916,13 +916,13 @@ fn test_monitor_update_raa_while_paused() { let id_2 = PaymentId(our_payment_hash_2.0); nodes[1].node.send_payment_with_route(route, our_payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event_1.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event_1.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); @@ -930,37 +930,37 @@ fn test_monitor_update_raa_while_paused() { nodes[0].node.handle_update_add_htlc(node_b_id, &send_event_2.msgs[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_event_2.commitment_msg); 
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let as_update_raa = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_update_raa.0); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_cs = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_update_raa.1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_and_process_pending_htlcs(&nodes[0], false); expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000); @@ -994,7 +994,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }], ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); @@ -1007,7 +1007,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let commitment = updates.commitment_signed; let bs_revoke_and_ack = commitment_signed_dance_return_raa(&nodes[1], &nodes[2], &commitment, false); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); // While the second channel is AwaitingRAA, forward a second payment to get it into the // holding cell. 
@@ -1016,7 +1016,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -1024,7 +1024,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Now fail monitor updating. @@ -1033,7 +1033,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Forward a third payment which will also be added to the holding cell, despite the channel // being paused waiting a monitor update. @@ -1042,18 +1042,18 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); let id_3 = PaymentId(payment_hash_3.0); nodes[0].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, true); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell // and not forwarded. 
expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs { @@ -1063,13 +1063,13 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let onion_4 = RecipientOnionFields::secret_only(payment_secret_4); let id_4 = PaymentId(payment_hash_4.0); nodes[2].node.send_payment_with_route(route, payment_hash_4, onion_4, id_4).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_c_id, &send_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &send_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); (Some(payment_preimage_4), Some(payment_hash_4)) } else { @@ -1081,12 +1081,12 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_2.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); expect_and_process_pending_htlcs_and_htlc_handling_failed( &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); if test_ignore_second_cs { @@ -1138,11 +1138,11 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let as_cs; if test_ignore_second_cs { nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_event_b.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &raa.unwrap()); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(bs_cs.update_add_htlcs.is_empty()); assert!(bs_cs.update_fail_htlcs.is_empty()); @@ -1151,14 +1151,14 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(bs_cs.update_fee.is_none()); nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); as_cs = get_htlc_update_msgs(&nodes[1], &node_c_id); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } else { nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_event_b.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events(); // As both messages are for nodes[1], they're in order. 
@@ -1167,7 +1167,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { assert_eq!(*node_id, node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), } @@ -1185,7 +1185,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[1] .node .handle_commitment_signed_batch_test(node_c_id, &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), } @@ -1200,23 +1200,23 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[2].node.handle_update_add_htlc(node_b_id, &as_cs.update_add_htlcs[0]); nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &as_cs.commitment_signed); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_second_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_second_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_second_raa); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); expect_and_process_pending_htlcs(&nodes[2], false); @@ -1238,7 +1238,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { if test_ignore_second_cs { expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); send_event = SendEvent::from_node(&nodes[1]); assert_eq!(send_event.node_id, node_a_id); @@ -1292,7 +1292,7 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.peer_disconnected(node_b_id); nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -1303,7 +1303,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -1328,7 +1328,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(as_channel_upd.contents.channel_flags & 2, 0); nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.peer_disconnected(node_a_id); 
nodes[0].node.peer_disconnected(node_b_id); @@ -1346,7 +1346,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(as_channel_upd.contents.channel_flags & 2, 0); nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); // The "disabled" bit should be unset as we just reconnected let bs_channel_upd = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); @@ -1355,7 +1355,7 @@ fn test_monitor_update_fail_reestablish() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(updates.update_add_htlcs.is_empty()); @@ -1399,28 +1399,28 @@ fn raa_no_response_awaiting_raa_state() { let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); let id_1 = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion_1, id_1).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from @@ -1431,17 +1431,17 @@ fn raa_no_response_awaiting_raa_state() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); // nodes[1] should be AwaitingRAA here! 
- check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); @@ -1452,39 +1452,39 @@ fn raa_no_response_awaiting_raa_state() { let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); let id_3 = PaymentId(payment_hash_3.0); nodes[0].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // Finally deliver the RAA to nodes[1] which results in a CS response to the last update nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); let bs_update = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_update.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000); @@ -1519,7 +1519,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[1].node.peer_disconnected(node_a_id); nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let init_msg = msgs::Init { @@ -1544,7 +1544,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[1].node.handle_channel_reestablish(node_a_id, &as_reconnect); let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Send a second payment from A to B, resulting in a commitment update that gets swallowed with @@ -1554,12 +1554,12 @@ fn claim_while_disconnected_monitor_update_fail() { let onion_2 = 
RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC // until we've channel_monitor_update'd and updated for the new commitment transaction. @@ -1569,7 +1569,7 @@ fn claim_while_disconnected_monitor_update_fail() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let mut bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(bs_msgs.len(), 2); @@ -1583,11 +1583,11 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), } @@ -1596,7 +1596,7 @@ fn claim_while_disconnected_monitor_update_fail() { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { assert_eq!(*node_id, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -1605,20 +1605,20 @@ fn claim_while_disconnected_monitor_update_fail() { let bs_commitment = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_commitment.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); @@ -1661,7 +1661,7 @@ fn monitor_failed_no_reestablish_response() { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, 
id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1670,7 +1670,7 @@ fn monitor_failed_no_reestablish_response() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1] // is still failing to update monitors. @@ -1698,17 +1698,17 @@ fn monitor_failed_no_reestablish_response() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); @@ -1745,7 +1745,7 @@ fn first_message_on_recv_ordering() { let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); let id_1 = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion_1, id_1).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1753,13 +1753,13 @@ fn first_message_on_recv_ordering() { assert_eq!(payment_event.node_id, node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); @@ -1770,7 +1770,7 @@ fn first_message_on_recv_ordering() { let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -1783,20 +1783,20 @@ fn 
first_message_on_recv_ordering() { // to the next message also tests resetting the delivery order. nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an // RAA/CS response, which should be generated when we call channel_monitor_update (with the // appropriate HTLC acceptance). nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); expect_and_process_pending_htlcs(&nodes[1], false); @@ -1804,13 +1804,13 @@ fn first_message_on_recv_ordering() { let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); @@ -1850,7 +1850,7 @@ fn test_monitor_update_fail_claim() { nodes[1].node.claim_funds(payment_preimage_1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Note that at this point there is a pending commitment transaction update for A being held by // B. Even when we go to send the payment from C through B to A, B will not update this @@ -1862,7 +1862,7 @@ fn test_monitor_update_fail_claim() { let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[2].node.send_payment_with_route(route.clone(), payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be // paused, so forward shouldn't succeed until we call channel_monitor_updated(). 
@@ -1881,7 +1881,7 @@ fn test_monitor_update_fail_claim() { let id_3 = PaymentId(payment_hash_3.0); let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); nodes[2].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1896,7 +1896,7 @@ fn test_monitor_update_fail_claim() { let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let mut bs_fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_fulfill.update_fulfill_htlcs.remove(0)); @@ -1905,7 +1905,7 @@ fn test_monitor_update_fail_claim() { // Get the payment forwards, note that they were batched into one commitment update. nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_forward_update = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[0]); nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[1]); @@ -1994,7 +1994,7 @@ fn test_monitor_update_on_pending_forwards() { &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }], ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let cs_fail_update = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fail_htlc(node_c_id, &cs_fail_update.update_fail_htlcs[0]); @@ -2006,7 +2006,7 @@ fn test_monitor_update_on_pending_forwards() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[2].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2019,12 +2019,12 @@ fn test_monitor_update_on_pending_forwards() { &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); @@ -2077,7 +2077,7 @@ fn monitor_update_claim_fail_no_response() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2088,7 +2088,7 @@ fn monitor_update_claim_fail_no_response() { 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2096,11 +2096,11 @@ fn monitor_update_claim_fail_no_response() { let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); @@ -2144,7 +2144,7 @@ fn do_during_funding_monitor_fail( .node .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let funding_created_msg = @@ -2154,20 +2154,20 @@ fn do_during_funding_monitor_fail( funding_created_msg.funding_output_index, ); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id), ); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); expect_channel_pending_event(&nodes[0], &node_b_id); let events = nodes[0].node.get_and_clear_pending_events(); @@ -2222,7 +2222,7 @@ fn do_during_funding_monitor_fail( chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first { if !restore_b_before_lock { @@ -2326,7 +2326,7 @@ fn test_path_paused_mpp() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // Pass the first HTLC of the payment along to nodes[3]. 
@@ -2382,7 +2382,7 @@ fn test_pending_update_fee_ack_on_reconnect() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_initial_send_msgs = get_htlc_update_msgs(&nodes[1], &node_a_id); // bs_initial_send_msgs are not delivered until they are re-generated after reconnect @@ -2391,7 +2391,7 @@ fn test_pending_update_fee_ack_on_reconnect() { *feerate_lock *= 2; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_update_fee_msgs = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(as_update_fee_msgs.update_fee.is_some()); @@ -2399,7 +2399,7 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[1] .node .handle_commitment_signed_batch_test(node_a_id, &as_update_fee_msgs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // bs_first_raa is not delivered until it is re-generated after reconnect @@ -2441,33 +2441,33 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &bs_initial_send_msgs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack( node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_cs = get_htlc_update_msgs(&nodes[1], &node_a_id).commitment_signed; nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_commitment_signed_batch_test( node_a_id, &get_htlc_update_msgs(&nodes[0], &node_b_id).commitment_signed, ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack( node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[0], false); expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000); @@ -2504,13 +2504,13 @@ fn test_fail_htlc_on_broadcast_after_claim() { assert_eq!(bs_txn.len(), 1); nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 2000); let mut cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fulfill_htlc(node_c_id, cs_updates.update_fulfill_htlcs.remove(0)); let mut bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); mine_transaction(&nodes[1], &bs_txn[0]); @@ -2518,7 +2518,7 @@ fn 
test_fail_htlc_on_broadcast_after_claim() { check_closed_event(&nodes[1], 1, reason, &[node_c_id], 100000); check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs_and_htlc_handling_failed( &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }], @@ -2550,7 +2550,7 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { *feerate_lock += 20; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_msgs = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(update_msgs.update_fee.is_some()); if deliver_update { @@ -2602,38 +2602,38 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { nodes[1] .node .handle_commitment_signed_batch_test(node_a_id, &update_msgs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_update = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_update_fee(node_a_id, as_second_update.update_fee.as_ref().unwrap()); nodes[1] .node .handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); let bs_second_cs = get_htlc_update_msgs(&nodes[1], &node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &bs_second_cs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } else { let commitment = &update_msgs.commitment_signed; do_commitment_signed_dance(&nodes[1], &nodes[0], commitment, false, false); @@ -2697,29 +2697,29 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); let id_1 = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion_1, id_1).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send = SendEvent::from_node(&nodes[0]); assert_eq!(send.msgs.len(), 1); let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 0); + 
check_added_monitors(&nodes[0], 0); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.claim_funds(payment_preimage_0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &send.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (raa, cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); if disconnect { // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just @@ -2751,7 +2751,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); @@ -2792,14 +2792,14 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // New outbound messages should be generated immediately upon a call to // get_and_clear_pending_msg_events (but not before). - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert_eq!(events.len(), 1); // Deliver the pending in-flight CS nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let commitment_msg = match events.pop().unwrap() { MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, mut updates } => { @@ -2819,13 +2819,13 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { }; nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(commitment_signed_dance_through_cp_raa(&nodes[1], &nodes[0], false, false).is_none()); let events = nodes[1].node.get_and_clear_pending_events(); @@ -2885,19 +2885,19 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let onion_2 = RecipientOnionFields::secret_only(second_payment_secret); let id_2 = PaymentId(second_payment_hash.0); nodes[0].node.send_payment_with_route(route, second_payment_hash, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, 
&send_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id)); } @@ -2914,13 +2914,13 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash }], ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); get_htlc_update_msgs(&nodes[2], &node_b_id); // Note that we don't populate fulfill_msg.attribution_data here, which will lead to hold times being // unavailable. } else { nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 100_000); let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -2937,7 +2937,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f } nodes[1].node.handle_update_fulfill_htlc(node_c_id, fulfill_msg); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut bs_updates = None; if htlc_status != HTLCStatusAtDupClaim::HoldingCell { @@ -2976,7 +2976,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f if htlc_status == HTLCStatusAtDupClaim::HoldingCell { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa.unwrap()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); // We finally receive the second payment, but don't claim it bs_updates = Some(get_htlc_update_msgs(&nodes[1], &node_a_id)); @@ -3029,13 +3029,13 @@ fn test_temporary_error_during_shutdown() { node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_shutdown( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id), ); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -3097,20 +3097,20 @@ fn double_temp_error() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // `claim_funds` results in a ChannelMonitorUpdate. nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (latest_update_1, _) = get_latest_mon_update_id(&nodes[1], channel_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, // which had some asserts that prevented it from being called twice. 
nodes[1].node.claim_funds(payment_preimage_2); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update_2, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_2); // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions @@ -3160,18 +3160,18 @@ fn double_temp_error() { }; assert_eq!(node_id, node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_1); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed_b1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.process_pending_htlc_forwards(); let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs(&nodes[0], &node_b_id); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_a1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed_a1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Complete the second HTLC. 
let ((update_fulfill_2, commitment_signed_b2), raa_b2) = { @@ -3200,11 +3200,11 @@ fn double_temp_error() { ) }; nodes[0].node.handle_revoke_and_ack(node_b_id, &raa_b2); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_2); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); do_commitment_signed_dance(&nodes[0], &nodes[1], &commitment_signed_b2, false, false); expect_payment_sent!(nodes[0], payment_preimage_2); @@ -3267,12 +3267,12 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { .node .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &node_a_id); let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events(); @@ -3282,7 +3282,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -3377,13 +3377,13 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo .node .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // nodes[1] happily sends its funding_signed even though its awaiting the persistence of the // initial ChannelMonitor, but it will decline to send its channel_ready even if the funding @@ -3392,7 +3392,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_channel_pending_event(&nodes[0], &node_b_id); let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4045,7 +4045,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { message: msg.clone(), }; nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, msg).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4492,7 +4492,7 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { .node .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, message.clone()) .unwrap(); - 
check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, a_reason, &[node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); @@ -4502,20 +4502,20 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let b_reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, b_reason, &[node_a_id], 1000000); // Now that B has a pending forwarded payment across it with the inbound edge on-chain, claim // the payment on C and give B the preimage for it. nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); // At this point nodes[1] has the preimage and is waiting for the `ChannelMonitorUpdate` for @@ -4530,13 +4530,13 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { // background events (via `get_and_clear_pending_msg_events`), the final `ChannelMonitorUpdate` // will fly and we'll drop the preimage from channel B's `ChannelMonitor`. We'll also release // the `Event::PaymentForwarded`. - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(!get_monitor!(nodes[1], chan_b.2) .get_all_current_outbound_htlcs() .iter() @@ -4569,7 +4569,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { .node .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, message.clone()) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, a_reason, &[node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); @@ -4579,7 +4579,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let b_reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, b_reason, &[node_a_id], 1000000); @@ -4588,7 +4588,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { // `Event::PaymentClaimed` from being generated. 
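// Illustrative sketch (not part of the patch): the async-persistence gating the
// tests above exercise, modeled with a toy persister. All names below are
// hypothetical stand-ins for the real TestPersister / ChainMonitor machinery.
#[derive(Clone, Copy, PartialEq, Debug)]
enum ToyUpdateStatus { Completed, InProgress }

struct ToyPersister { next_ret: ToyUpdateStatus, pending: Vec<u64> }

impl ToyPersister {
    fn persist_update(&mut self, update_id: u64) -> ToyUpdateStatus {
        if self.next_ret == ToyUpdateStatus::InProgress {
            // Update accepted but not yet durable: dependent events (such as the
            // blocked Event::PaymentClaimed above) stay queued until completion.
            self.pending.push(update_id);
        }
        self.next_ret
    }
    fn complete_sole_pending_update(&mut self) -> u64 {
        assert_eq!(self.pending.len(), 1, "expected exactly one in-flight update");
        self.pending.pop().unwrap()
    }
}

fn main() {
    let mut p = ToyPersister { next_ret: ToyUpdateStatus::InProgress, pending: Vec::new() };
    assert_eq!(p.persist_update(1), ToyUpdateStatus::InProgress);
    assert_eq!(p.complete_sole_pending_update(), 1); // only now may blocked events fire
}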
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); // Once we complete the `ChannelMonitorUpdate` the `Event::PaymentClaimed` will become diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index c8f209236ef..0411d519a9d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18673,7 +18673,7 @@ mod tests { RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap(); nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash, RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None); @@ -18683,19 +18683,19 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18709,7 +18709,7 @@ mod tests { // Send the second half of the original MPP payment. nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash, RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None); @@ -18720,34 +18720,34 @@ mod tests { // lightning messages manually. 
nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], our_payment_hash, 200_000); - check_added_monitors!(nodes[1], 2); + check_added_monitors(&nodes[1], 2); let mut bs_1st_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_1st_updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_1st_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_first_raa, as_first_cs) = get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut bs_2nd_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_first_cs); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_2nd_updates.update_fulfill_htlcs.remove(0)); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_2nd_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); let as_second_updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Note that successful MPP payments will generate a single PaymentSent event upon the first // path's success and a PaymentPathSuccessful event for each path's success. 
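// Illustrative sketch (not part of the patch): the claim-side message order the
// test above drives by hand. Each commitment_signed or revoke_and_ack a node
// handles persists one ChannelMonitor update, which is exactly what the
// interleaved check_added_monitors(&nodes[x], 1) calls assert.
fn main() {
    let dance = [
        ("B -> A", "update_fulfill_htlc + commitment_signed"),
        ("A", "handles commitment_signed: +1 monitor, replies revoke_and_ack + commitment_signed"),
        ("B", "handles revoke_and_ack: +1 monitor"),
        ("B", "handles commitment_signed: +1 monitor, replies revoke_and_ack"),
        ("A", "handles revoke_and_ack: +1 monitor"),
    ];
    for (who, step) in dance {
        println!("{who}: {step}");
    }
}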
@@ -18801,13 +18801,13 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0), route_params.clone(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward @@ -18815,7 +18815,7 @@ mod tests { let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18839,7 +18839,7 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -18850,19 +18850,19 @@ mod tests { let payment_secret = PaymentSecret([43; 32]); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18882,7 +18882,7 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1, route.route_params.clone().unwrap(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -18899,19 +18899,19 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_2, route_params, Retry::Attempts(0) 
).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18957,7 +18957,7 @@ mod tests { RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap(); nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); assert_eq!(updates.update_add_htlcs.len(), 1); @@ -19025,7 +19025,7 @@ mod tests { let message = "Channel force-closed".to_owned(); nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 100000); @@ -19089,7 +19089,7 @@ mod tests { .node .force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1_000_000); @@ -19291,13 +19291,13 @@ mod tests { let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); } open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 7fbf72a357c..563c60ecdcf 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1267,16 
+1267,6 @@ pub fn check_added_monitors>(node: & } } -/// Check whether N channel monitor(s) have been added. -/// -/// Don't use this, use the identically-named function instead. -#[macro_export] -macro_rules! check_added_monitors { - ($node: expr, $count: expr) => { - $crate::ln::functional_test_utils::check_added_monitors(&$node, $count); - }; -} - fn claimed_htlc_matches_path<'a, 'b, 'c>( origin_node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], htlc: &ClaimedHTLC, ) -> bool { @@ -1355,7 +1345,7 @@ pub fn _reload_node<'a, 'b, 'c>( node.chain_monitor.load_existing_monitor(channel_id, monitor), Ok(ChannelMonitorUpdateStatus::Completed), ); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); } node_deserialized @@ -1511,7 +1501,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( .node .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) .is_ok()); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); let funding_created_msg = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b_id); @@ -1554,7 +1544,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) .is_err()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); tx } @@ -1636,7 +1626,7 @@ pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( get_event_msg!(initiator, MessageSendEvent::SendFundingCreated, receiver_node_id); receiver.node.handle_funding_created(initiator_node_id, &funding_created); - check_added_monitors!(receiver, 1); + check_added_monitors(&receiver, 1); let bs_signed_locked = receiver.node.get_and_clear_pending_msg_events(); assert_eq!(bs_signed_locked.len(), 2); let as_channel_ready; @@ -1646,7 +1636,7 @@ pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( initiator.node.handle_funding_signed(receiver_node_id, &msg); expect_channel_pending_event(&initiator, &receiver_node_id); expect_channel_pending_event(&receiver, &initiator_node_id); - check_added_monitors!(initiator, 1); + check_added_monitors(&initiator, 1); assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!( @@ -1840,11 +1830,11 @@ pub fn create_channel_manual_funding<'a, 'b, 'c: 'd, 'd>( funding_tx.clone(), ) .unwrap(); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); let funding_created = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b_id); node_b.node.handle_funding_created(node_a_id, &funding_created); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); let channel_id_b = expect_channel_pending_event(node_b, &node_a_id); if zero_conf { @@ -2010,7 +2000,7 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( let as_funding_created = get_event_msg!(nodes[a], MessageSendEvent::SendFundingCreated, node_b_id); nodes[b].node.handle_funding_created(node_a_id, &as_funding_created); - check_added_monitors!(nodes[b], 1); + check_added_monitors(&nodes[b], 1); let cs_funding_signed = get_event_msg!(nodes[b], MessageSendEvent::SendFundingSigned, node_a_id); @@ -2018,7 +2008,7 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( nodes[a].node.handle_funding_signed(node_b_id, &cs_funding_signed); expect_channel_pending_event(&nodes[a], &node_b_id); - check_added_monitors!(nodes[a], 1); + check_added_monitors(&nodes[a], 1); 
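// Illustrative sketch (not part of the patch): the shape of the migration above.
// The removed check_added_monitors! macro only forwarded to the identically-named
// function, so every call site can pass an explicit reference instead and the
// macro_rules! indirection can go. The trait and node type below are hypothetical
// stand-ins for the real test-harness types.
use std::cell::Cell;

trait MonitorCounter {
    // Returns and clears the number of ChannelMonitors added since the last check.
    fn take_added_monitor_count(&self) -> usize;
}

fn check_added_monitors<N: MonitorCounter>(node: &N, count: usize) {
    assert_eq!(node.take_added_monitor_count(), count, "unexpected monitor count");
}

struct ToyNode(Cell<usize>);
impl MonitorCounter for ToyNode {
    fn take_added_monitor_count(&self) -> usize { self.0.replace(0) }
}

fn main() {
    let node = ToyNode(Cell::new(1));
    // Old call style: check_added_monitors!(node, 1);
    check_added_monitors(&node, 1); // new call style, explicit &node
}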
assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); @@ -2641,11 +2631,11 @@ pub fn do_main_commitment_signed_dance( let node_b_id = node_b.node.get_our_node_id(); let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs(node_a, &node_b_id); - check_added_monitors!(node_b, 0); + check_added_monitors(&node_b, 0); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); node_b.node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); let (bs_revoke_and_ack, extra_msg_option) = { let mut events = node_b.node.get_and_clear_pending_msg_events(); @@ -2662,7 +2652,7 @@ pub fn do_main_commitment_signed_dance( events.get(0).map(|e| e.clone()), ) }; - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); if fail_backwards { assert!(node_a.node.get_and_clear_pending_events().is_empty()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); @@ -2701,10 +2691,10 @@ pub fn do_commitment_signed_dance( ) { let node_b_id = node_b.node.get_our_node_id(); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); node_a.node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); // If this commitment signed dance was due to a claim, don't check for an RAA monitor update. let channel_id = commitment_signed[0].channel_id; @@ -2728,7 +2718,7 @@ pub fn do_commitment_signed_dance( channel_id, }], ); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); let node_a_per_peer_state = node_a.node.per_peer_state.read().unwrap(); let mut number_of_msg_events = 0; @@ -3426,7 +3416,7 @@ pub fn send_along_route_with_secret<'a, 'b, 'c>( Retry::Attempts(0), ) .unwrap(); - check_added_monitors!(origin_node, expected_paths.len()); + check_added_monitors(&origin_node, expected_paths.len()); pass_along_route(origin_node, expected_paths, recv_value, our_payment_hash, our_payment_secret); payment_id } @@ -3440,7 +3430,7 @@ pub fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) prev_node .node .handle_update_fail_htlc(node.node.get_our_node_id(), &updates.update_fail_htlcs[0]); - check_added_monitors!(prev_node, 0); + check_added_monitors(&prev_node, 0); let is_first_hop = origin_node_id == prev_node.node.get_our_node_id(); // We do not want to fail backwards on the first hop. All other hops should fail backwards. 
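// Illustrative sketch (not part of the patch): the backwards walk that
// fail_payment_along_path performs above. Handling update_fail_htlc alone commits
// no monitor update (that happens in the subsequent commitment dance), hence
// check_added_monitors(&prev_node, 0); every hop except the origin then fails the
// HTLC further back to its predecessor.
fn main() {
    let path = ["origin", "hop1", "hop2", "destination"];
    // Walk hop pairs from the destination back towards the origin.
    for pair in path.windows(2).rev() {
        println!("{} -> update_fail_htlc -> {}", pair[1], pair[0]);
    }
}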
@@ -3548,7 +3538,7 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option assert_eq!(node.node.get_our_node_id(), payment_event.node_id); node.node.handle_update_add_htlc(prev_node.node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(node, 0); + check_added_monitors(&node, 0); if is_last_hop && is_probe { do_commitment_signed_dance(node, prev_node, &payment_event.commitment_msg, true, true); @@ -3650,14 +3640,14 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option assert!(events_2.len() == 1); expect_htlc_handling_failed_destinations!(events_2, &[failure]); node.node.process_pending_htlc_forwards(); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); } else { assert!(events_2.is_empty()); } } else if !is_last_hop { let mut events_2 = node.node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); } @@ -3692,7 +3682,7 @@ pub fn send_probe_along_route<'a, 'b, 'c>( let mut events = origin_node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), expected_route.len()); - check_added_monitors!(origin_node, expected_route.len()); + check_added_monitors(&origin_node, expected_route.len()); for (path, payment_hash) in expected_route.iter() { let ev = remove_first_msg_event_to_node(&path[0].node.get_our_node_id(), &mut events); @@ -3959,7 +3949,7 @@ pub fn pass_claimed_payment_along_route_from_ev( $prev_node.node.get_our_node_id(), next_msgs.as_ref().unwrap().0.clone(), ); - check_added_monitors!($node, 0); + check_added_monitors(&$node, 0); assert!($node.node.get_and_clear_pending_msg_events().is_empty()); let commitment = &next_msgs.as_ref().unwrap().1; do_commitment_signed_dance($node, $prev_node, commitment, false, false); @@ -4024,7 +4014,7 @@ pub fn pass_claimed_payment_along_route_from_ev( ); expected_total_fee_msat += actual_fee.unwrap(); fwd_amt_msat += actual_fee.unwrap(); - check_added_monitors!($node, 1); + check_added_monitors(&$node, 1); let new_next_msgs = if $new_msgs { let events = $node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -4073,7 +4063,7 @@ pub fn pass_claimed_payment_along_route_from_ev( // Ensure that claim_funds is idempotent. 
expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage); assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(expected_paths[0].last().unwrap(), 0); + check_added_monitors(&expected_paths[0].last().unwrap(), 0); expected_total_fee_msat } @@ -4168,7 +4158,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( our_payment_hash: PaymentHash, expected_fail_reason: PaymentFailureReason, ) { let mut expected_paths: Vec<_> = expected_paths_slice.iter().collect(); - check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len()); + check_added_monitors(&expected_paths[0].last().unwrap(), expected_paths.len()); let mut per_path_msgs: Vec<((msgs::UpdateFailHTLC, Vec), PublicKey)> = Vec::with_capacity(expected_paths.len()); @@ -4280,7 +4270,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0, ); - check_added_monitors!(origin_node, 0); + check_added_monitors(&origin_node, 0); assert!(origin_node.node.get_and_clear_pending_msg_events().is_empty()); let commitment = &next_msgs.as_ref().unwrap().1; do_commitment_signed_dance(origin_node, prev_node, commitment, false, false); @@ -4343,7 +4333,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( pending_events ); assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(expected_paths[0].last().unwrap(), 0); + check_added_monitors(&expected_paths[0].last().unwrap(), 0); } pub fn fail_payment<'a, 'b, 'c>( @@ -5199,9 +5189,9 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { || pending_cell_htlc_fails.0 != 0 || expect_renegotiated_funding_locked_monitor_update.1 { - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); } else { - check_added_monitors!(node_b, 0); + check_added_monitors(&node_b, 0); } let mut resp_2 = Vec::new(); @@ -5213,9 +5203,9 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { || pending_cell_htlc_fails.1 != 0 || expect_renegotiated_funding_locked_monitor_update.0 { - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); } else { - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); } // We don't yet support both needing updates, as that would require a different commitment dance: @@ -5290,7 +5280,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); node_a.node.handle_revoke_and_ack(node_b_id, &chan_msgs.1.unwrap()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); } else { assert!(chan_msgs.1.is_none()); } @@ -5330,15 +5320,15 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_b_id, &commitment_update.commitment_signed, ); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!( - node_b, - if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 } + check_added_monitors( + &node_b, + if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 }, ); } } 
else { @@ -5404,7 +5394,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); node_b.node.handle_revoke_and_ack(node_a_id, &chan_msgs.1.unwrap()); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); } else { assert!(chan_msgs.1.is_none()); } @@ -5444,15 +5434,15 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_a_id, &commitment_update.commitment_signed, ); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_a.node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!( - node_a, - if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 } + check_added_monitors( + &node_a, + if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 }, ); } } else { @@ -5535,7 +5525,7 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>( tx.clone(), ) .is_ok()); - check_added_monitors!(funding_node, 0); + check_added_monitors(&funding_node, 0); let events = funding_node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), params.len()); for (other_node, ..) in params { diff --git a/lightning/src/ln/max_payment_path_len_tests.rs b/lightning/src/ln/max_payment_path_len_tests.rs index f67ad442c29..fa7e8d8f132 100644 --- a/lightning/src/ln/max_payment_path_len_tests.rs +++ b/lightning/src/ln/max_payment_path_len_tests.rs @@ -92,7 +92,7 @@ fn large_payment_metadata() { .node .send_payment(payment_hash, max_sized_onion.clone(), id, route_params, Retry::Attempts(0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1]]; @@ -174,7 +174,7 @@ fn large_payment_metadata() { .node .send_payment(payment_hash_2, onion_allowing_2_hops, id, route_params, Retry::Attempts(0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1], &nodes[2]]; diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 34064ebb484..04915affa20 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -68,7 +68,7 @@ fn chanmon_fail_from_stale_commitment() { let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let bs_txn = get_local_commitment_txn!(nodes[1], chan_id_2); @@ -78,19 +78,19 @@ fn chanmon_fail_from_stale_commitment() { expect_and_process_pending_htlcs(&nodes[1], false); get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Don't bother delivering the new HTLC add/commits, instead confirming the pre-HTLC commitment // transaction for nodes[1]. 
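// Illustrative sketch (not part of the patch): the reorg-safety delay gating the
// stale-commitment failure exercised just below. An on-chain HTLC resolution is
// only acted on once it is ANTI_REORG_DELAY confirmations deep (6 in LDK), which
// is why the test mines the stale commitment and then connects a further
// ANTI_REORG_DELAY - 1 blocks before expecting the backwards failure.
fn main() {
    const ANTI_REORG_DELAY: u32 = 6; // matches lightning::chain::channelmonitor
    let confirmations = 1 + (ANTI_REORG_DELAY - 1); // mine_transaction + connect_blocks
    assert_eq!(confirmations, ANTI_REORG_DELAY); // the failure may now be processed
}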
mine_transaction(&nodes[1], &bs_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[2].node.get_our_node_id()], 100000); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let fail_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]); @@ -140,7 +140,7 @@ fn revoked_output_htlc_resolution_timing() { // Confirm the revoked commitment transaction, closing the channel. mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); // Two justice transactions will be broadcast, one on the unpinnable, revoked to_self output, @@ -185,7 +185,7 @@ fn archive_fully_resolved_monitors() { let message = "Channel force-closed".to_owned(); nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1_000_000); @@ -565,18 +565,18 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 3_000_100); let mut b_htlc_msgs = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); // We claim the dust payment here as well, but it won't impact our claimable balances as its // dust and thus doesn't appear on chain at all. 
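// Illustrative sketch (not part of the patch): why the dust claim just below has
// no claimable-balance impact. HTLCs under the channel's dust limit get no output
// on the commitment transaction, so there is nothing on chain to claim; the
// 354-sat threshold here is only an assumed example value.
fn main() {
    let dust_limit_sat: u64 = 354;
    for (name, amt_msat) in [("dust HTLC", 3_000u64), ("claimed HTLC", 3_000_100)] {
        let has_output = amt_msat / 1_000 >= dust_limit_sat;
        println!("{name} ({amt_msat} msat): commitment output = {has_output}");
    }
}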
nodes[1].node.claim_funds(dust_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], dust_payment_hash, 3_000); nodes[1].node.claim_funds(timeout_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], timeout_payment_hash, 4_000_200); if prev_commitment_tx { @@ -585,14 +585,14 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_fulfill); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &b_htlc_msgs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_raa, as_cs) = get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); let _htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs); let _bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } // Once B has received the payment preimage, it includes the value of the HTLC in its @@ -681,11 +681,11 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c assert_eq!(remote_txn[0].output[b_broadcast_txn[1].input[0].previous_output.vout as usize].value.to_sat(), 4_000); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); assert!(nodes[0].node.list_channels().is_empty()); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); @@ -885,7 +885,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); @@ -897,7 +897,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 20_000_000); nodes[0].node.send_payment_with_route(route_2, payment_hash_2, RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = 
get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); @@ -907,7 +907,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 20_000_000); nodes[1].node.claim_funds(payment_preimage_2); get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_2, 20_000_000); let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64; @@ -918,7 +918,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let message = "Channel force-closed".to_owned(); let node_a_commitment_claimable = nodes[0].best_block_info().1 + BREAKDOWN_TIMEOUT as u32; nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1000000); @@ -980,7 +980,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b // Get nodes[1]'s HTLC claim tx for the second HTLC mine_transaction(&nodes[1], &commitment_tx); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_htlc_claim_txn.len(), 1); @@ -1210,7 +1210,7 @@ fn test_no_preimage_inbound_htlc_balances() { mine_transaction(&nodes[0], &as_txn[0]); nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); assert_eq!(as_pre_spend_claims, @@ -1218,7 +1218,7 @@ fn test_no_preimage_inbound_htlc_balances() { mine_transaction(&nodes[1], &as_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); let node_b_commitment_claimable = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -1427,12 +1427,12 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc nodes[1].node.claim_funds(claimed_payment_preimage); expect_payment_claimed!(nodes[1], claimed_payment_hash, 3_000_100); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let _b_htlc_msgs = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); connect_blocks(&nodes[0], htlc_cltv_timeout + 1 - 10); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 5); @@ -1461,7 +1461,7 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc connect_blocks(&nodes[1], 
htlc_cltv_timeout + 1 - 10); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_events(&nodes[1], &[ExpectedCloseEvent { channel_capacity_sats: Some(1_000_000), channel_id: Some(chan_id), @@ -1723,7 +1723,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor // B will generate an HTLC-Success from its revoked commitment tx mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); if keyed_anchors || p2a_anchor { handle_bump_htlc_event(&nodes[1], 1); @@ -1767,7 +1767,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor // A will generate justice tx from B's revoked commitment/HTLC tx mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); let to_remote_conf_height = nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -2020,7 +2020,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho nodes[0].node.claim_funds(claimed_payment_preimage); expect_payment_claimed!(nodes[0], claimed_payment_hash, 3_000_100); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let _a_htlc_msgs = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose { @@ -2049,7 +2049,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho mine_transaction(&nodes[1], &as_revoked_txn[0]); check_closed_broadcast!(nodes[1], true); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut claim_txn = nodes[1].tx_broadcaster.txn_broadcast(); assert_eq!(claim_txn.len(), 2); @@ -2635,9 +2635,9 @@ fn do_test_yield_anchors_events(have_htlcs: bool, p2a_anchor: bool) { } mine_transactions(&nodes[0], &[&commitment_tx, &anchor_tx]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); mine_transactions(&nodes[1], &[&commitment_tx, &anchor_tx]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if !have_htlcs { // If we don't have any HTLCs, we're done, the rest of the test is about HTLC transactions @@ -2828,7 +2828,7 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { } } check_closed_broadcast(&nodes[0], 2, true); - check_added_monitors!(&nodes[0], 2); + check_added_monitors(&nodes[0], 2); check_closed_event(&nodes[0], 2, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id(); 2], 1000000); // Alice should detect the confirmed revoked commitments, and attempt to claim all of the @@ -3167,13 +3167,13 @@ fn do_test_monitor_claims_with_random_signatures(keyed_anchors: bool, p2a_anchor mine_transaction(closing_node, anchor_tx.as_ref().unwrap()); } check_closed_broadcast!(closing_node, true); - check_added_monitors!(closing_node, 1); + check_added_monitors(&closing_node, 1); let message = "ChannelMonitor-initiated commitment transaction broadcast".to_string(); 
check_closed_event(&closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }, &[other_node.node.get_our_node_id()], 1_000_000); mine_transaction(other_node, &commitment_tx); check_closed_broadcast!(other_node, true); - check_added_monitors!(other_node, 1); + check_added_monitors(&other_node, 1); check_closed_event(&other_node, 1, ClosureReason::CommitmentTxConfirmed, &[closing_node.node.get_our_node_id()], 1_000_000); // If we update the best block to the new height before providing the confirmed transactions, diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 4c53aefe58d..906d9e247ce 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -2414,7 +2414,7 @@ fn rejects_keysend_to_non_static_invoice_path() { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), keysend_payment_id, route_params, Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); @@ -2482,7 +2482,7 @@ fn no_double_pay_with_stale_channelmanager() { let expected_route: &[&[&Node]] = &[&[&nodes[1]], &[&nodes[1]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); let ev = remove_first_msg_event_to_node(&bob_id, &mut events); let args = PassAlongPathArgs::new(&nodes[0], expected_route[0], amt_msat, payment_hash, ev) @@ -2507,7 +2507,7 @@ fn no_double_pay_with_stale_channelmanager() { reload_node!(nodes[0], &alice_chan_manager_serialized, &[&monitor_0, &monitor_1], persister, chain_monitor, alice_deserialized); // The stale manager results in closing the channels. check_closed_event(&nodes[0], 2, ClosureReason::OutdatedChannelManager, &[bob_id, bob_id], 10_000_000); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); // Alice receives a duplicate invoice, but the payment should be transitioned to Retryable by now. 
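// Illustrative sketch (not part of the patch): the double-pay guard the comment
// above describes, modeled as a set of known PaymentIds. Once a payment is
// in-flight or Retryable for an id, a duplicate invoice for that id must not
// trigger a second send.
use std::collections::HashSet;

fn main() {
    let mut known_payments: HashSet<[u8; 32]> = HashSet::new();
    let payment_id = [42u8; 32];
    assert!(known_payments.insert(payment_id)); // first invoice: send the payment
    assert!(!known_payments.insert(payment_id)); // duplicate invoice: no second send
}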
nodes[0].onion_messenger.handle_onion_message(bob_id, &invoice_om); diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index f9b4ab28e88..03557469537 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -133,7 +133,7 @@ fn run_onion_failure_test_with_fail_intercept( .node .send_payment_with_route(route.clone(), *payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); // temper update_add (0 => 1) let mut update_add_0 = update_0.update_add_htlcs[0].clone(); @@ -170,7 +170,7 @@ fn run_onion_failure_test_with_fail_intercept( expect_htlc_forward!(&nodes[1]); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert_eq!(update_1.update_add_htlcs.len(), 1); // tamper update_add (1 => 2) let mut update_add_1 = update_1.update_add_htlcs[0].clone(); @@ -202,7 +202,7 @@ fn run_onion_failure_test_with_fail_intercept( }, _ => {}, } - check_added_monitors!(&nodes[2], 1); + check_added_monitors(&nodes[2], 1); let update_2_1 = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); assert!(update_2_1.update_fail_htlcs.len() == 1); @@ -405,7 +405,7 @@ fn test_fee_failures() { .node .send_payment_with_route(route.clone(), payment_hash_success, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -456,7 +456,7 @@ fn test_fee_failures() { .node .send_payment_with_route(route, payment_hash_success, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -1548,7 +1548,7 @@ fn test_overshoot_final_cltv() { .send_payment_with_route(route, payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add_0 = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_0); @@ -1567,7 +1567,7 @@ fn test_overshoot_final_cltv() { } expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); let mut update_add_1 = update_1.update_add_htlcs[0].clone(); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_1); @@ -2285,7 +2285,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -2300,7 +2300,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { &nodes[1], &[HTLCHandlingFailureType::Receive { payment_hash }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); 
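// Illustrative sketch (not part of the patch): the intercept-and-tamper pattern
// run_onion_failure_test_with_fail_intercept uses above. The harness grabs the
// outbound update_add_htlc, lets a closure corrupt it, then delivers the mutated
// copy. ToyUpdateAdd is a hypothetical stand-in for msgs::UpdateAddHTLC.
#[derive(Clone, Debug, PartialEq)]
struct ToyUpdateAdd { amount_msat: u64 }

fn tamper_and_deliver(msg: &ToyUpdateAdd, tamper: impl Fn(&mut ToyUpdateAdd)) -> ToyUpdateAdd {
    let mut mutated = msg.clone();
    tamper(&mut mutated); // corrupt the message before "delivery"
    mutated
}

fn main() {
    let original = ToyUpdateAdd { amount_msat: 1_000_000 };
    let delivered = tamper_and_deliver(&original, |m| m.amount_msat -= 1);
    assert_ne!(delivered, original);
}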
assert_eq!(events.len(), 1); @@ -2435,7 +2435,7 @@ fn test_phantom_onion_hmac_failure() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2470,7 +2470,7 @@ fn test_phantom_onion_hmac_failure() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2508,7 +2508,7 @@ fn test_phantom_invalid_onion_payload() { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2571,7 +2571,7 @@ fn test_phantom_invalid_onion_payload() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2607,7 +2607,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2637,7 +2637,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2676,7 +2676,7 @@ fn test_phantom_failure_too_low_cltv() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2691,7 +2691,7 @@ fn test_phantom_failure_too_low_cltv() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2729,7 +2729,7 @@ fn test_phantom_failure_modified_cltv() { .node .send_payment_with_route(route, payment_hash, recipient_onion, 
PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2784,7 +2784,7 @@ fn test_phantom_failure_expires_too_soon() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2834,7 +2834,7 @@ fn test_phantom_failure_too_low_recv_amt() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2851,7 +2851,7 @@ fn test_phantom_failure_too_low_recv_amt() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2904,7 +2904,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2954,7 +2954,7 @@ fn test_phantom_failure_reject_payment() { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2981,7 +2981,7 @@ fn test_phantom_failure_reject_payment() { nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 6c982738a52..f9894fa8819 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -144,7 +144,7 @@ fn mpp_retry() { let onion = RecipientOnionFields::secret_only(pay_secret); let retry = Retry::Attempts(1); nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap(); - check_added_monitors!(nodes[0], 2); // one monitor per path + check_added_monitors(&nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); @@ -169,7 +169,7 @@ fn mpp_retry() { assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); 
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 6c982738a52..f9894fa8819 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -144,7 +144,7 @@ fn mpp_retry() {
 	let onion = RecipientOnionFields::secret_only(pay_secret);
 	let retry = Retry::Attempts(1);
 	nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap();
-	check_added_monitors!(nodes[0], 2); // one monitor per path
+	check_added_monitors(&nodes[0], 2); // one monitor per path
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 2);
@@ -169,7 +169,7 @@ fn mpp_retry() {
 	assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
 	assert!(htlc_updates.update_fulfill_htlcs.is_empty());
 	assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]);
 	do_commitment_signed_dance(&nodes[0], &nodes[2], &htlc_updates.commitment_signed, false, false);
 	let mut events = nodes[0].node.get_and_clear_pending_events();
@@ -191,7 +191,7 @@ fn mpp_retry() {
 	route.route_params = Some(route_params.clone());
 	nodes[0].router.expect_find_route(route_params, Ok(route));
 	expect_and_process_pending_htlcs(&nodes[0], false);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let event = events.pop().unwrap();
@@ -262,7 +262,7 @@ fn mpp_retry_overpay() {
 	let onion = RecipientOnionFields::secret_only(pay_secret);
 	let retry = Retry::Attempts(1);
 	nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap();
-	check_added_monitors!(nodes[0], 2); // one monitor per path
+	check_added_monitors(&nodes[0], 2); // one monitor per path
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 2);
@@ -288,7 +288,7 @@ fn mpp_retry_overpay() {
 	assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
 	assert!(htlc_updates.update_fulfill_htlcs.is_empty());
 	assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]);
 	do_commitment_signed_dance(&nodes[0], &nodes[2], &htlc_updates.commitment_signed, false, false);
 	let mut events = nodes[0].node.get_and_clear_pending_events();
@@ -314,7 +314,7 @@ fn mpp_retry_overpay() {
 	nodes[0].router.expect_find_route(route_params, Ok(route));
 
 	nodes[0].node.process_pending_htlc_forwards();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let event = events.pop().unwrap();
@@ -362,7 +362,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
 	// Initiate the MPP payment.
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	nodes[0].node.send_payment_with_route(route, hash, onion, PaymentId(hash.0)).unwrap();
-	check_added_monitors!(nodes[0], 2); // one monitor per path
+	check_added_monitors(&nodes[0], 2); // one monitor per path
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 2);
 
@@ -384,7 +384,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
 		let htlc_fail_updates = get_htlc_update_msgs(&nodes[3], &node_b_id);
 		assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1);
 		nodes[1].node.handle_update_fail_htlc(node_d_id, &htlc_fail_updates.update_fail_htlcs[0]);
-		check_added_monitors!(nodes[3], 1);
+		check_added_monitors(&nodes[3], 1);
 		let commitment = &htlc_fail_updates.commitment_signed;
 		do_commitment_signed_dance(&nodes[1], &nodes[3], commitment, false, false);
 
@@ -397,7 +397,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
 		let htlc_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 		assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1);
 		nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]);
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 		let commitment = &htlc_fail_updates.commitment_signed;
 		do_commitment_signed_dance(&nodes[0], &nodes[1], commitment, false, false);
 
@@ -461,7 +461,7 @@ fn do_test_keysend_payments(public_node: bool) {
 		nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap();
 	}
 
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let send_event = SendEvent::from_node(&nodes[0]);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false);
@@ -510,7 +510,7 @@ fn test_mpp_keysend() {
 	let id = PaymentId([42; 32]);
 	let hash =
 		nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap();
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 
 	let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -553,7 +553,7 @@ fn test_fulfill_hold_times() {
 	let id = PaymentId([42; 32]);
 	let hash =
 		nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]];
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -621,7 +621,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
 	let onion = RecipientOnionFields::spontaneous_empty();
 	let retry = Retry::Attempts(0);
 	nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_0, params, retry).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let update_0 = get_htlc_update_msgs(&nodes[0], &node_b_id);
 	let update_add_0 = update_0.update_add_htlcs[0].clone();
@@ -629,7 +629,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &update_0.commitment_signed, false, true);
 
 	expect_and_process_pending_htlcs(&nodes[1], false);
-	check_added_monitors!(&nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let update_1 = get_htlc_update_msgs(&nodes[1], &node_d_id);
 	let update_add_1 = update_1.update_add_htlcs[0].clone();
 	nodes[3].node.handle_update_add_htlc(node_b_id, &update_add_1);
@@ -670,7 +670,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
 	let params = route.route_params.clone().unwrap();
 	let retry = Retry::Attempts(0);
 	nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_1, params, retry).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let update_2 = get_htlc_update_msgs(&nodes[0], &node_c_id);
 	let update_add_2 = update_2.update_add_htlcs[0].clone();
@@ -678,7 +678,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
 	do_commitment_signed_dance(&nodes[2], &nodes[0], &update_2.commitment_signed, false, true);
 
 	expect_and_process_pending_htlcs(&nodes[2], false);
-	check_added_monitors!(&nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	let update_3 = get_htlc_update_msgs(&nodes[2], &node_d_id);
 	let update_add_3 = update_3.update_add_htlcs[0].clone();
 	nodes[3].node.handle_update_add_htlc(node_c_id, &update_add_3);
@@ -710,7 +710,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
 	nodes[3].node.process_pending_htlc_forwards();
 	let fail_type = HTLCHandlingFailureType::Receive { payment_hash };
 	expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[3], &[fail_type]);
-	check_added_monitors!(nodes[3], 1);
+	check_added_monitors(&nodes[3], 1);
 
 	// Fail back along nodes[2]
 	let update_fail_0 = get_htlc_update_msgs(&nodes[3], &node_c_id);
@@ -721,7 +721,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
 	let fail_type =
 		HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_chan_id };
 	expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail_type]);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	let update_fail_1 = get_htlc_update_msgs(&nodes[2], &node_a_id);
 	nodes[0].node.handle_update_fail_htlc(node_c_id, &update_fail_1.update_fail_htlcs[0]);
@@ -806,7 +806,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -862,7 +862,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	} else {
 		assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
 	}
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	nodes[1].node.peer_disconnected(node_a_id);
 
@@ -890,7 +890,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 			nodes[1].node.handle_error(node_a_id, msg);
 			check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &node_b_id)) }
 				, &[node_a_id], 100000);
-			check_added_monitors!(nodes[1], 1);
+			check_added_monitors(&nodes[1], 1);
 			assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
 			nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
 		},
@@ -901,13 +901,13 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	// Now claim the first payment, which should allow nodes[1] to claim the payment on-chain when
 	// we close in a moment.
 	nodes[2].node.claim_funds(payment_preimage_1);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);
 
 	let mut htlc_fulfill = get_htlc_update_msgs(&nodes[2], &node_b_id);
 	let fulfill_msg = htlc_fulfill.update_fulfill_htlcs.remove(0);
 	nodes[1].node.handle_update_fulfill_htlc(node_c_id, fulfill_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	do_commitment_signed_dance(&nodes[1], &nodes[2], &htlc_fulfill.commitment_signed, false, false);
 	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false);
 
@@ -990,7 +990,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	let id = PaymentId(payment_hash.0);
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -1071,7 +1071,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[0].node.has_pending_payments());
 	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let init_msg = msgs::Init {
 		features: nodes[1].node.init_features(),
@@ -1102,7 +1102,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 			);
 			let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(msg) };
 			check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000);
-			check_added_monitors!(nodes[1], 1);
+			check_added_monitors(&nodes[1], 1);
 			bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 		},
 		_ => panic!("Unexpected event"),
@@ -1115,7 +1115,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	nodes[2].node.fail_htlc_backwards(&hash);
 	let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash };
 	expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail_type]);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	let htlc_fulfill_updates = get_htlc_update_msgs(&nodes[2], &node_b_id);
 	nodes[1].node.handle_update_fail_htlc(node_c_id, &htlc_fulfill_updates.update_fail_htlcs[0]);
@@ -1197,7 +1197,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	// the payment is not (spuriously) listed as still pending.
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], amt, hash, payment_secret);
 	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
 
@@ -1271,7 +1271,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(
 		.force_close_broadcasting_latest_txn(&chan_id, &node_b_id, message.clone())
 		.unwrap();
 	check_closed_broadcast!(nodes[0], true);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000);
 
@@ -1289,12 +1289,12 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(
 	};
 
 	nodes[1].node.claim_funds(payment_preimage);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
 
 	mine_transaction(&nodes[1], &commitment_tx);
 	check_closed_broadcast(&nodes[1], 1, false);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let reason = ClosureReason::CommitmentTxConfirmed;
 	check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000);
 	let htlc_success_tx = {
@@ -1450,7 +1450,7 @@ fn test_fulfill_restart_failure() {
 	let mon_ser = get_monitor!(nodes[1], chan_id).encode();
 
 	nodes[1].node.claim_funds(payment_preimage);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(nodes[1], payment_hash, 100_000);
 
 	let mut htlc_fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id);
@@ -1467,7 +1467,7 @@ fn test_fulfill_restart_failure() {
 	nodes[1].node.fail_htlc_backwards(&payment_hash);
 	let fail_type = HTLCHandlingFailureType::Receive { payment_hash };
 	expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let htlc_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]);
@@ -1517,7 +1517,7 @@ fn get_ldk_payment_preimage() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route.unwrap(), payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	// Make sure to use `get_payment_preimage`
 	let preimage = Some(nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap());
@@ -1560,7 +1560,7 @@ fn sent_probe_is_probe_of_sending_node() {
 	}
 
 	get_htlc_update_msgs(&nodes[0], &node_b_id);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 }
 
 #[test]
@@ -1607,20 +1607,20 @@ fn failed_probe_yields_event() {
 	let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
 
 	// node[0] -- update_add_htlcs -> node[1]
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let updates = get_htlc_update_msgs(&nodes[0], &node_b_id);
 	let probe_event = SendEvent::from_commitment_update(node_b_id, channel_id, updates);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]);
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &probe_event.commitment_msg, false, false);
 	expect_and_process_pending_htlcs(&nodes[1], true);
 
 	// node[0] <- update_fail_htlcs -- node[1]
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	let _events = nodes[1].node.get_and_clear_pending_events();
 	nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]);
-	check_added_monitors!(nodes[0], 0);
+	check_added_monitors(&nodes[0], 0);
 	do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
 
 	let mut events = nodes[0].node.get_and_clear_pending_events();
@@ -1658,15 +1658,15 @@ fn onchain_failed_probe_yields_event() {
 	let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
 
 	// node[0] -- update_add_htlcs -> node[1]
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let updates = get_htlc_update_msgs(&nodes[0], &node_b_id);
 	let probe_event = SendEvent::from_commitment_update(node_b_id, chan_id, updates);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]);
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &probe_event.commitment_msg, false, false);
 
 	expect_and_process_pending_htlcs(&nodes[1], false);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let _ = get_htlc_update_msgs(&nodes[1], &node_c_id);
 
 	// Don't bother forwarding the HTLC onwards and just confirm the force-close transaction on
@@ -1674,7 +1674,7 @@ fn onchain_failed_probe_yields_event() {
 	let bs_txn = get_local_commitment_txn!(nodes[1], chan_id);
 	confirm_transaction(&nodes[0], &bs_txn[0]);
 	check_closed_broadcast!(&nodes[0], true);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	check_added_monitors(&nodes[0], 0);
 
 	let mut events = nodes[0].node.get_and_clear_pending_events();
@@ -1925,7 +1925,7 @@ fn claimed_send_payment_idempotent() {
 
 	let onion = RecipientOnionFields::secret_only(second_payment_secret);
 	nodes[0].node.send_payment_with_route(route, hash_b, onion, payment_id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, hash_b, second_payment_secret);
 	claim_payment(&nodes[0], &[&nodes[1]], preimage_b);
 }
@@ -1994,7 +1994,7 @@ fn abandoned_send_payment_idempotent() {
 	// failed payment back.
 	let onion = RecipientOnionFields::secret_only(second_payment_secret);
 	nodes[0].node.send_payment_with_route(route, hash_b, onion, payment_id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, hash_b, second_payment_secret);
 	claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage);
 }
@@ -2163,12 +2163,12 @@ fn test_holding_cell_inflight_htlcs() {
 		let onion = RecipientOnionFields::secret_only(payment_secret_1);
 		let id = PaymentId(payment_hash_1.0);
 		nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion, id).unwrap();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 
 		let onion = RecipientOnionFields::secret_only(payment_secret_2);
 		let id = PaymentId(payment_hash_2.0);
 		nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap();
-		check_added_monitors!(nodes[0], 0);
+		check_added_monitors(&nodes[0], 0);
 	}
 
 	let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
@@ -2309,7 +2309,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 		expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[fail]);
 		nodes[1].node.process_pending_htlc_forwards();
 		let update_fail = get_htlc_update_msgs(&nodes[1], &node_a_id);
-		check_added_monitors!(&nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 		assert!(update_fail.update_fail_htlcs.len() == 1);
 		let fail_msg = update_fail.update_fail_htlcs[0].clone();
 		nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg);
@@ -2394,7 +2394,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 		let fail_type =
 			HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid };
 		expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]);
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 
 		let htlc_fail = get_htlc_update_msgs(&nodes[1], &node_a_id);
 		assert!(htlc_fail.update_add_htlcs.is_empty());
@@ -2490,7 +2490,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
 
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(0)).unwrap();
-	check_added_monitors!(nodes[0], num_mpp_parts); // one monitor per path
+	check_added_monitors(&nodes[0], num_mpp_parts); // one monitor per path
 
 	let events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), num_mpp_parts);
@@ -2647,7 +2647,7 @@ fn do_automatic_retries(test: AutoRetry) {
 	macro_rules! pass_failed_attempt_with_retry_along_path {
 		($failing_channel_id: expr, $expect_pending_htlcs_forwardable: expr) => {
 			// Send a payment attempt that fails due to lack of liquidity on the second hop
-			check_added_monitors!(nodes[0], 1);
+			check_added_monitors(&nodes[0], 1);
 			let update_0 = get_htlc_update_msgs(&nodes[0], &node_b_id);
 			let mut update_add = update_0.update_add_htlcs[0].clone();
 			nodes[1].node.handle_update_add_htlc(node_a_id, &update_add);
@@ -2664,7 +2664,7 @@ fn do_automatic_retries(test: AutoRetry) {
 			);
 			nodes[1].node.process_pending_htlc_forwards();
 			let update_1 = get_htlc_update_msgs(&nodes[1], &node_a_id);
-			check_added_monitors!(&nodes[1], 1);
+			check_added_monitors(&nodes[1], 1);
 			assert!(update_1.update_fail_htlcs.len() == 1);
 			let fail_msg = update_1.update_fail_htlcs[0].clone();
 			nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg);
@@ -2710,7 +2710,7 @@ fn do_automatic_retries(test: AutoRetry) {
 
 		// We retry payments in `process_pending_htlc_forwards`
 		nodes[0].node.process_pending_htlc_forwards();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 
 		let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(msg_events.len(), 1);
@@ -2738,7 +2738,7 @@ fn do_automatic_retries(test: AutoRetry) {
 
 		// We retry payments in `process_pending_htlc_forwards`
 		nodes[0].node.process_pending_htlc_forwards();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 
 		let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(msg_events.len(), 1);
@@ -3008,7 +3008,7 @@ fn auto_retry_partial_failure() {
 	}
 
 	// Pass the first part of the payment along the path.
-	check_added_monitors!(nodes[0], 1); // only one HTLC actually made it out
+	check_added_monitors(&nodes[0], 1); // only one HTLC actually made it out
 	let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 
 	// Only one HTLC/channel update actually made it out
@@ -3017,35 +3017,35 @@ fn auto_retry_partial_failure() {
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_2nd_htlcs = SendEvent::from_node(&nodes[0]);
 
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &as_2nd_htlcs.msgs[0]);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &as_2nd_htlcs.msgs[1]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_2nd_htlcs.commitment_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]);
 	nodes[1].node.process_pending_htlc_forwards();
 
@@ -3058,19 +3058,19 @@ fn auto_retry_partial_failure() {
 	nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_claim.update_fulfill_htlcs.remove(0));
 	expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_claim.commitment_signed);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let (as_third_raa, as_third_cs) = get_revoke_commit_msgs(&nodes[0], &node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_third_raa);
-	check_added_monitors!(nodes[1], 4);
+	check_added_monitors(&nodes[1], 4);
 	let mut bs_2nd_claim = get_htlc_update_msgs(&nodes[1], &node_a_id);
 
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_third_cs);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	expect_payment_path_successful!(nodes[0]);
 
 	let bs_second_fulfill_a = bs_2nd_claim.update_fulfill_htlcs.remove(0);
@@ -3078,18 +3078,18 @@ fn auto_retry_partial_failure() {
 	nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_second_fulfill_a);
 	nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_second_fulfill_b);
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_2nd_claim.commitment_signed);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs(&nodes[0], &node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_fourth_raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_fourth_cs);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 2);
 	if let Event::PaymentPathSuccessful { .. } = events[0] {
@@ -3167,7 +3167,7 @@ fn auto_retry_zero_attempts_send_error() {
 	} else {
 		panic!();
 	}
-	check_added_monitors!(nodes[0], 0);
+	check_added_monitors(&nodes[0], 0);
 }
 
 #[test]
@@ -3203,12 +3203,12 @@ fn fails_paying_after_rejected_by_payee() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let mut payment_event = SendEvent::from_event(events.pop().unwrap());
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false);
 	expect_and_process_pending_htlcs(&nodes[1], false);
 	expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat);
@@ -3336,7 +3336,7 @@ fn retry_multi_path_single_failed_payment() {
 	}
 	let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(htlc_msgs.len(), 2);
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 }
 
 #[test]
@@ -3417,7 +3417,7 @@ fn immediate_retry_on_failure() {
 	}
 	let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(htlc_msgs.len(), 2);
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 }
 
 #[test]
@@ -3541,40 +3541,40 @@ fn no_extra_retries_on_back_to_back_fail() {
 	nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap();
 
 	let first_htlc = SendEvent::from_node(&nodes[0]);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	assert_eq!(first_htlc.msgs.len(), 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc.msgs[0]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc.commitment_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let second_htlc = SendEvent::from_node(&nodes[0]);
 	assert_eq!(second_htlc.msgs.len(), 1);
 
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc.msgs[0]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &second_htlc.commitment_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_and_process_pending_htlcs(&nodes[1], false);
 
 	let next_hop_failure =
@@ -3631,7 +3631,7 @@ fn no_extra_retries_on_back_to_back_fail() {
 	nodes[0].node.process_pending_htlc_forwards();
 
 	let retry_htlc_updates = SendEvent::from_node(&nodes[0]);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]);
 	let commitment = &retry_htlc_updates.commitment_msg;
@@ -3785,26 +3785,26 @@ fn test_simple_partial_retry() {
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap();
 	let first_htlc = SendEvent::from_node(&nodes[0]);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	assert_eq!(first_htlc.msgs.len(), 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc.msgs[0]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc.commitment_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let second_htlc_updates = SendEvent::from_node(&nodes[0]);
 	assert_eq!(second_htlc_updates.msgs.len(), 1);
 
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc_updates.msgs[0]);
 	let commitment = &second_htlc_updates.commitment_msg;
@@ -3860,14 +3860,14 @@ fn test_simple_partial_retry() {
 	nodes[0].node.process_pending_htlc_forwards();
 
 	let retry_htlc_updates = SendEvent::from_node(&nodes[0]);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]);
 	let commitment = &retry_htlc_updates.commitment_msg;
 	do_commitment_signed_dance(&nodes[1], &nodes[0], commitment, false, true);
 
 	expect_and_process_pending_htlcs(&nodes[1], false);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let bs_second_forward = get_htlc_update_msgs(&nodes[1], &node_c_id);
 
 	nodes[2].node.handle_update_add_htlc(node_b_id, &bs_second_forward.update_add_htlcs[0]);
@@ -3987,7 +3987,7 @@ fn test_threaded_payment_retries() {
 	let id = PaymentId(payment_hash.0);
 	let retry = Retry::Attempts(0xdeadbeef);
 	nodes[0].node.send_payment(payment_hash, onion, id, route_params.clone(), retry).unwrap();
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 	let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(send_msg_events.len(), 2);
 	send_msg_events.retain(|msg| {
@@ -4086,7 +4086,7 @@ fn test_threaded_payment_retries() {
 
 		nodes[0].node.process_pending_htlc_forwards();
 		send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
-		check_added_monitors!(nodes[0], 2);
+		check_added_monitors(&nodes[0], 2);
 
 		if cur_time > end_time {
 			break;
@@ -4124,14 +4124,14 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint:
 	}
 
 	nodes[1].node.claim_funds(our_payment_preimage);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
 
 	if at_midpoint {
 		let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 		nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0));
 		nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed);
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 	} else {
 		let mut fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id);
 		nodes[0].node.handle_update_fulfill_htlc(node_b_id, fulfill.update_fulfill_htlcs.remove(0));
@@ -4466,7 +4466,7 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) {
 
 	let mut payment_event = SendEvent::from_event(ev);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
-	check_added_monitors!(&nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false);
 
 	expect_and_process_pending_htlcs(&nodes[1], false);
@@ -4536,7 +4536,7 @@ fn test_retry_custom_tlvs() {
 	nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
 
 	nodes[0].node.send_payment(hash, onion, id, route_params.clone(), Retry::Attempts(1)).unwrap();
-	check_added_monitors!(nodes[0], 1); // one monitor per path
+	check_added_monitors(&nodes[0], 1); // one monitor per path
 
 	// Add the HTLC along the first hop.
 	let htlc_updates = get_htlc_update_msgs(&nodes[0], &node_b_id);
@@ -4550,7 +4550,7 @@ fn test_retry_custom_tlvs() {
 	let events = nodes[1].node.get_and_clear_pending_events();
 	let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2_id };
 	expect_htlc_failure_conditions(events, &[fail]);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let htlc_updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates;
@@ -4571,7 +4571,7 @@ fn test_retry_custom_tlvs() {
 	route.route_params = Some(route_params.clone());
 	nodes[0].router.expect_find_route(route_params, Ok(route));
 	nodes[0].node.process_pending_htlc_forwards();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let path = &[&nodes[1], &nodes[2]];
@@ -4673,7 +4673,7 @@ fn do_test_custom_tlvs_consistency(
 		.node
 		.test_send_payment_along_path(path_a, &hash, onion, amt_msat, cur_height, id, &None, priv_a)
 		.unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -4695,7 +4695,7 @@ fn do_test_custom_tlvs_consistency(
 		.node
 		.test_send_payment_along_path(path_b, &hash, onion, amt_msat, cur_height, id, &None, priv_b)
 		.unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	{
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -4707,14 +4707,14 @@ fn do_test_custom_tlvs_consistency(
 		do_commitment_signed_dance(&nodes[2], &nodes[0], commitment, false, false);
 
 		expect_and_process_pending_htlcs(&nodes[2], false);
-		check_added_monitors!(nodes[2], 1);
+		check_added_monitors(&nodes[2], 1);
 
 		let mut events = nodes[2].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 1);
 
 		let payment_event = SendEvent::from_event(events.pop().unwrap());
 		nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]);
-		check_added_monitors!(nodes[3], 0);
+		check_added_monitors(&nodes[3], 0);
 		do_commitment_signed_dance(&nodes[3], &nodes[2], &payment_event.commitment_msg, true, true);
 	}
 	expect_htlc_failure_conditions(nodes[3].node.get_and_clear_pending_events(), &[]);
@@ -4743,7 +4743,7 @@ fn do_test_custom_tlvs_consistency(
 		&nodes[3],
 		&expected_destinations,
 	);
-	check_added_monitors!(nodes[3], 1);
+	check_added_monitors(&nodes[3], 1);
 
 	let fail_updates_1 = get_htlc_update_msgs(&nodes[3], &node_c_id);
 	nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]);
@@ -4753,7 +4753,7 @@ fn do_test_custom_tlvs_consistency(
 	let fail =
 		HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 };
 	expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail]);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	let fail_updates_2 = get_htlc_update_msgs(&nodes[2], &node_a_id);
 	nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]);
@@ -4815,7 +4815,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
 	};
 	let retry = Retry::Attempts(1);
 	nodes[0].node.send_payment(payment_hash, onion, payment_id, route_params, retry).unwrap();
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 
 	let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(send_events.len(), 2);
@@ -5009,7 +5009,7 @@ fn test_htlc_forward_considers_anchor_outputs_value() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -5169,7 +5169,7 @@ fn test_non_strict_forwarding() {
 		let onion = RecipientOnionFields::secret_only(payment_secret);
 		let id = PaymentId(payment_hash.0);
 		nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 		let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(msg_events.len(), 1);
 		let mut send_event = SendEvent::from_event(msg_events.remove(0));
@@ -5177,7 +5177,7 @@ fn test_non_strict_forwarding() {
 		do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false);
 
 		expect_and_process_pending_htlcs(&nodes[1], false);
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 		msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 		assert_eq!(msg_events.len(), 1);
 		send_event = SendEvent::from_event(msg_events.remove(0));
@@ -5209,7 +5209,7 @@ fn test_non_strict_forwarding() {
 
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(msg_events.len(), 1);
 	let mut send_event = SendEvent::from_event(msg_events.remove(0));
@@ -5217,7 +5217,7 @@ fn test_non_strict_forwarding() {
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false);
 
 	expect_and_process_pending_htlcs(&nodes[1], true);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let routed_scid = route.paths[0].hops[1].short_channel_id;
 	let routed_chan_id = match routed_scid {
 		scid if scid == chan_update_1.contents.short_channel_id => channel_id_1,
@@ -5346,7 +5346,7 @@ fn pay_route_without_params() {
 
 	let id = PaymentId(hash.0);
 	nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events);
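Several payment_tests hunks above (auto_retry_partial_failure, no_extra_retries_on_back_to_back_fail, test_simple_partial_retry) unroll do_commitment_signed_dance into its constituent revoke_and_ack and commitment_signed messages, asserting one monitor update per message handled. The following is a toy model of that bookkeeping, using stand-in types rather than LDK's channel machinery; the exact points at which LDK persists are more involved than shown here.

// Toy model (not LDK's types or API) of the commitment_signed dance that the
// hunks above unroll by hand. In this simplification, each commitment_signed
// or revoke_and_ack received persists one monitor update, which is what the
// interleaved `check_added_monitors(.., 1)` calls count.
#[derive(Debug, PartialEq)]
enum Msg {
	CommitmentSigned,
	RevokeAndAck,
}

struct Channel {
	monitor_updates: usize,
}

impl Channel {
	// Receiving commitment_signed commits new state and persists the monitor,
	// then we respond with revoke_and_ack plus our own commitment_signed.
	fn handle_commitment_signed(&mut self) -> [Msg; 2] {
		self.monitor_updates += 1;
		[Msg::RevokeAndAck, Msg::CommitmentSigned]
	}

	// Receiving revoke_and_ack also persists the monitor: the revocation
	// secret must be durable before the channel moves on.
	fn handle_revoke_and_ack(&mut self) {
		self.monitor_updates += 1;
	}
}

fn main() {
	let mut a = Channel { monitor_updates: 0 };
	let mut b = Channel { monitor_updates: 0 };

	// A -> B: commitment_signed; B -> A: revoke_and_ack + commitment_signed.
	let [_raa, _cs] = b.handle_commitment_signed();
	a.handle_revoke_and_ack();
	let [_raa2, _cs2] = a.handle_commitment_signed();
	b.handle_revoke_and_ack();

	// Mirrors the alternating `check_added_monitors(&nodes[i], 1)` assertions.
	assert_eq!(a.monitor_updates, 2);
	assert_eq!(b.monitor_updates, 2);
}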
diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs
index ab7cad9be44..83aaca24203 100644
--- a/lightning/src/ln/priv_short_conf_tests.rs
+++ b/lightning/src/ln/priv_short_conf_tests.rs
@@ -83,7 +83,7 @@ fn test_priv_forwarding_rejection() {
 	let onion = RecipientOnionFields::secret_only(our_payment_secret);
 	let id = PaymentId(our_payment_hash.0);
 	nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let payment_event =
 		SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
@@ -166,7 +166,7 @@ fn test_priv_forwarding_rejection() {
 	let onion = RecipientOnionFields::secret_only(our_payment_secret);
 	let id = PaymentId(our_payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	pass_along_route(
 		&nodes[0],
 		&[&[&nodes[1], &nodes[2]]],
@@ -350,7 +350,7 @@ fn test_routed_scid_alias() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret);
 
@@ -514,7 +514,7 @@ fn test_inbound_scid_privacy() {
 		node_b_id,
 		&get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, node_c_id),
 	);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	let cs_funding_signed =
 		get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_b_id);
@@ -522,7 +522,7 @@ fn test_inbound_scid_privacy() {
 
 	nodes[1].node.handle_funding_signed(node_c_id, &cs_funding_signed);
 	expect_channel_pending_event(&nodes[1], &node_c_id);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let conf_height =
 		core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[2].best_block_info().1 + 1);
@@ -580,7 +580,7 @@ fn test_inbound_scid_privacy() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret);
 	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
@@ -601,7 +601,7 @@ fn test_inbound_scid_privacy() {
 	let onion = RecipientOnionFields::secret_only(payment_secret_2);
 	let id = PaymentId(payment_hash_2.0);
 	nodes[0].node.send_payment_with_route(route_2, payment_hash_2, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let payment_event = SendEvent::from_node(&nodes[0]);
 	assert_eq!(node_b_id, payment_event.node_id);
@@ -698,7 +698,7 @@ fn test_scid_alias_returned() {
 
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &as_updates.commitment_signed, false, true);
@@ -710,7 +710,7 @@ fn test_scid_alias_returned() {
 		channel_id: chan.0.channel_id,
 	}];
 	expect_htlc_failure_conditions(events, &expected_failures);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]);
@@ -735,7 +735,7 @@ fn test_scid_alias_returned() {
 
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]);
 	do_commitment_signed_dance(&nodes[1], &nodes[0], &as_updates.commitment_signed, false, true);
@@ -845,7 +845,7 @@ fn test_0conf_channel_with_async_monitor() {
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[1].node.handle_funding_created(node_a_id, &funding_created);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
 	let channel_id = ChannelId::v1_from_funding_outpoint(funding_output);
@@ -860,7 +860,7 @@ fn test_0conf_channel_with_async_monitor() {
 		MessageSendEvent::SendFundingSigned { node_id, msg } => {
 			assert_eq!(*node_id, node_a_id);
 			nodes[0].node.handle_funding_signed(node_b_id, &msg);
-			check_added_monitors!(nodes[0], 1);
+			check_added_monitors(&nodes[0], 1);
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -938,26 +938,26 @@ fn test_0conf_channel_with_async_monitor() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let as_send = SendEvent::from_node(&nodes[0]);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &as_send.msgs[0]);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_send.commitment_msg);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let (bs_raa, bs_commitment_signed) = get_revoke_commit_msgs(&nodes[1], &node_a_id);
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[1].node.handle_revoke_and_ack(
 		node_a_id,
 		&get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id),
 	);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
@@ -971,10 +971,10 @@ fn test_0conf_channel_with_async_monitor() {
 		.chain_monitor
 		.channel_monitor_updated(bs_raa.channel_id, latest_update)
 		.unwrap();
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 	expect_and_process_pending_htlcs(&nodes[1], false);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let bs_send = SendEvent::from_node(&nodes[1]);
 	nodes[2].node.handle_update_add_htlc(node_b_id, &bs_send.msgs[0]);
@@ -1011,7 +1011,7 @@ fn test_0conf_close_no_early_chan_update() {
 
 	send_payment(&nodes[0], &[&nodes[1]], 100_000);
 	nodes[0].node.force_close_all_channels_broadcasting_latest_txn(message.clone());
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000);
 	let _ = get_err_msg(&nodes[0], &node_b_id);
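The test_0conf_channel_with_async_monitor hunks above exercise asynchronous monitor persistence: while the persister returns ChannelMonitorUpdateStatus::InProgress the node holds outbound work back, and only after channel_monitor_updated is called does processing resume. Below is a minimal sketch of that gating pattern under simplified stand-in types; it is not LDK's ChainMonitor API.

// Hedged sketch of the async-persistence gating the 0conf test relies on.
#[derive(PartialEq)]
enum UpdateStatus {
	Completed,
	InProgress,
}

struct AsyncMonitor {
	status: UpdateStatus,
	held_msgs: Vec<String>,
}

impl AsyncMonitor {
	fn queue(&mut self, msg: &str) -> Option<String> {
		if self.status == UpdateStatus::InProgress {
			// Mirrors `assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty())`
			// while the persister returns `ChannelMonitorUpdateStatus::InProgress`.
			self.held_msgs.push(msg.to_owned());
			None
		} else {
			Some(msg.to_owned())
		}
	}

	fn updated(&mut self) -> Vec<String> {
		// Mirrors `chain_monitor.channel_monitor_updated(..)` releasing held work.
		self.status = UpdateStatus::Completed;
		std::mem::take(&mut self.held_msgs)
	}
}

fn main() {
	let mut mon = AsyncMonitor { status: UpdateStatus::InProgress, held_msgs: Vec::new() };
	assert!(mon.queue("revoke_and_ack").is_none()); // held while InProgress
	let released = mon.updated();
	assert_eq!(released, vec!["revoke_and_ack".to_owned()]);
	assert!(mon.queue("commitment_signed").is_some()); // flows once Completed
}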
diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs
index 6daf4d65b9d..a2b14a798c4 100644
--- a/lightning/src/ln/quiescence_tests.rs
+++ b/lightning/src/ln/quiescence_tests.rs
@@ -101,7 +101,7 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let payment_id = PaymentId(payment_hash.0);
 	local_node.node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap();
-	check_added_monitors!(local_node, 1);
+	check_added_monitors(&local_node, 1);
 
 	// Attempt to send an HTLC, but don't fully commit it yet.
 	let update_add = get_htlc_update_msgs(&local_node, &remote_node_id);
@@ -373,7 +373,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) {
 	let onion1 = RecipientOnionFields::secret_only(payment_secret1);
 	let payment_id1 = PaymentId(payment_hash1.0);
 	nodes[1].node.send_payment_with_route(route1, payment_hash1, onion1, payment_id1).unwrap();
-	check_added_monitors!(&nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
 	// Send a payment in the opposite direction. Since nodes[0] hasn't sent its own `stfu` yet, it's
@@ -383,7 +383,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) {
 	let onion2 = RecipientOnionFields::secret_only(payment_secret2);
 	let payment_id2 = PaymentId(payment_hash2.0);
 	nodes[0].node.send_payment_with_route(route2, payment_hash2, onion2, payment_id2).unwrap();
-	check_added_monitors!(&nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let update_add = get_htlc_update_msgs(&nodes[0], &node_id_1);
 	nodes[1].node.handle_update_add_htlc(node_id_0, &update_add.update_add_htlcs[0]);
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 2e9471a787d..95b993a4a90 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -259,7 +259,7 @@ fn test_manager_serialize_deserialize_events() {
 
 	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, &node_b.node.get_our_node_id(), channel_value, 42);
 	node_a.node.funding_transaction_generated(temporary_channel_id, node_b.node.get_our_node_id(), tx.clone()).unwrap();
-	check_added_monitors!(node_a, 0);
+	check_added_monitors(&node_a, 0);
 
 	let funding_created = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id());
 	let channel_id = ChannelId::v1_from_funding_txid(
@@ -462,7 +462,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	for monitor in node_0_monitors.drain(..) {
 		assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.channel_id(), monitor),
 			Ok(ChannelMonitorUpdateStatus::Completed));
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 	}
 
 	nodes[0].node = &nodes_0_deserialized;
@@ -474,7 +474,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 		check_spends!(txn[0], funding_tx);
 		assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.compute_txid());
 	}
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	// nodes[1] and nodes[2] have no lost state with nodes[0]...
 	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
@@ -647,7 +647,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool,
 		.node
 		.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone())
 		.unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1000000);
@@ -697,7 +697,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool,
 	assert_eq!(err_msgs_0.len(), 1);
 	nodes[1].node.handle_error(nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
 	assert!(nodes[1].node.list_usable_channels().is_empty());
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) }
 		, &[nodes[0].node.get_our_node_id()], 1000000);
 	check_closed_broadcast!(nodes[1], false);
@@ -754,7 +754,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest
 
 	nodes[0].node.send_payment_with_route(route, payment_hash,
 		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 
 	// Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
 	let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -785,7 +785,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest
 
 	expect_payment_claimable!(nodes[3], payment_hash, payment_secret, 15_000_000);
 	nodes[3].node.claim_funds(payment_preimage);
-	check_added_monitors!(nodes[3], 2);
+	check_added_monitors(&nodes[3], 2);
 	expect_payment_claimed!(nodes[3], payment_hash, 15_000_000);
 
 	// Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we
@@ -881,7 +881,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest
 	// Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC
 	// claim should fly.
 	let mut ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
-	check_added_monitors!(nodes[3], 1);
+	check_added_monitors(&nodes[3], 1);
 	assert_eq!(ds_msgs.len(), 2);
 
 	if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
@@ -889,7 +889,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest
 		MessageSendEvent::UpdateHTLCs { mut updates, .. } => {
 			let mut fulfill = updates.update_fulfill_htlcs.remove(0);
 			nodes[2].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), fulfill);
-			check_added_monitors!(nodes[2], 1);
+			check_added_monitors(&nodes[2], 1);
 			let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id());
 			expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
 			do_commitment_signed_dance(&nodes[2], &nodes[3], &updates.commitment_signed, false, true);
@@ -951,7 +951,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 	let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
 	nodes[0].node.send_payment_with_route(route, payment_hash,
 		RecipientOnionFields::secret_only(payment_secret), payment_id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let payment_event = SendEvent::from_node(&nodes[0]);
 	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
@@ -985,7 +985,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 	let payment_event = SendEvent::from_node(&nodes[1]);
 	nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
 	nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	if claim_htlc {
 		get_monitor!(nodes[2], chan_id_2).provide_payment_preimage_unsafe_legacy(
@@ -1005,7 +1005,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 
 	let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event(&nodes[2], 1, reason, &[nodes[1].node.get_our_node_id()], 100000);
 	check_closed_broadcast!(nodes[2], true);
@@ -1031,7 +1031,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 	nodes[1].node.timer_tick_occurred();
 	let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(bs_commitment_tx.len(), 1);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
 	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
@@ -1064,7 +1064,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 	} else {
 		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true);
 	}
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let mut update = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
 	if claim_htlc {
@@ -1124,7 +1124,7 @@ fn removed_payment_no_manager_persistence() {
 		&nodes[2],
 		&[HTLCHandlingFailureType::Receive { payment_hash }]
 	);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	let events = nodes[2].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	match &events[0] {
@@ -1159,7 +1159,7 @@ fn removed_payment_no_manager_persistence() {
 		&nodes[1],
 		&[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]
 	);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	match &events[0] {
@@ -1266,7 +1266,7 @@ fn test_htlc_localremoved_persistence() {
 		RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
 	nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, RecipientOnionFields::spontaneous_empty(),
 		Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
 	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
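The reload_tests hunks above all follow one pattern: serialize the node's state, rebuild the node from those bytes, and assert that pending state and monitor-update counts survive the restart. Below is a toy round-trip illustrating that invariant; the types and the encoding are illustrative stand-ins, not LDK's ChannelManager/ChannelMonitor serialization.

// Hedged sketch of the persist-then-reload invariant the tests exercise.
#[derive(Clone, Debug, PartialEq)]
struct ManagerState {
	pending_payments: Vec<[u8; 32]>,
}

impl ManagerState {
	fn encode(&self) -> Vec<u8> {
		// Toy encoding: one 32-byte hash per pending payment.
		self.pending_payments.iter().flatten().copied().collect()
	}

	fn decode(bytes: &[u8]) -> Self {
		let pending_payments = bytes
			.chunks_exact(32)
			.map(|c| {
				let mut hash = [0u8; 32];
				hash.copy_from_slice(c);
				hash
			})
			.collect();
		ManagerState { pending_payments }
	}
}

fn main() {
	let before_restart = ManagerState { pending_payments: vec![[7u8; 32]] };
	let serialized = before_restart.encode();

	// "Reload": rebuild the manager from its serialized form, as the tests do
	// with `nodes[0].node = &nodes_0_deserialized`.
	let after_restart = ManagerState::decode(&serialized);
	assert_eq!(before_restart, after_restart);
}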
nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match &events[0] { @@ -1266,7 +1266,7 @@ fn test_htlc_localremoved_persistence() { RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap(); nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 043862fea90..b56caf96008 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -65,7 +65,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { // Provide preimage to node 2 by claiming payment nodes[2].node.claim_funds(our_payment_preimage); expect_payment_claimed!(nodes[2], our_payment_hash, 1_000_000); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let claim_txn = if local_commitment { @@ -79,7 +79,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { // Give node 2 node 1's transactions and get its response (claiming the HTLC instead). connect_block(&nodes[2], &create_dummy_block(nodes[2].best_block_hash(), 42, node_1_commitment_txn.clone())); check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate) - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); check_closed_event(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 100000); let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_2_commitment_txn.len(), 1); // ChannelMonitor: 1 offered HTLC-Claim @@ -113,11 +113,11 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { vec![node_2_commitment_txn.pop().unwrap()] }; check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate) - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[2].node.get_our_node_id()], 100000); // Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1. 
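// Note (added for context): ANTI_REORG_DELAY is LDK's reorg-safety depth (6 confirmations), the
// point at which a ChannelMonitor treats an on-chain HTLC resolution as irreversible and acts on
// it. Connecting ANTI_REORG_DELAY - 2 additional blocks on top of the single existing
// confirmation stops one block short of that threshold, which is why no monitor updates or
// events are expected immediately below.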
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0); if claim { @@ -139,7 +139,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { ); } - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Which should result in an immediate claim/fail of the HTLC: let mut htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); if claim { @@ -199,7 +199,7 @@ fn test_counterparty_revoked_reorg() { nodes[0].node.claim_funds(payment_preimage_3); let _ = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_claimed!(nodes[0], payment_hash_3, 4_000_000); let mut unrevoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2); @@ -211,7 +211,7 @@ fn test_counterparty_revoked_reorg() { // on any of the HTLCs, at least until we get six confirmations (which we won't get). mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); // Connect up to one block before the revoked transaction would be considered final, then do a @@ -313,7 +313,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } if reload_node { @@ -380,7 +380,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ // we were already running. 
nodes[0].node.test_process_background_events(); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(txn.len(), 1); @@ -389,7 +389,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ let expected_err = "Funding transaction was un-confirmed, originally locked at 6 confs."; if reorg_after_reload || !reload_node { handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed, originally locked at 6 confs."); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) }; check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000); } @@ -477,14 +477,14 @@ fn test_set_outpoints_partial_claiming() { expect_payment_claimed!(nodes[0], payment_hash_1, 3_000_000); nodes[0].node.claim_funds(payment_preimage_2); expect_payment_claimed!(nodes[0], payment_hash_2, 3_000_000); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); nodes[0].node.get_and_clear_pending_msg_events(); // Connect blocks on node A commitment transaction mine_transaction(&nodes[0], &remote_txn[0]); check_closed_broadcast!(nodes[0], true); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Verify node A broadcast tx claiming both HTLCs { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -508,7 +508,7 @@ fn test_set_outpoints_partial_claiming() { channel_funding_txo: None, user_channel_id: None, }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Verify node B broadcast 2 HTLC-timeout txn let partial_claim_tx = { let mut node_txn = nodes[1].tx_broadcaster.unique_txn_broadcast(); @@ -583,11 +583,11 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) { check_closed_broadcast!(nodes[0], true); assert!(nodes[0].node.list_channels().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); check_closed_broadcast!(nodes[1], true); assert!(nodes[1].node.list_channels().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 5e7c7d9fd35..192bc6399e4 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -175,7 +175,7 @@ fn expect_channel_shutdown_state_with_htlc() { // Claim Funds on Node2 nodes[2].node.claim_funds(payment_preimage_0); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_0, 100_000); // Fulfil HTLCs on node1 and node0 @@ -187,7 +187,7 @@ fn expect_channel_shutdown_state_with_htlc() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(node_c_id, 
updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -361,7 +361,7 @@ fn expect_channel_shutdown_state_with_force_closure() { .force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, message.clone()) .unwrap(); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown); assert!(nodes[1].node.list_channels().is_empty()); @@ -371,7 +371,7 @@ fn expect_channel_shutdown_state_with_force_closure() { check_spends!(node_txn[0], chan_1.3); mine_transaction(&nodes[0], &node_txn[0]); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); @@ -452,7 +452,7 @@ fn updates_shutdown_wait() { unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. }, {}); nodes[2].node.claim_funds(payment_preimage_0); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_0, 100_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -463,7 +463,7 @@ fn updates_shutdown_wait() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -549,7 +549,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { .node .send_payment(our_payment_hash, onion, id, route_params, Retry::Attempts(0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); assert_eq!(updates.update_add_htlcs.len(), 1); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -564,7 +564,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_shutdown(node_a_id, &node_0_shutdown); assert!(commitment_signed_dance_through_cp_raa(&nodes[1], &nodes[0], false, false).is_none()); expect_and_process_pending_htlcs(&nodes[1], false); @@ -718,7 +718,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 100_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -729,7 +729,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(node_c_id, 
updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -834,7 +834,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { // checks it, but in this case nodes[1] didn't ever get a chance to receive a // closing_signed so we do it ourselves check_closed_broadcast!(nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &node_b_id)) }; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); } @@ -920,7 +920,7 @@ fn test_upfront_shutdown_script() { nodes[0].node.close_channel(&chan.2, &node_b_id).unwrap(); let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, node_b_id); nodes[1].node.handle_shutdown(node_a_id, &node_1_shutdown); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { @@ -935,7 +935,7 @@ fn test_upfront_shutdown_script() { *nodes[0].override_init_features.borrow_mut() = None; let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); nodes[0].node.handle_shutdown(node_b_id, &node_0_shutdown); let events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -951,7 +951,7 @@ fn test_upfront_shutdown_script() { //// channel smoothly let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); nodes[0].node.handle_shutdown(node_b_id, &node_0_shutdown); let events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1088,7 +1088,7 @@ fn test_segwit_v0_shutdown_script() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Use a segwit v0 script supported even without option_shutdown_anysegwit let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1127,7 +1127,7 @@ fn test_anysegwit_shutdown_script() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Use a non-v0 segwit script supported by option_shutdown_anysegwit let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1188,7 +1188,7 @@ fn test_unsupported_anysegwit_shutdown_script() { Ok(_) => panic!("Expected error"), } nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // 
Use a non-v0 segwit script unsupported without option_shutdown_anysegwit let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1217,7 +1217,7 @@ fn test_invalid_shutdown_script() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Use a segwit v0 script with an unsupported witness program let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1253,7 +1253,7 @@ fn test_user_shutdown_script() { .node .close_channel_with_feerate_and_script(&chan.2, &node_a_id, None, Some(shutdown_script)) .unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1390,7 +1390,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { && txn[0].output[0].script_pubkey.is_p2wsh()) ); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: "closing_signed negotiation failed to finish within two timer ticks".to_string(), }; @@ -1819,7 +1819,7 @@ fn test_force_closure_on_low_stale_fee() { // Finally, connect one more block and check the force-close happened. connect_blocks(&nodes[1], 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_broadcast(&nodes[1], 1, true); let reason = ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw: 253, diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs index 060496d3bee..67a07325ad6 100644 --- a/lightning/src/ln/update_fee_tests.rs +++ b/lightning/src/ln/update_fee_tests.rs @@ -1124,7 +1124,7 @@ pub fn do_cannot_afford_on_holding_cell_release( if let MessageSendEvent::SendRevokeAndACK { node_id, msg } = events.pop().unwrap() { assert_eq!(node_id, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } else { panic!(); } diff --git a/lightning/src/ln/zero_fee_commitment_tests.rs b/lightning/src/ln/zero_fee_commitment_tests.rs index f94066789c1..2503ad81cde 100644 --- a/lightning/src/ln/zero_fee_commitment_tests.rs +++ b/lightning/src/ln/zero_fee_commitment_tests.rs @@ -158,7 +158,7 @@ fn test_htlc_claim_chunking() { for (preimage, payment_hash) in node_1_preimages { nodes[1].node.claim_funds(preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, NONDUST_HTLC_AMT_MSAT); } nodes[0].node.get_and_clear_pending_msg_events(); @@ -188,12 +188,12 @@ fn test_htlc_claim_chunking() { assert_eq!(htlc_claims[1].output.len(), 24); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], CHAN_CAPACITY); assert!(nodes[0].node.list_channels().is_empty()); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], CHAN_CAPACITY); assert!(nodes[1].node.list_channels().is_empty()); diff --git 
a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 344e76d7e6d..e15209676e3 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -1516,13 +1516,13 @@ impl From for UpdateName { mod tests { use super::*; use crate::chain::ChannelMonitorUpdateStatus; + use crate::check_closed_broadcast; use crate::events::ClosureReason; use crate::ln::functional_test_utils::*; use crate::ln::msgs::BaseMessageHandler; use crate::sync::Arc; use crate::util::test_channel_signer::TestChannelSigner; use crate::util::test_utils::{self, TestStore}; - use crate::{check_added_monitors, check_closed_broadcast}; use bitcoin::hashes::hex::FromHex; use core::cmp; @@ -1738,7 +1738,7 @@ mod tests { ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_id_1], 100000); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(node_txn.len(), 1); @@ -1750,7 +1750,7 @@ mod tests { let reason = ClosureReason::CommitmentTxConfirmed; let node_id_0 = nodes[0].node.get_our_node_id(); check_closed_event(&nodes[1], 1, reason, &[node_id_0], 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Make sure everything is persisted as expected after close. // We always send at least two payments, and loop up to max_pending_updates_0 * 2. From 040ce2ab40e77b087bb2e2f30545c925d29cf58c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 10 Dec 2025 14:38:44 +0100 Subject: [PATCH 019/242] Convert channelmanager handle_error macro to fn --- lightning/src/ln/channelmanager.rs | 244 +++++++++++++++-------------- 1 file changed, 124 insertions(+), 120 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 0411d519a9d..460994e8ec2 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3201,69 +3201,6 @@ pub struct PhantomRouteHints { pub real_node_pubkey: PublicKey, } -#[rustfmt::skip] -macro_rules! handle_error { - ($self: ident, $internal: expr, $counterparty_node_id: expr) => { { - // In testing, ensure there are no deadlocks where the lock is already held upon - // entering the macro. - debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread); - debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); - - match $internal { - Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, shutdown_finish, tx_abort, .. 
}) => { - let mut msg_event = None; - - if let Some((shutdown_res, update_option)) = shutdown_finish { - let counterparty_node_id = shutdown_res.counterparty_node_id; - let channel_id = shutdown_res.channel_id; - let logger = WithContext::from( - &$self.logger, Some(counterparty_node_id), Some(channel_id), None - ); - log_error!(logger, "Closing channel: {}", err.err); - - $self.finish_close_channel(shutdown_res); - if let Some((update, node_id_1, node_id_2)) = update_option { - let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update, node_id_1, node_id_2 - }); - } - } else { - log_error!($self.logger, "Got non-closing error: {}", err.err); - } - - if let msgs::ErrorAction::IgnoreError = err.action { - if let Some(tx_abort) = tx_abort { - msg_event = Some(MessageSendEvent::SendTxAbort { - node_id: $counterparty_node_id, - msg: tx_abort, - }); - } - } else { - msg_event = Some(MessageSendEvent::HandleError { - node_id: $counterparty_node_id, - action: err.action.clone() - }); - } - - if let Some(msg_event) = msg_event { - let per_peer_state = $self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) { - let mut peer_state = peer_state_mutex.lock().unwrap(); - if peer_state.is_connected { - peer_state.pending_msg_events.push(msg_event); - } - } - } - - // Return error in case higher-API need one - Err(err) - }, - } - } }; -} - macro_rules! send_channel_ready { ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ if $channel.context.is_connected() { @@ -3752,7 +3689,7 @@ where /// When a channel is removed, two things need to happen: /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, -/// (b) [`handle_error`] needs to be called without holding any locks (except +/// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except /// [`ChannelManager::total_consistency_lock`]), which then calls /// [`ChannelManager::finish_close_channel`]. /// @@ -4031,6 +3968,74 @@ where } } + fn handle_error( + &self, internal: Result, counterparty_node_id: PublicKey, + ) -> Result { + // In testing, ensure there are no deadlocks where the lock is already held upon + // entering the macro. + debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); + debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); + + match internal { + Ok(msg) => Ok(msg), + Err(MsgHandleErrInternal { err, shutdown_finish, tx_abort, .. 
}) => { + let mut msg_event = None; + + if let Some((shutdown_res, update_option)) = shutdown_finish { + let counterparty_node_id = shutdown_res.counterparty_node_id; + let channel_id = shutdown_res.channel_id; + let logger = WithContext::from( + &self.logger, + Some(counterparty_node_id), + Some(channel_id), + None, + ); + log_error!(logger, "Closing channel: {}", err.err); + + self.finish_close_channel(shutdown_res); + if let Some((update, node_id_1, node_id_2)) = update_option { + let mut pending_broadcast_messages = + self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { + msg: update, + node_id_1, + node_id_2, + }); + } + } else { + log_error!(self.logger, "Got non-closing error: {}", err.err); + } + + if let msgs::ErrorAction::IgnoreError = err.action { + if let Some(tx_abort) = tx_abort { + msg_event = Some(MessageSendEvent::SendTxAbort { + node_id: counterparty_node_id, + msg: tx_abort, + }); + } + } else { + msg_event = Some(MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: err.action.clone(), + }); + } + + if let Some(msg_event) = msg_event { + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + if peer_state.is_connected { + peer_state.pending_msg_events.push(msg_event); + } + } + } + + // Return error in case higher-API need one + Err(err) + }, + } + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. pub fn get_current_config(&self) -> UserConfig { @@ -4398,7 +4403,7 @@ where self.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None); } - let _ = handle_error!(self, shutdown_result, *counterparty_node_id); + let _ = self.handle_error(shutdown_result, *counterparty_node_id); Ok(()) } @@ -4509,7 +4514,7 @@ where /// When a channel is removed, two things need to happen: /// (a) [`convert_channel_err`] must be called in the same `per_peer_state` lock as the /// channel-closing action, - /// (b) [`handle_error`] needs to be called without holding any locks (except + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except /// [`ChannelManager::total_consistency_lock`]), which then calls this. #[rustfmt::skip] fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) { @@ -4610,7 +4615,7 @@ where } } for (err, counterparty_node_id) in shutdown_results.drain(..) { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } } @@ -4643,7 +4648,7 @@ where // error message. 
e.dont_send_error_message(); } - let _ = handle_error!(self, Err::<(), _>(e), *peer_node_id); + let _ = self.handle_error(Err::<(), _>(e), *peer_node_id); Ok(()) } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() { log_error!(logger, "Force-closing inbound channel request"); @@ -5380,7 +5385,7 @@ where } return Ok(()); }; - match handle_error!(self, err, path.hops.first().unwrap().pubkey) { + match self.handle_error(err, path.hops.first().unwrap().pubkey) { Ok(_) => unreachable!(), Err(e) => Err(APIError::ChannelUnavailable { err: e.err }), } @@ -6073,7 +6078,7 @@ where mem::drop(peer_state_lock); mem::drop(per_peer_state); - let _: Result<(), _> = handle_error!(self, Err(err), counterparty); + let _: Result<(), _> = self.handle_error(Err(err), counterparty); Err($api_err) } } } @@ -6420,7 +6425,7 @@ where } mem::drop(funding_batch_states); for (err, counterparty_node_id) in shutdown_results { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } } result @@ -8367,7 +8372,7 @@ where } for (err, counterparty_node_id) in handle_errors { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } #[cfg(feature = "std")] @@ -8877,7 +8882,7 @@ where // Now we can handle any errors which were generated. for (counterparty_node_id, err) in errs.drain(..) { let res: Result<(), _> = Err(err); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } } @@ -10053,10 +10058,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ mem::drop(peer_state_lock); mem::drop(per_peer_state); // TODO(dunxen): Find/make less icky way to do this. - match handle_error!( - self, + match self.handle_error( Result::<(), MsgHandleErrInternal>::Err(err), - *counterparty_node_id + *counterparty_node_id, ) { Ok(_) => { unreachable!("`handle_error` only returns Err as we've passed in an Err") @@ -11129,7 +11133,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some((broadcast_tx, err)) = tx_err { log_info!(logger, "Broadcasting {}", log_tx!(broadcast_tx)); self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]); - let _ = handle_error!(self, err, *counterparty_node_id); + let _ = self.handle_error(err, *counterparty_node_id); } Ok(()) } @@ -12240,7 +12244,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } for (err, counterparty_node_id) in failed_channels { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } has_pending_monitor_events @@ -12450,7 +12454,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } drop(per_peer_state); for (err, counterparty_node_id) in shutdown_results { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } } @@ -12510,7 +12514,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } for (counterparty_node_id, err) in handle_errors { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } has_update @@ -13956,7 +13960,7 @@ where }; for (err, counterparty_node_id) in failed_channels.drain(..) 
{ - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } persist @@ -14665,7 +14669,7 @@ where } for (failure, counterparty_node_id) in failed_channels { - let _ = handle_error!(self, failure, counterparty_node_id); + let _ = self.handle_error(failure, counterparty_node_id); } for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) { @@ -14781,7 +14785,7 @@ where }, _ => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14789,7 +14793,7 @@ where #[rustfmt::skip] fn handle_open_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannelV2) { if !self.init_features().supports_dual_fund() { - let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( + let _: Result<(), _> = self.handle_error(Err(MsgHandleErrInternal::send_err_msg_no_close( "Dual-funded channels not supported".to_owned(), msg.common_fields.temporary_channel_id.clone())), counterparty_node_id); return; @@ -14806,7 +14810,7 @@ where }, _ => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14817,7 +14821,7 @@ where // change to the contents. let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_accept_channel(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); NotifyOption::SkipPersistHandleEvents }); } @@ -14829,26 +14833,26 @@ where "Dual-funded channels not supported".to_owned(), msg.common_fields.temporary_channel_id.clone(), )); - let _: Result<(), _> = handle_error!(self, err, counterparty_node_id); + let _: Result<(), _> = self.handle_error(err, counterparty_node_id); } fn handle_funding_created(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingCreated) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_funding_created(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_funding_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingSigned) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_funding_signed(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) { let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents); let res = self.internal_peer_storage(counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_peer_storage_retrieval( @@ -14857,7 +14861,7 @@ where let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents); let res = self.internal_peer_storage_retrieval(counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) { @@ -14871,7 +14875,7 
@@ where Err(e) if e.closes_channel() => NotifyOption::DoPersist, _ => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14890,7 +14894,7 @@ where } }, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14903,7 +14907,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14916,7 +14920,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14930,7 +14934,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::DoPersist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14938,27 +14942,27 @@ where fn handle_shutdown(&self, counterparty_node_id: PublicKey, msg: &msgs::Shutdown) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_shutdown(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_closing_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::ClosingSigned) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_closing_signed(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } #[cfg(simple_close)] fn handle_closing_complete(&self, counterparty_node_id: PublicKey, msg: msgs::ClosingComplete) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_closing_complete(counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } #[cfg(simple_close)] fn handle_closing_sig(&self, counterparty_node_id: PublicKey, msg: msgs::ClosingSig) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_closing_sig(counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_update_add_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) { @@ -14972,7 +14976,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistNoEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14982,7 +14986,7 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_update_fulfill_htlc(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_update_fail_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) { @@ -14996,7 +15000,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistNoEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + 
let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15014,7 +15018,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistNoEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15024,7 +15028,7 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_commitment_signed(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_commitment_signed_batch( @@ -15033,13 +15037,13 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_commitment_signed_batch(&counterparty_node_id, channel_id, batch); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_revoke_and_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::RevokeAndACK) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_revoke_and_ack(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_update_fee(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFee) { @@ -15053,7 +15057,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistNoEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15063,13 +15067,13 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_announcement_signatures(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_channel_update(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelUpdate) { PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_channel_update(&counterparty_node_id, msg); - if let Ok(persist) = handle_error!(self, res, counterparty_node_id) { + if let Ok(persist) = self.handle_error(res, counterparty_node_id) { persist } else { NotifyOption::DoPersist @@ -15082,7 +15086,7 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_channel_reestablish(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } #[rustfmt::skip] @@ -15209,7 +15213,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15221,7 +15225,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15233,7 +15237,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15245,7 +15249,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist 
}); } @@ -15257,7 +15261,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15265,7 +15269,7 @@ where fn handle_tx_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::TxSignatures) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_tx_signatures(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_tx_init_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxInitRbf) { @@ -15273,7 +15277,7 @@ where "Dual-funded channels not supported".to_owned(), msg.channel_id.clone(), )); - let _: Result<(), _> = handle_error!(self, err, counterparty_node_id); + let _: Result<(), _> = self.handle_error(err, counterparty_node_id); } fn handle_tx_ack_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAckRbf) { @@ -15281,7 +15285,7 @@ where "Dual-funded channels not supported".to_owned(), msg.channel_id.clone(), )); - let _: Result<(), _> = handle_error!(self, err, counterparty_node_id); + let _: Result<(), _> = self.handle_error(err, counterparty_node_id); } fn handle_tx_abort(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAbort) { @@ -15295,7 +15299,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } From deac317fa9e202d24e3e6b5cdbf3f55766e6492b Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 10 Dec 2025 14:57:34 +0100 Subject: [PATCH 020/242] Simplify channelmanager handle_error via map_err --- lightning/src/ln/channelmanager.rs | 94 +++++++++++++++--------------- 1 file changed, 46 insertions(+), 48 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 460994e8ec2..222fcf8b92e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3968,6 +3968,7 @@ where } } + /// Handles an error by closing the channel if required and generating peer messages. fn handle_error( &self, internal: Result, counterparty_node_id: PublicKey, ) -> Result { @@ -3976,64 +3977,61 @@ where debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); - match internal { - Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, shutdown_finish, tx_abort, .. 
}) => { - let mut msg_event = None; + internal.map_err(|err_internal| { + let mut msg_event = None; - if let Some((shutdown_res, update_option)) = shutdown_finish { - let counterparty_node_id = shutdown_res.counterparty_node_id; - let channel_id = shutdown_res.channel_id; - let logger = WithContext::from( - &self.logger, - Some(counterparty_node_id), - Some(channel_id), - None, - ); - log_error!(logger, "Closing channel: {}", err.err); - - self.finish_close_channel(shutdown_res); - if let Some((update, node_id_1, node_id_2)) = update_option { - let mut pending_broadcast_messages = - self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update, - node_id_1, - node_id_2, - }); - } - } else { - log_error!(self.logger, "Got non-closing error: {}", err.err); + if let Some((shutdown_res, update_option)) = err_internal.shutdown_finish { + let counterparty_node_id = shutdown_res.counterparty_node_id; + let channel_id = shutdown_res.channel_id; + let logger = WithContext::from( + &self.logger, + Some(counterparty_node_id), + Some(channel_id), + None, + ); + log_error!(logger, "Closing channel: {}", err_internal.err.err); + + self.finish_close_channel(shutdown_res); + if let Some((update, node_id_1, node_id_2)) = update_option { + let mut pending_broadcast_messages = + self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { + msg: update, + node_id_1, + node_id_2, + }); } + } else { + log_error!(self.logger, "Got non-closing error: {}", err_internal.err.err); + } - if let msgs::ErrorAction::IgnoreError = err.action { - if let Some(tx_abort) = tx_abort { - msg_event = Some(MessageSendEvent::SendTxAbort { - node_id: counterparty_node_id, - msg: tx_abort, - }); - } - } else { - msg_event = Some(MessageSendEvent::HandleError { + if let msgs::ErrorAction::IgnoreError = err_internal.err.action { + if let Some(tx_abort) = err_internal.tx_abort { + msg_event = Some(MessageSendEvent::SendTxAbort { node_id: counterparty_node_id, - action: err.action.clone(), + msg: tx_abort, }); } + } else { + msg_event = Some(MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: err_internal.err.action.clone(), + }); + } - if let Some(msg_event) = msg_event { - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - let mut peer_state = peer_state_mutex.lock().unwrap(); - if peer_state.is_connected { - peer_state.pending_msg_events.push(msg_event); - } + if let Some(msg_event) = msg_event { + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + if peer_state.is_connected { + peer_state.pending_msg_events.push(msg_event); } } + } - // Return error in case higher-API need one - Err(err) - }, - } + // Return error in case higher-API need one + err_internal.err + }) } /// Gets the current [`UserConfig`] which controls some global behavior and includes the From 4f055aca878ae990280d5b467d0e1faac5691aa0 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 13 Nov 2025 14:20:46 -0500 Subject: [PATCH 021/242] Store inbound committed update_adds in Channel We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs 
are out-of-sync on restart. The main issue that can arise is channel force
closure.

As part of this, we plan to store at least parts of Channels in
ChannelMonitors, and that Channel data will be used in rebuilding the
manager.

Once we store update_adds in Channels, we can use them on restart when
reconstructing ChannelManager maps such as forward_htlcs and
pending_intercepted_htlcs. Upcoming commits will start doing this
reconstruction.
---
 lightning/src/ln/channel.rs | 92 ++++++++++++++++++++++++-------------
 1 file changed, 61 insertions(+), 31 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 5b4ac4c0aa5..ed6f6cef77f 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -211,7 +211,14 @@ enum InboundHTLCState {
 	/// channel (before it can then get forwarded and/or removed).
 	/// Implies AwaitingRemoteRevoke.
 	AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution),
-	Committed,
+	/// An HTLC irrevocably committed in the latest commitment transaction, ready to be forwarded or
+	/// removed.
+	Committed {
+		/// Used to rebuild `ChannelManager` HTLC state on restart. Previously the manager would track
+		/// and persist all HTLC forwards and receives itself, but newer LDK versions avoid relying on
+		/// its persistence and instead reconstruct state based on `Channel` and `ChannelMonitor` data.
+		update_add_htlc_opt: Option<msgs::UpdateAddHTLC>,
+	},
 	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
 	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
 	/// we'll drop it.
@@ -235,7 +242,7 @@ impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
 			InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
 				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd)
 			},
-			InboundHTLCState::Committed => Some(InboundHTLCStateDetails::Committed),
+			InboundHTLCState::Committed { .. } => Some(InboundHTLCStateDetails::Committed),
 			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) => {
 				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail)
 			},
@@ -256,7 +263,7 @@ impl fmt::Display for InboundHTLCState {
 			InboundHTLCState::RemoteAnnounced(_) => write!(f, "RemoteAnnounced"),
 			InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => write!(f, "AwaitingRemoteRevokeToAnnounce"),
 			InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => write!(f, "AwaitingAnnouncedRemoteRevoke"),
-			InboundHTLCState::Committed => write!(f, "Committed"),
+			InboundHTLCState::Committed { .. } => write!(f, "Committed"),
 			InboundHTLCState::LocalRemoved(_) => write!(f, "LocalRemoved"),
 		}
 	}
@@ -268,7 +275,7 @@ impl InboundHTLCState {
 			InboundHTLCState::RemoteAnnounced(_) => !generated_by_local,
 			InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => !generated_by_local,
 			InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => true,
-			InboundHTLCState::Committed => true,
+			InboundHTLCState::Committed { .. } => true,
 			InboundHTLCState::LocalRemoved(_) => !generated_by_local,
 		}
 	}
@@ -296,7 +303,7 @@ impl InboundHTLCState {
 				},
 				InboundHTLCResolution::Resolved { .. } => false,
 			},
-			InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => false,
+			InboundHTLCState::Committed { .. } | InboundHTLCState::LocalRemoved(_) => false,
 		}
 	}
 }
@@ -4102,7 +4109,7 @@ where
 		if self.pending_inbound_htlcs.iter()
 			.any(|htlc| match htlc.state {
-				InboundHTLCState::Committed => false,
+				InboundHTLCState::Committed { .. } => false,
 				// An HTLC removal from the local node is pending on the remote commitment.
InboundHTLCState::LocalRemoved(_) => true, // An HTLC add from the remote node is pending on the local commitment. @@ -4531,7 +4538,7 @@ where (InboundHTLCState::RemoteAnnounced(..), _) => true, (InboundHTLCState::AwaitingRemoteRevokeToAnnounce(..), _) => true, (InboundHTLCState::AwaitingAnnouncedRemoteRevoke(..), _) => true, - (InboundHTLCState::Committed, _) => true, + (InboundHTLCState::Committed { .. }, _) => true, (InboundHTLCState::LocalRemoved(..), true) => true, (InboundHTLCState::LocalRemoved(..), false) => false, }) @@ -7320,7 +7327,7 @@ where payment_preimage_arg ); match htlc.state { - InboundHTLCState::Committed => {}, + InboundHTLCState::Committed { .. } => {}, InboundHTLCState::LocalRemoved(ref reason) => { if let &InboundHTLCRemovalReason::Fulfill { .. } = reason { } else { @@ -7413,7 +7420,7 @@ where { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; - if let InboundHTLCState::Committed = htlc.state { + if let InboundHTLCState::Committed { .. } = htlc.state { } else { debug_assert!( false, @@ -7548,7 +7555,7 @@ where for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { if htlc.htlc_id == htlc_id_arg { match htlc.state { - InboundHTLCState::Committed => {}, + InboundHTLCState::Committed { .. } => {}, InboundHTLCState::LocalRemoved(_) => { return Err(ChannelError::Ignore(format!("HTLC {} was already resolved", htlc.htlc_id))); }, @@ -8716,7 +8723,7 @@ where false }; if swap { - let mut state = InboundHTLCState::Committed; + let mut state = InboundHTLCState::Committed { update_add_htlc_opt: None }; mem::swap(&mut state, &mut htlc.state); if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state { @@ -8755,14 +8762,21 @@ where PendingHTLCStatus::Forward(forward_info) => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash); to_forward_infos.push((forward_info, htlc.htlc_id)); - htlc.state = InboundHTLCState::Committed; + htlc.state = InboundHTLCState::Committed { + // HTLCs will only be in state `InboundHTLCResolution::Resolved` if they were + // received on an old pre-0.0.123 version of LDK. In this case, the HTLC is + // required to be resolved prior to upgrading to 0.1+ per CHANGELOG.md. + update_add_htlc_opt: None, + }; }, } }, InboundHTLCResolution::Pending { update_add_htlc } => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); - pending_update_adds.push(update_add_htlc); - htlc.state = InboundHTLCState::Committed; + pending_update_adds.push(update_add_htlc.clone()); + htlc.state = InboundHTLCState::Committed { + update_add_htlc_opt: Some(update_add_htlc), + }; }, } } @@ -9297,7 +9311,7 @@ where // in response to it yet, so don't touch it. true }, - InboundHTLCState::Committed => true, + InboundHTLCState::Committed { .. 
} => true, InboundHTLCState::LocalRemoved(_) => { // We (hopefully) sent a commitment_signed updating this HTLC (which we can // re-transmit if needed) and they may have even sent a revoke_and_ack back @@ -14518,6 +14532,7 @@ where } } let mut removed_htlc_attribution_data: Vec<&Option> = Vec::new(); + let mut inbound_committed_update_adds: Vec> = Vec::new(); (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { @@ -14537,8 +14552,9 @@ where 2u8.write(writer)?; htlc_resolution.write(writer)?; }, - &InboundHTLCState::Committed => { + &InboundHTLCState::Committed { ref update_add_htlc_opt } => { 3u8.write(writer)?; + inbound_committed_update_adds.push(update_add_htlc_opt.clone()); }, &InboundHTLCState::LocalRemoved(ref removal_reason) => { 4u8.write(writer)?; @@ -14914,6 +14930,7 @@ where (69, holding_cell_held_htlc_flags, optional_vec), // Added in 0.2 (71, holder_commitment_point_previous_revoked, option), // Added in 0.3 (73, holder_commitment_point_last_revoked, option), // Added in 0.3 + (75, inbound_committed_update_adds, optional_vec), }); Ok(()) @@ -14997,7 +15014,7 @@ where }; InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) }, - 3 => InboundHTLCState::Committed, + 3 => InboundHTLCState::Committed { update_add_htlc_opt: None }, 4 => { let reason = match ::read(reader)? { 0 => InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { @@ -15301,6 +15318,7 @@ where let mut pending_outbound_held_htlc_flags_opt: Option>> = None; let mut holding_cell_held_htlc_flags_opt: Option>> = None; + let mut inbound_committed_update_adds_opt: Option>> = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -15350,6 +15368,7 @@ where (69, holding_cell_held_htlc_flags_opt, optional_vec), // Added in 0.2 (71, holder_commitment_point_previous_revoked_opt, option), // Added in 0.3 (73, holder_commitment_point_last_revoked_opt, option), // Added in 0.3 + (75, inbound_committed_update_adds_opt, optional_vec), }); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); @@ -15473,6 +15492,17 @@ where return Err(DecodeError::InvalidValue); } } + if let Some(update_adds) = inbound_committed_update_adds_opt { + let mut iter = update_adds.into_iter(); + for htlc in pending_inbound_htlcs.iter_mut() { + if let InboundHTLCState::Committed { ref mut update_add_htlc_opt } = htlc.state { + *update_add_htlc_opt = iter.next().ok_or(DecodeError::InvalidValue)?; + } + } + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } + } if let Some(attribution_data_list) = removed_htlc_attribution_data { let mut removed_htlcs = pending_inbound_htlcs.iter_mut().filter_map(|status| { @@ -16057,7 +16087,7 @@ mod tests { amount_msat: htlc_amount_msat, payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()), cltv_expiry: 300000000, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput { @@ -16903,7 +16933,7 @@ mod tests { amount_msat: 1000000, cltv_expiry: 500, payment_hash: PaymentHash::from(payment_preimage_0), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); let payment_preimage_1 = @@ -16913,7 +16943,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: 
InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); let payment_preimage_2 = @@ -16953,7 +16983,7 @@ mod tests { amount_msat: 4000000, cltv_expiry: 504, payment_hash: PaymentHash::from(payment_preimage_4), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); // commitment tx with all five HTLCs untrimmed (minimum feerate) @@ -17342,7 +17372,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); chan.context.pending_outbound_htlcs.clear(); @@ -17593,7 +17623,7 @@ mod tests { amount_msat: 5000000, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, })); chan.context.pending_outbound_htlcs.extend( @@ -17656,7 +17686,7 @@ mod tests { amount_msat, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }, )); @@ -17722,7 +17752,7 @@ mod tests { amount_msat: 100000, cltv_expiry: 920125, payment_hash: htlc_0_in_hash, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); let htlc_1_in_preimage = @@ -17740,7 +17770,7 @@ mod tests { amount_msat: 49900000, cltv_expiry: 920125, payment_hash: htlc_1_in_hash, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }); chan.context.pending_outbound_htlcs.extend( @@ -17792,7 +17822,7 @@ mod tests { amount_msat: 30000, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }, )); @@ -17833,7 +17863,7 @@ mod tests { amount_msat: 29525, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }, )); @@ -17870,7 +17900,7 @@ mod tests { amount_msat: 29525, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }, )); @@ -17907,7 +17937,7 @@ mod tests { amount_msat: 29753, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }, )); @@ -17959,7 +17989,7 @@ mod tests { amount_msat, cltv_expiry, payment_hash, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc_opt: None }, }), ); From c27093ded5773473946bd21af303ee638d5b8c9c Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 1 Dec 2025 16:11:45 -0800 Subject: [PATCH 022/242] Extract util for HTLCIntercepted event creation We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. As part of rebuilding ChannelManager forward HTLCs maps, we will also add a fix that will regenerate HTLCIntercepted events for HTLC intercepts that are present but have no corresponding event in the queue. That fix will use this new method. 
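[Illustrative sketch, not part of the patch] To make the planned event-regeneration fix concrete: on `ChannelManager` read, any stored intercept that has no matching `HTLCIntercepted` event in the queue gets one re-created from the stored forward info, failing (rather than unwrapping) if a required field is missing. The types below (`InterceptId`, `PendingAdd`, `Event` and their fields) are simplified stand-ins for this sketch only, not LDK's actual definitions:

use std::collections::{HashMap, VecDeque};

// Simplified stand-ins for the real LDK types (illustrative only).
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct InterceptId(u64);
struct PendingAdd {
    payment_hash: [u8; 32],
    inbound_amt_msat: Option<u64>,
    outgoing_amt_msat: u64,
    next_hop_scid: u64,
}
enum Event {
    HTLCIntercepted {
        intercept_id: InterceptId,
        requested_next_hop_scid: u64,
        payment_hash: [u8; 32],
        inbound_amount_msat: u64,
        expected_outbound_amount_msat: u64,
    },
}

// Mirrors the shape of the extracted helper: refuse to emit a half-populated
// event (return Err) when a required field is missing, rather than unwrapping.
fn create_intercept_event(id: InterceptId, add: &PendingAdd) -> Result<Event, ()> {
    let inbound_amount_msat = add.inbound_amt_msat.ok_or(())?;
    Ok(Event::HTLCIntercepted {
        intercept_id: id,
        requested_next_hop_scid: add.next_hop_scid,
        payment_hash: add.payment_hash,
        inbound_amount_msat,
        expected_outbound_amount_msat: add.outgoing_amt_msat,
    })
}

// The fix described above: re-create an event for any stored intercept that
// has no corresponding entry in the pending event queue.
fn regenerate_missing_events(
    intercepts: &HashMap<InterceptId, PendingAdd>, events: &mut VecDeque<Event>,
) {
    for (id, add) in intercepts {
        let already_queued = events.iter().any(
            |ev| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id),
        );
        if !already_queued {
            if let Ok(ev) = create_intercept_event(*id, add) {
                events.push_back(ev);
            }
        }
    }
}

fn main() {
    let mut intercepts = HashMap::new();
    intercepts.insert(InterceptId(1), PendingAdd {
        payment_hash: [0; 32], inbound_amt_msat: Some(5000), outgoing_amt_msat: 4000, next_hop_scid: 42,
    });
    let mut events = VecDeque::new();
    regenerate_missing_events(&intercepts, &mut events);
    assert_eq!(events.len(), 1);
}

Sharing one fallible constructor between the direct path and the read-time regeneration path keeps the two consistent, which is the point of extracting the util in the diff below.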
--- lightning/src/ln/channelmanager.rs | 44 +++++++++++++++++++----------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 72585d69f80..aa7871051e6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3911,6 +3911,25 @@ macro_rules! process_events_body { } } +/// Creates an [`Event::HTLCIntercepted`] from a [`PendingAddHTLCInfo`]. We generate this event in a +/// few places so this DRYs the code. +fn create_htlc_intercepted_event( + intercept_id: InterceptId, pending_add: &PendingAddHTLCInfo, +) -> Result<Event, ()> { + let inbound_amount_msat = pending_add.forward_info.incoming_amt_msat.ok_or(())?; + let requested_next_hop_scid = match pending_add.forward_info.routing { + PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, + _ => return Err(()), + }; + Ok(Event::HTLCIntercepted { + requested_next_hop_scid, + payment_hash: pending_add.forward_info.payment_hash, + inbound_amount_msat, + expected_outbound_amount_msat: pending_add.forward_info.outgoing_amt_msat, + intercept_id, + }) +} + impl< M: Deref, T: Deref, @@ -11486,22 +11505,15 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); match pending_intercepts.entry(intercept_id) { hash_map::Entry::Vacant(entry) => { - new_intercept_events.push_back(( - events::Event::HTLCIntercepted { - requested_next_hop_scid: scid, - payment_hash, - inbound_amount_msat: pending_add - .forward_info - .incoming_amt_msat - .unwrap(), - expected_outbound_amount_msat: pending_add - .forward_info - .outgoing_amt_msat, - intercept_id, - }, - None, - )); - entry.insert(pending_add); + if let Ok(intercept_ev) = + create_htlc_intercepted_event(intercept_id, &pending_add) + { + new_intercept_events.push_back((intercept_ev, None)); + entry.insert(pending_add); + } else { + debug_assert!(false); + fail_intercepted_htlc(pending_add); + } }, hash_map::Entry::Occupied(_) => { log_info!( From 26992e1dc88325f49f39cc55fd0985c515ab5367 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 17 Nov 2025 17:27:00 -0500 Subject: [PATCH 023/242] Extract method to dedup pre-decode update_add We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. We'll use this new util when reconstructing the ChannelManager::decode_update_add_htlcs map from Channel data in upcoming commits. While the Channel data is not included in the monitors yet, it will be in future work. --- lightning/src/ln/channelmanager.rs | 50 ++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index aa7871051e6..12cfe594891 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -16819,6 +16819,38 @@ where } } +// If the HTLC corresponding to `prev_hop_data` is present in `decode_update_add_htlcs`, remove it +// from the map as it is already being stored and processed elsewhere.
+fn dedup_decode_update_add_htlcs<L: Deref>( + decode_update_add_htlcs: &mut HashMap<u64, Vec<msgs::UpdateAddHTLC>>, + prev_hop_data: &HTLCPreviousHopData, removal_reason: &'static str, logger: &L, +) where + L::Target: Logger, +{ + decode_update_add_htlcs.retain(|src_outb_alias, update_add_htlcs| { + update_add_htlcs.retain(|update_add| { + let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias + && update_add.htlc_id == prev_hop_data.htlc_id; + if matches { + let logger = WithContext::from( + logger, + prev_hop_data.counterparty_node_id, + Some(update_add.channel_id), + Some(update_add.payment_hash), + ); + log_info!( + logger, + "Removing pending to-decode HTLC with id {}: {}", + update_add.htlc_id, + removal_reason + ); + } + !matches + }); + !update_add_htlcs.is_empty() + }); +} + // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the // SipmleArcChannelManager type: impl< @@ -17686,19 +17718,11 @@ where // still have an entry for this HTLC in `forward_htlcs` or // `pending_intercepted_htlcs`, we were apparently not persisted after // the monitor was when forwarding the payment. - decode_update_add_htlcs.retain( - |src_outb_alias, update_add_htlcs| { - update_add_htlcs.retain(|update_add_htlc| { - let matches = *src_outb_alias - == prev_hop_data.prev_outbound_scid_alias - && update_add_htlc.htlc_id == prev_hop_data.htlc_id; - if matches { - log_info!(logger, "Removing pending to-decode HTLC as it was forwarded to the closed channel"); - } - !matches - }); - !update_add_htlcs.is_empty() - }, + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, ); forward_htlcs.retain(|_, forwards| { forwards.retain(|forward| { From 005da38e494e5ca8284e72b3916992f8b119a53e Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 20 Nov 2025 12:32:40 -0500 Subject: [PATCH 024/242] Rename manager HTLC forward maps to _legacy We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. Soon we'll be reconstructing these now-legacy maps from Channel data (that will also be included in ChannelMonitors in future work), so rename them as part of moving towards not needing to persist them in ChannelManager. --- lightning/src/ln/channelmanager.rs | 38 +++++++++++++++++++----------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 12cfe594891..a48eaa46c72 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17200,7 +17200,11 @@ where const MAX_ALLOC_SIZE: usize = 1024 * 64; let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + // This map is read but may no longer be used because we'll attempt to rebuild the set of HTLC + // forwards from the `Channel{Monitor}`s instead, as a step towards removing the requirement of + // regularly persisting the `ChannelManager`.
+ let mut forward_htlcs_legacy: HashMap<u64, Vec<HTLCForwardInfo>> = + hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); for _ in 0..forward_htlcs_count { let short_channel_id = Readable::read(reader)?; let pending_forwards_count: u64 = Readable::read(reader)?; @@ -17211,7 +17215,7 @@ where for _ in 0..pending_forwards_count { pending_forwards.push(Readable::read(reader)?); } - forward_htlcs.insert(short_channel_id, pending_forwards); + forward_htlcs_legacy.insert(short_channel_id, pending_forwards); } let claimable_htlcs_count: u64 = Readable::read(reader)?; @@ -17299,12 +17303,18 @@ }; } + // Some maps are read but may no longer be used because we attempt to rebuild the pending HTLC + // set from the `Channel{Monitor}`s instead, as a step towards removing the requirement of + // regularly persisting the `ChannelManager`. + let mut pending_intercepted_htlcs_legacy: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = + Some(new_hash_map()); + let mut decode_update_add_htlcs_legacy: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = + None; + // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None; let mut pending_outbound_payments = None; - let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = - Some(new_hash_map()); let mut received_network_pubkey: Option<PublicKey> = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; let mut probing_cookie_secret: Option<[u8; 32]> = None; @@ -17322,13 +17332,12 @@ let mut in_flight_monitor_updates: Option< HashMap<(PublicKey, ChannelId), Vec<ChannelMonitorUpdate>>, > = None; - let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None; let mut inbound_payment_id_secret = None; let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None; let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); read_tlv_fields!(reader, { (1, pending_outbound_payments_no_retry, option), - (2, pending_intercepted_htlcs, option), + (2, pending_intercepted_htlcs_legacy, option), (3, pending_outbound_payments, option), (4, pending_claiming_payments, option), (5, received_network_pubkey, option), @@ -17339,13 +17348,14 @@ (10, legacy_in_flight_monitor_updates, option), (11, probing_cookie_secret, option), (13, claimable_htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs, option), + (14, decode_update_add_htlcs_legacy, option), (15, inbound_payment_id_secret, option), (17, in_flight_monitor_updates, option), (19, peer_storage_dir, optional_vec), (21, async_receive_offer_cache, (default_value, async_receive_offer_cache)), }); - let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map()); + let mut decode_update_add_htlcs_legacy = + decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map()); let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); @@ -17719,12 +17729,12 @@ // `pending_intercepted_htlcs`, we were apparently not persisted after // the monitor was when forwarding the payment.
dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, + &mut decode_update_add_htlcs_legacy, &prev_hop_data, "HTLC was forwarded to the closed channel", &args.logger, ); - forward_htlcs.retain(|_, forwards| { + forward_htlcs_legacy.retain(|_, forwards| { forwards.retain(|forward| { if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { if pending_forward_matches_htlc(&htlc_info) { @@ -17736,7 +17746,7 @@ where }); !forwards.is_empty() }); - pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { + pending_intercepted_htlcs_legacy.as_mut().unwrap().retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", &htlc.payment_hash, &monitor.channel_id()); @@ -18234,10 +18244,10 @@ where inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()), + pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy.unwrap()), - forward_htlcs: Mutex::new(forward_htlcs), - decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), + forward_htlcs: Mutex::new(forward_htlcs_legacy), + decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs_legacy), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap(), From 7c4d0214d475c5ff6b12985880b3dc08c5add3bf Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 20 Nov 2025 18:24:32 -0500 Subject: [PATCH 025/242] Tweak pending_htlc_intercepts ser on manager read Makes an upcoming commit cleaner --- lightning/src/ln/channelmanager.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a48eaa46c72..a854bb7b5d6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17307,7 +17307,7 @@ where // set from the `Channel{Monitor}`s instead, as a step towards removing the requirement of // regularly persisting the `ChannelManager`. 
let mut pending_intercepted_htlcs_legacy: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = - Some(new_hash_map()); + None; @@ -17356,6 +17356,8 @@ }); let mut decode_update_add_htlcs_legacy = decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map()); + let mut pending_intercepted_htlcs_legacy = + pending_intercepted_htlcs_legacy.unwrap_or_else(|| new_hash_map()); let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); @@ -17746,7 +17748,7 @@ }); !forwards.is_empty() }); - pending_intercepted_htlcs_legacy.as_mut().unwrap().retain(|intercepted_id, htlc_info| { + pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", &htlc.payment_hash, &monitor.channel_id()); @@ -18244,7 +18246,7 @@ inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy.unwrap()), + pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy), forward_htlcs: Mutex::new(forward_htlcs_legacy), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs_legacy), From 64de98919068d3ec9691d576e85ad80e3c7135da Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 20 Nov 2025 18:35:57 -0500 Subject: [PATCH 026/242] Gather to-decode HTLC fwds from channels on manager read We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. Here we start this process by rebuilding ChannelManager::decode_update_add_htlcs from the Channels, which will soon be included in the ChannelMonitors as part of a different series of PRs. The newly built map is not yet used but will be in the next commit. --- lightning/src/ln/channel.rs | 14 ++++++++ lightning/src/ln/channelmanager.rs | 53 ++++++++++++++++++++++++++++-- 2 files changed, 64 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index ed6f6cef77f..cb455400b5b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7778,6 +7778,20 @@ where Ok(()) } + /// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`.
+ pub(super) fn get_inbound_committed_update_adds(&self) -> Vec<msgs::UpdateAddHTLC> { + self.context + .pending_inbound_htlcs + .iter() + .filter_map(|htlc| match htlc.state { + InboundHTLCState::Committed { ref update_add_htlc_opt } => { + update_add_htlc_opt.clone() + }, + _ => None, + }) + .collect() + } + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed( diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a854bb7b5d6..080ecef2c1f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17358,6 +17358,7 @@ where decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map()); let mut pending_intercepted_htlcs_legacy = pending_intercepted_htlcs_legacy.unwrap_or_else(|| new_hash_map()); + let mut decode_update_add_htlcs = new_hash_map(); let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); @@ -17669,6 +17670,21 @@ where let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + let inbound_committed_update_adds = + funded_chan.get_inbound_committed_update_adds(); + if !inbound_committed_update_adds.is_empty() { + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel`, as part of removing the requirement to regularly persist the + // `ChannelManager`. + decode_update_add_htlcs.insert( + funded_chan.context.outbound_scid_alias(), + inbound_committed_update_adds, + ); + } + } + } } if is_channel_closed { @@ -17727,9 +17743,15 @@ where }; // The ChannelMonitor is now responsible for this HTLC's // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs` or - // `pending_intercepted_htlcs`, we were apparently not persisted after - // the monitor was when forwarding the payment. + // still have an entry for this HTLC in `forward_htlcs`, + // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not + // persisted after the monitor was when forwarding the payment. + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, + ); dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs_legacy, &prev_hop_data, "HTLC was forwarded to the closed channel", &args.logger, ); @@ -18220,6 +18242,31 @@ where } } + // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. + // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. + for (src, _, _, _, _, _) in failed_htlcs.iter() { + if let HTLCSource::PreviousHopData(prev_hop_data) = src { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was failed backwards during manager read", + &args.logger, + ); + } + } + + // See above comment on `failed_htlcs`.
+ for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, + ); + } + } + let best_block = BestBlock::new(best_block_hash, best_block_height); let flow = OffersMessageFlow::new( chain_hash, From cb398f6b761edde6b45fcda93a01c564cb49a13c Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 20 Nov 2025 18:39:39 -0500 Subject: [PATCH 027/242] Rebuild manager forwarded htlcs maps from Channels We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. Here we start this process by rebuilding ChannelManager::decode_update_add_htlcs, forward_htlcs, and pending_intercepted_htlcs from Channel data, which will soon be included in the ChannelMonitors as part of a different series of PRs. We also fix the reload_node test util to use the node's pre-reload config after restart. The previous behavior was a bit surprising and led to one of this commit's tests failing. --- lightning/src/ln/channelmanager.rs | 72 ++++++++++++++++++- lightning/src/ln/functional_test_utils.rs | 3 +- lightning/src/ln/reload_tests.rs | 87 ++++++++++++++++++++++- 3 files changed, 159 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 080ecef2c1f..e2a3db8783a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18267,6 +18267,76 @@ where } } + // Remove HTLCs from `forward_htlcs` if they are also present in `decode_update_add_htlcs`. + // + // In the future, the full set of pending HTLCs will be pulled from `Channel{Monitor}` data and + // placed in `ChannelManager::decode_update_add_htlcs` on read, to be handled on the next call + // to `process_pending_htlc_forwards`. This is part of a larger effort to remove the requirement + // of regularly persisting the `ChannelManager`. The new pipeline is supported for HTLC forwards + // received on LDK 0.3+ but not <= 0.2, so prune non-legacy HTLCs from `forward_htlcs`. + forward_htlcs_legacy.retain(|scid, pending_fwds| { + for fwd in pending_fwds { + let (prev_scid, prev_htlc_id) = match fwd { + HTLCForwardInfo::AddHTLC(htlc) => { + (htlc.prev_outbound_scid_alias, htlc.prev_htlc_id) + }, + HTLCForwardInfo::FailHTLC { htlc_id, .. } + | HTLCForwardInfo::FailMalformedHTLC { htlc_id, .. } => (*scid, *htlc_id), + }; + if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { + if pending_update_adds + .iter() + .any(|update_add| update_add.htlc_id == prev_htlc_id) + { + return false; + } + } + } + true + }); + // Remove intercepted HTLC forwards if they are also present in `decode_update_add_htlcs`. See + // the above comment. + pending_intercepted_htlcs_legacy.retain(|id, fwd| { + let prev_scid = fwd.prev_outbound_scid_alias; + if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { + if pending_update_adds + .iter() + .any(|update_add| update_add.htlc_id == fwd.prev_htlc_id) + { + pending_events_read.retain( + |(ev, _)| !matches!(ev, Event::HTLCIntercepted { intercept_id, .. 
} if intercept_id == id), + ); + return false; + } + } + if !pending_events_read.iter().any( + |(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), + ) { + match create_htlc_intercepted_event(*id, &fwd) { + Ok(ev) => pending_events_read.push_back((ev, None)), + Err(()) => debug_assert!(false), + } + } + true + }); + // Add legacy update_adds that were received on LDK <= 0.2 that are not present in the + // `decode_update_add_htlcs` map that was rebuilt from `Channel{Monitor}` data, see above + // comment. + for (scid, legacy_update_adds) in decode_update_add_htlcs_legacy.drain() { + match decode_update_add_htlcs.entry(scid) { + hash_map::Entry::Occupied(mut update_adds) => { + for legacy_update_add in legacy_update_adds { + if !update_adds.get().contains(&legacy_update_add) { + update_adds.get_mut().push(legacy_update_add); + } + } + }, + hash_map::Entry::Vacant(entry) => { + entry.insert(legacy_update_adds); + }, + } + } + let best_block = BestBlock::new(best_block_hash, best_block_height); let flow = OffersMessageFlow::new( chain_hash, @@ -18296,7 +18366,7 @@ where pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy), forward_htlcs: Mutex::new(forward_htlcs_legacy), - decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs_legacy), + decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap(), diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index ff33d7508b5..3a5940cb161 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1382,9 +1382,10 @@ macro_rules! reload_node { $node.onion_messenger.set_async_payments_handler(&$new_channelmanager); }; ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { + let config = $node.node.get_current_config(); reload_node!( $node, - test_default_channel_config(), + config, $chanman_encoded, $monitors_encoded, $persister, diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 2e9471a787d..cd560745256 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -508,7 +508,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { #[cfg(feature = "std")] fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, not_stale: bool) { - use crate::ln::channelmanager::Retry; + use crate::ln::outbound_payment::Retry; use crate::types::string::UntrustedString; // When we get a data_loss_protect proving we're behind, we immediately panic as the // chain::Watch API requirements have been violated (e.g. the user restored from a backup). 
The @@ -1173,6 +1173,91 @@ fn removed_payment_no_manager_persistence() { expect_payment_failed!(nodes[0], payment_hash, false); } +#[test] +fn manager_persisted_pre_outbound_edge_forward() { + do_manager_persisted_pre_outbound_edge_forward(false); +} + +#[test] +fn manager_persisted_pre_outbound_edge_intercept_forward() { + do_manager_persisted_pre_outbound_edge_forward(true); +} + +fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let mut intercept_forwards_config = test_default_channel_config(); + intercept_forwards_config.accept_intercept_htlcs = true; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + + // Lock in the HTLC from node_a <> node_b. + let amt_msat = 5000; + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + if intercept_htlc { + route.paths[0].hops[1].short_channel_id = nodes[1].node.get_intercept_scid(); + } + nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + // Decode the HTLC onion but don't forward it to the next hop, such that the HTLC ends up in + // `ChannelManager::forward_htlcs` or `ChannelManager::pending_intercepted_htlcs`. + nodes[1].node.test_process_pending_update_add_htlcs(); + + // Disconnect peers and reload the forwarding node_b. + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + + let node_b_encoded = nodes[1].node.encode(); + + let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); + reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + + reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0])); + let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]); + args_b_c.send_channel_ready = (true, true); + args_b_c.send_announcement_sigs = (true, true); + reconnect_nodes(args_b_c); + + // Forward the HTLC and ensure we can claim it post-reload. + nodes[1].node.process_pending_htlc_forwards(); + + if intercept_htlc { + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + let (intercept_id, expected_outbound_amt_msat) = match events[0] { + Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. 
} => { (intercept_id, expected_outbound_amount_msat) }, _ => panic!() }; nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id_2, nodes[2].node.get_our_node_id(), expected_outbound_amt_msat).unwrap(); nodes[1].node.process_pending_htlc_forwards(); } check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false); expect_and_process_pending_htlcs(&nodes[2], false); expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage)); expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } #[test] fn test_reload_partial_funding_batch() { let chanmon_cfgs = create_chanmon_cfgs(3); From a24dcffa32766b1f97d9f36be43a193e2616ca8b Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 1 Dec 2025 16:11:18 -0800 Subject: [PATCH 028/242] Test 0.2 -> 0.3 reload with forward htlcs present We have an overarching goal of (mostly) getting rid of ChannelManager persistence and rebuilding the ChannelManager's state from existing ChannelMonitors, due to issues when the two structs are out-of-sync on restart. The main issue that can arise is channel force closure. In the previous commit we started this process by rebuilding ChannelManager::decode_update_add_htlcs, forward_htlcs, and pending_intercepted_htlcs from the Channel data, which will soon be included in the ChannelMonitors as part of a different series of PRs. Here we test that HTLC forwards that were originally received on 0.2 can still be successfully forwarded using the new reload + legacy handling code that will be merged for 0.3. --- lightning-tests/Cargo.toml | 1 + .../src/upgrade_downgrade_tests.rs | 201 ++++++++++++++++++ 2 files changed, 202 insertions(+) diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml index 439157e528b..4e8d330089d 100644 --- a/lightning-tests/Cargo.toml +++ b/lightning-tests/Cargo.toml @@ -15,6 +15,7 @@ lightning-types = { path = "../lightning-types", features = ["_test_utils"] } lightning-invoice = { path = "../lightning-invoice", default-features = false } lightning-macros = { path = "../lightning-macros" } lightning = { path = "../lightning", features = ["_test_utils"] } +lightning_0_2 = { package = "lightning", version = "0.2.0", features = ["_test_utils"] } lightning_0_1 = { package = "lightning", version = "0.1.7", features = ["_test_utils"] } lightning_0_0_125 = { package = "lightning", version = "0.0.125", features = ["_test_utils"] } diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index cef180fbd4e..19c50e870de 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -10,6 +10,16 @@ //! Tests which test upgrading from previous versions of LDK or downgrading to previous versions of //! LDK.
+use lightning_0_2::commitment_signed_dance as commitment_signed_dance_0_2; +use lightning_0_2::events::Event as Event_0_2; +use lightning_0_2::get_monitor as get_monitor_0_2; +use lightning_0_2::ln::channelmanager::PaymentId as PaymentId_0_2; +use lightning_0_2::ln::channelmanager::RecipientOnionFields as RecipientOnionFields_0_2; +use lightning_0_2::ln::functional_test_utils as lightning_0_2_utils; +use lightning_0_2::ln::msgs::ChannelMessageHandler as _; +use lightning_0_2::routing::router as router_0_2; +use lightning_0_2::util::ser::Writeable as _; + use lightning_0_1::commitment_signed_dance as commitment_signed_dance_0_1; use lightning_0_1::events::ClosureReason as ClosureReason_0_1; use lightning_0_1::expect_pending_htlcs_forwardable_ignore as expect_pending_htlcs_forwardable_ignore_0_1; @@ -498,3 +508,194 @@ fn test_0_1_htlc_forward_after_splice() { do_test_0_1_htlc_forward_after_splice(true); do_test_0_1_htlc_forward_after_splice(false); } + +#[derive(PartialEq, Eq)] +enum MidHtlcForwardCase { + // Restart the upgraded node after locking an HTLC forward into the inbound edge, but before + // decoding the onion. + PreOnionDecode, + // Restart the upgraded node after locking an HTLC forward into the inbound edge + decoding the + // onion. + PostOnionDecode, + // Restart the upgraded node after the HTLC has been decoded and placed in the pending intercepted + // HTLCs map. + Intercept, +} + +#[test] +fn upgrade_pre_htlc_forward_onion_decode() { + do_upgrade_mid_htlc_forward(MidHtlcForwardCase::PreOnionDecode); +} + +#[test] +fn upgrade_mid_htlc_forward() { + do_upgrade_mid_htlc_forward(MidHtlcForwardCase::PostOnionDecode); +} + +#[test] +fn upgrade_mid_htlc_intercept_forward() { + do_upgrade_mid_htlc_forward(MidHtlcForwardCase::Intercept); +} + +fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { + // In 0.3, we started reconstructing the `ChannelManager`'s HTLC forwards maps from the HTLCs + // contained in `Channel`s, as part of removing the requirement to regularly persist the + // `ChannelManager`. However, HTLC forwards can only be reconstructed this way if they were + // received on 0.3 or higher. Test that HTLC forwards that were serialized on <=0.2 will still + // succeed when read on 0.3+. + let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id, node_c_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let chan_id_bytes_b_c; + + { + let chanmon_cfgs = lightning_0_2_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_2_utils::create_node_cfgs(3, &chanmon_cfgs); + + let mut intercept_cfg = lightning_0_2_utils::test_default_channel_config(); + intercept_cfg.accept_intercept_htlcs = true; + let cfgs = &[None, Some(intercept_cfg), None]; + let node_chanmgrs = lightning_0_2_utils::create_node_chanmgrs(3, &node_cfgs, cfgs); + let nodes = lightning_0_2_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_a = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + + let chan_id_b = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_bytes_b_c = chan_id_b.0; + + // Ensure all nodes are at the same initial height. 
+ let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_2_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_0_2_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_2::PaymentParameters::from_node_id( + node_c_id, + lightning_0_2_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_2::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let mut route = lightning_0_2_utils::get_route(&nodes[0], &route_params).unwrap(); + + if test == MidHtlcForwardCase::Intercept { + route.paths[0].hops[1].short_channel_id = nodes[1].node.get_intercept_scid(); + } + + let onion = RecipientOnionFields_0_2::secret_only(secret); + let id = PaymentId_0_2(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_2_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_2_utils::SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_2!(nodes[1], nodes[0], send_event.commitment_msg, false); + if test != MidHtlcForwardCase::PreOnionDecode { + nodes[1].node.test_process_pending_update_add_htlcs(); + } + let events = nodes[1].node.get_and_clear_pending_events(); + if test == MidHtlcForwardCase::Intercept { + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event_0_2::HTLCIntercepted { .. 
})); + } else { + assert!(events.is_empty()); + } + + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = get_monitor_0_2!(nodes[0], chan_id_a).encode(); + mon_b_1_ser = get_monitor_0_2!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_2!(nodes[1], chan_id_b).encode(); + mon_c_1_ser = get_monitor_0_2!(nodes[2], chan_id_b).encode(); + } + + // Create a dummy node to reload over with the 0.2 state + let mut chanmon_cfgs = create_chanmon_cfgs(3); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let config = test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + let c_mons = &[&mon_c_1_ser[..]]; + reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + reconnect_nodes(reconnect_b_c_args); + + // Now release the HTLC from node_b to node_c, to be claimed back to node_a + nodes[1].node.process_pending_htlc_forwards(); + + if test == MidHtlcForwardCase::Intercept { + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + let (intercept_id, expected_outbound_amt_msat) = match events[0] { + Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. 
} => { + (intercept_id, expected_outbound_amount_msat) + }, + _ => panic!(), + }; + nodes[1] + .node + .forward_intercepted_htlc( + intercept_id, + &ChannelId(chan_id_bytes_b_c), + nodes[2].node.get_our_node_id(), + expected_outbound_amt_msat, + ) + .unwrap(); + nodes[1].node.process_pending_htlc_forwards(); + } + + let pay_secret = PaymentSecret(payment_secret_bytes); + let pay_hash = PaymentHash(payment_hash_bytes); + let pay_preimage = PaymentPreimage(payment_preimage_bytes); + + check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + + expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} From 004ceef48c084eb478547ee6e9f24935b2bb2412 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 08:30:33 +0100 Subject: [PATCH 029/242] Convert convert_funded_channel_err fns to methods --- lightning/src/ln/channelmanager.rs | 188 ++++++++++++++--------------- 1 file changed, 93 insertions(+), 95 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 2a89e7b5681..a24f31158e1 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3597,96 +3597,6 @@ fn convert_channel_err_internal< } } -fn convert_funded_channel_err_internal>( - cm: &CM, closed_channel_monitor_update_ids: &mut BTreeMap, - in_flight_monitor_updates: &mut BTreeMap)>, - coop_close_shutdown_res: Option, err: ChannelError, - chan: &mut FundedChannel, -) -> (bool, MsgHandleErrInternal) -where - SP::Target: SignerProvider, - CM::Watch: Watch<::EcdsaSigner>, -{ - let chan_id = chan.context.channel_id(); - convert_channel_err_internal(err, chan_id, |reason, msg| { - let cm = cm.get_cm(); - let logger = WithChannelContext::from(&cm.logger, &chan.context, None); - - let mut shutdown_res = - if let Some(res) = coop_close_shutdown_res { res } else { chan.force_shutdown(reason) }; - let chan_update = cm.get_channel_update_for_broadcast(chan).ok(); - - log_error!(logger, "Closed channel due to close-required error: {}", msg); - - if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { - handle_new_monitor_update_locked_actions_handled_by_caller!( - cm, - funding_txo, - update, - in_flight_monitor_updates, - chan.context - ); - } - // If there's a possibility that we need to generate further monitor updates for this - // channel, we need to store the last update_id of it. However, we don't want to insert - // into the map (which prevents the `PeerState` from being cleaned up) for channels that - // never even got confirmations (which would open us up to DoS attacks). 
- let update_id = chan.context.get_latest_monitor_update_id(); - let funding_confirmed = chan.funding.get_funding_tx_confirmation_height().is_some(); - let chan_zero_conf = chan.context.minimum_depth(&chan.funding) == Some(0); - if funding_confirmed || chan_zero_conf || update_id > 1 { - closed_channel_monitor_update_ids.insert(chan_id, update_id); - } - let mut short_to_chan_info = cm.short_to_chan_info.write().unwrap(); - if let Some(short_id) = chan.funding.get_short_channel_id() { - short_to_chan_info.remove(&short_id); - } else { - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. - let outbound_alias = chan.context.outbound_scid_alias(); - let alias_removed = cm.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); - debug_assert!(alias_removed); - } - short_to_chan_info.remove(&chan.context.outbound_scid_alias()); - for scid in chan.context.historical_scids() { - short_to_chan_info.remove(scid); - } - - (shutdown_res, chan_update) - }) -} - -fn convert_unfunded_channel_err_internal( - cm: &CM, err: ChannelError, chan: &mut Channel, -) -> (bool, MsgHandleErrInternal) -where - SP::Target: SignerProvider, -{ - let chan_id = chan.context().channel_id(); - convert_channel_err_internal(err, chan_id, |reason, msg| { - let cm = cm.get_cm(); - let logger = WithChannelContext::from(&cm.logger, chan.context(), None); - - let shutdown_res = chan.force_shutdown(reason); - log_error!(logger, "Closed channel due to close-required error: {}", msg); - cm.short_to_chan_info.write().unwrap().remove(&chan.context().outbound_scid_alias()); - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. - let outbound_alias = chan.context().outbound_scid_alias(); - let alias_removed = cm.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); - debug_assert!(alias_removed); - (shutdown_res, None) - }) -} - /// When a channel is removed, two things need to happen: /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except @@ -3706,7 +3616,7 @@ macro_rules! convert_channel_err { let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; let (close, mut err) = - convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, Some($shutdown_result), reason, $funded_channel); + $self.convert_funded_channel_err_internal(closed_update_ids, in_flight_updates, Some($shutdown_result), reason, $funded_channel); err.dont_send_error_message(); debug_assert!(close); err @@ -3714,20 +3624,20 @@ macro_rules! 
convert_channel_err { ($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, FUNDED_CHANNEL) => { { let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, None, $err, $funded_channel) + $self.convert_funded_channel_err_internal(closed_update_ids, in_flight_updates, None, $err, $funded_channel) } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { { - convert_unfunded_channel_err_internal($self, $err, $channel) + $self.convert_unfunded_channel_err_internal($err, $channel) } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr) => { match $channel.as_funded_mut() { Some(funded_channel) => { let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, None, $err, funded_channel) + $self.convert_funded_channel_err_internal(closed_update_ids, in_flight_updates, None, $err, funded_channel) }, None => { - convert_unfunded_channel_err_internal($self, $err, $channel) + $self.convert_unfunded_channel_err_internal($err, $channel) }, } }; @@ -4034,6 +3944,94 @@ where }) } + fn convert_funded_channel_err_internal( + &self, closed_channel_monitor_update_ids: &mut BTreeMap, + in_flight_monitor_updates: &mut BTreeMap)>, + coop_close_shutdown_res: Option, err: ChannelError, + chan: &mut FundedChannel, + ) -> (bool, MsgHandleErrInternal) { + let chan_id = chan.context.channel_id(); + convert_channel_err_internal(err, chan_id, |reason, msg| { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); + + let mut shutdown_res = if let Some(res) = coop_close_shutdown_res { + res + } else { + chan.force_shutdown(reason) + }; + let chan_update = self.get_channel_update_for_broadcast(chan).ok(); + + log_error!(logger, "Closed channel due to close-required error: {}", msg); + + if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { + handle_new_monitor_update_locked_actions_handled_by_caller!( + self, + funding_txo, + update, + in_flight_monitor_updates, + chan.context + ); + } + // If there's a possibility that we need to generate further monitor updates for this + // channel, we need to store the last update_id of it. However, we don't want to insert + // into the map (which prevents the `PeerState` from being cleaned up) for channels that + // never even got confirmations (which would open us up to DoS attacks). + let update_id = chan.context.get_latest_monitor_update_id(); + let funding_confirmed = chan.funding.get_funding_tx_confirmation_height().is_some(); + let chan_zero_conf = chan.context.minimum_depth(&chan.funding) == Some(0); + if funding_confirmed || chan_zero_conf || update_id > 1 { + closed_channel_monitor_update_ids.insert(chan_id, update_id); + } + let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); + if let Some(short_id) = chan.funding.get_short_channel_id() { + short_to_chan_info.remove(&short_id); + } else { + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. 
While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let outbound_alias = chan.context.outbound_scid_alias(); + let alias_removed = + self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); + debug_assert!(alias_removed); + } + short_to_chan_info.remove(&chan.context.outbound_scid_alias()); + for scid in chan.context.historical_scids() { + short_to_chan_info.remove(scid); + } + + (shutdown_res, chan_update) + }) + } + + fn convert_unfunded_channel_err_internal( + &self, err: ChannelError, chan: &mut Channel, + ) -> (bool, MsgHandleErrInternal) + where + SP::Target: SignerProvider, + { + let chan_id = chan.context().channel_id(); + convert_channel_err_internal(err, chan_id, |reason, msg| { + let logger = WithChannelContext::from(&self.logger, chan.context(), None); + + let shutdown_res = chan.force_shutdown(reason); + log_error!(logger, "Closed channel due to close-required error: {}", msg); + self.short_to_chan_info.write().unwrap().remove(&chan.context().outbound_scid_alias()); + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let outbound_alias = chan.context().outbound_scid_alias(); + let alias_removed = self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); + debug_assert!(alias_removed); + (shutdown_res, None) + }) + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. pub fn get_current_config(&self) -> UserConfig { From 87e01ffdfe6d6bc5d7574f62c808885a2a37a1f6 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 09:03:15 +0100 Subject: [PATCH 030/242] Convert macro to convert_channel_err_coop method --- lightning/src/ln/channelmanager.rs | 45 ++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a24f31158e1..872c11387d8 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3611,16 +3611,6 @@ fn convert_channel_err_internal< /// true). #[rustfmt::skip] macro_rules! 
convert_channel_err { - ($self: ident, $peer_state: expr, $shutdown_result: expr, $funded_channel: expr, COOP_CLOSED) => { { - let reason = ChannelError::Close(("Coop Closed".to_owned(), $shutdown_result.closure_reason.clone())); - let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; - let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - let (close, mut err) = - $self.convert_funded_channel_err_internal(closed_update_ids, in_flight_updates, Some($shutdown_result), reason, $funded_channel); - err.dont_send_error_message(); - debug_assert!(close); - err - } }; ($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, FUNDED_CHANNEL) => { { let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; @@ -4032,6 +4022,32 @@ where }) } + /// When a cooperatively closed channel is removed, two things need to happen: + /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + /// + /// Returns a mapped error. + fn convert_channel_err_coop( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + shutdown_result: ShutdownResult, funded_channel: &mut FundedChannel, + ) -> MsgHandleErrInternal { + let reason = + ChannelError::Close(("Coop Closed".to_owned(), shutdown_result.closure_reason.clone())); + let (close, mut err) = self.convert_funded_channel_err_internal( + closed_update_ids, + in_flight_updates, + Some(shutdown_result), + reason, + funded_channel, + ); + err.dont_send_error_message(); + debug_assert!(close); + err + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. pub fn get_current_config(&self) -> UserConfig { @@ -11146,7 +11162,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // also implies there are no pending HTLCs left on the channel, so we can // fully delete it from tracking (the channel monitor is still around to // watch for old state broadcasts)! - let err = convert_channel_err!(self, peer_state, close_res, chan, COOP_CLOSED); + let err = self.convert_channel_err_coop(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, close_res, chan); chan_entry.remove(); Some((tx, Err(err))) } else { @@ -12467,7 +12483,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ log_trace!(logger, "Removing channel now that the signer is unblocked"); let (remove, err) = if let Some(funded) = chan.as_funded_mut() { let err = - convert_channel_err!(self, peer_state, shutdown, funded, COOP_CLOSED); + self.convert_channel_err_coop(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, shutdown, funded); (true, err) } else { debug_assert!(false); @@ -12522,7 +12538,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some((tx, shutdown_res)) = tx_shutdown_result_opt { // We're done with this channel. We got a closing_signed and sent back // a closing_signed with a closing transaction to broadcast. 
- let err = convert_channel_err!(self, peer_state, shutdown_res, funded_chan, COOP_CLOSED); + let err = self.convert_channel_err_coop(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, shutdown_res, funded_chan); handle_errors.push((*cp_id, Err(err))); log_info!(logger, "Broadcasting {}", log_tx!(tx)); @@ -12532,7 +12548,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, Err(e) => { has_update = true; - let (close_channel, res) = convert_channel_err!(self, peer_state, e, funded_chan, FUNDED_CHANNEL); + let (close_channel, res) = convert_channel_err!( + self, peer_state, e, funded_chan, FUNDED_CHANNEL); handle_errors.push((funded_chan.context.get_counterparty_node_id(), Err(res))); !close_channel } From 36cfb13a5a064d57c4403b03bc3d307eb32cc153 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 09:10:39 +0100 Subject: [PATCH 031/242] Convert macro to convert_channel_err_funded method --- lightning/src/ln/channelmanager.rs | 41 ++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 872c11387d8..e7131c63a76 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3611,11 +3611,6 @@ fn convert_channel_err_internal< /// true). #[rustfmt::skip] macro_rules! convert_channel_err { - ($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, FUNDED_CHANNEL) => { { - let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; - let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - $self.convert_funded_channel_err_internal(closed_update_ids, in_flight_updates, None, $err, $funded_channel) - } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { { $self.convert_unfunded_channel_err_internal($err, $channel) } }; @@ -4048,6 +4043,28 @@ where err } + /// When a funded channel is removed, two things need to happen: + /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + fn convert_channel_err_funded( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + err: ChannelError, funded_channel: &mut FundedChannel, + ) -> (bool, MsgHandleErrInternal) { + self.convert_funded_channel_err_internal( + closed_update_ids, + in_flight_updates, + None, + err, + funded_channel, + ) + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. 
pub fn get_current_config(&self) -> UserConfig { @@ -8192,7 +8209,7 @@ where if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } if let Err(e) = funded_chan.timer_check_closing_negotiation_progress() { - let (needs_close, err) = convert_channel_err!(self, peer_state, e, funded_chan, FUNDED_CHANNEL); + let (needs_close, err) = self.convert_channel_err_funded(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, e, funded_chan); handle_errors.push((Err(err), counterparty_node_id)); if needs_close { return false; } } @@ -12548,8 +12565,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, Err(e) => { has_update = true; - let (close_channel, res) = convert_channel_err!( - self, peer_state, e, funded_chan, FUNDED_CHANNEL); + let (close_channel, res) = self.convert_channel_err_funded( + &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, e, funded_chan); handle_errors.push((funded_chan.context.get_counterparty_node_id(), Err(res))); !close_channel } @@ -14657,12 +14674,10 @@ where // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = convert_channel_err!( - self, - peer_state, + let (_, e) = self.convert_channel_err_funded( + &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, - funded_channel, - FUNDED_CHANNEL + funded_channel ); failed_channels.push((Err(e), *counterparty_node_id)); return false; From ec112c4787ee6a1544dd55b29b6ac9a52b44554d Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 09:14:28 +0100 Subject: [PATCH 032/242] Replace macro with direct call to convert_unfunded_channel_err_internal --- lightning/src/ln/channelmanager.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e7131c63a76..5e1fe59a383 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3611,9 +3611,6 @@ fn convert_channel_err_internal< /// true). #[rustfmt::skip] macro_rules! convert_channel_err { - ($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { { - $self.convert_unfunded_channel_err_internal($err, $channel) - } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr) => { match $channel.as_funded_mut() { Some(funded_channel) => { @@ -10506,7 +10503,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let err = ChannelError::close($err.to_owned()); chan.unset_funding_info(); let mut chan = Channel::from(chan); - return Err(convert_channel_err!(self, peer_state, err, &mut chan, UNFUNDED_CHANNEL).1); + return Err(self.convert_unfunded_channel_err_internal(err, &mut chan).1); } } } match peer_state.channel_by_id.entry(funded_channel_id) { @@ -12506,7 +12503,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ debug_assert!(false); let reason = shutdown.closure_reason.clone(); let err = ChannelError::Close((reason.to_string(), reason)); - convert_channel_err!(self, peer_state, err, chan, UNFUNDED_CHANNEL) + self.convert_unfunded_channel_err_internal(err, chan) }; debug_assert!(remove); shutdown_results.push((Err(err), *cp_id)); From ee426703af6bd09dba258f77a8b6835397c1fa55 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 09:53:27 +0100 Subject: [PATCH 033/242] Convert macro to convert_channel_err method --- lightning/src/ln/channelmanager.rs | 157 +++++++++++++++++++++-------- 1 file changed, 114 insertions(+), 43 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5e1fe59a383..5fd17648034 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3597,40 +3597,17 @@ fn convert_channel_err_internal< } } -/// When a channel is removed, two things need to happen: -/// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, -/// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except -/// [`ChannelManager::total_consistency_lock`]), which then calls -/// [`ChannelManager::finish_close_channel`]. -/// -/// Note that this step can be skipped if the channel was never opened (through the creation of a -/// [`ChannelMonitor`]/channel funding transaction) to begin with. -/// -/// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped -/// error)`, except in the `COOP_CLOSE` case, where the bool is elided (it is always implicitly -/// true). -#[rustfmt::skip] -macro_rules! convert_channel_err { - ($self: ident, $peer_state: expr, $err: expr, $channel: expr) => { - match $channel.as_funded_mut() { - Some(funded_channel) => { - let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; - let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - $self.convert_funded_channel_err_internal(closed_update_ids, in_flight_updates, None, $err, funded_channel) - }, - None => { - $self.convert_unfunded_channel_err_internal($err, $channel) - }, - } - }; -} - macro_rules! break_channel_entry { ($self: ident, $peer_state: expr, $res: expr, $entry: expr) => { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut()); + let (drop, res) = $self.convert_channel_err( + &mut $peer_state.closed_channel_monitor_update_ids, + &mut $peer_state.in_flight_monitor_updates, + e, + $entry.get_mut(), + ); if drop { $entry.remove_entry(); } @@ -3645,7 +3622,12 @@ macro_rules! try_channel_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut()); + let (drop, res) = $self.convert_channel_err( + &mut $peer_state.closed_channel_monitor_update_ids, + &mut $peer_state.in_flight_monitor_updates, + e, + $entry.get_mut(), + ); if drop { $entry.remove_entry(); } @@ -4062,6 +4044,34 @@ where ) } + /// When a channel that can be funded or unfunded is removed, two things need to happen: + /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. 
+ /// + /// Note that this step can be skipped if the channel was never opened (through the creation of a + /// [`ChannelMonitor`]/channel funding transaction) to begin with. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + fn convert_channel_err( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + err: ChannelError, channel: &mut Channel, + ) -> (bool, MsgHandleErrInternal) { + match channel.as_funded_mut() { + Some(funded_channel) => self.convert_funded_channel_err_internal( + closed_update_ids, + in_flight_updates, + None, + err, + funded_channel, + ), + None => self.convert_unfunded_channel_err_internal(err, channel), + } + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. pub fn get_current_config(&self) -> UserConfig { @@ -4405,7 +4415,13 @@ where let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, mut e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); + e.dont_send_error_message(); shutdown_result = Err(e); } @@ -4538,7 +4554,7 @@ where } /// When a channel is removed, two things need to happen: - /// (a) [`convert_channel_err`] must be called in the same `per_peer_state` lock as the + /// (a) [`ChannelManager::convert_channel_err`] must be called in the same `per_peer_state` lock as the /// channel-closing action, /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except /// [`ChannelManager::total_consistency_lock`]), which then calls this. 
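The (a)/(b) contract these doc comments spell out is easy to get wrong, so here is a runnable analogue using plain std locks as stand-ins for LDK's types (all names below are illustrative, not the real internals): the mapped error is produced while the per-peer lock is held, and is only handled after every lock has been dropped.

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerState {
    closed_channels: Vec<u64>,
}

struct Manager {
    per_peer_state: RwLock<HashMap<u8, Mutex<PeerState>>>,
}

impl Manager {
    // Phase (a): runs inside the per-peer lock and records the closure.
    fn convert_channel_err(&self, peer: u8, channel: u64) -> String {
        let peers = self.per_peer_state.read().unwrap();
        let mut state = peers.get(&peer).unwrap().lock().unwrap();
        state.closed_channels.push(channel);
        format!("channel {} closed", channel)
    }

    // Phase (b): must run with no locks held, since it may re-take them.
    fn handle_error(&self, err: String) {
        let _peers = self.per_peer_state.read().unwrap();
        println!("handling: {}", err);
    }
}

fn main() {
    let mgr = Manager { per_peer_state: RwLock::new(HashMap::new()) };
    mgr.per_peer_state
        .write()
        .unwrap()
        .insert(7, Mutex::new(PeerState { closed_channels: Vec::new() }));
    let err = mgr.convert_channel_err(7, 42); // all guards dropped on return
    mgr.handle_error(err); // safe: no locks held at this point
}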
@@ -4590,7 +4606,12 @@ where if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) { let reason = ClosureReason::FundingBatchClosure; let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); shutdown_results.push((Err(e), counterparty_node_id)); } } @@ -4666,7 +4687,12 @@ where if let Some(mut chan) = peer_state.channel_by_id.remove(channel_id) { log_error!(logger, "Force-closing channel"); let err = ChannelError::Close((message, reason)); - let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, mut e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); mem::drop(peer_state_lock); mem::drop(per_peer_state); if is_from_counterparty { @@ -6444,7 +6470,12 @@ where let err = ChannelError::Close((e.clone(), reason)); let peer_state = &mut *peer_state_lock; let (_, e) = - convert_channel_err!(self, peer_state, err, &mut chan); + self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); shutdown_results.push((Err(e), counterparty_node_id)); }); } @@ -8283,7 +8314,12 @@ where let reason = ClosureReason::FundingTimedOut; let msg = "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(); let err = ChannelError::Close((msg, reason)); - let (_, e) = convert_channel_err!(self, peer_state, err, chan); + let (_, e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + chan, + ); handle_errors.push((Err(e), counterparty_node_id)); false } else { @@ -10481,14 +10517,24 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // concerning this channel as it is safe to do so. debug_assert!(matches!(err, ChannelError::Close(_))); let mut chan = Channel::from(inbound_chan); - return Err(convert_channel_err!(self, peer_state, err, &mut chan).1); + return Err(self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ).1); }, } }, Some(Err(mut chan)) => { let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id); let err = ChannelError::close(err_msg); - return Err(convert_channel_err!(self, peer_state, err, &mut chan).1); + return Err(self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ).1); }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) }; @@ -11116,7 +11162,12 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, mut e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); e.dont_send_error_message(); return Err(e); }, @@ -12272,7 +12323,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); failed_channels.push((Err(e), counterparty_node_id)); } } @@ -12288,7 +12344,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let reason = ClosureReason::CommitmentTxConfirmed; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); failed_channels.push((Err(e), counterparty_node_id)); } } @@ -12485,7 +12546,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ _ => match unblock_chan(chan, &mut peer_state.pending_msg_events) { Ok(shutdown_result) => shutdown_result, Err(err) => { - let (_, err) = convert_channel_err!(self, peer_state, err, chan); + let (_, err) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + chan, + ); shutdown_results.push((Err(err), *cp_id)); return false; }, @@ -13930,7 +13996,12 @@ where // Clean up for removal. let reason = ClosureReason::DisconnectedPeer; let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = convert_channel_err!(self, peer_state, err, chan); + let (_, e) = self.convert_channel_err( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + chan, + ); failed_channels.push((Err(e), counterparty_node_id)); false }); From 7fe270b069b74d8cde53bf07f22bd5dd22b385a1 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 10:28:49 +0100 Subject: [PATCH 034/242] Remove rustfmt::skip from touched methods --- lightning/src/ln/channelmanager.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5fd17648034..c78a469100e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4364,7 +4364,6 @@ where .collect() } - #[rustfmt::skip] fn close_channel_internal(&self, chan_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, override_shutdown_script: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -4558,7 +4557,6 @@ where /// channel-closing action, /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except /// [`ChannelManager::total_consistency_lock`]), which then calls this. 
- #[rustfmt::skip] fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) { debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); #[cfg(debug_assertions)] @@ -4668,7 +4666,6 @@ where /// `peer_msg` should be set when we receive a message from a peer, but not set when the /// user closes, which will be re-exposed as the `ChannelClosed` reason. - #[rustfmt::skip] fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, reason: ClosureReason) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -6354,7 +6351,6 @@ where self.batch_funding_transaction_generated_intern(temporary_channels, funding_type) } - #[rustfmt::skip] fn batch_funding_transaction_generated_intern(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType) -> Result<(), APIError> { let mut result = Ok(()); if let FundingType::Checked(funding_transaction) | @@ -10490,7 +10486,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Ok(()) } - #[rustfmt::skip] fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { let best_block = *self.best_block.read().unwrap(); @@ -12443,7 +12438,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// attempted in every channel, or in the specifically provided channel. /// /// [`ChannelSigner`]: crate::sign::ChannelSigner - #[rustfmt::skip] pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -12588,7 +12582,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// Check whether any channels have finished removing all pending updates after a shutdown /// exchange and can now send a closing_signed. /// Returns whether any closing_signed messages were generated. - #[rustfmt::skip] fn maybe_generate_initial_closing_signed(&self) -> bool { let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new(); let mut has_update = false; @@ -14580,7 +14573,6 @@ where /// Calls a function which handles an on-chain event (blocks dis/connected, transactions /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by /// the function. 
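For readers unfamiliar with the attribute being removed here: `#[rustfmt::skip]` makes rustfmt leave an item exactly as written, so dropping the attribute (this commit) and then running the formatter (the next commit) is what yields the large, behavior-neutral diff that follows. A small illustration:

#[rustfmt::skip]
fn skipped() { let x=1;   let y=  2; let _ = (x, y); }

// Without the attribute, `cargo fmt` normalizes the same item to:
fn formatted() {
    let x = 1;
    let y = 2;
    let _ = (x, y);
}

fn main() {
    skipped();
    formatted();
}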
- #[rustfmt::skip] fn do_chain_event) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason>> (&self, height_opt: Option, f: FN) { // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called From d436cbf5e11289c41d59fc0eb3d5c6e4e54b5179 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 11 Dec 2025 10:30:50 +0100 Subject: [PATCH 035/242] Rustfmt touched methods --- lightning/src/ln/channelmanager.rs | 557 ++++++++++++++++++----------- 1 file changed, 353 insertions(+), 204 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index c78a469100e..4a2ebf730f5 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4364,7 +4364,11 @@ where .collect() } - fn close_channel_internal(&self, chan_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, override_shutdown_script: Option) -> Result<(), APIError> { + fn close_channel_internal( + &self, chan_id: &ChannelId, counterparty_node_id: &PublicKey, + target_feerate_sats_per_1000_weight: Option, + override_shutdown_script: Option, + ) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new(); @@ -4390,8 +4394,12 @@ where if let Some(chan) = chan_entry.get_mut().as_funded_mut() { let funding_txo_opt = chan.funding.get_funding_txo(); let their_features = &peer_state.latest_features; - let (shutdown_msg, mut monitor_update_opt, htlcs) = - chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; + let (shutdown_msg, mut monitor_update_opt, htlcs) = chan.get_shutdown( + &self.signer_provider, + their_features, + target_feerate_sats_per_1000_weight, + override_shutdown_script, + )?; failed_htlcs = htlcs; // We can send the `shutdown` message before updating the `ChannelMonitor` @@ -4402,13 +4410,22 @@ where msg: shutdown_msg, }); - debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(), - "We can't both complete shutdown and generate a monitor update"); + debug_assert!( + monitor_update_opt.is_none() || !chan.is_shutdown(), + "We can't both complete shutdown and generate a monitor update" + ); // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update_opt.take() { - handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan); + handle_new_monitor_update!( + self, + funding_txo_opt.unwrap(), + monitor_update, + peer_state_lock, + peer_state, + per_peer_state, + chan + ); } } else { let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; @@ -4430,7 +4447,7 @@ where err: format!( "Channel with id {} not found for the passed counterparty node_id {}", chan_id, counterparty_node_id, - ) + ), }); }, } @@ -4439,7 +4456,10 @@ where for htlc_source in failed_htlcs.drain(..) 
{ let failure_reason = LocalHTLCFailureReason::ChannelClosed; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(*counterparty_node_id), channel_id: *chan_id }; + let receiver = HTLCHandlingFailureType::Forward { + node_id: Some(*counterparty_node_id), + channel_id: *chan_id, + }; let (source, hash) = htlc_source; self.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None); } @@ -4565,21 +4585,36 @@ where } let logger = WithContext::from( - &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None + &self.logger, + Some(shutdown_res.counterparty_node_id), + Some(shutdown_res.channel_id), + None, ); - log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail", - shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len()); + log_debug!( + logger, + "Finishing closure of channel due to {} with {} HTLCs to fail", + shutdown_res.closure_reason, + shutdown_res.dropped_outbound_htlcs.len() + ); for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::ChannelClosed; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { + node_id: Some(counterparty_node_id), + channel_id, + }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver, None); } if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update { debug_assert!(false, "This should have been handled in `convert_channel_err`"); - self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update); + self.apply_post_close_monitor_update( + shutdown_res.counterparty_node_id, + shutdown_res.channel_id, + funding_txo, + monitor_update, + ); } if self.background_events_processed_since_startup.load(Ordering::Acquire) { // If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are @@ -4588,7 +4623,11 @@ where // TODO: If we do the `in_flight_monitor_updates.is_empty()` check in // `convert_channel_err` we can skip the locks here. 
if shutdown_res.channel_funding_txo.is_some() { - self.channel_monitor_updated(&shutdown_res.channel_id, None, &shutdown_res.counterparty_node_id); + self.channel_monitor_updated( + &shutdown_res.channel_id, + None, + &shutdown_res.counterparty_node_id, + ); } } let mut shutdown_results: Vec<(Result, _)> = Vec::new(); @@ -4613,7 +4652,8 @@ where shutdown_results.push((Err(e), counterparty_node_id)); } } - has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state)); + has_uncompleted_channel = + Some(has_uncompleted_channel.map_or(!state, |v| v || !state)); } debug_assert!( has_uncompleted_channel.unwrap_or(true), @@ -4623,26 +4663,32 @@ where { let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push_back((events::Event::ChannelClosed { - channel_id: shutdown_res.channel_id, - user_channel_id: shutdown_res.user_channel_id, - reason: shutdown_res.closure_reason, - counterparty_node_id: Some(shutdown_res.counterparty_node_id), - channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis), - channel_funding_txo: shutdown_res.channel_funding_txo, - last_local_balance_msat: Some(shutdown_res.last_local_balance_msat), - }, None)); - - if let Some(splice_funding_failed) = shutdown_res.splice_funding_failed.take() { - pending_events.push_back((events::Event::SpliceFailed { + pending_events.push_back(( + events::Event::ChannelClosed { channel_id: shutdown_res.channel_id, - counterparty_node_id: shutdown_res.counterparty_node_id, user_channel_id: shutdown_res.user_channel_id, - abandoned_funding_txo: splice_funding_failed.funding_txo, - channel_type: splice_funding_failed.channel_type, - contributed_inputs: splice_funding_failed.contributed_inputs, - contributed_outputs: splice_funding_failed.contributed_outputs, - }, None)); + reason: shutdown_res.closure_reason, + counterparty_node_id: Some(shutdown_res.counterparty_node_id), + channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis), + channel_funding_txo: shutdown_res.channel_funding_txo, + last_local_balance_msat: Some(shutdown_res.last_local_balance_msat), + }, + None, + )); + + if let Some(splice_funding_failed) = shutdown_res.splice_funding_failed.take() { + pending_events.push_back(( + events::Event::SpliceFailed { + channel_id: shutdown_res.channel_id, + counterparty_node_id: shutdown_res.counterparty_node_id, + user_channel_id: shutdown_res.user_channel_id, + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type, + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, + None, + )); } if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx { @@ -4652,11 +4698,15 @@ where .expect("We had an unbroadcasted funding tx, so should also have had a funding outpoint"), } } else { - FundingInfo::Tx{ transaction } + FundingInfo::Tx { transaction } }; - pending_events.push_back((events::Event::DiscardFunding { - channel_id: shutdown_res.channel_id, funding_info - }, None)); + pending_events.push_back(( + events::Event::DiscardFunding { + channel_id: shutdown_res.channel_id, + funding_info, + }, + None, + )); } } for (err, counterparty_node_id) in shutdown_results.drain(..) { @@ -4666,11 +4716,17 @@ where /// `peer_msg` should be set when we receive a message from a peer, but not set when the /// user closes, which will be re-exposed as the `ChannelClosed` reason. 
- fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, reason: ClosureReason) - -> Result<(), APIError> { + fn force_close_channel_with_peer( + &self, channel_id: &ChannelId, peer_node_id: &PublicKey, reason: ClosureReason, + ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(peer_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?; + let peer_state_mutex = + per_peer_state.get(peer_node_id).ok_or_else(|| APIError::ChannelUnavailable { + err: format!( + "Can't find a peer matching the passed counterparty node_id {}", + peer_node_id + ), + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None); @@ -4702,21 +4758,24 @@ where } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() { log_error!(logger, "Force-closing inbound channel request"); if !is_from_counterparty && peer_state.is_connected { - peer_state.pending_msg_events.push( - MessageSendEvent::HandleError { - node_id: *peer_node_id, - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: *channel_id, data: message } - }, - } - ); + peer_state.pending_msg_events.push(MessageSendEvent::HandleError { + node_id: *peer_node_id, + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { channel_id: *channel_id, data: message }, + }, + }); } // N.B. that we don't send any channel close event here: we // don't have a user_channel_id, and we never sent any opening // events anyway. 
Ok(()) } else { - Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) }) + Err(APIError::ChannelUnavailable { + err: format!( + "Channel with id {} not found for the passed counterparty node_id {}", + channel_id, peer_node_id + ), + }) } } @@ -6351,16 +6410,20 @@ where self.batch_funding_transaction_generated_intern(temporary_channels, funding_type) } - fn batch_funding_transaction_generated_intern(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType) -> Result<(), APIError> { + fn batch_funding_transaction_generated_intern( + &self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType, + ) -> Result<(), APIError> { let mut result = Ok(()); - if let FundingType::Checked(funding_transaction) | - FundingType::CheckedManualBroadcast(funding_transaction) = &funding + if let FundingType::Checked(funding_transaction) + | FundingType::CheckedManualBroadcast(funding_transaction) = &funding { if !funding_transaction.is_coinbase() { for inp in funding_transaction.input.iter() { if inp.witness.is_empty() { result = result.and(Err(APIError::APIMisuseError { - err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned() + err: + "Funding transaction must be fully signed and spend Segwit outputs" + .to_owned(), })); } } @@ -6368,7 +6431,8 @@ where if funding_transaction.output.len() > u16::max_value() as usize { result = result.and(Err(APIError::APIMisuseError { - err: "Transaction had more than 2^16 outputs, which is not supported".to_owned() + err: "Transaction had more than 2^16 outputs, which is not supported" + .to_owned(), })); } let height = self.best_block.read().unwrap().height; @@ -6376,97 +6440,109 @@ where // lower than the next block height. However, the modules constituting our Lightning // node might not have perfect sync about their blockchain views. Thus, if the wallet // module is ahead of LDK, only allow one more block of headroom. 
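A worked example of the one-block headroom rule described in the comment above, reduced to the height comparison alone (the real check additionally applies only to block-height locktimes, and is skipped entirely when every input's sequence is `Sequence::MAX`, i.e. when the locktime is disabled):

// Reject a block-height locktime more than one block past our current tip.
fn locktime_acceptable(best_height: u32, lock_time_height: u32) -> bool {
    lock_time_height <= best_height + 1
}

fn main() {
    assert!(locktime_acceptable(100, 101)); // wallet one block ahead: fine
    assert!(!locktime_acceptable(100, 102)); // non-final: rejected
}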
- if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && - funding_transaction.lock_time.is_block_height() && - funding_transaction.lock_time.to_consensus_u32() > height + 1 + if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) + && funding_transaction.lock_time.is_block_height() + && funding_transaction.lock_time.to_consensus_u32() > height + 1 { result = result.and(Err(APIError::APIMisuseError { - err: "Funding transaction absolute timelock is non-final".to_owned() + err: "Funding transaction absolute timelock is non-final".to_owned(), })); } } let txid = funding.txid(); let is_batch_funding = temporary_channels.len() > 1; - let mut funding_batch_states = if is_batch_funding { - Some(self.funding_batch_states.lock().unwrap()) - } else { - None - }; - let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| { - match states.entry(txid) { - btree_map::Entry::Occupied(_) => { - result = result.clone().and(Err(APIError::APIMisuseError { - err: "Batch funding transaction with the same txid already exists".to_owned() - })); - None - }, - btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())), - } + let mut funding_batch_states = + if is_batch_funding { Some(self.funding_batch_states.lock().unwrap()) } else { None }; + let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| match states + .entry(txid) + { + btree_map::Entry::Occupied(_) => { + result = result.clone().and(Err(APIError::APIMisuseError { + err: "Batch funding transaction with the same txid already exists".to_owned(), + })); + None + }, + btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())), }); let is_manual_broadcast = funding.is_manual_broadcast(); for &(temporary_channel_id, counterparty_node_id) in temporary_channels { - result = result.and_then(|_| self.funding_transaction_generated_intern( - *temporary_channel_id, - *counterparty_node_id, - funding.transaction_or_dummy(), - is_batch_funding, - |chan| { - let mut output_index = None; - let expected_spk = chan.funding.get_funding_redeemscript().to_p2wsh(); - let outpoint = match &funding { - FundingType::Checked(tx) | FundingType::CheckedManualBroadcast(tx) => { - for (idx, outp) in tx.output.iter().enumerate() { - if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.funding.get_value_satoshis() { - if output_index.is_some() { - return Err("Multiple outputs matched the expected script and value"); + result = result.and_then(|_| { + self.funding_transaction_generated_intern( + *temporary_channel_id, + *counterparty_node_id, + funding.transaction_or_dummy(), + is_batch_funding, + |chan| { + let mut output_index = None; + let expected_spk = chan.funding.get_funding_redeemscript().to_p2wsh(); + let outpoint = match &funding { + FundingType::Checked(tx) | FundingType::CheckedManualBroadcast(tx) => { + for (idx, outp) in tx.output.iter().enumerate() { + if outp.script_pubkey == expected_spk + && outp.value.to_sat() == chan.funding.get_value_satoshis() + { + if output_index.is_some() { + return Err("Multiple outputs matched the expected script and value"); + } + output_index = Some(idx as u16); } - output_index = Some(idx as u16); } - } - if output_index.is_none() { - return Err("No output matched the script_pubkey and value in the FundingGenerationReady event"); - } - OutPoint { txid, index: output_index.unwrap() } - }, - FundingType::Unchecked(outpoint) => outpoint.clone(), - }; - if let Some(funding_batch_state) = 
funding_batch_state.as_mut() { - // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably - // need to fix this somehow to not rely on using the outpoint for the channel ID if we - // want to support V2 batching here as well. - funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false)); - } - Ok(outpoint) - }, - is_manual_broadcast) - ); + if output_index.is_none() { + return Err("No output matched the script_pubkey and value in the FundingGenerationReady event"); + } + OutPoint { txid, index: output_index.unwrap() } + }, + FundingType::Unchecked(outpoint) => outpoint.clone(), + }; + if let Some(funding_batch_state) = funding_batch_state.as_mut() { + // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably + // need to fix this somehow to not rely on using the outpoint for the channel ID if we + // want to support V2 batching here as well. + funding_batch_state.push(( + ChannelId::v1_from_funding_outpoint(outpoint), + *counterparty_node_id, + false, + )); + } + Ok(outpoint) + }, + is_manual_broadcast, + ) + }); } if let Err(ref e) = result { // Remaining channels need to be removed on any error. let e = format!("Error in transaction funding: {:?}", e); let mut channels_to_remove = Vec::new(); - channels_to_remove.extend(funding_batch_states.as_mut() - .and_then(|states| states.remove(&txid)) - .into_iter().flatten() - .map(|(chan_id, node_id, _state)| (chan_id, node_id)) - ); - channels_to_remove.extend(temporary_channels.iter() - .map(|(&chan_id, &node_id)| (chan_id, node_id)) + channels_to_remove.extend( + funding_batch_states + .as_mut() + .and_then(|states| states.remove(&txid)) + .into_iter() + .flatten() + .map(|(chan_id, node_id, _state)| (chan_id, node_id)), ); + channels_to_remove + .extend(temporary_channels.iter().map(|(&chan_id, &node_id)| (chan_id, node_id))); let mut shutdown_results: Vec<(Result, _)> = Vec::new(); { let per_peer_state = self.per_peer_state.read().unwrap(); for (channel_id, counterparty_node_id) in channels_to_remove { - per_peer_state.get(&counterparty_node_id) + per_peer_state + .get(&counterparty_node_id) .map(|peer_state_mutex| peer_state_mutex.lock().unwrap()) - .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state))) + .and_then(|mut peer_state| { + peer_state + .channel_by_id + .remove(&channel_id) + .map(|chan| (chan, peer_state)) + }) .map(|(mut chan, mut peer_state_lock)| { let reason = ClosureReason::ProcessingError { err: e.clone() }; let err = ChannelError::Close((e.clone(), reason)); let peer_state = &mut *peer_state_lock; - let (_, e) = - self.convert_channel_err( + let (_, e) = self.convert_channel_err( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -10486,7 +10562,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Ok(()) } - fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { + fn internal_funding_created( + &self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated, + ) -> Result<(), MsgHandleErrInternal> { let best_block = *self.best_block.read().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); @@ -10536,16 +10614,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let funded_channel_id = chan.context.channel_id(); - macro_rules! 
fail_chan { ($err: expr) => { { - // Note that at this point we've filled in the funding outpoint on our channel, but its - // actually in conflict with another channel. Thus, if we call `convert_channel_err` - // immediately, we'll remove the existing channel from `outpoint_to_peer`. - // Thus, we must first unset the funding outpoint on the channel. - let err = ChannelError::close($err.to_owned()); - chan.unset_funding_info(); - let mut chan = Channel::from(chan); - return Err(self.convert_unfunded_channel_err_internal(err, &mut chan).1); - } } } + macro_rules! fail_chan { + ($err: expr) => {{ + // Note that at this point we've filled in the funding outpoint on our channel, but its + // actually in conflict with another channel. Thus, if we call `convert_channel_err` + // immediately, we'll remove the existing channel from `outpoint_to_peer`. + // Thus, we must first unset the funding outpoint on the channel. + let err = ChannelError::close($err.to_owned()); + chan.unset_funding_info(); + let mut chan = Channel::from(chan); + return Err(self.convert_unfunded_channel_err_internal(err, &mut chan).1); + }}; + } match peer_state.channel_by_id.entry(funded_channel_id) { hash_map::Entry::Occupied(_) => { @@ -10566,8 +10646,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } if let Some(funded_chan) = e.insert(Channel::from(chan)).as_funded_mut() { - handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state, - per_peer_state, funded_chan); + handle_initial_monitor!( + self, + persist_state, + peer_state_lock, + peer_state, + per_peer_state, + funded_chan + ); } else { unreachable!("This must be a funded channel as we just inserted it."); } @@ -10577,7 +10663,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated"); fail_chan!("Duplicate channel ID"); } - } + }, } } @@ -12442,48 +12528,46 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); // Returns whether we should remove this channel as it's just been closed. 
- let unblock_chan = |chan: &mut Channel, pending_msg_events: &mut Vec| -> Result, ChannelError> { + let unblock_chan = |chan: &mut Channel, + pending_msg_events: &mut Vec| + -> Result, ChannelError> { let channel_id = chan.context().channel_id(); let outbound_scid_alias = chan.context().outbound_scid_alias(); let logger = WithChannelContext::from(&self.logger, &chan.context(), None); let node_id = chan.context().get_counterparty_node_id(); - let cbp = |htlc_id| self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &node_id); + let cbp = |htlc_id| { + self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &node_id) + }; let msgs = chan.signer_maybe_unblocked(self.chain_hash, &&logger, cbp)?; if let Some(msgs) = msgs { if chan.context().is_connected() { if let Some(msg) = msgs.open_channel { - pending_msg_events.push(MessageSendEvent::SendOpenChannel { - node_id, - msg, - }); + pending_msg_events.push(MessageSendEvent::SendOpenChannel { node_id, msg }); } if let Some(msg) = msgs.funding_created { - pending_msg_events.push(MessageSendEvent::SendFundingCreated { - node_id, - msg, - }); + pending_msg_events + .push(MessageSendEvent::SendFundingCreated { node_id, msg }); } if let Some(msg) = msgs.accept_channel { - pending_msg_events.push(MessageSendEvent::SendAcceptChannel { - node_id, - msg, - }); + pending_msg_events + .push(MessageSendEvent::SendAcceptChannel { node_id, msg }); } - let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs { - node_id, - channel_id, - updates, - }); - let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK { - node_id, - msg, + let cu_msg = msgs.commitment_update.map(|updates| { + MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates } }); + let raa_msg = msgs + .revoke_and_ack + .map(|msg| MessageSendEvent::SendRevokeAndACK { node_id, msg }); match (cu_msg, raa_msg) { - (Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => { + (Some(cu), Some(raa)) + if msgs.order == RAACommitmentOrder::CommitmentFirst => + { pending_msg_events.push(cu); pending_msg_events.push(raa); }, - (Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => { + (Some(cu), Some(raa)) + if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => + { pending_msg_events.push(raa); pending_msg_events.push(cu); }, @@ -12492,16 +12576,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ (_, _) => {}, } if let Some(msg) = msgs.funding_signed { - pending_msg_events.push(MessageSendEvent::SendFundingSigned { - node_id, - msg, - }); + pending_msg_events + .push(MessageSendEvent::SendFundingSigned { node_id, msg }); } if let Some(msg) = msgs.closing_signed { - pending_msg_events.push(MessageSendEvent::SendClosingSigned { - node_id, - msg, - }); + pending_msg_events + .push(MessageSendEvent::SendClosingSigned { node_id, msg }); } } if let Some(funded_chan) = chan.as_funded() { @@ -12529,7 +12609,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state_iter = per_peer_state.iter().filter(|(cp_id, _)| { if let Some((counterparty_node_id, _)) = channel_opt { **cp_id == counterparty_node_id - } else { true } + } else { + true + } }); for (cp_id, peer_state_mutex) in per_peer_state_iter { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -12541,7 +12623,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ Ok(shutdown_result) => shutdown_result, Err(err) => { let (_, err) = self.convert_channel_err( - &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, chan, @@ -12556,8 +12638,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let logger = WithChannelContext::from(&self.logger, context, None); log_trace!(logger, "Removing channel now that the signer is unblocked"); let (remove, err) = if let Some(funded) = chan.as_funded_mut() { - let err = - self.convert_channel_err_coop(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, shutdown, funded); + let err = self.convert_channel_err_coop( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + shutdown, + funded, + ); (true, err) } else { debug_assert!(false); @@ -12598,34 +12684,59 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } match chan.as_funded_mut() { Some(funded_chan) => { - let logger = WithChannelContext::from(&self.logger, &funded_chan.context, None); - match funded_chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) { + let logger = + WithChannelContext::from(&self.logger, &funded_chan.context, None); + match funded_chan + .maybe_propose_closing_signed(&self.fee_estimator, &&logger) + { Ok((msg_opt, tx_shutdown_result_opt)) => { if let Some(msg) = msg_opt { has_update = true; - pending_msg_events.push(MessageSendEvent::SendClosingSigned { - node_id: funded_chan.context.get_counterparty_node_id(), msg, - }); + pending_msg_events.push( + MessageSendEvent::SendClosingSigned { + node_id: funded_chan + .context + .get_counterparty_node_id(), + msg, + }, + ); } - debug_assert_eq!(tx_shutdown_result_opt.is_some(), funded_chan.is_shutdown()); + debug_assert_eq!( + tx_shutdown_result_opt.is_some(), + funded_chan.is_shutdown() + ); if let Some((tx, shutdown_res)) = tx_shutdown_result_opt { // We're done with this channel. We got a closing_signed and sent back // a closing_signed with a closing transaction to broadcast. - let err = self.convert_channel_err_coop(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, shutdown_res, funded_chan); + let err = self.convert_channel_err_coop( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + shutdown_res, + funded_chan, + ); handle_errors.push((*cp_id, Err(err))); log_info!(logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transactions(&[&tx]); false - } else { true } + } else { + true + } }, Err(e) => { has_update = true; let (close_channel, res) = self.convert_channel_err_funded( - &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, e, funded_chan); - handle_errors.push((funded_chan.context.get_counterparty_node_id(), Err(res))); + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + e, + funded_chan, + ); + handle_errors.push(( + funded_chan.context.get_counterparty_node_id(), + Err(res), + )); !close_channel - } + }, } }, None => true, // Retain unfunded channels if present. 
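The `retain` idiom running through `maybe_generate_initial_closing_signed` (and `do_chain_event` below) in miniature: errors are recorded into `handle_errors` while walking the map under the lock, closed `Channel` objects are dropped from the map by returning `false`, and the collected errors are only acted on once the locks are released. Simplified, illustrative types:

use std::collections::HashMap;

// Returns the errors to handle after the channel map (and its lock) is released.
fn prune_closed(channels: &mut HashMap<u32, bool /* close_now */>) -> Vec<u32> {
    let mut handle_errors = Vec::new();
    channels.retain(|id, close_now| {
        if *close_now {
            handle_errors.push(*id);
            false // drop the Channel object from memory
        } else {
            true
        }
    });
    handle_errors
}

fn main() {
    let mut channels = HashMap::from([(1, false), (2, true)]);
    let errs = prune_closed(&mut channels);
    assert_eq!(errs, vec![2]);
    assert!(channels.contains_key(&1) && !channels.contains_key(&2));
}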
@@ -14573,8 +14684,20 @@ where /// Calls a function which handles an on-chain event (blocks dis/connected, transactions /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by /// the function. - fn do_chain_event) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason>> - (&self, height_opt: Option, f: FN) { + fn do_chain_event< + FN: Fn( + &mut FundedChannel, + ) -> Result< + ( + Option, + Vec<(HTLCSource, PaymentHash)>, + Option, + ), + ClosureReason, + >, + >( + &self, height_opt: Option, f: FN, + ) { // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called // during initialization prior to the chain_monitor being fully configured in some cases. // See the docs for `ChannelManagerReadArgs` for more. @@ -14754,22 +14877,34 @@ where } if let Some(height) = height_opt { - self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| { - payment.htlcs.retain(|htlc| { - // If height is approaching the number of blocks we think it takes us to get - // our commitment transaction confirmed before the HTLC expires, plus the - // number of blocks we generally consider it to take to do a commitment update, - // just give up on it and fail the HTLC. - if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { - let reason = LocalHTLCFailureReason::PaymentClaimBuffer; - timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), - HTLCFailReason::reason(reason, invalid_payment_err_data(htlc.value, height)), - HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() })); - false - } else { true } - }); - !payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. - }); + self.claimable_payments.lock().unwrap().claimable_payments.retain( + |payment_hash, payment| { + payment.htlcs.retain(|htlc| { + // If height is approaching the number of blocks we think it takes us to get + // our commitment transaction confirmed before the HTLC expires, plus the + // number of blocks we generally consider it to take to do a commitment update, + // just give up on it and fail the HTLC. + if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { + let reason = LocalHTLCFailureReason::PaymentClaimBuffer; + timed_out_htlcs.push(( + HTLCSource::PreviousHopData(htlc.prev_hop.clone()), + payment_hash.clone(), + HTLCFailReason::reason( + reason, + invalid_payment_err_data(htlc.value, height), + ), + HTLCHandlingFailureType::Receive { + payment_hash: payment_hash.clone(), + }, + )); + false + } else { + true + } + }); + !payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. + }, + ); let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); intercepted_htlcs.retain(|_, htlc| { @@ -14779,15 +14914,29 @@ where PendingHTLCRouting::Forward { short_channel_id, .. 
} => short_channel_id, _ => unreachable!(), }; - timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash, - HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ForwardExpiryBuffer), - HTLCHandlingFailureType::InvalidForward { requested_forward_scid })); + timed_out_htlcs.push(( + prev_hop_data, + htlc.forward_info.payment_hash, + HTLCFailReason::from_failure_code( + LocalHTLCFailureReason::ForwardExpiryBuffer, + ), + HTLCHandlingFailureType::InvalidForward { requested_forward_scid }, + )); let logger = WithContext::from( - &self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash) + &self.logger, + None, + Some(htlc.prev_channel_id), + Some(htlc.forward_info.payment_hash), + ); + log_trace!( + logger, + "Timing out intercepted HTLC with requested forward scid {}", + requested_forward_scid ); - log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid); false - } else { true } + } else { + true + } }); } From 6ff720b9f9b9fed39a951237a25675295ef50258 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 11 Dec 2025 15:52:25 +0000 Subject: [PATCH 036/242] Allow clippy's new assertions-on-constants lint This is really dumb, `assert!(cfg!(fuzzing))` is a perfectly reasonable thing to write! --- ci/check-lint.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/ci/check-lint.sh b/ci/check-lint.sh index c1f1b08a1e1..c0724267bf8 100755 --- a/ci/check-lint.sh +++ b/ci/check-lint.sh @@ -13,6 +13,7 @@ CLIPPY() { -A clippy::unwrap-or-default \ -A clippy::upper_case_acronyms \ -A clippy::swap-with-temporary \ + -A clippy::assertions-on-constants \ `# Things where we do odd stuff on purpose ` \ -A clippy::unusual_byte_groupings \ -A clippy::unit_arg \ From 3247fad63331df38dd4c514eff29cb5ce34affb6 Mon Sep 17 00:00:00 2001 From: elnosh Date: Thu, 11 Dec 2025 14:06:03 -0500 Subject: [PATCH 037/242] Convert send_channel_ready macro to method --- lightning/src/ln/channelmanager.rs | 51 ++++++++++++++++-------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 87f42a90fa7..bfaf1e68d6a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3201,24 +3201,6 @@ pub struct PhantomRouteHints { pub real_node_pubkey: PublicKey, } -macro_rules! send_channel_ready { - ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ - if $channel.context.is_connected() { - $pending_msg_events.push(MessageSendEvent::SendChannelReady { - node_id: $channel.context.get_counterparty_node_id(), - msg: $channel_ready_msg, - }); - } - // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so - // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. - let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id())); - assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()), - "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); - insert_short_channel_id!(short_to_chan_info, $channel); - }} -} - macro_rules! 
insert_short_channel_id { ($short_to_chan_info: ident, $channel: expr) => {{ if let Some(real_scid) = $channel.funding.get_short_channel_id() { @@ -4091,6 +4073,29 @@ where } } + fn send_channel_ready( + &self, pending_msg_events: &mut Vec, channel: &FundedChannel, + channel_ready_msg: msgs::ChannelReady, + ) { + let counterparty_node_id = channel.context.get_counterparty_node_id(); + if channel.context.is_connected() { + pending_msg_events.push(MessageSendEvent::SendChannelReady { + node_id: counterparty_node_id, + msg: channel_ready_msg, + }); + } + // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so + // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. + let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); + let outbound_alias_insert = short_to_chan_info.insert( + channel.context.outbound_scid_alias(), + (counterparty_node_id, channel.context.channel_id()), + ); + assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == (counterparty_node_id, channel.context.channel_id()), + "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); + insert_short_channel_id!(short_to_chan_info, channel); + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. pub fn get_current_config(&self) -> UserConfig { @@ -9832,7 +9837,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if channel.context.is_connected() { if let ChannelReadyOrder::ChannelReadyFirst = channel_ready_order { if let Some(msg) = &channel_ready { - send_channel_ready!(self, pending_msg_events, channel, msg.clone()); + self.send_channel_ready(pending_msg_events, channel, msg.clone()); } if let Some(msg) = &announcement_sigs { @@ -9887,7 +9892,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let ChannelReadyOrder::SignaturesFirst = channel_ready_order { if let Some(msg) = channel_ready { - send_channel_ready!(self, pending_msg_events, channel, msg); + self.send_channel_ready(pending_msg_events, channel, msg); } if let Some(msg) = announcement_sigs { @@ -9898,7 +9903,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } } else if let Some(msg) = channel_ready { - send_channel_ready!(self, pending_msg_events, channel, msg); + self.send_channel_ready(pending_msg_events, channel, msg); } if let Some(tx) = funding_broadcastable { @@ -12598,7 +12603,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } if let Some(funded_chan) = chan.as_funded() { if let Some(msg) = msgs.channel_ready { - send_channel_ready!(self, pending_msg_events, funded_chan, msg); + self.send_channel_ready(pending_msg_events, funded_chan, msg); } if let Some(broadcast_tx) = msgs.signed_closing_tx { log_info!(logger, "Broadcasting closing tx {}", log_tx!(broadcast_tx)); @@ -14740,7 +14745,7 @@ where let logger = WithChannelContext::from(&self.logger, &funded_channel.context, None); match funding_confirmed_opt { Some(FundingConfirmedMessage::Establishment(channel_ready)) => { - send_channel_ready!(self, pending_msg_events, funded_channel, channel_ready); + self.send_channel_ready(pending_msg_events, funded_channel, channel_ready); if funded_channel.context.is_usable() && peer_state.is_connected { log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty"); if let Ok((msg, _, _)) = self.get_channel_update_for_unicast(funded_channel) { From 7fb84e66d8c18898c9081e198abc0f74052b072a Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 12 Dec 2025 08:32:14 +0100 Subject: [PATCH 038/242] Group channel closure methods together This commit is a pure move. --- lightning/src/ln/channelmanager.rs | 460 ++++++++++++++--------------- 1 file changed, 230 insertions(+), 230 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bfaf1e68d6a..af2408726ce 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3843,236 +3843,6 @@ where } } - /// Handles an error by closing the channel if required and generating peer messages. - fn handle_error( - &self, internal: Result, counterparty_node_id: PublicKey, - ) -> Result { - // In testing, ensure there are no deadlocks where the lock is already held upon - // entering the macro. 
- debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); - debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); - - internal.map_err(|err_internal| { - let mut msg_event = None; - - if let Some((shutdown_res, update_option)) = err_internal.shutdown_finish { - let counterparty_node_id = shutdown_res.counterparty_node_id; - let channel_id = shutdown_res.channel_id; - let logger = WithContext::from( - &self.logger, - Some(counterparty_node_id), - Some(channel_id), - None, - ); - log_error!(logger, "Closing channel: {}", err_internal.err.err); - - self.finish_close_channel(shutdown_res); - if let Some((update, node_id_1, node_id_2)) = update_option { - let mut pending_broadcast_messages = - self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update, - node_id_1, - node_id_2, - }); - } - } else { - log_error!(self.logger, "Got non-closing error: {}", err_internal.err.err); - } - - if let msgs::ErrorAction::IgnoreError = err_internal.err.action { - if let Some(tx_abort) = err_internal.tx_abort { - msg_event = Some(MessageSendEvent::SendTxAbort { - node_id: counterparty_node_id, - msg: tx_abort, - }); - } - } else { - msg_event = Some(MessageSendEvent::HandleError { - node_id: counterparty_node_id, - action: err_internal.err.action.clone(), - }); - } - - if let Some(msg_event) = msg_event { - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - let mut peer_state = peer_state_mutex.lock().unwrap(); - if peer_state.is_connected { - peer_state.pending_msg_events.push(msg_event); - } - } - } - - // Return error in case higher-API need one - err_internal.err - }) - } - - fn convert_funded_channel_err_internal( - &self, closed_channel_monitor_update_ids: &mut BTreeMap, - in_flight_monitor_updates: &mut BTreeMap)>, - coop_close_shutdown_res: Option, err: ChannelError, - chan: &mut FundedChannel, - ) -> (bool, MsgHandleErrInternal) { - let chan_id = chan.context.channel_id(); - convert_channel_err_internal(err, chan_id, |reason, msg| { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); - - let mut shutdown_res = if let Some(res) = coop_close_shutdown_res { - res - } else { - chan.force_shutdown(reason) - }; - let chan_update = self.get_channel_update_for_broadcast(chan).ok(); - - log_error!(logger, "Closed channel due to close-required error: {}", msg); - - if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { - handle_new_monitor_update_locked_actions_handled_by_caller!( - self, - funding_txo, - update, - in_flight_monitor_updates, - chan.context - ); - } - // If there's a possibility that we need to generate further monitor updates for this - // channel, we need to store the last update_id of it. However, we don't want to insert - // into the map (which prevents the `PeerState` from being cleaned up) for channels that - // never even got confirmations (which would open us up to DoS attacks). 
- let update_id = chan.context.get_latest_monitor_update_id(); - let funding_confirmed = chan.funding.get_funding_tx_confirmation_height().is_some(); - let chan_zero_conf = chan.context.minimum_depth(&chan.funding) == Some(0); - if funding_confirmed || chan_zero_conf || update_id > 1 { - closed_channel_monitor_update_ids.insert(chan_id, update_id); - } - let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); - if let Some(short_id) = chan.funding.get_short_channel_id() { - short_to_chan_info.remove(&short_id); - } else { - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. - let outbound_alias = chan.context.outbound_scid_alias(); - let alias_removed = - self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); - debug_assert!(alias_removed); - } - short_to_chan_info.remove(&chan.context.outbound_scid_alias()); - for scid in chan.context.historical_scids() { - short_to_chan_info.remove(scid); - } - - (shutdown_res, chan_update) - }) - } - - fn convert_unfunded_channel_err_internal( - &self, err: ChannelError, chan: &mut Channel, - ) -> (bool, MsgHandleErrInternal) - where - SP::Target: SignerProvider, - { - let chan_id = chan.context().channel_id(); - convert_channel_err_internal(err, chan_id, |reason, msg| { - let logger = WithChannelContext::from(&self.logger, chan.context(), None); - - let shutdown_res = chan.force_shutdown(reason); - log_error!(logger, "Closed channel due to close-required error: {}", msg); - self.short_to_chan_info.write().unwrap().remove(&chan.context().outbound_scid_alias()); - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. - let outbound_alias = chan.context().outbound_scid_alias(); - let alias_removed = self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); - debug_assert!(alias_removed); - (shutdown_res, None) - }) - } - - /// When a cooperatively closed channel is removed, two things need to happen: - /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, - /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except - /// [`ChannelManager::total_consistency_lock`]), which then calls - /// [`ChannelManager::finish_close_channel`]. - /// - /// Returns a mapped error. 
- fn convert_channel_err_coop( - &self, closed_update_ids: &mut BTreeMap, - in_flight_updates: &mut BTreeMap)>, - shutdown_result: ShutdownResult, funded_channel: &mut FundedChannel, - ) -> MsgHandleErrInternal { - let reason = - ChannelError::Close(("Coop Closed".to_owned(), shutdown_result.closure_reason.clone())); - let (close, mut err) = self.convert_funded_channel_err_internal( - closed_update_ids, - in_flight_updates, - Some(shutdown_result), - reason, - funded_channel, - ); - err.dont_send_error_message(); - debug_assert!(close); - err - } - - /// When a funded channel is removed, two things need to happen: - /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, - /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except - /// [`ChannelManager::total_consistency_lock`]), which then calls - /// [`ChannelManager::finish_close_channel`]. - /// - /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped - /// error)`. - fn convert_channel_err_funded( - &self, closed_update_ids: &mut BTreeMap, - in_flight_updates: &mut BTreeMap)>, - err: ChannelError, funded_channel: &mut FundedChannel, - ) -> (bool, MsgHandleErrInternal) { - self.convert_funded_channel_err_internal( - closed_update_ids, - in_flight_updates, - None, - err, - funded_channel, - ) - } - - /// When a channel that can be funded or unfunded is removed, two things need to happen: - /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, - /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except - /// [`ChannelManager::total_consistency_lock`]), which then calls - /// [`ChannelManager::finish_close_channel`]. - /// - /// Note that this step can be skipped if the channel was never opened (through the creation of a - /// [`ChannelMonitor`]/channel funding transaction) to begin with. - /// - /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped - /// error)`. - fn convert_channel_err( - &self, closed_update_ids: &mut BTreeMap, - in_flight_updates: &mut BTreeMap)>, - err: ChannelError, channel: &mut Channel, - ) -> (bool, MsgHandleErrInternal) { - match channel.as_funded_mut() { - Some(funded_channel) => self.convert_funded_channel_err_internal( - closed_update_ids, - in_flight_updates, - None, - err, - funded_channel, - ), - None => self.convert_unfunded_channel_err_internal(err, channel), - } - } - fn send_channel_ready( &self, pending_msg_events: &mut Vec, channel: &FundedChannel, channel_ready_msg: msgs::ChannelReady, @@ -4847,6 +4617,236 @@ where } } + /// Handles an error by closing the channel if required and generating peer messages. + fn handle_error( + &self, internal: Result, counterparty_node_id: PublicKey, + ) -> Result { + // In testing, ensure there are no deadlocks where the lock is already held upon + // entering the macro. 
+ debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); + debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); + + internal.map_err(|err_internal| { + let mut msg_event = None; + + if let Some((shutdown_res, update_option)) = err_internal.shutdown_finish { + let counterparty_node_id = shutdown_res.counterparty_node_id; + let channel_id = shutdown_res.channel_id; + let logger = WithContext::from( + &self.logger, + Some(counterparty_node_id), + Some(channel_id), + None, + ); + log_error!(logger, "Closing channel: {}", err_internal.err.err); + + self.finish_close_channel(shutdown_res); + if let Some((update, node_id_1, node_id_2)) = update_option { + let mut pending_broadcast_messages = + self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { + msg: update, + node_id_1, + node_id_2, + }); + } + } else { + log_error!(self.logger, "Got non-closing error: {}", err_internal.err.err); + } + + if let msgs::ErrorAction::IgnoreError = err_internal.err.action { + if let Some(tx_abort) = err_internal.tx_abort { + msg_event = Some(MessageSendEvent::SendTxAbort { + node_id: counterparty_node_id, + msg: tx_abort, + }); + } + } else { + msg_event = Some(MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: err_internal.err.action.clone(), + }); + } + + if let Some(msg_event) = msg_event { + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + if peer_state.is_connected { + peer_state.pending_msg_events.push(msg_event); + } + } + } + + // Return error in case higher-API need one + err_internal.err + }) + } + + fn convert_funded_channel_err_internal( + &self, closed_channel_monitor_update_ids: &mut BTreeMap, + in_flight_monitor_updates: &mut BTreeMap)>, + coop_close_shutdown_res: Option, err: ChannelError, + chan: &mut FundedChannel, + ) -> (bool, MsgHandleErrInternal) { + let chan_id = chan.context.channel_id(); + convert_channel_err_internal(err, chan_id, |reason, msg| { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); + + let mut shutdown_res = if let Some(res) = coop_close_shutdown_res { + res + } else { + chan.force_shutdown(reason) + }; + let chan_update = self.get_channel_update_for_broadcast(chan).ok(); + + log_error!(logger, "Closed channel due to close-required error: {}", msg); + + if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { + handle_new_monitor_update_locked_actions_handled_by_caller!( + self, + funding_txo, + update, + in_flight_monitor_updates, + chan.context + ); + } + // If there's a possibility that we need to generate further monitor updates for this + // channel, we need to store the last update_id of it. However, we don't want to insert + // into the map (which prevents the `PeerState` from being cleaned up) for channels that + // never even got confirmations (which would open us up to DoS attacks). 
+ let update_id = chan.context.get_latest_monitor_update_id(); + let funding_confirmed = chan.funding.get_funding_tx_confirmation_height().is_some(); + let chan_zero_conf = chan.context.minimum_depth(&chan.funding) == Some(0); + if funding_confirmed || chan_zero_conf || update_id > 1 { + closed_channel_monitor_update_ids.insert(chan_id, update_id); + } + let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); + if let Some(short_id) = chan.funding.get_short_channel_id() { + short_to_chan_info.remove(&short_id); + } else { + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let outbound_alias = chan.context.outbound_scid_alias(); + let alias_removed = + self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); + debug_assert!(alias_removed); + } + short_to_chan_info.remove(&chan.context.outbound_scid_alias()); + for scid in chan.context.historical_scids() { + short_to_chan_info.remove(scid); + } + + (shutdown_res, chan_update) + }) + } + + fn convert_unfunded_channel_err_internal( + &self, err: ChannelError, chan: &mut Channel, + ) -> (bool, MsgHandleErrInternal) + where + SP::Target: SignerProvider, + { + let chan_id = chan.context().channel_id(); + convert_channel_err_internal(err, chan_id, |reason, msg| { + let logger = WithChannelContext::from(&self.logger, chan.context(), None); + + let shutdown_res = chan.force_shutdown(reason); + log_error!(logger, "Closed channel due to close-required error: {}", msg); + self.short_to_chan_info.write().unwrap().remove(&chan.context().outbound_scid_alias()); + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let outbound_alias = chan.context().outbound_scid_alias(); + let alias_removed = self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); + debug_assert!(alias_removed); + (shutdown_res, None) + }) + } + + /// When a cooperatively closed channel is removed, two things need to happen: + /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + /// + /// Returns a mapped error. 
+ fn convert_channel_err_coop( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + shutdown_result: ShutdownResult, funded_channel: &mut FundedChannel, + ) -> MsgHandleErrInternal { + let reason = + ChannelError::Close(("Coop Closed".to_owned(), shutdown_result.closure_reason.clone())); + let (close, mut err) = self.convert_funded_channel_err_internal( + closed_update_ids, + in_flight_updates, + Some(shutdown_result), + reason, + funded_channel, + ); + err.dont_send_error_message(); + debug_assert!(close); + err + } + + /// When a funded channel is removed, two things need to happen: + /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + fn convert_channel_err_funded( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + err: ChannelError, funded_channel: &mut FundedChannel, + ) -> (bool, MsgHandleErrInternal) { + self.convert_funded_channel_err_internal( + closed_update_ids, + in_flight_updates, + None, + err, + funded_channel, + ) + } + + /// When a channel that can be funded or unfunded is removed, two things need to happen: + /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + /// + /// Note that this step can be skipped if the channel was never opened (through the creation of a + /// [`ChannelMonitor`]/channel funding transaction) to begin with. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + fn convert_channel_err( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + err: ChannelError, channel: &mut Channel, + ) -> (bool, MsgHandleErrInternal) { + match channel.as_funded_mut() { + Some(funded_channel) => self.convert_funded_channel_err_internal( + closed_update_ids, + in_flight_updates, + None, + err, + funded_channel, + ), + None => self.convert_unfunded_channel_err_internal(err, channel), + } + } + /// Initiate a splice in order to add value to (splice-in) or remove value from (splice-out) /// the channel. This will spend the channel's funding transaction output, effectively replacing /// it with a new one. From c7d1ba70eb84107ebb89da15acbc404e80148bf9 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 12 Dec 2025 17:25:45 +0100 Subject: [PATCH 039/242] LDK Node Integration CI: Also patch LDK dependencies if `git` By now, we switched our LDK Node `main` to a specific commit on LDK's `main`. Since we don't have the `crates.io` dependencies in the `Cargo.toml`, the patch command won't actually do anything but silently fail, i.e., *not* check the PR changes against the LDK Node main branch. Here we fix this by also patching the git repository path. 
--- .github/workflows/ldk-node-integration.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/workflows/ldk-node-integration.yml b/.github/workflows/ldk-node-integration.yml index 136a60bd98a..446abd40a07 100644 --- a/.github/workflows/ldk-node-integration.yml +++ b/.github/workflows/ldk-node-integration.yml @@ -39,6 +39,19 @@ jobs: lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } lightning-macros = { path = "../rust-lightning/lightning-macros" } + + [patch."https://github.com/lightningdevkit/rust-lightning"] + lightning = { path = "../rust-lightning/lightning" } + lightning-types = { path = "../rust-lightning/lightning-types" } + lightning-invoice = { path = "../rust-lightning/lightning-invoice" } + lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } + lightning-persister = { path = "../rust-lightning/lightning-persister" } + lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } + lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } + lightning-block-sync = { path = "../rust-lightning/lightning-block-sync" } + lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } + lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } + lightning-macros = { path = "../rust-lightning/lightning-macros" } EOF cargo check cargo check --features uniffi From 275b00629036cbe8193dbb3a16ff8ec94f927d34 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 12 Dec 2025 08:41:49 +0100 Subject: [PATCH 040/242] Rename convert_err methods Make the names more descriptive and link shared documentation. --- lightning/src/ln/channelmanager.rs | 124 ++++++++++++++++------------- 1 file changed, 68 insertions(+), 56 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index af2408726ce..a1bf543ebf8 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3584,7 +3584,7 @@ macro_rules! break_channel_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = $self.convert_channel_err( + let (drop, res) = $self.locked_handle_force_close( &mut $peer_state.closed_channel_monitor_update_ids, &mut $peer_state.in_flight_monitor_updates, e, @@ -3604,7 +3604,7 @@ macro_rules! 
try_channel_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = $self.convert_channel_err( + let (drop, res) = $self.locked_handle_force_close( &mut $peer_state.closed_channel_monitor_update_ids, &mut $peer_state.in_flight_monitor_updates, e, @@ -4225,7 +4225,7 @@ where let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = self.convert_channel_err( + let (_, mut e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -4367,8 +4367,11 @@ where } /// When a channel is removed, two things need to happen: - /// (a) [`ChannelManager::convert_channel_err`] must be called in the same `per_peer_state` lock as the - /// channel-closing action, + /// (a) Handle the initial within-lock closure for the channel via one of the following methods: + /// [`ChannelManager::locked_handle_unfunded_close`], + /// [`ChannelManager::locked_handle_funded_coop_close`], + /// [`ChannelManager::locked_handle_funded_force_close`] or + /// [`ChannelManager::locked_handle_force_close`]. /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except /// [`ChannelManager::total_consistency_lock`]), which then calls this. fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) { @@ -4437,7 +4440,7 @@ where if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) { let reason = ClosureReason::FundingBatchClosure; let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = self.convert_channel_err( + let (_, e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -4534,7 +4537,7 @@ where if let Some(mut chan) = peer_state.channel_by_id.remove(channel_id) { log_error!(logger, "Force-closing channel"); let err = ChannelError::Close((message, reason)); - let (_, mut e) = self.convert_channel_err( + let (_, mut e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -4683,7 +4686,12 @@ where }) } - fn convert_funded_channel_err_internal( + /// Handle the initial within-lock closure for a funded channel that is either force-closed or cooperatively + /// closed (as indicated by `coop_close_shutdown_res`). + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + fn locked_handle_funded_close_internal( &self, closed_channel_monitor_update_ids: &mut BTreeMap, in_flight_monitor_updates: &mut BTreeMap)>, coop_close_shutdown_res: Option, err: ChannelError, @@ -4745,7 +4753,13 @@ where }) } - fn convert_unfunded_channel_err_internal( + /// Handle the initial within-lock closure for an unfunded channel. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + /// + /// The same closure semantics as described in [`ChannelManager::locked_handle_force_close`] apply. 
+ fn locked_handle_unfunded_close( &self, err: ChannelError, chan: &mut Channel, ) -> (bool, MsgHandleErrInternal) where @@ -4771,21 +4785,19 @@ where }) } - /// When a cooperatively closed channel is removed, two things need to happen: - /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, - /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except - /// [`ChannelManager::total_consistency_lock`]), which then calls - /// [`ChannelManager::finish_close_channel`]. + /// Handle the initial within-lock closure for a channel that is cooperatively closed. /// /// Returns a mapped error. - fn convert_channel_err_coop( + /// + /// The same closure semantics as described in [`ChannelManager::locked_handle_force_close`] apply. + fn locked_handle_funded_coop_close( &self, closed_update_ids: &mut BTreeMap, in_flight_updates: &mut BTreeMap)>, shutdown_result: ShutdownResult, funded_channel: &mut FundedChannel, ) -> MsgHandleErrInternal { let reason = ChannelError::Close(("Coop Closed".to_owned(), shutdown_result.closure_reason.clone())); - let (close, mut err) = self.convert_funded_channel_err_internal( + let (close, mut err) = self.locked_handle_funded_close_internal( closed_update_ids, in_flight_updates, Some(shutdown_result), @@ -4797,20 +4809,18 @@ where err } - /// When a funded channel is removed, two things need to happen: - /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, - /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except - /// [`ChannelManager::total_consistency_lock`]), which then calls - /// [`ChannelManager::finish_close_channel`]. + /// Handle the initial within-lock closure for a funded channel that is force-closed. /// /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped /// error)`. - fn convert_channel_err_funded( + /// + /// The same closure semantics as described in [`ChannelManager::locked_handle_force_close`] apply. + fn locked_handle_funded_force_close( &self, closed_update_ids: &mut BTreeMap, in_flight_updates: &mut BTreeMap)>, err: ChannelError, funded_channel: &mut FundedChannel, ) -> (bool, MsgHandleErrInternal) { - self.convert_funded_channel_err_internal( + self.locked_handle_funded_close_internal( closed_update_ids, in_flight_updates, None, @@ -4819,31 +4829,32 @@ where ) } - /// When a channel that can be funded or unfunded is removed, two things need to happen: - /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, - /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except - /// [`ChannelManager::total_consistency_lock`]), which then calls - /// [`ChannelManager::finish_close_channel`]. - /// - /// Note that this step can be skipped if the channel was never opened (through the creation of a - /// [`ChannelMonitor`]/channel funding transaction) to begin with. + /// Handle the initial within-lock closure for a channel that is force-closed. /// /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped /// error)`. 
- fn convert_channel_err( + /// + /// # Closure semantics + /// + /// Two things need to happen: + /// (a) This method must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. + fn locked_handle_force_close( &self, closed_update_ids: &mut BTreeMap, in_flight_updates: &mut BTreeMap)>, err: ChannelError, channel: &mut Channel, ) -> (bool, MsgHandleErrInternal) { match channel.as_funded_mut() { - Some(funded_channel) => self.convert_funded_channel_err_internal( + Some(funded_channel) => self.locked_handle_funded_close_internal( closed_update_ids, in_flight_updates, None, err, funded_channel, ), - None => self.convert_unfunded_channel_err_internal(err, channel), + None => self.locked_handle_unfunded_close(err, channel), } } @@ -6566,7 +6577,7 @@ where let reason = ClosureReason::ProcessingError { err: e.clone() }; let err = ChannelError::Close((e.clone(), reason)); let peer_state = &mut *peer_state_lock; - let (_, e) = self.convert_channel_err( + let (_, e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -8333,7 +8344,7 @@ where if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } if let Err(e) = funded_chan.timer_check_closing_negotiation_progress() { - let (needs_close, err) = self.convert_channel_err_funded(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, e, funded_chan); + let (needs_close, err) = self.locked_handle_funded_force_close(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, e, funded_chan); handle_errors.push((Err(err), counterparty_node_id)); if needs_close { return false; } } @@ -8410,7 +8421,7 @@ where let reason = ClosureReason::FundingTimedOut; let msg = "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(); let err = ChannelError::Close((msg, reason)); - let (_, e) = self.convert_channel_err( + let (_, e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -10614,7 +10625,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // concerning this channel as it is safe to do so. debug_assert!(matches!(err, ChannelError::Close(_))); let mut chan = Channel::from(inbound_chan); - return Err(self.convert_channel_err( + return Err(self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -10626,7 +10637,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Some(Err(mut chan)) => { let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id); let err = ChannelError::close(err_msg); - return Err(self.convert_channel_err( + return Err(self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -10647,7 +10658,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let err = ChannelError::close($err.to_owned()); chan.unset_funding_info(); let mut chan = Channel::from(chan); - return Err(self.convert_unfunded_channel_err_internal(err, &mut chan).1); + return Err(self.locked_handle_unfunded_close(err, &mut chan).1); }}; } @@ -11267,7 +11278,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = self.convert_channel_err( + let (_, mut e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -11332,7 +11343,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // also implies there are no pending HTLCs left on the channel, so we can // fully delete it from tracking (the channel monitor is still around to // watch for old state broadcasts)! - let err = self.convert_channel_err_coop(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, close_res, chan); + let err = self.locked_handle_funded_coop_close(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, close_res, chan); chan_entry.remove(); Some((tx, Err(err))) } else { @@ -12421,7 +12432,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, e) = self.convert_channel_err( + let (_, e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -12442,7 +12453,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let reason = ClosureReason::CommitmentTxConfirmed; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, e) = self.convert_channel_err( + let (_, e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -12639,7 +12650,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ _ => match unblock_chan(chan, &mut peer_state.pending_msg_events) { Ok(shutdown_result) => shutdown_result, Err(err) => { - let (_, err) = self.convert_channel_err( + let (_, err) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -12655,7 +12666,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let logger = WithChannelContext::from(&self.logger, context, None); log_trace!(logger, "Removing channel now that the signer is unblocked"); let (remove, err) = if let Some(funded) = chan.as_funded_mut() { - let err = self.convert_channel_err_coop( + let err = self.locked_handle_funded_coop_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, shutdown, @@ -12666,7 +12677,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ debug_assert!(false); let reason = shutdown.closure_reason.clone(); let err = ChannelError::Close((reason.to_string(), reason)); - self.convert_unfunded_channel_err_internal(err, chan) + self.locked_handle_unfunded_close(err, chan) }; debug_assert!(remove); shutdown_results.push((Err(err), *cp_id)); @@ -12725,7 +12736,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some((tx, shutdown_res)) = tx_shutdown_result_opt { // We're done with this channel. We got a closing_signed and sent back // a closing_signed with a closing transaction to broadcast. - let err = self.convert_channel_err_coop( + let err = self.locked_handle_funded_coop_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, shutdown_res, @@ -12742,12 +12753,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, Err(e) => { has_update = true; - let (close_channel, res) = self.convert_channel_err_funded( - &mut peer_state.closed_channel_monitor_update_ids, - &mut peer_state.in_flight_monitor_updates, - e, - funded_chan, - ); + let (close_channel, res) = self + .locked_handle_funded_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + e, + funded_chan, + ); handle_errors.push(( funded_chan.context.get_counterparty_node_id(), Err(res), @@ -14117,7 +14129,7 @@ where // Clean up for removal. let reason = ClosureReason::DisconnectedPeer; let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = self.convert_channel_err( + let (_, e) = self.locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, @@ -14874,7 +14886,7 @@ where // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. 
let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = self.convert_channel_err_funded( + let (_, e) = self.locked_handle_funded_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, funded_channel From 42a993d5e9259a2acc7449f745d464fea4a06ab8 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 15 Dec 2025 10:55:03 +0100 Subject: [PATCH 041/242] Use more specific locked_handle_unfunded_close when possible --- lightning/src/ln/channelmanager.rs | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a1bf543ebf8..f2419b21b67 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4225,13 +4225,7 @@ where let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = self.locked_handle_force_close( - &mut peer_state.closed_channel_monitor_update_ids, - &mut peer_state.in_flight_monitor_updates, - err, - &mut chan, - ); - + let (_, mut e) = self.locked_handle_unfunded_close(err, &mut chan); e.dont_send_error_message(); shutdown_result = Err(e); } @@ -8421,9 +8415,7 @@ where let reason = ClosureReason::FundingTimedOut; let msg = "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(); let err = ChannelError::Close((msg, reason)); - let (_, e) = self.locked_handle_force_close( - &mut peer_state.closed_channel_monitor_update_ids, - &mut peer_state.in_flight_monitor_updates, + let (_, e) = self.locked_handle_unfunded_close( err, chan, ); @@ -11278,12 +11270,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel;
             let err = ChannelError::Close((reason.to_string(), reason));
             let mut chan = chan_entry.remove();
-            let (_, mut e) = self.locked_handle_force_close(
-                &mut peer_state.closed_channel_monitor_update_ids,
-                &mut peer_state.in_flight_monitor_updates,
-                err,
-                &mut chan,
-            );
+            let (_, mut e) = self.locked_handle_unfunded_close(err, &mut chan);
             e.dont_send_error_message();
             return Err(e);
         },

From e50280cbc3fe70f3c592f8a769d0507cf770c16e Mon Sep 17 00:00:00 2001
From: Joost Jager
Date: Fri, 12 Dec 2025 09:04:46 +0100
Subject: [PATCH 042/242] Inline format args

---
 lightning/src/ln/channelmanager.rs | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index f2419b21b67..f2e8fa70e4f 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -4514,8 +4514,7 @@ where
 		let peer_state_mutex =
 			per_peer_state.get(peer_node_id).ok_or_else(|| APIError::ChannelUnavailable {
 				err: format!(
-					"Can't find a peer matching the passed counterparty node_id {}",
-					peer_node_id
+					"Can't find a peer matching the passed counterparty node_id {peer_node_id}",
 				),
 			})?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -4563,8 +4562,7 @@ where
 		} else {
 			Err(APIError::ChannelUnavailable {
 				err: format!(
-					"Channel with id {} not found for the passed counterparty node_id {}",
-					channel_id, peer_node_id
+					"Channel with id {channel_id} not found for the passed counterparty node_id {peer_node_id}",
 				),
 			})
 		}

From ded972b5e0d8b52a5de1442c1a9f354b78f358d3 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 16 Dec 2025 16:46:32 +0000
Subject: [PATCH 043/242] Automatically archive resolved `ChannelMonitor`s in
 the BP

When we first added auto-archiving of resolved `ChannelMonitor`s, we
wanted to be somewhat cautious of flipping it on by default as
archiving a `ChannelMonitor` too soon would be a critical bug and,
while we were confident in it, we weren't 100%.

Since then it's been used extensively in various LDK deployments,
including `ldk-node`. Given it's now seen substantial use and performs
an important anti-DoS function, here we flip to calling it by default
on a new timer in `lightning-background-processor`.
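Concretely, the new cadence behaves roughly like the following minimal sketch
(only the two timer constants are taken from this patch; the `archive_if_due`
helper and its arguments are illustrative, not LDK API):

    use core::time::Duration;

    const FIRST_ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::from_secs(15);
    const ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::from_secs(60 * 10);

    /// Hypothetical helper mirroring the background-processor loop; the
    /// `archive` closure stands in for
    /// `chain_monitor.archive_fully_resolved_channel_monitors()`.
    fn archive_if_due(have_archived: &mut bool, elapsed: Duration, archive: impl FnOnce()) {
        // Archive once shortly after startup so short-lived clients still get a
        // chance to archive, then fall back to the long steady-state timer.
        let timer = if *have_archived {
            ARCHIVE_STALE_MONITORS_TIMER
        } else {
            FIRST_ARCHIVE_STALE_MONITORS_TIMER
        };
        if elapsed > timer {
            archive();
            *have_archived = true;
        }
    }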
Fixes #218 --- lightning-background-processor/src/lib.rs | 192 ++++++++++++++++++++-- 1 file changed, 182 insertions(+), 10 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index aae738ab1c1..c794e4663c9 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -171,6 +171,16 @@ const SWEEPER_TIMER: Duration = Duration::from_secs(30); #[cfg(test)] const SWEEPER_TIMER: Duration = Duration::from_secs(1); +#[cfg(not(test))] +const FIRST_ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::from_secs(15); +#[cfg(test)] +const FIRST_ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::ZERO; + +#[cfg(not(test))] +const ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::from_secs(60 * 10); +#[cfg(test)] +const ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::from_secs(1); + /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement const fn min_duration(a: Duration, b: Duration) -> Duration { if a.as_nanos() < b.as_nanos() { @@ -1018,8 +1028,10 @@ where let mut last_scorer_persist_call = sleeper(SCORER_PERSIST_TIMER); let mut last_rebroadcast_call = sleeper(REBROADCAST_TIMER); let mut last_sweeper_call = sleeper(SWEEPER_TIMER); + let mut last_archive_call = sleeper(FIRST_ARCHIVE_STALE_MONITORS_TIMER); let mut have_pruned = false; let mut have_decayed_scorer = false; + let mut have_archived = false; let mut last_forwards_processing_call = sleeper(batch_delay.get()); @@ -1147,11 +1159,31 @@ where log_trace!(logger, "Done persisting ChannelManager."); } - // Note that we want to run a graph prune once not long after startup before - // falling back to our usual hourly prunes. This avoids short-lived clients never - // pruning their network graph. We run once 60 seconds after startup before - // continuing our normal cadence. For RGS, since 60 seconds is likely too long, - // we prune after an initial sync completes. + // Note that we want to archive stale ChannelMonitors and run a network graph prune once + // not long after startup before falling back to their usual infrequent runs. This avoids + // short-lived clients never archiving stale ChannelMonitors or pruning their network + // graph. For network graph pruning, in the case of RGS sync, we run a prune immediately + // after initial sync completes, otherwise we do so on a timer which should be long enough + // to give us a chance to get most of the network graph from our peers. 
+ let archive_timer = if have_archived { + ARCHIVE_STALE_MONITORS_TIMER + } else { + FIRST_ARCHIVE_STALE_MONITORS_TIMER + }; + let archive_timer_elapsed = { + match check_and_reset_sleeper(&mut last_archive_call, || sleeper(archive_timer)) { + Some(false) => true, + Some(true) => break, + None => false, + } + }; + if archive_timer_elapsed { + log_trace!(logger, "Archiving stale ChannelMonitors."); + chain_monitor.archive_fully_resolved_channel_monitors(); + have_archived = true; + log_trace!(logger, "Archived stale ChannelMonitors."); + } + let prune_timer = if gossip_sync.prunable_network_graph().is_some() { NETWORK_PRUNE_TIMER } else { @@ -1601,8 +1633,10 @@ impl BackgroundProcessor { let mut last_scorer_persist_call = Instant::now(); let mut last_rebroadcast_call = Instant::now(); let mut last_sweeper_call = Instant::now(); + let mut last_archive_call = Instant::now(); let mut have_pruned = false; let mut have_decayed_scorer = false; + let mut have_archived = false; let mut cur_batch_delay = batch_delay.get(); let mut last_forwards_processing_call = Instant::now(); @@ -1691,11 +1725,26 @@ impl BackgroundProcessor { }); } - // Note that we want to run a graph prune once not long after startup before - // falling back to our usual hourly prunes. This avoids short-lived clients never - // pruning their network graph. We run once 60 seconds after startup before - // continuing our normal cadence. For RGS, since 60 seconds is likely too long, - // we prune after an initial sync completes. + // Note that we want to archive stale ChannelMonitors and run a network graph prune once + // not long after startup before falling back to their usual infrequent runs. This avoids + // short-lived clients never archiving stale ChannelMonitors or pruning their network + // graph. For network graph pruning, in the case of RGS sync, we run a prune immediately + // after initial sync completes, otherwise we do so on a timer which should be long enough + // to give us a chance to get most of the network graph from our peers. + let archive_timer = if have_archived { + ARCHIVE_STALE_MONITORS_TIMER + } else { + FIRST_ARCHIVE_STALE_MONITORS_TIMER + }; + let archive_timer_elapsed = last_archive_call.elapsed() > archive_timer; + if archive_timer_elapsed { + log_trace!(logger, "Archiving stale ChannelMonitors."); + chain_monitor.archive_fully_resolved_channel_monitors(); + have_archived = true; + last_archive_call = Instant::now(); + log_trace!(logger, "Archived stale ChannelMonitors."); + } + let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER }; let prune_timer_elapsed = last_prune_call.elapsed() > prune_timer; @@ -3698,4 +3747,127 @@ mod tests { exit_sender.send(()).unwrap(); t1.await.unwrap().unwrap(); } + + #[test] + fn test_monitor_archive() { + let (persist_dir, nodes) = create_nodes(2, "test_monitor_archive"); + // Open a channel, but don't confirm it so that it prunes immediately on FC. 
+		open_channel!(nodes[0], nodes[1], 100000);
+
+		let data_dir = nodes[1].kv_store.get_data_dir();
+		let persister = Arc::new(Persister::new(data_dir));
+		let event_handler = |_: _| Ok(());
+		let bp = BackgroundProcessor::start(
+			persister,
+			event_handler,
+			Arc::clone(&nodes[1].chain_monitor),
+			Arc::clone(&nodes[1].node),
+			Some(Arc::clone(&nodes[1].messenger)),
+			nodes[1].p2p_gossip_sync(),
+			Arc::clone(&nodes[1].peer_manager),
+			Some(Arc::clone(&nodes[1].liquidity_manager)),
+			Some(Arc::clone(&nodes[1].sweeper)),
+			Arc::clone(&nodes[1].logger),
+			Some(Arc::clone(&nodes[1].scorer)),
+		);
+
+		let dir = format!("{}_persister_1/monitors", &persist_dir);
+		let mut mons = std::fs::read_dir(&dir).unwrap();
+		let mut mon = mons.next().unwrap().unwrap();
+		if mon.path().to_str().unwrap().ends_with(".tmp") {
+			mon = mons.next().unwrap().unwrap();
+			assert_eq!(mon.path().extension(), None);
+		}
+		assert!(mons.next().is_none());
+
+		// Because the channel wasn't funded, we'll archive the ChannelMonitor immediately after
+		// it's force-closed (at least on node B, which didn't put their money into it).
+		nodes[1].node.force_close_all_channels_broadcasting_latest_txn("".to_owned());
+		loop {
+			let mut mons = std::fs::read_dir(&dir).unwrap();
+			if let Some(new_mon) = mons.next() {
+				let mut new_mon = new_mon.unwrap();
+				if new_mon.path().to_str().unwrap().ends_with(".tmp") {
+					new_mon = mons.next().unwrap().unwrap();
+					assert_eq!(new_mon.path().extension(), None);
+				}
+				assert_eq!(new_mon.path(), mon.path());
+				assert!(mons.next().is_none());
+			} else {
+				break;
+			}
+		}
+
+		bp.stop().unwrap();
+	}
+
+	#[tokio::test]
+	#[cfg(not(c_bindings))]
+	async fn test_monitor_archive_async() {
+		let (persist_dir, nodes) = create_nodes(2, "test_monitor_archive_async");
+		// Open a channel, but don't confirm it so that it prunes immediately on FC.
+		open_channel!(nodes[0], nodes[1], 100000);
+
+		let kv_store = KVStoreSyncWrapper(Arc::clone(&nodes[0].kv_store));
+		let sweeper_async: &'static OutputSweeper<_, _, _, _, _, _, _> = unsafe {
+			&*(nodes[0].sweeper.sweeper_async() as *const OutputSweeper<_, _, _, _, _, _, _>)
+				as &'static OutputSweeper<_, _, _, _, _, _, _>
+		};
+		let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
+		let bp_future = tokio::spawn(super::process_events_async(
+			kv_store,
+			move |_: Event| async move { Ok(()) },
+			Arc::clone(&nodes[1].chain_monitor),
+			Arc::clone(&nodes[1].node),
+			crate::NO_ONION_MESSENGER,
+			nodes[1].no_gossip_sync(),
+			Arc::clone(&nodes[1].peer_manager),
+			crate::NO_LIQUIDITY_MANAGER,
+			Some(sweeper_async),
+			Arc::clone(&nodes[1].logger),
+			Some(Arc::clone(&nodes[1].scorer)),
+			move |dur: Duration| {
+				let mut exit_receiver = exit_receiver.clone();
+				Box::pin(async move {
+					tokio::select! {
+						_ = tokio::time::sleep(dur) => false,
+						_ = exit_receiver.changed() => true,
+					}
+				})
+			},
+			false,
+			|| Some(Duration::ZERO),
+		));
+
+		let dir = format!("{}_persister_1/monitors", &persist_dir);
+		let mut mons = std::fs::read_dir(&dir).unwrap();
+		let mut mon = mons.next().unwrap().unwrap();
+		if mon.path().to_str().unwrap().ends_with(".tmp") {
+			mon = mons.next().unwrap().unwrap();
+			assert_eq!(mon.path().extension(), None);
+		}
+		assert!(mons.next().is_none());
+
+		// Because the channel wasn't funded, we'll archive the ChannelMonitor immediately after
+		// it's force-closed (at least on node B, which didn't put their money into it).
+		nodes[1].node.force_close_all_channels_broadcasting_latest_txn("".to_owned());
+		loop {
+			let mut mons = std::fs::read_dir(&dir).unwrap();
+			if let Some(new_mon) = mons.next() {
+				let mut new_mon = new_mon.unwrap();
+				if new_mon.path().to_str().unwrap().ends_with(".tmp") {
+					new_mon = mons.next().unwrap().unwrap();
+					assert_eq!(new_mon.path().extension(), None);
+				}
+				assert_eq!(new_mon.path(), mon.path());
+				assert!(mons.next().is_none());
+			} else {
+				break;
+			}
+			tokio::task::yield_now().await;
+		}
+
+		exit_sender.send(()).unwrap();
+		bp_future.await.unwrap().unwrap();
+	}
 }

From 5ff4051d9361d1699be383edcc71d371896e9552 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 31 Dec 2025 15:03:06 +0000
Subject: [PATCH 044/242] Simplify `Sleeper` init in sync
 `lightning-background-processor`

Rather than `match`ing on several optional objects (with another one
to come in a future commit), build an iterator over the futures using
the fact that an `Option` is an iterator.

---
 lightning-background-processor/src/lib.rs | 33 +++++----------
 lightning/src/util/wakers.rs              | 50 ++++++-----------------
 2 files changed, 23 insertions(+), 60 deletions(-)

diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index aae738ab1c1..36b563f5925 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -1635,28 +1635,17 @@ impl BackgroundProcessor {
 				log_trace!(logger, "Terminating background processor.");
 				break;
 			}
-			let sleeper = match (onion_messenger.as_ref(), liquidity_manager.as_ref()) {
-				(Some(om), Some(lm)) => Sleeper::from_four_futures(
-					&channel_manager.get_cm().get_event_or_persistence_needed_future(),
-					&chain_monitor.get_update_future(),
-					&om.get_om().get_update_future(),
-					&lm.get_lm().get_pending_msgs_or_needs_persist_future(),
-				),
-				(Some(om), None) => Sleeper::from_three_futures(
-					&channel_manager.get_cm().get_event_or_persistence_needed_future(),
-					&chain_monitor.get_update_future(),
-					&om.get_om().get_update_future(),
-				),
-				(None, Some(lm)) => Sleeper::from_three_futures(
-					&channel_manager.get_cm().get_event_or_persistence_needed_future(),
-					&chain_monitor.get_update_future(),
-					&lm.get_lm().get_pending_msgs_or_needs_persist_future(),
-				),
-				(None, None) => Sleeper::from_two_futures(
-					&channel_manager.get_cm().get_event_or_persistence_needed_future(),
-					&chain_monitor.get_update_future(),
-				),
-			};
+			let om_fut = onion_messenger.as_ref().map(|om| om.get_om().get_update_future());
+			let lm_fut = liquidity_manager
+				.as_ref()
+				.map(|lm| lm.get_lm().get_pending_msgs_or_needs_persist_future());
+			let always_futures = [
+				channel_manager.get_cm().get_event_or_persistence_needed_future(),
+				chain_monitor.get_update_future(),
+			];
+			let futures = always_futures.into_iter().chain(om_fut).chain(lm_fut);
+			let sleeper = Sleeper::from_futures(futures);
+
 			let batch_delay = if channel_manager.get_cm().needs_pending_htlc_processing() {
 				batch_delay.get()
 			} else {
diff --git a/lightning/src/util/wakers.rs b/lightning/src/util/wakers.rs
index a84d90960d8..17edadfd822 100644
--- a/lightning/src/util/wakers.rs
+++ b/lightning/src/util/wakers.rs
@@ -253,37 +253,13 @@ impl Sleeper {
 	pub fn from_single_future(future: &Future) -> Self {
 		Self { notifiers: vec![Arc::clone(&future.state)] }
 	}
-	/// Constructs a new sleeper from two futures, allowing blocking on both at once.
-	pub fn from_two_futures(fut_a: &Future, fut_b: &Future) -> Self {
-		Self { notifiers: vec![Arc::clone(&fut_a.state), Arc::clone(&fut_b.state)] }
-	}
-	/// Constructs a new sleeper from three futures, allowing blocking on all three at once.
-	///
-	// Note that this is the common case - a ChannelManager, a ChainMonitor, and an
-	// OnionMessenger.
-	pub fn from_three_futures(fut_a: &Future, fut_b: &Future, fut_c: &Future) -> Self {
-		let notifiers =
-			vec![Arc::clone(&fut_a.state), Arc::clone(&fut_b.state), Arc::clone(&fut_c.state)];
-		Self { notifiers }
-	}
-	/// Constructs a new sleeper from four futures, allowing blocking on all four at once.
-	///
-	// Note that this is another common case - a ChannelManager, a ChainMonitor, an
-	// OnionMessenger, and a LiquidityManager.
-	pub fn from_four_futures(
-		fut_a: &Future, fut_b: &Future, fut_c: &Future, fut_d: &Future,
-	) -> Self {
-		let notifiers = vec![
-			Arc::clone(&fut_a.state),
-			Arc::clone(&fut_b.state),
-			Arc::clone(&fut_c.state),
-			Arc::clone(&fut_d.state),
-		];
-		Self { notifiers }
+	/// Constructs a new sleeper from an iterator of futures, allowing blocking on all at once.
+	pub fn from_futures<I: IntoIterator<Item = Future>>(futures: I) -> Self {
+		Self { notifiers: futures.into_iter().map(|f| Arc::clone(&f.state)).collect() }
 	}
 	/// Constructs a new sleeper on many futures, allowing blocking on all at once.
 	pub fn new(futures: Vec<Future>) -> Self {
-		Self { notifiers: futures.into_iter().map(|f| Arc::clone(&f.state)).collect() }
+		Self::from_futures(futures)
 	}
 	/// Prepares to go into a wait loop body, creating a condition variable which we can block on
 	/// and an `Arc<Mutex<Option<_>>>` which gets set to the waking `Future`'s state prior to the
@@ -506,15 +482,13 @@ mod tests {
 		// Wait on the other thread to finish its sleep, note that the leak only happened if we
 		// actually have to sleep here, not if we immediately return.
-		Sleeper::from_two_futures(&future_a, &future_b).wait();
+		Sleeper::from_futures([future_a, future_b]).wait();

 		join_handle.join().unwrap();

 		// then drop the notifiers and make sure the future states are gone.
 		mem::drop(notifier_a);
 		mem::drop(notifier_b);
-		mem::drop(future_a);
-		mem::drop(future_b);

 		assert!(future_state_a.upgrade().is_none() && future_state_b.upgrade().is_none());
 	}
@@ -736,18 +710,18 @@ mod tests {
 		// Set both notifiers as woken without sleeping yet.
 		notifier_a.notify();
 		notifier_b.notify();
-		Sleeper::from_two_futures(&notifier_a.get_future(), &notifier_b.get_future()).wait();
+		Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait();

 		// One future has woken us up, but the other should still have a pending notification.
-		Sleeper::from_two_futures(&notifier_a.get_future(), &notifier_b.get_future()).wait();
+		Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait();

 		// However once we've slept twice, we should no longer have any pending notifications
-		assert!(!Sleeper::from_two_futures(&notifier_a.get_future(), &notifier_b.get_future())
+		assert!(!Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()])
 			.wait_timeout(Duration::from_millis(10)));

 		// Test ordering somewhat more.
 		notifier_a.notify();
-		Sleeper::from_two_futures(&notifier_a.get_future(), &notifier_b.get_future()).wait();
+		Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait();
 	}

 	#[test]
@@ -765,7 +739,7 @@ mod tests {
 		// After sleeping one future (not guaranteed which one, however) will have its notification
 		// bit cleared.
- Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait(); + Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait(); // By registering a callback on the futures for both notifiers, one will complete // immediately, but one will remain tied to the notifier, and will complete once the @@ -788,8 +762,8 @@ mod tests { notifier_b.notify(); assert!(callback_a.load(Ordering::SeqCst) && callback_b.load(Ordering::SeqCst)); - Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait(); - assert!(!Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()) + Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait(); + assert!(!Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]) .wait_timeout(Duration::from_millis(10))); } From efaadf57f4f550245152e5bf7426c00de631331f Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 31 Dec 2025 02:08:22 +0000 Subject: [PATCH 045/242] Pass a new `Notifier` through to `UtxoFuture`s `P2PGossipSync` is a rather poor design. It currently basically requires two circular `Arc` references, leaving `NetworkGraph`s to leak if LDK is un-loaded: * `P2PGossipSync` owns/holds a reference to the `GossipVerifier` and `GossipVerifier` holds an `Arc` to the `P2PGossipSync` and * `PeerManager` holds a reference to the `P2PGossipSync` (as the gossip message handler) which owns/holds a reference to the `GossipVerifier`, which has a `Deref` (likely an `Arc` in practice) to the `PeerManager`. Instead, we should move towards the same design we have elsewhere - hold a `Notifier` and expose waiting on it to the background processor then poll for completion from there (in this case, as in others by checking for completion when handling `get_and_clear_pending_msg_events` calls). Here we take the first step towards this, adding a shared `Notifier` to `PendingChecks` and piping it through to `UtxoFuture`s so that they can be simply resolved and wake the background processor (once it waits on the new `Notifier`). 
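For illustration, the pattern boils down to the following self-contained
sketch (simplified stand-in types, not the actual `Notifier`/`UtxoFuture`
implementations): the resolver stores its result and pokes a shared
notifier, and the background side sleeps on the notifier and then polls
for the stored result.

    use std::sync::{Arc, Condvar, Mutex};

    struct Notifier {
        pending: Mutex<bool>,
        condvar: Condvar,
    }

    impl Notifier {
        fn new() -> Self {
            Notifier { pending: Mutex::new(false), condvar: Condvar::new() }
        }
        // Record a pending wake and rouse any sleeping waiter.
        fn notify(&self) {
            *self.pending.lock().unwrap() = true;
            self.condvar.notify_all();
        }
        // Sleep until a wake is pending, then clear it.
        fn wait(&self) {
            let mut pending = self.pending.lock().unwrap();
            while !*pending {
                pending = self.condvar.wait(pending).unwrap();
            }
            *pending = false;
        }
    }

    // Stand-in for a pending UTXO lookup: resolution stores the result and
    // notifies, rather than re-entering the network graph directly.
    struct LookupFuture {
        result: Mutex<Option<Result<u64, ()>>>,
        notifier: Arc<Notifier>,
    }

    impl LookupFuture {
        fn resolve(&self, res: Result<u64, ()>) {
            *self.result.lock().unwrap() = Some(res);
            self.notifier.notify();
        }
    }

    fn main() {
        let notifier = Arc::new(Notifier::new());
        let fut =
            Arc::new(LookupFuture { result: Mutex::new(None), notifier: Arc::clone(&notifier) });

        let resolver = Arc::clone(&fut);
        std::thread::spawn(move || resolver.resolve(Ok(21_000_000)));

        // Background-processor side: block until some lookup resolved, then poll.
        notifier.wait();
        assert!(fut.result.lock().unwrap().is_some());
    }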
---
 fuzz/src/router.rs                 |  9 +--
 lightning-block-sync/src/gossip.rs |  7 ++-
 lightning/src/routing/utxo.rs      | 91 +++++++++++++++++++++---------
 lightning/src/util/test_utils.rs   |  3 +-
 4 files changed, 75 insertions(+), 35 deletions(-)

diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs
index af29a0221a9..e6508d06d0e 100644
--- a/fuzz/src/router.rs
+++ b/fuzz/src/router.rs
@@ -31,6 +31,7 @@ use lightning::types::features::{BlindedHopFeatures, Bolt12InvoiceFeatures};
 use lightning::util::config::UserConfig;
 use lightning::util::hash_tables::*;
 use lightning::util::ser::LengthReadable;
+use lightning::util::wakers::Notifier;

 use bitcoin::hashes::Hash;
 use bitcoin::network::Network;
@@ -93,7 +94,7 @@ struct FuzzChainSource<'a, 'b, Out: test_logger::Output> {
 	net_graph: &'a NetworkGraph<&'b test_logger::TestLogger<Out>>,
 }
 impl<Out: test_logger::Output> UtxoLookup for FuzzChainSource<'_, '_, Out> {
-	fn get_utxo(&self, _chain_hash: &ChainHash, _short_channel_id: u64) -> UtxoResult {
+	fn get_utxo(&self, _chain_hash: &ChainHash, _scid: u64, notifier: Arc<Notifier>) -> UtxoResult {
 		let input_slice = self.input.get_slice(2);
 		if input_slice.is_none() {
 			return UtxoResult::Sync(Err(UtxoLookupError::UnknownTx));
@@ -107,17 +108,17 @@ impl<Out: test_logger::Output> UtxoLookup for FuzzChainSource<'_, '_, Out> {
 		&[0, _] => UtxoResult::Sync(Err(UtxoLookupError::UnknownChain)),
 		&[1, _] => UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)),
 		&[2, _] => {
-			let future = UtxoFuture::new();
+			let future = UtxoFuture::new(notifier);
 			future.resolve_without_forwarding(self.net_graph, Ok(txo_res));
 			UtxoResult::Async(future.clone())
 		},
 		&[3, _] => {
-			let future = UtxoFuture::new();
+			let future = UtxoFuture::new(notifier);
 			future.resolve_without_forwarding(self.net_graph, Err(UtxoLookupError::UnknownTx));
 			UtxoResult::Async(future.clone())
 		},
 		&[4, _] => {
-			UtxoResult::Async(UtxoFuture::new()) // the future will never resolve
+			UtxoResult::Async(UtxoFuture::new(notifier)) // the future will never resolve
 		},
 		&[..] => UtxoResult::Sync(Ok(txo_res)),
 		}
diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs
index 596098350c7..2c5dadf57e1 100644
--- a/lightning-block-sync/src/gossip.rs
+++ b/lightning-block-sync/src/gossip.rs
@@ -14,6 +14,7 @@ use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
 use lightning::routing::utxo::{UtxoFuture, UtxoLookup, UtxoLookupError, UtxoResult};
 use lightning::util::logger::Logger;
 use lightning::util::native_async::FutureSpawner;
+use lightning::util::wakers::Notifier;

 use std::collections::VecDeque;
 use std::future::Future;
@@ -273,15 +274,15 @@ where
 	Blocks::Target: UtxoSource,
 	L::Target: Logger,
 {
-	fn get_utxo(&self, _chain_hash: &ChainHash, short_channel_id: u64) -> UtxoResult {
-		let res = UtxoFuture::new();
+	fn get_utxo(&self, _chain_hash: &ChainHash, scid: u64, notifier: Arc<Notifier>) -> UtxoResult {
+		let res = UtxoFuture::new(notifier);
 		let fut = res.clone();
 		let source = self.source.clone();
 		let gossiper = Arc::clone(&self.gossiper);
 		let block_cache = Arc::clone(&self.block_cache);
 		let pmw = Arc::clone(&self.peer_manager_wake);
 		self.spawn.spawn(async move {
-			let res = Self::retrieve_utxo(source, block_cache, short_channel_id).await;
+			let res = Self::retrieve_utxo(source, block_cache, scid).await;
 			fut.resolve(gossiper.network_graph(), &*gossiper, res);
 			(pmw)();
 		});
diff --git a/lightning/src/routing/utxo.rs b/lightning/src/routing/utxo.rs
index 4299dffb90f..8e0dc113817 100644
--- a/lightning/src/routing/utxo.rs
+++ b/lightning/src/routing/utxo.rs
@@ -23,6 +23,7 @@ use crate::ln::chan_utils::make_funding_redeemscript_from_slices;
 use crate::ln::msgs::{self, ErrorAction, LightningError, MessageSendEvent};
 use crate::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
 use crate::util::logger::{Level, Logger};
+use crate::util::wakers::Notifier;

 use crate::prelude::*;

@@ -64,8 +65,14 @@ pub trait UtxoLookup {
 	/// Returns an error if `chain_hash` is for a different chain or if such a transaction output is
 	/// unknown.
 	///
+	/// An `async_completion_notifier` is provided which should be [`Notifier::notify`]ed upon
+	/// resolution of the [`UtxoFuture`] in case this method returns [`UtxoResult::Async`].
+	///
 	/// [`short_channel_id`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#definition-of-short_channel_id
-	fn get_utxo(&self, chain_hash: &ChainHash, short_channel_id: u64) -> UtxoResult;
+	fn get_utxo(
+		&self, chain_hash: &ChainHash, short_channel_id: u64,
+		async_completion_notifier: Arc<Notifier>,
+	) -> UtxoResult;
 }

 enum ChannelAnnouncement {
@@ -108,6 +115,7 @@ impl ChannelUpdate {
 }

 struct UtxoMessages {
+	notifier: Arc<Notifier>,
 	complete: Option<Result<TxOut, UtxoLookupError>>,
 	channel_announce: Option<ChannelAnnouncement>,
 	latest_node_announce_a: Option<NodeAnnouncement>,
@@ -128,23 +136,25 @@ pub struct UtxoFuture {
 /// once we have a concrete resolution of a request.
 pub(crate) struct UtxoResolver(Result<TxOut, UtxoLookupError>);
 impl UtxoLookup for UtxoResolver {
-	fn get_utxo(&self, _chain_hash: &ChainHash, _short_channel_id: u64) -> UtxoResult {
+	fn get_utxo(&self, _hash: &ChainHash, _scid: u64, _notifier: Arc<Notifier>) -> UtxoResult {
 		UtxoResult::Sync(self.0.clone())
 	}
 }

 impl UtxoFuture {
 	/// Builds a new future for later resolution.
-	#[rustfmt::skip]
-	pub fn new() -> Self {
-		Self { state: Arc::new(Mutex::new(UtxoMessages {
-			complete: None,
-			channel_announce: None,
-			latest_node_announce_a: None,
-			latest_node_announce_b: None,
-			latest_channel_update_a: None,
-			latest_channel_update_b: None,
-		}))}
+	pub fn new(notifier: Arc<Notifier>) -> Self {
+		Self {
+			state: Arc::new(Mutex::new(UtxoMessages {
+				notifier,
+				complete: None,
+				channel_announce: None,
+				latest_node_announce_a: None,
+				latest_node_announce_b: None,
+				latest_channel_update_a: None,
+				latest_channel_update_b: None,
+			})),
+		}
 	}

 	/// Resolves this future against the given `graph` and with the given `result`.
@@ -202,6 +212,7 @@ impl UtxoFuture {
 		let (announcement, node_a, node_b, update_a, update_b) = {
 			let mut pending_checks = graph.pending_checks.internal.lock().unwrap();
 			let mut async_messages = self.state.lock().unwrap();
+			async_messages.notifier.notify();

 			if async_messages.channel_announce.is_none() {
 				// We raced returning to `check_channel_announcement` which hasn't updated
 				// `channel_announce` yet. That's okay, we can set the `complete` field which it will
 				// check once it gets control again.
@@ -321,14 +332,18 @@ impl PendingChecksContext {

 /// A set of messages which are pending UTXO lookups for processing.
 pub(super) struct PendingChecks {
 	internal: Mutex<PendingChecksContext>,
+	pub(super) completion_notifier: Arc<Notifier>,
 }

 impl PendingChecks {
-	#[rustfmt::skip]
 	pub(super) fn new() -> Self {
-		PendingChecks { internal: Mutex::new(PendingChecksContext {
-			channels: new_hash_map(), nodes: new_hash_map(),
-		}) }
+		PendingChecks {
+			internal: Mutex::new(PendingChecksContext {
+				channels: new_hash_map(),
+				nodes: new_hash_map(),
+			}),
+			completion_notifier: Arc::new(Notifier::new()),
+		}
 	}

 	/// Checks if there is a pending `channel_update` UTXO validation for the given channel,
@@ -519,7 +534,8 @@ impl PendingChecks {
 					Ok(None)
 				},
 				&Some(ref utxo_lookup) => {
-					match utxo_lookup.get_utxo(&msg.chain_hash, msg.short_channel_id) {
+					let notifier = Arc::clone(&self.completion_notifier);
+					match utxo_lookup.get_utxo(&msg.chain_hash, msg.short_channel_id, notifier) {
 						UtxoResult::Sync(res) => handle_result(res),
 						UtxoResult::Async(future) => {
 							let mut pending_checks = self.internal.lock().unwrap();
@@ -636,9 +652,11 @@ mod tests {
 		// `get_utxo` call can read it still resolve properly.
 		let (valid_announcement, chain_source, network_graph, good_script, ..) = get_test_objects();

-		let future = UtxoFuture::new();
+		let notifier = Arc::new(Notifier::new());
+		let future = UtxoFuture::new(Arc::clone(&notifier));
 		future.resolve_without_forwarding(&network_graph,
 			Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script }));
+		assert!(notifier.notify_pending());

 		*chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone());
 		network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap();
@@ -652,7 +670,8 @@ mod tests {
 		let (valid_announcement, chain_source, network_graph, good_script, node_a_announce, node_b_announce, ..)
= get_test_objects(); - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( @@ -662,6 +681,7 @@ mod tests { future.resolve_without_forwarding(&network_graph, Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script })); + assert!(notifier.notify_pending()); network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).unwrap(); network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).unwrap(); @@ -681,7 +701,8 @@ mod tests { // Test an async lookup which returns an incorrect script let (valid_announcement, chain_source, network_graph, ..) = get_test_objects(); - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( @@ -691,6 +712,7 @@ mod tests { future.resolve_without_forwarding(&network_graph, Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: bitcoin::ScriptBuf::new() })); + assert!(notifier.notify_pending()); assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); } @@ -700,7 +722,8 @@ mod tests { // Test an async lookup which returns an error let (valid_announcement, chain_source, network_graph, ..) = get_test_objects(); - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( @@ -709,6 +732,7 @@ mod tests { assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); future.resolve_without_forwarding(&network_graph, Err(UtxoLookupError::UnknownTx)); + assert!(notifier.notify_pending()); assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); } @@ -720,7 +744,8 @@ mod tests { let (valid_announcement, chain_source, network_graph, good_script, node_a_announce, node_b_announce, chan_update_a, chan_update_b, ..) = get_test_objects(); - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( @@ -740,8 +765,10 @@ mod tests { assert_eq!(network_graph.update_channel(&chan_update_b).unwrap_err().err, "Awaiting channel_announcement validation to accept channel_update"); + assert!(!notifier.notify_pending()); future.resolve_without_forwarding(&network_graph, Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(notifier.notify_pending()); assert!(network_graph.read_only().channels() .get(&valid_announcement.contents.short_channel_id).unwrap().one_to_two.is_some()); @@ -762,7 +789,8 @@ mod tests { let (valid_announcement, chain_source, network_graph, good_script, _, _, chan_update_a, chan_update_b, chan_update_c, ..) 
= get_test_objects(); - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( @@ -777,8 +805,10 @@ mod tests { assert_eq!(network_graph.update_channel(&chan_update_c).unwrap_err().err, "Awaiting channel_announcement validation to accept channel_update"); + assert!(!notifier.notify_pending()); future.resolve_without_forwarding(&network_graph, Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(notifier.notify_pending()); assert_eq!(chan_update_a.contents.timestamp, chan_update_b.contents.timestamp); let graph_lock = network_graph.read_only(); @@ -797,7 +827,8 @@ mod tests { // only if the channel_announcement message is identical. let (valid_announcement, chain_source, network_graph, good_script, ..) = get_test_objects(); - let future = UtxoFuture::new(); + let notifier_a = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier_a)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( @@ -806,7 +837,8 @@ mod tests { assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 1); // If we make a second request with the same message, the call count doesn't increase... - let future_b = UtxoFuture::new(); + let notifier_b = Arc::new(Notifier::new()); + let future_b = UtxoFuture::new(Arc::clone(¬ifier_b)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future_b.clone()); assert_eq!( network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, @@ -827,6 +859,8 @@ mod tests { // Still, if we resolve the original future, the original channel will be accepted. future.resolve_without_forwarding(&network_graph, Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(notifier_a.notify_pending()); + assert!(!notifier_b.notify_pending()); assert!(!network_graph.read_only().channels() .get(&valid_announcement.contents.short_channel_id).unwrap() .announcement_message.as_ref().unwrap() @@ -842,7 +876,8 @@ mod tests { let (chain_source, network_graph) = get_network(); // We cheat and use a single future for all the lookups to complete them all at once. - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); @@ -862,6 +897,7 @@ mod tests { // Once the future completes the "too many checks" flag should reset. future.resolve_without_forwarding(&network_graph, Err(UtxoLookupError::UnknownTx)); + assert!(notifier.notify_pending()); assert!(!network_graph.pending_checks.too_many_checks_pending()); } @@ -874,7 +910,8 @@ mod tests { let (chain_source, network_graph) = get_network(); // We cheat and use a single future for all the lookups to complete them all at once. 
-		*chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(UtxoFuture::new());
+		let notifier = Arc::new(Notifier::new());
+		*chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(UtxoFuture::new(notifier));

 		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
 		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 50514e0a894..34f5d5fe36e 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -61,6 +61,7 @@ use crate::util::mut_global::MutGlobal;
 use crate::util::persist::{KVStore, KVStoreSync, MonitorName};
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
 use crate::util::test_channel_signer::{EnforcementState, TestChannelSigner};
+use crate::util::wakers::Notifier;

 use bitcoin::amount::Amount;
 use bitcoin::block::Block;
@@ -2101,7 +2102,7 @@ impl TestChainSource {
 }

 impl UtxoLookup for TestChainSource {
-	fn get_utxo(&self, chain_hash: &ChainHash, _short_channel_id: u64) -> UtxoResult {
+	fn get_utxo(&self, chain_hash: &ChainHash, _scid: u64, _notifier: Arc<Notifier>) -> UtxoResult {
 		self.get_utxo_call_count.fetch_add(1, Ordering::Relaxed);
 		if self.chain_hash != *chain_hash {
 			return UtxoResult::Sync(Err(UtxoLookupError::UnknownChain));

From 91a16c53ca6fd3a02171b7f05f64df8de4130adb Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 31 Dec 2025 11:39:45 +0000
Subject: [PATCH 046/242] Poll for resolved `UtxoFuture`s rather than resolving
 on the graph

`P2PGossipSync` is a rather poor design. It currently basically
requires two circular `Arc` references, leaving `NetworkGraph`s to
leak if LDK is un-loaded:
 * `P2PGossipSync` owns/holds a reference to the `GossipVerifier` and
   `GossipVerifier` holds an `Arc` to the `P2PGossipSync` and
 * `PeerManager` holds a reference to the `P2PGossipSync` (as the
   gossip message handler) which owns/holds a reference to the
   `GossipVerifier`, which has a `Deref` (likely an `Arc` in practice)
   to the `PeerManager`.

Instead, we should move towards the same design we have elsewhere -
hold a `Notifier` and expose waiting on it to the background processor
then poll for completion from there (in this case, as in others by
checking for completion when handling `get_and_clear_pending_msg_events`
calls).

Here we do the bulk of this work, moving `UtxoFuture` resolution to a
simple function that signals the `Notifier` and stores the result. We
then poll to convert the result into forwarded messages in
`P2PGossipSync::get_and_clear_pending_msg_events`.

Note that we still rely on manual wakeups from the gossip validator,
but that will be fixed in the next commit.
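The sweep over pending lookups can be pictured with a small
self-contained sketch (stand-in types; the real code walks
`Weak<Mutex<UtxoMessages>>` entries): dead weak pointers are dropped,
completed entries are pulled out for processing, and unresolved ones
are kept.

    use std::sync::{Arc, Mutex, Weak};

    // Prune dropped futures and extract completed ones, keeping the rest
    // pending, mirroring the retain-based logic in `check_resolved_futures`.
    fn drain_completed(
        pending: &mut Vec<Weak<Mutex<Option<u32>>>>,
    ) -> Vec<Arc<Mutex<Option<u32>>>> {
        let mut completed = Vec::new();
        pending.retain(|weak| match weak.upgrade() {
            Some(state) => {
                if state.lock().unwrap().is_some() {
                    completed.push(state);
                    false // resolved: remove from the pending set
                } else {
                    true // still awaiting a result
                }
            },
            None => false, // the future was dropped; forget the lookup
        });
        completed
    }

    fn main() {
        let resolved = Arc::new(Mutex::new(Some(42)));
        let unresolved = Arc::new(Mutex::new(None));
        let mut pending = vec![Arc::downgrade(&resolved), Arc::downgrade(&unresolved)];
        let done = drain_completed(&mut pending);
        assert_eq!(done.len(), 1);
        assert_eq!(pending.len(), 1);
    }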
---
 fuzz/src/router.rs                 |  15 +-
 lightning-block-sync/src/gossip.rs |   2 +-
 lightning/src/routing/gossip.rs    |  66 +++---
 lightning/src/routing/utxo.rs      | 341 ++++++++++++++---------------
 4 files changed, 204 insertions(+), 220 deletions(-)

diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs
index e6508d06d0e..2e5b15fc7f4 100644
--- a/fuzz/src/router.rs
+++ b/fuzz/src/router.rs
@@ -89,11 +89,10 @@ impl InputData {
 	}
 }

-struct FuzzChainSource<'a, 'b, Out: test_logger::Output> {
+struct FuzzChainSource {
 	input: Arc<InputData>,
-	net_graph: &'a NetworkGraph<&'b test_logger::TestLogger<Out>>,
 }
-impl<Out: test_logger::Output> UtxoLookup for FuzzChainSource<'_, '_, Out> {
+impl UtxoLookup for FuzzChainSource {
 	fn get_utxo(&self, _chain_hash: &ChainHash, _scid: u64, notifier: Arc<Notifier>) -> UtxoResult {
 		let input_slice = self.input.get_slice(2);
 		if input_slice.is_none() {
@@ -109,12 +108,12 @@ impl<Out: test_logger::Output> UtxoLookup for FuzzChainSource<'_, '_, Out> {
 		&[1, _] => UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)),
 		&[2, _] => {
 			let future = UtxoFuture::new(notifier);
-			future.resolve_without_forwarding(self.net_graph, Ok(txo_res));
+			future.resolve(Ok(txo_res));
 			UtxoResult::Async(future.clone())
 		},
 		&[3, _] => {
 			let future = UtxoFuture::new(notifier);
-			future.resolve_without_forwarding(self.net_graph, Err(UtxoLookupError::UnknownTx));
+			future.resolve(Err(UtxoLookupError::UnknownTx));
 			UtxoResult::Async(future.clone())
 		},
 		&[4, _] => {
@@ -198,7 +197,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 	let our_pubkey = get_pubkey!();
 	let net_graph = NetworkGraph::new(Network::Bitcoin, &logger);
-	let chain_source = FuzzChainSource { input: Arc::clone(&input), net_graph: &net_graph };
+	let chain_source = FuzzChainSource { input: Arc::clone(&input) };

 	let mut node_pks = new_hash_map();
 	let mut scid = 42;
@@ -336,9 +335,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 				node_pks.insert(get_pubkey_from_node_id!(msg.node_id_1), ());
 				node_pks.insert(get_pubkey_from_node_id!(msg.node_id_2), ());
 				let _ = net_graph
-					.update_channel_from_unsigned_announcement::<&FuzzChainSource<'_, '_, Out>>(
-						&msg, &None,
-					);
+					.update_channel_from_unsigned_announcement::<&FuzzChainSource>(&msg, &None);
 			},
 			2 => {
 				let msg =
diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs
index 2c5dadf57e1..63045b6cd92 100644
--- a/lightning-block-sync/src/gossip.rs
+++ b/lightning-block-sync/src/gossip.rs
@@ -283,7 +283,7 @@ where
 		let pmw = Arc::clone(&self.peer_manager_wake);
 		self.spawn.spawn(async move {
 			let res = Self::retrieve_utxo(source, block_cache, scid).await;
-			fut.resolve(gossiper.network_graph(), &*gossiper, res);
+			fut.resolve(res);
 			(pmw)();
 		});
diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs
index ae317ad1ac3..46ca3322ae7 100644
--- a/lightning/src/routing/gossip.rs
+++ b/lightning/src/routing/gossip.rs
@@ -378,39 +378,42 @@ where
 		}
 	}

-	/// Used to broadcast forward gossip messages which were validated async.
-	///
-	/// Note that this will ignore events other than `Broadcast*` or messages with too much excess
-	/// data.
-	pub(super) fn forward_gossip_msg(&self, mut ev: MessageSendEvent) {
-		match &mut ev {
-			MessageSendEvent::BroadcastChannelAnnouncement { msg, ref mut update_msg } => {
-				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY {
-					return;
-				}
-				if update_msg.as_ref().map(|msg| msg.contents.excess_data.len()).unwrap_or(0)
-					> MAX_EXCESS_BYTES_FOR_RELAY
-				{
-					*update_msg = None;
-				}
-			},
-			MessageSendEvent::BroadcastChannelUpdate { msg, .. } => {
-				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY {
-					return;
-				}
-			},
-			MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
-				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
-					|| msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
-					|| msg.contents.excess_data.len() + msg.contents.excess_address_data.len()
+	/// Walks the list of pending UTXO validations and removes completed ones, adding any messages
+	/// we should forward as a result to [`Self::pending_events`].
+	fn process_completed_checks(&self) {
+		let msgs = self.network_graph.pending_checks.check_resolved_futures(&*self.network_graph);
+		let mut pending_events = self.pending_events.lock().unwrap();
+		pending_events.reserve(msgs.len());
+		for mut message in msgs {
+			match &mut message {
+				MessageSendEvent::BroadcastChannelAnnouncement { msg, ref mut update_msg } => {
+					if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY {
+						continue;
+					}
+					if update_msg.as_ref().map(|msg| msg.contents.excess_data.len()).unwrap_or(0)
 						> MAX_EXCESS_BYTES_FOR_RELAY
-				{
-					return;
-				}
-			},
-			_ => return,
+					{
+						*update_msg = None;
+					}
+				},
+				MessageSendEvent::BroadcastChannelUpdate { msg, .. } => {
+					if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY {
+						continue;
+					}
+				},
+				MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
+					if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
+						|| msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
+						|| msg.contents.excess_data.len() + msg.contents.excess_address_data.len()
+							> MAX_EXCESS_BYTES_FOR_RELAY
+					{
+						continue;
+					}
+				},
+				_ => continue,
+			}
+			pending_events.push(message);
 		}
-		self.pending_events.lock().unwrap().push(ev);
 	}
 }

@@ -884,6 +887,7 @@ where
 	}

 	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
+		self.process_completed_checks();
 		let mut ret = Vec::new();
 		let mut pending_events = self.pending_events.lock().unwrap();
 		core::mem::swap(&mut ret, &mut pending_events);
diff --git a/lightning/src/routing/utxo.rs b/lightning/src/routing/utxo.rs
index 8e0dc113817..f46160f1f14 100644
--- a/lightning/src/routing/utxo.rs
+++ b/lightning/src/routing/utxo.rs
@@ -21,7 +21,7 @@ use bitcoin::hex::DisplayHex;

 use crate::ln::chan_utils::make_funding_redeemscript_from_slices;
 use crate::ln::msgs::{self, ErrorAction, LightningError, MessageSendEvent};
-use crate::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
+use crate::routing::gossip::{NetworkGraph, NodeId};
 use crate::util::logger::{Level, Logger};
 use crate::util::wakers::Notifier;

@@ -157,148 +157,11 @@ impl UtxoFuture {
 		}
 	}

-	/// Resolves this future against the given `graph` and with the given `result`.
-	///
-	/// This is identical to calling [`UtxoFuture::resolve`] with a dummy `gossip`, disabling
-	/// forwarding the validated gossip message onwards to peers.
-	///
-	/// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order
-	/// to allow us to interact with peers again, you should call [`PeerManager::process_events`]
-	/// after this.
-	///
-	/// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high
-	/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
-	pub fn resolve_without_forwarding<L: Deref>(
-		&self, graph: &NetworkGraph<L>, result: Result<TxOut, UtxoLookupError>,
-	) where
-		L::Target: Logger,
-	{
-		self.do_resolve(graph, result);
-	}
-
-	/// Resolves this future against the given `graph` and with the given `result`.
-	///
-	/// The given `gossip` is used to broadcast any validated messages onwards to all peers which
-	/// have available buffer space.
-	///
-	/// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order
-	/// to allow us to interact with peers again, you should call [`PeerManager::process_events`]
-	/// after this.
-	///
-	/// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high
-	/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
-	pub fn resolve<
-		L: Deref,
-		G: Deref<Target = NetworkGraph<L>>,
-		U: Deref,
-		GS: Deref<Target = P2PGossipSync<G, U, L>>,
-	>(
-		&self, graph: &NetworkGraph<L>, gossip: GS, result: Result<TxOut, UtxoLookupError>,
-	) where
-		L::Target: Logger,
-		U::Target: UtxoLookup,
-	{
-		let mut res = self.do_resolve(graph, result);
-		for msg_opt in res.iter_mut() {
-			if let Some(msg) = msg_opt.take() {
-				gossip.forward_gossip_msg(msg);
-			}
-		}
-	}
-
-	#[rustfmt::skip]
-	fn do_resolve<L: Deref>(&self, graph: &NetworkGraph<L>, result: Result<TxOut, UtxoLookupError>)
-	-> [Option<MessageSendEvent>; 5] where L::Target: Logger {
-		let (announcement, node_a, node_b, update_a, update_b) = {
-			let mut pending_checks = graph.pending_checks.internal.lock().unwrap();
-			let mut async_messages = self.state.lock().unwrap();
-			async_messages.notifier.notify();
-
-			if async_messages.channel_announce.is_none() {
-				// We raced returning to `check_channel_announcement` which hasn't updated
-				// `channel_announce` yet. That's okay, we can set the `complete` field which it will
-				// check once it gets control again.
-				async_messages.complete = Some(result);
-				return [None, None, None, None, None];
-			}
-
-			let announcement_msg = match async_messages.channel_announce.as_ref().unwrap() {
-				ChannelAnnouncement::Full(signed_msg) => &signed_msg.contents,
-				ChannelAnnouncement::Unsigned(msg) => &msg,
-			};
-
-			pending_checks.lookup_completed(announcement_msg, &Arc::downgrade(&self.state));
-
-			(async_messages.channel_announce.take().unwrap(),
-				async_messages.latest_node_announce_a.take(),
-				async_messages.latest_node_announce_b.take(),
-				async_messages.latest_channel_update_a.take(),
-				async_messages.latest_channel_update_b.take())
-		};
-
-		let mut res = [None, None, None, None, None];
-		let mut res_idx = 0;
-
-		// Now that we've updated our internal state, pass the pending messages back through the
-		// network graph with a different `UtxoLookup` which will resolve immediately.
-		// Note that we ignore errors as we don't disconnect peers anyway, so there's nothing to do
-		// with them.
-		let resolver = UtxoResolver(result);
-		let (node_id_1, node_id_2) = match &announcement {
-			ChannelAnnouncement::Full(signed_msg) => (signed_msg.contents.node_id_1, signed_msg.contents.node_id_2),
-			ChannelAnnouncement::Unsigned(msg) => (msg.node_id_1, msg.node_id_2),
-		};
-		match announcement {
-			ChannelAnnouncement::Full(signed_msg) => {
-				if graph.update_channel_from_announcement(&signed_msg, &Some(&resolver)).is_ok() {
-					res[res_idx] = Some(MessageSendEvent::BroadcastChannelAnnouncement {
-						msg: signed_msg, update_msg: None,
-					});
-					res_idx += 1;
-				}
-			},
-			ChannelAnnouncement::Unsigned(msg) => {
-				let _ = graph.update_channel_from_unsigned_announcement(&msg, &Some(&resolver));
-			},
-		}
-
-		for announce in core::iter::once(node_a).chain(core::iter::once(node_b)) {
-			match announce {
-				Some(NodeAnnouncement::Full(signed_msg)) => {
-					if graph.update_node_from_announcement(&signed_msg).is_ok() {
-						res[res_idx] = Some(MessageSendEvent::BroadcastNodeAnnouncement {
-							msg: signed_msg,
-						});
-						res_idx += 1;
-					}
-				},
-				Some(NodeAnnouncement::Unsigned(msg)) => {
-					let _ = graph.update_node_from_unsigned_announcement(&msg);
-				},
-				None => {},
-			}
-		}
-
-		for update in core::iter::once(update_a).chain(core::iter::once(update_b)) {
-			match update {
-				Some(ChannelUpdate::Full(signed_msg)) => {
-					if graph.update_channel(&signed_msg).is_ok() {
-						res[res_idx] = Some(MessageSendEvent::BroadcastChannelUpdate {
-							msg: signed_msg,
-							node_id_1,
-							node_id_2,
-						});
-						res_idx += 1;
-					}
-				},
-				Some(ChannelUpdate::Unsigned(msg)) => {
-					let _ = graph.update_channel_unsigned(&msg);
-				},
-				None => {},
-			}
-		}
-
-		res
+	/// Resolves this future with the given result.
+	pub fn resolve(&self, result: Result<TxOut, UtxoLookupError>) {
+		let mut state = self.state.lock().unwrap();
+		state.complete = Some(result);
+		state.notifier.notify();
 	}
 }

@@ -307,28 +170,6 @@ struct PendingChecksContext {
 	nodes: HashMap<NodeId, Vec<Weak<Mutex<UtxoMessages>>>>,
 }

-impl PendingChecksContext {
-	#[rustfmt::skip]
-	fn lookup_completed(&mut self,
-		msg: &msgs::UnsignedChannelAnnouncement, completed_state: &Weak<Mutex<UtxoMessages>>
-	) {
-		if let hash_map::Entry::Occupied(e) = self.channels.entry(msg.short_channel_id) {
-			if Weak::ptr_eq(e.get(), &completed_state) {
-				e.remove();
-			}
-		}
-
-		if let hash_map::Entry::Occupied(mut e) = self.nodes.entry(msg.node_id_1) {
-			e.get_mut().retain(|elem| !Weak::ptr_eq(&elem, &completed_state));
-			if e.get().is_empty() { e.remove(); }
-		}
-		if let hash_map::Entry::Occupied(mut e) = self.nodes.entry(msg.node_id_2) {
-			e.get_mut().retain(|elem| !Weak::ptr_eq(&elem, &completed_state));
-			if e.get().is_empty() { e.remove(); }
-		}
-	}
-}
-
 /// A set of messages which are pending UTXO lookups for processing.
 pub(super) struct PendingChecks {
 	internal: Mutex<PendingChecksContext>,
@@ -597,6 +438,142 @@ impl PendingChecks {
 			false
 		}
 	}
+
+	fn resolve_single_future<L: Deref>(
+		&self, graph: &NetworkGraph<L>, entry: Arc<Mutex<UtxoMessages>>,
+		new_messages: &mut Vec<MessageSendEvent>,
+	) where
+		L::Target: Logger,
+	{
+		let (announcement, result, announce_a, announce_b, update_a, update_b);
+		{
+			let mut state = entry.lock().unwrap();
+			announcement = if let Some(announcement) = state.channel_announce.take() {
+				announcement
+			} else {
+				// We raced returning to `check_channel_announcement` which hasn't updated
+				// `channel_announce` yet. That's okay, we can set the `complete` field which it will
+				// check once it gets control again.
+				return;
+			};
+
+			result = if let Some(result) = state.complete.take() {
+				result
+			} else {
+				debug_assert!(false, "Future should have been resolved");
+				return;
+			};
+
+			announce_a = state.latest_node_announce_a.take();
+			announce_b = state.latest_node_announce_b.take();
+			update_a = state.latest_channel_update_a.take();
+			update_b = state.latest_channel_update_b.take();
+		}
+
+		// Now that we've updated our internal state, pass the pending messages back through the
+		// network graph with a different `UtxoLookup` which will resolve immediately.
+		// Note that we ignore errors as we don't disconnect peers anyway, so there's nothing to do
+		// with them.
+		let resolver = UtxoResolver(result);
+		let (node_id_1, node_id_2) = match &announcement {
+			ChannelAnnouncement::Full(signed_msg) => {
+				(signed_msg.contents.node_id_1, signed_msg.contents.node_id_2)
+			},
+			ChannelAnnouncement::Unsigned(msg) => (msg.node_id_1, msg.node_id_2),
+		};
+		match announcement {
+			ChannelAnnouncement::Full(signed_msg) => {
+				if graph.update_channel_from_announcement(&signed_msg, &Some(&resolver)).is_ok() {
+					new_messages.push(MessageSendEvent::BroadcastChannelAnnouncement {
+						msg: signed_msg,
+						update_msg: None,
+					});
+				}
+			},
+			ChannelAnnouncement::Unsigned(msg) => {
+				let _ = graph.update_channel_from_unsigned_announcement(&msg, &Some(&resolver));
+			},
+		}
+
+		for announce in [announce_a, announce_b] {
+			match announce {
+				Some(NodeAnnouncement::Full(signed_msg)) => {
+					if graph.update_node_from_announcement(&signed_msg).is_ok() {
+						new_messages
+							.push(MessageSendEvent::BroadcastNodeAnnouncement { msg: signed_msg });
+					}
+				},
+				Some(NodeAnnouncement::Unsigned(msg)) => {
+					let _ = graph.update_node_from_unsigned_announcement(&msg);
+				},
+				None => {},
+			}
+		}
+
+		for update in [update_a, update_b] {
+			match update {
+				Some(ChannelUpdate::Full(signed_msg)) => {
+					if graph.update_channel(&signed_msg).is_ok() {
+						new_messages.push(MessageSendEvent::BroadcastChannelUpdate {
+							msg: signed_msg,
+							node_id_1,
+							node_id_2,
+						});
+					}
+				},
+				Some(ChannelUpdate::Unsigned(msg)) => {
+					let _ = graph.update_channel_unsigned(&msg);
+				},
+				None => {},
+			}
+		}
+	}
+
+	pub(super) fn check_resolved_futures<L: Deref>(
+		&self, graph: &NetworkGraph<L>,
+	) -> Vec<MessageSendEvent>
+	where
+		L::Target: Logger,
+	{
+		let mut completed_states = Vec::new();
+		{
+			let mut lck = self.internal.lock().unwrap();
+			lck.channels.retain(|_, state| {
+				if let Some(state) = state.upgrade() {
+					if state.lock().unwrap().complete.is_some() {
+						completed_states.push(state);
+						false
+					} else {
+						true
+					}
+				} else {
+					// The UtxoFuture has been dropped, drop the pending-lookup state.
+					false
+				}
+			});
+			lck.nodes.retain(|_, lookups| {
+				lookups.retain(|state| {
+					if let Some(state) = state.upgrade() {
+						if state.lock().unwrap().complete.is_some() {
+							completed_states.push(state);
+							false
+						} else {
+							true
+						}
+					} else {
+						// The UtxoFuture has been dropped, drop the pending-lookup state.
+ false + } + }); + !lookups.is_empty() + }); + } + let mut res = Vec::with_capacity(completed_states.len() * 5); + for state in completed_states { + self.resolve_single_future(graph, state, &mut res); + } + res + } } #[cfg(test)] @@ -654,9 +631,10 @@ mod tests { let notifier = Arc::new(Notifier::new()); let future = UtxoFuture::new(Arc::clone(¬ifier)); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + future + .resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap(); @@ -679,9 +657,9 @@ mod tests { "Channel being checked async"); assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script })); + future.resolve(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script })); assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).unwrap(); network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).unwrap(); @@ -710,9 +688,10 @@ mod tests { "Channel being checked async"); assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: bitcoin::ScriptBuf::new() })); + let value = Amount::from_sat(1_000_000); + future.resolve(Ok(TxOut { value, script_pubkey: bitcoin::ScriptBuf::new() })); assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); } @@ -731,8 +710,9 @@ mod tests { "Channel being checked async"); assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); - future.resolve_without_forwarding(&network_graph, Err(UtxoLookupError::UnknownTx)); + future.resolve(Err(UtxoLookupError::UnknownTx)); assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); } @@ -766,9 +746,10 @@ mod tests { "Awaiting channel_announcement validation to accept channel_update"); assert!(!notifier.notify_pending()); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + future + .resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(network_graph.read_only().channels() .get(&valid_announcement.contents.short_channel_id).unwrap().one_to_two.is_some()); @@ -806,9 +787,9 @@ mod tests { "Awaiting channel_announcement validation to accept channel_update"); assert!(!notifier.notify_pending()); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: 
Amount::from_sat(1_000_000), script_pubkey: good_script })); + future.resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert_eq!(chan_update_a.contents.timestamp, chan_update_b.contents.timestamp); let graph_lock = network_graph.read_only(); @@ -857,10 +838,11 @@ mod tests { assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 2); // Still, if we resolve the original future, the original channel will be accepted. - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + future + .resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); assert!(notifier_a.notify_pending()); assert!(!notifier_b.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(!network_graph.read_only().channels() .get(&valid_announcement.contents.short_channel_id).unwrap() .announcement_message.as_ref().unwrap() @@ -896,8 +878,9 @@ mod tests { assert!(network_graph.pending_checks.too_many_checks_pending()); // Once the future completes the "too many checks" flag should reset. - future.resolve_without_forwarding(&network_graph, Err(UtxoLookupError::UnknownTx)); + future.resolve(Err(UtxoLookupError::UnknownTx)); assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(!network_graph.pending_checks.too_many_checks_pending()); } From ad78799b881b3007d3d57b4434bdf2468164ceb6 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 31 Dec 2025 20:26:29 +0000 Subject: [PATCH 047/242] Move to awaiting gossip validation in the background processor `P2PGossipSync` is a rather poor design. It currently basically requires two circular `Arc` references, leaving `NetworkGraph`s to leak if LDK is un-loaded: * `P2PGossipSync` owns/holds a reference to the `GossipVerifier` and `GossipVerifier` holds an `Arc` to the `P2PGossipSync` and * `PeerManager` holds a reference to the `P2PGossipSync` (as the gossip message handler) which owns/holds a reference to the `GossipVerifier`, which has a `Deref` (likely an `Arc` in practice) to the `PeerManager`. Instead, we should move towards the same design we have elsewhere - hold a `Notifier` and expose waiting on it to the background processor then poll for completion from there (in this case, as in others by checking for completion when handling `get_and_clear_pending_msg_events` calls). After the last few commits of setup, here we finally switch to waking the background processor directly when we detect async gossip validation completion, allowing us to drop the circular references in `P2PGossipSync`/`GossipVerifier` entirely. 
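The sync-side change leans on the fact that `Option<T>` is
`IntoIterator`, yielding zero or one items, so optional wake sources
can simply be chained onto the always-present ones. A tiny
self-contained sketch (plain integers standing in for futures):

    fn main() {
        let always = [1, 2];
        let om: Option<i32> = Some(3);
        let gv: Option<i32> = None;
        // `None` contributes nothing; `Some(x)` contributes exactly `x`.
        let all: Vec<i32> = always.into_iter().chain(om).chain(gv).collect();
        assert_eq!(all, vec![1, 2, 3]);
    }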
Fixes #3369
---
 lightning-background-processor/src/lib.rs | 56 +++++++++++++++++------
 lightning-block-sync/src/gossip.rs        | 44 ++++--------------
 lightning/src/routing/gossip.rs           | 12 +++++
 3 files changed, 61 insertions(+), 51 deletions(-)

diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index 36b563f5925..7361d026d5a 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -64,6 +64,7 @@ use lightning::util::persist::{
 	SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
 };
 use lightning::util::sweep::{OutputSweeper, OutputSweeperSync};
+use lightning::util::wakers::Future;
 #[cfg(feature = "std")]
 use lightning::util::wakers::Sleeper;
 use lightning_rapid_gossip_sync::RapidGossipSync;
@@ -235,6 +236,14 @@ where
 			GossipSync::None => None,
 		}
 	}
+
+	fn validation_completion_future(&self) -> Option<Future> {
+		match self {
+			GossipSync::P2P(gossip_sync) => Some(gossip_sync.validation_completion_future()),
+			GossipSync::Rapid(_) => None,
+			GossipSync::None => None,
+		}
+	}
 }

 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
@@ -520,12 +529,14 @@ pub(crate) mod futures_util {
 	pub(crate) struct Selector<
 		A: Future<Output = bool> + Unpin,
 		B: Future<Output = ()> + Unpin,
 		C: Future<Output = ()> + Unpin,
 		D: Future<Output = ()> + Unpin,
 		E: Future<Output = ()> + Unpin,
+		F: Future<Output = ()> + Unpin,
 	> {
 		pub a: A,
 		pub b: B,
 		pub c: C,
 		pub d: D,
 		pub e: E,
+		pub f: F,
 	}

 	pub(crate) enum SelectorOutput {
 		A(bool),
 		B,
 		C,
 		D,
 		E,
+		F,
 	}

 	impl<
 		A: Future<Output = bool> + Unpin,
 		B: Future<Output = ()> + Unpin,
 		C: Future<Output = ()> + Unpin,
 		D: Future<Output = ()> + Unpin,
 		E: Future<Output = ()> + Unpin,
-	> Future for Selector<A, B, C, D, E>
+		F: Future<Output = ()> + Unpin,
+	> Future for Selector<A, B, C, D, E, F>
 	{
 		type Output = SelectorOutput;
 		fn poll(
@@ -580,6 +593,12 @@ pub(crate) mod futures_util {
 				},
 				Poll::Pending => {},
 			}
+			match Pin::new(&mut self.f).poll(ctx) {
+				Poll::Ready(()) => {
+					return Poll::Ready(SelectorOutput::F);
+				},
+				Poll::Pending => {},
+			}
 			Poll::Pending
 		}
 	}
@@ -606,6 +625,12 @@ pub(crate) mod futures_util {
 		}
 	}

+	impl<F: Future<Output = ()> + Unpin> From<Option<F>> for OptionalSelector<F> {
+		fn from(optional_future: Option<F>) -> Self {
+			Self { optional_future }
+		}
+	}
+
 	// If we want to poll a future without an async context to figure out if it has completed or
 	// not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
 	// but sadly there's a good bit of boilerplate here.
@@ -1058,18 +1083,13 @@ where
 			if mobile_interruptable_platform {
 				await_start = Some(sleeper(Duration::from_secs(1)));
 			}
-			let om_fut = if let Some(om) = onion_messenger.as_ref() {
-				let fut = om.get_om().get_update_future();
-				OptionalSelector { optional_future: Some(fut) }
-			} else {
-				OptionalSelector { optional_future: None }
-			};
-			let lm_fut = if let Some(lm) = liquidity_manager.as_ref() {
-				let fut = lm.get_lm().get_pending_msgs_or_needs_persist_future();
-				OptionalSelector { optional_future: Some(fut) }
-			} else {
-				OptionalSelector { optional_future: None }
-			};
+			let om_fut: OptionalSelector<_> =
+				onion_messenger.as_ref().map(|om| om.get_om().get_update_future()).into();
+			let lm_fut: OptionalSelector<_> = liquidity_manager
+				.as_ref()
+				.map(|lm| lm.get_lm().get_pending_msgs_or_needs_persist_future())
+				.into();
+			let gv_fut: OptionalSelector<_> = gossip_sync.validation_completion_future().into();
 			let needs_processing = channel_manager.get_cm().needs_pending_htlc_processing();
 			let sleep_delay = match (needs_processing, mobile_interruptable_platform) {
 				(true, true) => batch_delay.get().min(Duration::from_millis(100)),
@@ -1083,9 +1103,14 @@ where
 				c: chain_monitor.get_update_future(),
 				d: om_fut,
 				e: lm_fut,
+				f: gv_fut,
 			};
 			match fut.await {
-				SelectorOutput::B | SelectorOutput::C | SelectorOutput::D | SelectorOutput::E => {},
+				SelectorOutput::B
+				| SelectorOutput::C
+				| SelectorOutput::D
+				| SelectorOutput::E
+				| SelectorOutput::F => {},
 				SelectorOutput::A(exit) => {
 					if exit {
 						break;
@@ -1639,11 +1664,12 @@ impl BackgroundProcessor {
 			let lm_fut = liquidity_manager
 				.as_ref()
 				.map(|lm| lm.get_lm().get_pending_msgs_or_needs_persist_future());
+			let gv_fut = gossip_sync.validation_completion_future();
 			let always_futures = [
 				channel_manager.get_cm().get_event_or_persistence_needed_future(),
 				chain_monitor.get_update_future(),
 			];
-			let futures = always_futures.into_iter().chain(om_fut).chain(lm_fut);
+			let futures = always_futures.into_iter().chain(om_fut).chain(lm_fut).chain(gv_fut);
 			let sleeper = Sleeper::from_futures(futures);

 			let batch_delay = if channel_manager.get_cm().needs_pending_htlc_processing() {
 				batch_delay.get()
 			} else {
diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs
index 63045b6cd92..00d321669ca 100644
--- a/lightning-block-sync/src/gossip.rs
+++ b/lightning-block-sync/src/gossip.rs
@@ -9,10 +9,7 @@ use bitcoin::constants::ChainHash;
 use bitcoin::hash_types::BlockHash;
 use bitcoin::transaction::{OutPoint, TxOut};

-use lightning::ln::peer_handler::APeerManager;
-use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
 use lightning::routing::utxo::{UtxoFuture, UtxoLookup, UtxoLookupError, UtxoResult};
-use lightning::util::logger::Logger;
 use lightning::util::native_async::FutureSpawner;
 use lightning::util::wakers::Notifier;

@@ -128,46 +125,28 @@ impl<
 /// value of 1024 should more than suffice), and ensure you have sufficient file descriptors
 /// available on both Bitcoin Core and your LDK application for each request to hold its own
 /// connection.
-pub struct GossipVerifier<
-	S: FutureSpawner,
-	Blocks: Deref + Send + Sync + 'static + Clone,
-	L: Deref + Send + Sync + 'static,
-> where
+pub struct GossipVerifier<S: FutureSpawner, Blocks: Deref + Send + Sync + 'static + Clone>
+where
 	Blocks::Target: UtxoSource,
-	L::Target: Logger,
 {
 	source: Blocks,
-	peer_manager_wake: Arc<dyn Fn() + Send + Sync>,
-	gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Arc<Self>, L>>,
 	spawn: S,
 	block_cache: Arc<Mutex<VecDeque<(u32, Block)>>>,
 }

 const BLOCK_CACHE_SIZE: usize = 5;

-impl<S: FutureSpawner, Blocks: Deref + Send + Sync + 'static + Clone, L: Deref + Send + Sync + 'static>
-	GossipVerifier<S, Blocks, L>
+impl<S: FutureSpawner, Blocks: Deref + Send + Sync + 'static + Clone> GossipVerifier<S, Blocks>
 where
 	Blocks::Target: UtxoSource,
-	L::Target: Logger,
 {
-	/// Constructs a new [`GossipVerifier`].
+	/// Constructs a new [`GossipVerifier`] for use in a [`P2PGossipSync`].
 	///
-	/// This is expected to be given to a [`P2PGossipSync`] (initially constructed with `None` for
-	/// the UTXO lookup) via [`P2PGossipSync::add_utxo_lookup`].
-	pub fn new<APM: Deref + Send + Sync + 'static>(
-		source: Blocks, spawn: S, gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Arc<Self>, L>>,
-		peer_manager: APM,
-	) -> Self
-	where
-		APM::Target: APeerManager,
-	{
-		let peer_manager_wake = Arc::new(move || peer_manager.as_ref().process_events());
+	/// [`P2PGossipSync`]: lightning::routing::gossip::P2PGossipSync
+	pub fn new(source: Blocks, spawn: S) -> Self {
 		Self {
 			source,
 			spawn,
-			gossiper,
-			peer_manager_wake,
 			block_cache: Arc::new(Mutex::new(VecDeque::with_capacity(BLOCK_CACHE_SIZE))),
 		}
 	}
@@ -256,11 +235,9 @@ where
 	}
 }

-impl<S: FutureSpawner, Blocks: Deref + Send + Sync + 'static + Clone, L: Deref + Send + Sync> Deref
-	for GossipVerifier<S, Blocks, L>
+impl<S: FutureSpawner, Blocks: Deref + Send + Sync + 'static + Clone> Deref for GossipVerifier<S, Blocks>
 where
 	Blocks::Target: UtxoSource,
-	L::Target: Logger,
 {
 	type Target = Self;
 	fn deref(&self) -> &Self {
@@ -268,23 +245,18 @@ where
 	}
 }

-impl<S: FutureSpawner, Blocks: Deref + Send + Sync + 'static + Clone, L: Deref + Send + Sync> UtxoLookup
-	for GossipVerifier<S, Blocks, L>
+impl<S: FutureSpawner, Blocks: Deref + Send + Sync + 'static + Clone> UtxoLookup for GossipVerifier<S, Blocks>
 where
 	Blocks::Target: UtxoSource,
-	L::Target: Logger,
 {
 	fn get_utxo(&self, _chain_hash: &ChainHash, scid: u64, notifier: Arc<Notifier>) -> UtxoResult {
 		let res = UtxoFuture::new(notifier);
 		let fut = res.clone();
 		let source = self.source.clone();
-		let gossiper = Arc::clone(&self.gossiper);
 		let block_cache = Arc::clone(&self.block_cache);
-		let pmw = Arc::clone(&self.peer_manager_wake);
 		self.spawn.spawn(async move {
 			let res = Self::retrieve_utxo(source, block_cache, scid).await;
 			fut.resolve(res);
-			(pmw)();
 		});
 		UtxoResult::Async(res)
 	}
diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs
index 46ca3322ae7..e8fcb7b19d1 100644
--- a/lightning/src/routing/gossip.rs
+++ b/lightning/src/routing/gossip.rs
@@ -43,6 +43,7 @@ use crate::util::indexed_map::{
 use crate::util::logger::{Level, Logger};
 use crate::util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
 use crate::util::ser::{MaybeReadable, Readable, ReadableArgs, RequiredWrapper, Writeable, Writer};
+use crate::util::wakers::Future;

 use crate::io;
 use crate::io_extras::{copy, sink};
@@ -367,6 +368,17 @@ where
 		&self.network_graph
 	}

+	/// Gets a [`Future`] which will resolve the next time an async validation of gossip data
+	/// completes.
+	///
+	/// If the [`UtxoLookup`] provided in [`P2PGossipSync::new`] does not return
+	/// [`UtxoResult::Async`] values, the returned [`Future`] will never resolve.
+	///
+	/// [`UtxoResult::Async`]: crate::routing::utxo::UtxoResult::Async
+	pub fn validation_completion_future(&self) -> Future {
+		self.network_graph.pending_checks.completion_notifier.get_future()
+	}
+
 	/// Returns true when a full routing table sync should be performed with a peer.
 	fn should_request_full_sync(&self) -> bool {
 		const FULL_SYNCS_TO_REQUEST: usize = 5;

From 15ddb3167939edc56f1bc76578e9f32cf51cab97 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 31 Dec 2025 15:27:39 +0000
Subject: [PATCH 048/242] Drop the async-setting of the `P2PGossipSync`
 `utxo_verifier`

Now that we do not rely on circular references for `P2PGossipSync`
validation, we no longer need the hacky `P2PGossipSync::add_utxo_lookup`
method to add the gossip validator after the `P2PGossipSync` has been
built. Thus, we remove it here, updating some tests that relied on it.
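A minimal sketch of the API-shape change (hypothetical stand-in types,
not the real `P2PGossipSync`): the lookup is injected once at
construction instead of being swapped in later through a `RwLock`, so
no interior mutability or read-lock is needed on the lookup path.

    struct GossipSync<U> {
        // Fixed for the lifetime of the object; previously `RwLock<Option<U>>`.
        utxo_lookup: Option<U>,
    }

    impl<U> GossipSync<U> {
        fn new(utxo_lookup: Option<U>) -> Self {
            GossipSync { utxo_lookup }
        }
    }

    fn main() {
        let sync = GossipSync::new(Some(|_scid: u64| true));
        assert!(sync.utxo_lookup.is_some());
    }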
---
 lightning/src/routing/gossip.rs     | 18 ++++------
 lightning/src/routing/router.rs     | 12 +++----
 lightning/src/routing/test_utils.rs | 53 ++++++++++++++++++++++++++---
 3 files changed, 59 insertions(+), 24 deletions(-)

diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs
index e8fcb7b19d1..040a28cddae 100644
--- a/lightning/src/routing/gossip.rs
+++ b/lightning/src/routing/gossip.rs
@@ -328,7 +328,10 @@ where
 	L::Target: Logger,
 {
 	network_graph: G,
-	utxo_lookup: RwLock<Option<U>>,
+	#[cfg(any(feature = "_test_utils", test))]
+	pub(super) utxo_lookup: Option<U>,
+	#[cfg(not(any(feature = "_test_utils", test)))]
+	utxo_lookup: Option<U>,
 	full_syncs_requested: AtomicUsize,
 	pending_events: Mutex<Vec<MessageSendEvent>>,
 	logger: L,
@@ -341,25 +344,19 @@ where
 {
 	/// Creates a new tracker of the actual state of the network of channels and nodes,
 	/// assuming an existing [`NetworkGraph`].
+	///
 	/// UTXO lookup is used to make sure announced channels exist on-chain, channel data is
 	/// correct, and the announcement is signed with channel owners' keys.
 	pub fn new(network_graph: G, utxo_lookup: Option<U>, logger: L) -> Self {
 		P2PGossipSync {
 			network_graph,
 			full_syncs_requested: AtomicUsize::new(0),
-			utxo_lookup: RwLock::new(utxo_lookup),
+			utxo_lookup,
 			pending_events: Mutex::new(vec![]),
 			logger,
 		}
 	}

-	/// Adds a provider used to check new announcements. Does not affect
-	/// existing announcements unless they are updated.
-	/// Add, update or remove the provider would replace the current one.
-	pub fn add_utxo_lookup(&self, utxo_lookup: Option<U>) {
-		*self.utxo_lookup.write().unwrap() = utxo_lookup;
-	}
-
 	/// Gets a reference to the underlying [`NetworkGraph`] which was provided in
 	/// [`P2PGossipSync::new`].
 	///
@@ -564,8 +561,7 @@ where
 	fn handle_channel_announcement(
 		&self, _their_node_id: Option<PublicKey>, msg: &msgs::ChannelAnnouncement,
 	) -> Result<bool, LightningError> {
-		self.network_graph
-			.update_channel_from_announcement(msg, &*self.utxo_lookup.read().unwrap())?;
+		self.network_graph.update_channel_from_announcement(msg, &self.utxo_lookup)?;
 		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
 	}
diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs
index c06e5174263..40580a09c8c 100644
--- a/lightning/src/routing/router.rs
+++ b/lightning/src/routing/router.rs
@@ -3943,10 +3943,7 @@ mod tests {
 		ChannelUsage, FixedPenaltyScorer, ProbabilisticScorer,
 		ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters, ScoreLookUp,
 	};
-	use crate::routing::test_utils::{
-		add_channel, add_or_update_node, build_graph, build_line_graph, get_nodes,
-		id_to_feature_flags, update_channel,
-	};
+	use crate::routing::test_utils::*;
 	use crate::routing::utxo::UtxoResult;
 	use crate::types::features::{BlindedHopFeatures, ChannelFeatures, InitFeatures, NodeFeatures};
 	use crate::util::config::UserConfig;
@@ -5368,7 +5365,7 @@ mod tests {
 	fn available_amount_while_routing_test() {
 		// Tests whether we choose the correct available channel amount while routing.
-		let (secp_ctx, network_graph, gossip_sync, chain_monitor, logger) = build_graph();
+		let (secp_ctx, network_graph, gossip_sync, chain_monitor, logger) = build_graph_with_gossip_validation();
 		let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
 		let scorer = ln_test_utils::TestScorer::new();
 		let random_seed_bytes = [42; 32];
@@ -5588,11 +5585,10 @@ mod tests {
 			.push_opcode(opcodes::all::OP_PUSHNUM_2)
 			.push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_p2wsh();
+
 		*chain_monitor.utxo_ret.lock().unwrap() =
 			UtxoResult::Sync(Ok(TxOut { value: Amount::from_sat(15), script_pubkey: good_script.clone() }));
-		gossip_sync.add_utxo_lookup(Some(chain_monitor));
-
-		add_channel(&gossip_sync, &secp_ctx, &privkeys[0], &privkeys[2], ChannelFeatures::from_le_bytes(id_to_feature_flags(3)), 333);
+		add_channel_skipping_utxo_update(&gossip_sync, &secp_ctx, &privkeys[0], &privkeys[2], ChannelFeatures::from_le_bytes(id_to_feature_flags(3)), 333);
 		update_channel(&gossip_sync, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
 			chain_hash: ChainHash::using_genesis_block(Network::Testnet),
 			short_channel_id: 333,
diff --git a/lightning/src/routing/test_utils.rs b/lightning/src/routing/test_utils.rs
index c5c35c9ce77..a433fa30c5b 100644
--- a/lightning/src/routing/test_utils.rs
+++ b/lightning/src/routing/test_utils.rs
@@ -10,7 +10,9 @@
 // licenses.

 use crate::routing::gossip::{NetworkGraph, NodeAlias, P2PGossipSync};
+use crate::routing::utxo::UtxoResult;
 use crate::types::features::{ChannelFeatures, NodeFeatures};
+use crate::ln::chan_utils::make_funding_redeemscript;
 use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, MAX_VALUE_MSAT, NodeAnnouncement, RoutingMessageHandler, SocketAddress, UnsignedChannelAnnouncement, UnsignedChannelUpdate, UnsignedNodeAnnouncement};
 use crate::util::test_utils;
 use crate::util::ser::Writeable;
@@ -22,6 +24,7 @@ use bitcoin::hex::FromHex;
 use bitcoin::network::Network;
 use bitcoin::secp256k1::{PublicKey,SecretKey};
 use bitcoin::secp256k1::{Secp256k1, All};
+use bitcoin::{Amount, TxOut};

 #[allow(unused)]
 use crate::prelude::*;
@@ -58,19 +61,34 @@ pub(crate) fn channel_announcement(
 }

 // Using the same keys for LN and BTC ids
-pub(crate) fn add_channel(
+pub(crate) fn add_channel_skipping_utxo_update(
 	gossip_sync: &P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
-	secp_ctx: &Secp256k1<All>, node_1_privkey: &SecretKey, node_2_privkey: &SecretKey, features: ChannelFeatures, short_channel_id: u64
+	secp_ctx: &Secp256k1<All>, node_1_privkey: &SecretKey, node_2_privkey: &SecretKey, features: ChannelFeatures, short_channel_id: u64,
 ) {
 	let valid_announcement = channel_announcement(node_1_privkey, node_2_privkey, features, short_channel_id, secp_ctx);
-	let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
+
+	let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
 	match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
 		Ok(res) => assert!(res),
-		_ => panic!()
+		Err(e) => panic!("{:?}", e),
 	};
 }

+pub(crate) fn add_channel(
+	gossip_sync: &P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
+	secp_ctx: &Secp256k1<All>, node_1_privkey: &SecretKey, node_2_privkey: &SecretKey, features: ChannelFeatures, short_channel_id: u64,
+) {
+	gossip_sync.utxo_lookup.as_ref().map(|checker| {
+		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
+		let node_2_pubkey = PublicKey::from_secret_key(&secp_ctx, &node_2_privkey);
+		let script_pubkey = make_funding_redeemscript(&node_1_pubkey, &node_2_pubkey).to_p2wsh();
*checker.utxo_ret.lock().unwrap() = + UtxoResult::Sync(Ok(TxOut { value: Amount::from_sat(21_000_000_0000_0000), script_pubkey })); + }); + add_channel_skipping_utxo_update(gossip_sync, secp_ctx, node_1_privkey, node_2_privkey, features, short_channel_id); +} + pub(crate) fn add_or_update_node( gossip_sync: &P2PGossipSync>>, Arc, Arc>, secp_ctx: &Secp256k1, node_privkey: &SecretKey, features: NodeFeatures, timestamp: u32 @@ -197,18 +215,43 @@ pub(super) fn build_line_graph() -> ( (secp_ctx, network_graph, gossip_sync, chain_monitor, logger) } +pub(super) fn build_graph_with_gossip_validation() -> ( + Secp256k1, + sync::Arc>>, + P2PGossipSync>>, sync::Arc, sync::Arc>, + sync::Arc, + sync::Arc, +) { + do_build_graph(true) +} + pub(super) fn build_graph() -> ( Secp256k1, sync::Arc>>, P2PGossipSync>>, sync::Arc, sync::Arc>, sync::Arc, sync::Arc, +) { + do_build_graph(false) +} + +fn do_build_graph(with_validation: bool) -> ( + Secp256k1, + sync::Arc>>, + P2PGossipSync>>, sync::Arc, sync::Arc>, + sync::Arc, + sync::Arc, ) { let secp_ctx = Secp256k1::new(); let logger = Arc::new(test_utils::TestLogger::new()); let chain_monitor = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger))); - let gossip_sync = P2PGossipSync::new(Arc::clone(&network_graph), None, Arc::clone(&logger)); + let checker = if with_validation { + Some(Arc::clone(&chain_monitor)) + } else { + None + }; + let gossip_sync = P2PGossipSync::new(Arc::clone(&network_graph), checker, Arc::clone(&logger)); // Build network from our_id to node6: // // -1(1)2- node0 -1(3)2- From 9c802c25d95f513a20640523ed9d11ea0db97dfc Mon Sep 17 00:00:00 2001 From: psychemist Date: Fri, 26 Dec 2025 18:18:47 +0100 Subject: [PATCH 049/242] Refactor payment_hash to return PaymentHash This commit fixes the payment_hash function of Bolt11Invoice to return a PaymentHash type instead of a sha256 byte stream. Code and test files dependent on this function have also been modified to adhere to the updated changes. 
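For illustration, a sketch of the call-site simplification this enables (the `invoice` binding is hypothetical, but both patterns appear verbatim in the diffs below):

    // Before: callers had to wrap the returned sha256::Hash themselves.
    let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array());

    // After: the invoice hands back a PaymentHash by value.
    let payment_hash: PaymentHash = invoice.payment_hash();
    let payment_id = PaymentId(payment_hash.0);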
--- lightning-invoice/src/lib.rs | 9 +++++---- .../tests/lsps2_integration_tests.rs | 14 +++++++------- lightning/src/ln/channelmanager.rs | 8 ++++---- lightning/src/ln/invoice_utils.rs | 9 ++++----- lightning/src/ln/outbound_payment.rs | 2 +- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index d96d730fac0..a83130ab799 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -54,7 +54,7 @@ use core::time::Duration; use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; #[doc(no_inline)] -pub use lightning_types::payment::PaymentSecret; +pub use lightning_types::payment::{PaymentHash, PaymentSecret}; #[doc(no_inline)] pub use lightning_types::routing::{RouteHint, RouteHintHop, RoutingFees}; use lightning_types::string::UntrustedString; @@ -1460,8 +1460,9 @@ impl Bolt11Invoice { } /// Returns the hash to which we will receive the preimage on completion of the payment - pub fn payment_hash(&self) -> &sha256::Hash { - &self.signed_invoice.payment_hash().expect("checked by constructor").0 + pub fn payment_hash(&self) -> PaymentHash { + let hash = self.signed_invoice.payment_hash().expect("checked by constructor").0; + PaymentHash(hash.to_byte_array()) } /// Return the description or a hash of it for longer ones @@ -2339,7 +2340,7 @@ mod test { sha256::Hash::from_slice(&[3; 32][..]).unwrap() )) ); - assert_eq!(invoice.payment_hash(), &sha256::Hash::from_slice(&[21; 32][..]).unwrap()); + assert_eq!(invoice.payment_hash(), PaymentHash([21; 32])); assert_eq!(invoice.payment_secret(), &PaymentSecret([42; 32])); let mut expected_features = Bolt11InvoiceFeatures::empty(); diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index e4ace27b715..2e469d149b0 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -1211,7 +1211,7 @@ fn client_trusts_lsp_end_to_end_test() { .node .pay_for_bolt11_invoice( &invoice, - PaymentId(invoice.payment_hash().to_byte_array()), + PaymentId(invoice.payment_hash().0), None, Default::default(), Retry::Attempts(3), @@ -1684,7 +1684,7 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { .node .pay_for_bolt11_invoice( &invoice, - PaymentId(invoice.payment_hash().to_byte_array()), + PaymentId(invoice.payment_hash().0), None, Default::default(), Retry::Attempts(3), @@ -1714,7 +1714,7 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { *requested_next_hop_scid, *intercept_id, *expected_outbound_amount_msat, - PaymentHash(invoice.payment_hash().to_byte_array()), + invoice.payment_hash(), ) .unwrap(); }, @@ -1875,7 +1875,7 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { .node .pay_for_bolt11_invoice( &invoice, - PaymentId(invoice.payment_hash().to_byte_array()), + PaymentId(invoice.payment_hash().0), None, Default::default(), Retry::Attempts(3), @@ -1905,7 +1905,7 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { *requested_next_hop_scid, *intercept_id, *expected_outbound_amount_msat, - PaymentHash(invoice.payment_hash().to_byte_array()), + invoice.payment_hash(), ) .unwrap(); }, @@ -1984,7 +1984,7 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { match &client_events[0] { Event::HTLCHandlingFailed { failure_type, .. 
} => match failure_type { lightning::events::HTLCHandlingFailureType::Receive { payment_hash } => { - assert_eq!(*payment_hash, PaymentHash(invoice.payment_hash().to_byte_array())); + assert_eq!(*payment_hash, invoice.payment_hash()); }, _ => panic!("Unexpected failure_type: {:?}", failure_type), }, @@ -2212,7 +2212,7 @@ fn client_trusts_lsp_partial_fee_does_not_trigger_broadcast() { .node .pay_for_bolt11_invoice( &invoice, - PaymentId(invoice.payment_hash().to_byte_array()), + PaymentId(invoice.payment_hash().0), None, Default::default(), Retry::Attempts(3), diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bfaf1e68d6a..9aa86d1b12c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2230,7 +2230,7 @@ where /// match event { /// Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose { /// PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(payment_preimage), .. } => { -/// assert_eq!(payment_hash.0, invoice.payment_hash().as_ref()); +/// assert_eq!(payment_hash, invoice.payment_hash()); /// println!("Claiming payment {}", payment_hash); /// channel_manager.claim_funds(payment_preimage); /// }, @@ -2238,7 +2238,7 @@ where /// println!("Unknown payment hash: {}", payment_hash); /// }, /// PaymentPurpose::SpontaneousPayment(payment_preimage) => { -/// assert_ne!(payment_hash.0, invoice.payment_hash().as_ref()); +/// assert_ne!(payment_hash, invoice.payment_hash()); /// println!("Claiming spontaneous payment {}", payment_hash); /// channel_manager.claim_funds(payment_preimage); /// }, @@ -2246,7 +2246,7 @@ where /// # _ => {}, /// }, /// Event::PaymentClaimed { payment_hash, amount_msat, .. } => { -/// assert_eq!(payment_hash.0, invoice.payment_hash().as_ref()); +/// assert_eq!(payment_hash, invoice.payment_hash()); /// println!("Claimed {} msats", amount_msat); /// }, /// // ... 
@@ -2271,7 +2271,7 @@ where /// # ) { /// # let channel_manager = channel_manager.get_cm(); /// # let payment_id = PaymentId([42; 32]); -/// # let payment_hash = PaymentHash((*invoice.payment_hash()).to_byte_array()); +/// # let payment_hash = invoice.payment_hash(); /// match channel_manager.pay_for_bolt11_invoice( /// invoice, payment_id, None, route_params_config, retry /// ) { diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index 425cc4d7eb6..e72ea4518a4 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -627,7 +627,7 @@ mod test { use crate::util::dyn_signer::{DynKeysInterface, DynPhantomKeysInterface}; use crate::util::test_utils; use bitcoin::hashes::sha256::Hash as Sha256; - use bitcoin::hashes::{sha256, Hash}; + use bitcoin::hashes::Hash; use bitcoin::network::Network; use core::time::Duration; use lightning_invoice::{ @@ -829,7 +829,7 @@ mod test { invoice.description(), Bolt11InvoiceDescriptionRef::Direct(&Description::new("test".to_string()).unwrap()) ); - assert_eq!(invoice.payment_hash(), &sha256::Hash::from_slice(&payment_hash.0[..]).unwrap()); + assert_eq!(invoice.payment_hash(), payment_hash); } #[cfg(not(feature = "std"))] @@ -1257,8 +1257,7 @@ mod test { Duration::from_secs(genesis_timestamp), ) .unwrap(); - let (payment_hash, payment_secret) = - (PaymentHash(invoice.payment_hash().to_byte_array()), *invoice.payment_secret()); + let (payment_hash, payment_secret) = (invoice.payment_hash(), *invoice.payment_secret()); let payment_preimage = if user_generated_pmt_hash { user_payment_preimage } else { @@ -1290,7 +1289,7 @@ mod test { invoice.amount_milli_satoshis().unwrap(), ); - let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); + let payment_hash = invoice.payment_hash(); let id = PaymentId(payment_hash.0); let onion = RecipientOnionFields::secret_only(*invoice.payment_secret()); nodes[0].node.send_payment(payment_hash, onion, id, params, Retry::Attempts(0)).unwrap(); diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 75fe55bfeac..65493829635 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -933,7 +933,7 @@ where IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { - let payment_hash = PaymentHash((*invoice.payment_hash()).to_byte_array()); + let payment_hash = invoice.payment_hash(); let amount = match (invoice.amount_milli_satoshis(), amount_msats) { (Some(amt), None) | (None, Some(amt)) => amt, From c06aa96e5541f2f6745614b0e52bfa2d1fadee3a Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 1 Jan 2026 17:15:36 +0000 Subject: [PATCH 050/242] Add a trivial helper to LSPS5's `WebhookNotification` `LSPS5ServiceEvent::SendWebhookNotification`'s docs say to send the `WebhookNotification` as the HTTP request body "as JSON", which is great, but it leaves the dev to figure out how to do that. It's nice to have a helper to do that, which is trivial, so we provide it here. --- lightning-liquidity/src/lsps5/event.rs | 6 +++--- lightning-liquidity/src/lsps5/msgs.rs | 6 ++++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/lightning-liquidity/src/lsps5/event.rs b/lightning-liquidity/src/lsps5/event.rs index c12273808ef..30e3aea5687 100644 --- a/lightning-liquidity/src/lsps5/event.rs +++ b/lightning-liquidity/src/lsps5/event.rs @@ -30,9 +30,9 @@ pub enum LSPS5ServiceEvent { /// via their registered webhook.
/// /// The LSP should send an HTTP POST to the [`url`], using the - /// JSON-serialized [`notification`] as the body and including the `headers`. - /// If the HTTP request fails, the LSP may implement a retry policy according to its - /// implementation preferences. + /// JSON-serialized [`notification`] (via [`WebhookNotification::to_request_body`]) as the body + /// and including the `headers`. If the HTTP request fails, the LSP may implement a retry + /// policy according to its implementation preferences. /// /// The notification is signed using the LSP's node ID to ensure authenticity /// when received by the client. The client verifies this signature using diff --git a/lightning-liquidity/src/lsps5/msgs.rs b/lightning-liquidity/src/lsps5/msgs.rs index e457c299bfe..363a3255f92 100644 --- a/lightning-liquidity/src/lsps5/msgs.rs +++ b/lightning-liquidity/src/lsps5/msgs.rs @@ -565,6 +565,12 @@ impl WebhookNotification { pub fn onion_message_incoming() -> Self { Self { method: WebhookNotificationMethod::LSPS5OnionMessageIncoming } } + + /// Encodes this notification into JSON which can be sent as the body of an HTTP request to + /// deliver the notification. + pub fn to_request_body(&self) -> String { + serde_json::to_string(self).unwrap() + } } impl Serialize for WebhookNotification { From 920381883e1d877f3990c0fc678722c5a532e7cd Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 9 Nov 2025 22:46:51 +0000 Subject: [PATCH 051/242] Split `OffersContext::OutboundPayment` into `ForRefund`/`ForOffer` Because they end up both being used to validate a `Bolt12Invoice`, we ended up with a single `OffersContext` both for inclusion in a `Refund` and an `InvoiceRequest`. However, this is ambiguous, and while it doesn't seem like an issue, it also seems like a nice property to only use a given `OffersContext` in one place. Further, in the next commit, we use `OffersContext` to figure out what we're building a blinded path for and change behavior based on it, so it's nice to be unambiguous. Thus, we split the single existing context into `OutboundPaymentForRefund` and `OutboundPaymentForOffer`. --- lightning/src/blinded_path/message.rs | 39 ++++++++++++++++++++------- lightning/src/ln/channelmanager.rs | 24 +++-------------- lightning/src/offers/flow.rs | 30 +++++++++++++++------ lightning/src/offers/invoice.rs | 27 +++++++++++++++++++ 4 files changed, 83 insertions(+), 37 deletions(-) diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs index ed55ca5dc9b..8210d2dc007 100644 --- a/lightning/src/blinded_path/message.rs +++ b/lightning/src/blinded_path/message.rs @@ -416,28 +416,45 @@ pub enum OffersContext { /// Useful to timeout async recipients that are no longer supported as clients. path_absolute_expiry: Duration, }, - /// Context used by a [`BlindedMessagePath`] within a [`Refund`] or as a reply path for an - /// [`InvoiceRequest`]. + /// Context used by a [`BlindedMessagePath`] within a [`Refund`]. /// /// This variant is intended to be received when handling a [`Bolt12Invoice`] or an /// [`InvoiceError`]. /// /// [`Refund`]: crate::offers::refund::Refund - /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`InvoiceError`]: crate::offers::invoice_error::InvoiceError - OutboundPayment { - /// Payment ID used when creating a [`Refund`] or [`InvoiceRequest`]. + OutboundPaymentForRefund { + /// Payment ID used when creating a [`Refund`].
/// /// [`Refund`]: crate::offers::refund::Refund payment_id: PaymentId, - /// A nonce used for authenticating that a [`Bolt12Invoice`] is for a valid [`Refund`] or - /// [`InvoiceRequest`] and for deriving their signing keys. + /// A nonce used for authenticating that a [`Bolt12Invoice`] is for a valid [`Refund`] and + /// for deriving its signing keys. /// /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`Refund`]: crate::offers::refund::Refund + nonce: Nonce, + }, + /// Context used by a [`BlindedMessagePath`] as a reply path for an [`InvoiceRequest`]. + /// + /// This variant is intended to be received when handling a [`Bolt12Invoice`] or an + /// [`InvoiceError`]. + /// + /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest + /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice + /// [`InvoiceError`]: crate::offers::invoice_error::InvoiceError + OutboundPaymentForOffer { + /// Payment ID used when creating an [`InvoiceRequest`]. + /// + /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest + payment_id: PaymentId, + + /// A nonce used for authenticating that a [`Bolt12Invoice`] is for a valid + /// [`InvoiceRequest`] and for deriving its signing keys. + /// + /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest nonce: Nonce, }, @@ -619,7 +636,7 @@ impl_writeable_tlv_based_enum!(OffersContext, (0, InvoiceRequest) => { (0, nonce, required), }, - (1, OutboundPayment) => { + (1, OutboundPaymentForRefund) => { (0, payment_id, required), (1, nonce, required), }, @@ -631,6 +648,10 @@ impl_writeable_tlv_based_enum!(OffersContext, (2, invoice_slot, required), (4, path_absolute_expiry, required), }, + (4, OutboundPaymentForOffer) => { + (0, payment_id, required), + (1, nonce, required), + }, ); impl_writeable_tlv_based_enum!(AsyncPaymentsContext, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 644920557d2..79a678f91be 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5593,29 +5593,12 @@ where pub fn send_payment_for_bolt12_invoice( &self, invoice: &Bolt12Invoice, context: Option<&OffersContext>, ) -> Result<(), Bolt12PaymentError> { - match self.verify_bolt12_invoice(invoice, context) { + match self.flow.verify_bolt12_invoice(invoice, context) { Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id), Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice), } } - fn verify_bolt12_invoice( - &self, invoice: &Bolt12Invoice, context: Option<&OffersContext>, - ) -> Result<PaymentId, ()> { - let secp_ctx = &self.secp_ctx; - let expanded_key = &self.inbound_payment_key; - - match context { - None if invoice.is_for_refund_without_paths() => { - invoice.verify_using_metadata(expanded_key, secp_ctx) - }, - Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) => { - invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx) - }, - _ => Err(()), - } - } - fn send_payment_for_verified_bolt12_invoice( &self, invoice: &Bolt12Invoice, payment_id: PaymentId, ) -> Result<(), Bolt12PaymentError> { @@ -15366,7 +15349,7 @@ where }, OffersMessage::StaticInvoice(invoice) => { let payment_id = match context { - Some(OffersContext::OutboundPayment { payment_id, .. }) => payment_id, + Some(OffersContext::OutboundPaymentForOffer { payment_id, ..
}) => payment_id, _ => return None }; let res = self.initiate_async_payment(&invoice, payment_id); @@ -15382,7 +15365,8 @@ where log_trace!(logger, "Received invoice_error: {}", invoice_error); match context { - Some(OffersContext::OutboundPayment { payment_id, .. }) => { + Some(OffersContext::OutboundPaymentForOffer { payment_id, .. }) + | Some(OffersContext::OutboundPaymentForRefund { payment_id, .. }) => { self.abandon_payment_with_reason( payment_id, PaymentFailureReason::InvoiceRequestRejected, ); diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 88f0cc5079c..05e488fe842 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -495,11 +495,12 @@ where Ok(InvreqResponseInstructions::SendInvoice(invoice_request)) } - /// Verifies a [`Bolt12Invoice`] using the provided [`OffersContext`] or the invoice's payer metadata, - /// returning the corresponding [`PaymentId`] if successful. + /// Verifies a [`Bolt12Invoice`] using the provided [`OffersContext`] or the invoice's payer + /// metadata, returning the corresponding [`PaymentId`] if successful. /// - /// - If an [`OffersContext::OutboundPayment`] with a `nonce` is provided, verification is performed - /// using this to form the payer metadata. + /// - If an [`OffersContext::OutboundPaymentForOffer`] or + /// [`OffersContext::OutboundPaymentForRefund`] with a `nonce` is provided, verification is + /// performed using this to form the payer metadata. /// - If no context is provided and the invoice corresponds to a [`Refund`] without blinded paths, /// verification is performed using the [`Bolt12Invoice::payer_metadata`]. /// - If neither condition is met, verification fails. @@ -513,8 +514,19 @@ None if invoice.is_for_refund_without_paths() => { invoice.verify_using_metadata(expanded_key, secp_ctx) }, - Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) => { - invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx) + Some(&OffersContext::OutboundPaymentForOffer { payment_id, nonce, .. }) => { + if invoice.is_for_offer() { + invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx) + } else { + Err(()) + } + }, + Some(&OffersContext::OutboundPaymentForRefund { payment_id, nonce, ..
}) => { + if invoice.is_for_refund() { + invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx) + } else { + Err(()) + } }, _ => Err(()), } @@ -680,7 +692,8 @@ where let secp_ctx = &self.secp_ctx; let nonce = Nonce::from_entropy_source(entropy); - let context = MessageContext::Offers(OffersContext::OutboundPayment { payment_id, nonce }); + let context = + MessageContext::Offers(OffersContext::OutboundPaymentForRefund { payment_id, nonce }); // Create the base builder with common properties let mut builder = RefundBuilder::deriving_signing_pubkey( @@ -1116,7 +1129,8 @@ where &self, invoice_request: InvoiceRequest, payment_id: PaymentId, nonce: Nonce, peers: Vec<MessageForwardNode>, ) -> Result<(), Bolt12SemanticError> { - let context = MessageContext::Offers(OffersContext::OutboundPayment { payment_id, nonce }); + let context = + MessageContext::Offers(OffersContext::OutboundPaymentForOffer { payment_id, nonce }); let reply_paths = self .create_blinded_paths(peers, context) .map_err(|_| Bolt12SemanticError::MissingPaths)?; diff --git a/lightning/src/offers/invoice.rs b/lightning/src/offers/invoice.rs index 6dfd6eac508..8d83225f117 100644 --- a/lightning/src/offers/invoice.rs +++ b/lightning/src/offers/invoice.rs @@ -778,6 +778,19 @@ struct InvoiceFields { } macro_rules! invoice_accessors { ($self: ident, $contents: expr) => { + /// Whether the invoice was created in response to a [`Refund`]. + pub fn is_for_refund(&$self) -> bool { + $contents.is_for_refund() + } + + /// Whether the invoice was created in response to an [`InvoiceRequest`] created from an + /// [`Offer`]. + /// + /// [`Offer`]: crate::offers::offer::Offer + pub fn is_for_offer(&$self) -> bool { + $contents.is_for_offer() + } + /// The chains that may be used when paying a requested invoice. /// /// From [`Offer::chains`]; `None` if the invoice was created in response to a [`Refund`]. @@ -1093,6 +1106,20 @@ impl InvoiceContents { } } + fn is_for_refund(&self) -> bool { + match self { + InvoiceContents::ForRefund { .. } => true, + InvoiceContents::ForOffer { .. } => false, + } + } + + fn is_for_offer(&self) -> bool { + match self { + InvoiceContents::ForRefund { .. } => false, + InvoiceContents::ForOffer { .. } => true, + } + } + fn offer_chains(&self) -> Option<Vec<ChainHash>> { match self { InvoiceContents::ForOffer { invoice_request, .. } => { From 485ae4e088df10a2e790bb20d2f2c6fea99248a7 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 16 Oct 2025 15:34:19 +0000 Subject: [PATCH 052/242] Make `DefaultMessageRouter` use the context to pad/compact paths After much discussion in #3246 we mostly decided to allow downstream developers to override whatever decisions the `DefaultMessageRouter` makes regarding blinded path selection by providing easy overrides for the selected `OnionMessageRouter`. We did not, however, actually select good defaults for `DefaultMessageRouter`. Here we add those defaults, taking advantage of the `MessageContext` we're given to detect why we're building a blinded path and select blinding and compaction parameters based on it. Specifically, if the blinded path is not being built for an offers context, we always use a non-compact blinded path and always pad it to four hops (including the recipient). However, if the blinded path is being built for an `Offers` context which implies it might need to fit in a QR code (or, worse, a payment onion), we reduce our padding and try to build a compact blinded path if possible.
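In pseudocode, the selection boils down to the following match (a simplified sketch of the new logic in `create_blinded_paths_from_iter`, not its exact text):

    let (compact_paths, target_path_len) = match &context {
        // BOLT 12 objects which may land in a QR code: keep paths small.
        MessageContext::Offers(OffersContext::InvoiceRequest { .. })
        | MessageContext::Offers(OffersContext::OutboundPaymentForRefund { .. }) =>
            (true, QR_CODED_DUMMY_HOPS_PATH_LENGTH),
        // Static invoice paths ride in the payment onion: no dummy hops at all.
        MessageContext::Offers(OffersContext::StaticInvoiceRequested { .. }) => (true, 0),
        // Everywhere else, prefer privacy: non-compact paths padded with dummy hops.
        _ => (false, DUMMY_HOPS_PATH_LENGTH),
    };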
We retain the `NodeIdMessageRouter` to disable compact blinded path creation but use the same path-padding heuristic as for `DefaultMessageRouter`. --- lightning/src/ln/offers_tests.rs | 61 ++++++++++------- lightning/src/offers/flow.rs | 4 +- lightning/src/onion_message/messenger.rs | 87 ++++++++++++++++-------- 3 files changed, 98 insertions(+), 54 deletions(-) diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 3a6965c6646..49733fbedd5 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -60,7 +60,7 @@ use crate::offers::invoice_error::InvoiceError; use crate::offers::invoice_request::{InvoiceRequest, InvoiceRequestFields, InvoiceRequestVerifiedFromOffer}; use crate::offers::nonce::Nonce; use crate::offers::parse::Bolt12SemanticError; -use crate::onion_message::messenger::{DefaultMessageRouter, Destination, MessageSendInstructions, NodeIdMessageRouter, NullMessageRouter, PeeledOnion, PADDED_PATH_LENGTH}; +use crate::onion_message::messenger::{DefaultMessageRouter, Destination, MessageSendInstructions, NodeIdMessageRouter, NullMessageRouter, PeeledOnion, DUMMY_HOPS_PATH_LENGTH, QR_CODED_DUMMY_HOPS_PATH_LENGTH}; use crate::onion_message::offers::OffersMessage; use crate::routing::gossip::{NodeAlias, NodeId}; use crate::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; @@ -163,6 +163,20 @@ fn check_compact_path_introduction_node<'a, 'b, 'c>( && matches!(path.introduction_node(), IntroductionNode::DirectedShortChannelId(..)) } +fn check_dummy_hopped_path_length<'a, 'b, 'c>( + path: &BlindedMessagePath, + lookup_node: &Node<'a, 'b, 'c>, + expected_introduction_node: PublicKey, + expected_path_length: usize, +) -> bool { + let introduction_node_id = resolve_introduction_node(lookup_node, path); + let first_hop_len = path.blinded_hops().first().unwrap().encrypted_payload.len(); + let hops = path.blinded_hops(); + introduction_node_id == expected_introduction_node + && hops.len() == expected_path_length + && hops.iter().take(hops.len() - 1).all(|hop| hop.encrypted_payload.len() == first_hop_len) +} + fn route_bolt12_payment<'a, 'b, 'c>( node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], invoice: &Bolt12Invoice ) { @@ -455,7 +469,7 @@ fn check_dummy_hop_pattern_in_offer() { let bob_id = bob.node.get_our_node_id(); // Case 1: DefaultMessageRouter → uses compact blinded paths (via SCIDs) - // Expected: No dummy hops; each path contains only the recipient. + // Expected: Padded to QR_CODED_DUMMY_HOPS_PATH_LENGTH for QR code size optimization let default_router = DefaultMessageRouter::new(alice.network_graph, alice.keys_manager); let compact_offer = alice.node @@ -467,8 +481,8 @@ fn check_dummy_hop_pattern_in_offer() { for path in compact_offer.paths() { assert_eq!( - path.blinded_hops().len(), 1, - "Compact paths must include only the recipient" + path.blinded_hops().len(), QR_CODED_DUMMY_HOPS_PATH_LENGTH, + "Compact offer paths are padded to QR_CODED_DUMMY_HOPS_PATH_LENGTH" ); } @@ -480,10 +494,10 @@ fn check_dummy_hop_pattern_in_offer() { assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); // Case 2: NodeIdMessageRouter → uses node ID-based blinded paths - // Expected: 0 to MAX_DUMMY_HOPS_COUNT dummy hops, followed by recipient. 
+ // Expected: Also padded to QR_CODED_DUMMY_HOPS_PATH_LENGTH for QR code size optimization let node_id_router = NodeIdMessageRouter::new(alice.network_graph, alice.keys_manager); let padded_offer = alice.node @@ -492,7 +506,7 @@ fn check_dummy_hop_pattern_in_offer() { .build().unwrap(); assert!(!padded_offer.paths().is_empty()); - assert!(padded_offer.paths().iter().all(|path| path.blinded_hops().len() == PADDED_PATH_LENGTH)); + assert!(padded_offer.paths().iter().all(|path| path.blinded_hops().len() == QR_CODED_DUMMY_HOPS_PATH_LENGTH)); let payment_id = PaymentId([2; 32]); bob.node.pay_for_offer(&padded_offer, None, payment_id, Default::default()).unwrap(); @@ -502,7 +516,7 @@ fn check_dummy_hop_pattern_in_offer() { assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); } /// Checks that blinded paths are compact for short-lived offers. @@ -687,7 +701,7 @@ fn creates_and_pays_for_offer_using_two_hop_blinded_path() { }); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); - assert!(check_compact_path_introduction_node(&reply_path, bob, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, bob, charlie_id, DUMMY_HOPS_PATH_LENGTH)); let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap(); charlie.onion_messenger.handle_onion_message(alice_id, &onion_message); @@ -706,8 +720,8 @@ fn creates_and_pays_for_offer_using_two_hop_blinded_path() { // to Alice when she's handling the message. Therefore, either Bob or Charlie could // serve as the introduction node for the reply path back to Alice. 
assert!( - check_compact_path_introduction_node(&reply_path, david, bob_id) || - check_compact_path_introduction_node(&reply_path, david, charlie_id) + check_dummy_hopped_path_length(&reply_path, david, bob_id, DUMMY_HOPS_PATH_LENGTH) || + check_dummy_hopped_path_length(&reply_path, david, charlie_id, DUMMY_HOPS_PATH_LENGTH) ); route_bolt12_payment(david, &[charlie, bob, alice], &invoice); @@ -790,7 +804,7 @@ fn creates_and_pays_for_refund_using_two_hop_blinded_path() { for path in invoice.payment_paths() { assert_eq!(path.introduction_node(), &IntroductionNode::NodeId(bob_id)); } - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); route_bolt12_payment(david, &[charlie, bob, alice], &invoice); expect_recent_payment!(david, RecentPaymentDetails::Pending, payment_id); @@ -845,7 +859,7 @@ fn creates_and_pays_for_offer_using_one_hop_blinded_path() { }); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); bob.onion_messenger.handle_onion_message(alice_id, &onion_message); @@ -857,7 +871,7 @@ fn creates_and_pays_for_offer_using_one_hop_blinded_path() { for path in invoice.payment_paths() { assert_eq!(path.introduction_node(), &IntroductionNode::NodeId(alice_id)); } - assert!(check_compact_path_introduction_node(&reply_path, bob, alice_id)); + assert!(check_dummy_hopped_path_length(&reply_path, bob, alice_id, DUMMY_HOPS_PATH_LENGTH)); route_bolt12_payment(bob, &[alice], &invoice); expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id); @@ -913,7 +927,7 @@ fn creates_and_pays_for_refund_using_one_hop_blinded_path() { for path in invoice.payment_paths() { assert_eq!(path.introduction_node(), &IntroductionNode::NodeId(alice_id)); } - assert!(check_compact_path_introduction_node(&reply_path, bob, alice_id)); + assert!(check_dummy_hopped_path_length(&reply_path, bob, alice_id, DUMMY_HOPS_PATH_LENGTH)); route_bolt12_payment(bob, &[alice], &invoice); expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id); @@ -1059,6 +1073,7 @@ fn send_invoice_requests_with_distinct_reply_path() { let bob_id = bob.node.get_our_node_id(); let charlie_id = charlie.node.get_our_node_id(); let david_id = david.node.get_our_node_id(); + let frank_id = nodes[6].node.get_our_node_id(); disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5], &nodes[6]]); disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]); @@ -1089,7 +1104,7 @@ fn send_invoice_requests_with_distinct_reply_path() { alice.onion_messenger.handle_onion_message(bob_id, &onion_message); let (_, reply_path) = extract_invoice_request(alice, &onion_message); - assert!(check_compact_path_introduction_node(&reply_path, alice, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, charlie_id, DUMMY_HOPS_PATH_LENGTH)); // Send, extract and verify the second Invoice Request message let onion_message = david.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); @@ -1099,7 +1114,7 @@ fn send_invoice_requests_with_distinct_reply_path() { alice.onion_messenger.handle_onion_message(bob_id, &onion_message); let (_, reply_path) = extract_invoice_request(alice, 
&onion_message); - assert!(check_compact_path_introduction_node(&reply_path, alice, nodes[6].node.get_our_node_id())); + assert!(check_dummy_hopped_path_length(&reply_path, alice, frank_id, DUMMY_HOPS_PATH_LENGTH)); } /// This test checks that when multiple potential introduction nodes are available for the payee, @@ -1170,7 +1185,7 @@ fn send_invoice_for_refund_with_distinct_reply_path() { let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap(); let (_, reply_path) = extract_invoice(alice, &onion_message); - assert!(check_compact_path_introduction_node(&reply_path, alice, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, charlie_id, DUMMY_HOPS_PATH_LENGTH)); // Send, extract and verify the second Invoice Request message let onion_message = david.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); @@ -1179,7 +1194,7 @@ fn send_invoice_for_refund_with_distinct_reply_path() { let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap(); let (_, reply_path) = extract_invoice(alice, &onion_message); - assert!(check_compact_path_introduction_node(&reply_path, alice, nodes[6].node.get_our_node_id())); + assert!(check_dummy_hopped_path_length(&reply_path, alice, nodes[6].node.get_our_node_id(), DUMMY_HOPS_PATH_LENGTH)); } /// Verifies that the invoice request message can be retried if it fails to reach the @@ -1233,7 +1248,7 @@ fn creates_and_pays_for_offer_with_retry() { }); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); bob.onion_messenger.handle_onion_message(alice_id, &onion_message); @@ -1534,7 +1549,7 @@ fn fails_authentication_when_handling_invoice_request() { let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); - assert!(check_compact_path_introduction_node(&reply_path, david, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, david, charlie_id, DUMMY_HOPS_PATH_LENGTH)); assert_eq!(alice.onion_messenger.next_onion_message_for_peer(charlie_id), None); @@ -1563,7 +1578,7 @@ fn fails_authentication_when_handling_invoice_request() { let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); - assert!(check_compact_path_introduction_node(&reply_path, david, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, david, charlie_id, DUMMY_HOPS_PATH_LENGTH)); assert_eq!(alice.onion_messenger.next_onion_message_for_peer(charlie_id), None); } @@ -1663,7 +1678,7 @@ fn fails_authentication_when_handling_invoice_for_offer() { let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); - assert!(check_compact_path_introduction_node(&reply_path, david, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, david, charlie_id, DUMMY_HOPS_PATH_LENGTH)); let onion_message = 
alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap(); charlie.onion_messenger.handle_onion_message(alice_id, &onion_message); diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 05e488fe842..f9bd109b190 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -52,7 +52,7 @@ use crate::onion_message::async_payments::{ StaticInvoicePersisted, }; use crate::onion_message::messenger::{ - Destination, MessageRouter, MessageSendInstructions, Responder, PADDED_PATH_LENGTH, + Destination, MessageRouter, MessageSendInstructions, Responder, DUMMY_HOPS_PATH_LENGTH, }; use crate::onion_message::offers::OffersMessage; use crate::onion_message::packet::OnionMessageContents; @@ -1312,7 +1312,7 @@ where prev_outbound_scid_alias, htlc_id, }); - let num_dummy_hops = PADDED_PATH_LENGTH.saturating_sub(1); + let num_dummy_hops = DUMMY_HOPS_PATH_LENGTH.saturating_sub(1); BlindedMessagePath::new_with_dummy_hops( &[], self.get_our_node_id(), diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index 9a2c06bb72f..7de55cd8185 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -524,9 +524,11 @@ pub trait MessageRouter { /// A [`MessageRouter`] that can only route to a directly connected [`Destination`]. /// -/// [`DefaultMessageRouter`] constructs compact [`BlindedMessagePath`]s on a best-effort basis. -/// That is, if appropriate SCID information is available for the intermediate peers, it will -/// default to creating compact paths. +/// [`DefaultMessageRouter`] tries to construct compact or private [`BlindedMessagePath`]s based on +/// the [`MessageContext`] given to [`MessageRouter::create_blinded_paths`]. That is, if the +/// provided context implies the path may be used in a BOLT 12 object which might appear in a QR +/// code, it reduces the amount of padding and dummy hops and prefers building compact paths when +/// short channel IDs (SCIDs) are available for intermediate peers. /// /// # Compact Blinded Paths /// @@ -545,7 +547,8 @@ pub trait MessageRouter { /// Creating [`BlindedMessagePath`]s may affect privacy since, if a suitable path cannot be found, /// it will create a one-hop path using the recipient as the introduction node if it is an announced /// node. Otherwise, there is no way to find a path to the introduction node in order to send a -/// message, and thus an `Err` is returned. +/// message, and thus an `Err` is returned. The impact of this may be somewhat muted when +/// additional dummy hops are added to the blinded path, but this protection is not complete. pub struct DefaultMessageRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, ES: Deref> where L::Target: Logger, { @@ -555,13 +558,16 @@ where entropy_source: ES, } -// Target total length (in hops) for non-compact blinded paths. -// We pad with dummy hops until the path reaches this length, -// obscuring the recipient's true position. // -// Compact paths are optimized for minimal size, so we avoid -// adding dummy hops to them. -pub(crate) const PADDED_PATH_LENGTH: usize = 4; +// Target total length (in hops) for blinded paths used outside of QR codes. // +// We add dummy hops until the path reaches this length (including the recipient). +pub(crate) const DUMMY_HOPS_PATH_LENGTH: usize = 4; + +// Target total length (in hops) for blinded paths included in objects which may appear in a QR +// code. +// +// We add dummy hops until the path reaches this length (including the recipient). 
+pub(crate) const QR_CODED_DUMMY_HOPS_PATH_LENGTH: usize = 2; impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, ES: Deref> DefaultMessageRouter<G, L, ES> where @@ -574,12 +580,12 @@ where } pub(crate) fn create_blinded_paths_from_iter< - I: ExactSizeIterator<Item = MessageForwardNode>, + I: ExactSizeIterator<Item = MessageForwardNode> + Clone, T: secp256k1::Signing + secp256k1::Verification, >( network_graph: &G, recipient: PublicKey, local_node_receive_key: ReceiveAuthKey, context: MessageContext, peers: I, entropy_source: &ES, secp_ctx: &Secp256k1<T>, - compact_paths: bool, + never_compact_path: bool, ) -> Result<Vec<BlindedMessagePath>, ()> { // Limit the number of blinded paths that are computed. const MAX_PATHS: usize = 3; @@ -592,6 +598,33 @@ where let is_recipient_announced = network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)); + let (mut compact_paths, dummy_hop_path_len) = match &context { + MessageContext::Offers(OffersContext::InvoiceRequest { .. }) + | MessageContext::Offers(OffersContext::OutboundPaymentForRefund { .. }) => { + // When embedding blinded paths within BOLT 12 objects which are generally embedded + // in QR codes, we sadly need to be conservative about size, especially if the QR + // code ultimately also includes an on-chain address. + (true, QR_CODED_DUMMY_HOPS_PATH_LENGTH) + }, + MessageContext::Offers(OffersContext::StaticInvoiceRequested { .. }) => { + // Async Payments aggressively embeds the entire `InvoiceRequest` in the payment + // onion. In a future version it should likely move to embedding only the + // `InvoiceRequest`-specific fields instead, but until then we have to be + // incredibly strict in the size of the blinded path we include in a static payment + // `Offer`. + (true, 0) + }, + _ => { + // If there's no need to be small, add additional dummy hops and never use + // SCID-based next-hops as they carry additional expiry risk. + (false, DUMMY_HOPS_PATH_LENGTH) + }, + }; + + if never_compact_path { + compact_paths = false; + } + let has_one_peer = peers.len() == 1; let mut peer_info = peers .map(|peer| MessageForwardNode { @@ -619,12 +652,8 @@ where }); let build_path = |intermediate_hops: &[MessageForwardNode]| { - let dummy_hops_count = if compact_paths { - 0 - } else { - // Add one for the final recipient TLV - PADDED_PATH_LENGTH.saturating_sub(intermediate_hops.len() + 1) - }; + // Calculate the dummy hops given the total hop count target (including the recipient). + let dummy_hops_count = dummy_hop_path_len.saturating_sub(intermediate_hops.len() + 1); BlindedMessagePath::new_with_dummy_hops( intermediate_hops, @@ -651,12 +680,6 @@ where } } - // Sanity check: Ones the paths are created for the non-compact case, ensure - // each of them are of the length `PADDED_PATH_LENGTH`. - if !compact_paths { - debug_assert!(paths.iter().all(|path| path.blinded_hops().len() == PADDED_PATH_LENGTH)); - } - if compact_paths { for path in &mut paths { path.use_compact_introduction_node(&network_graph); } } @@ -740,13 +763,15 @@ where peers.into_iter(), &self.entropy_source, secp_ctx, - true, + false, ) } } /// This message router is similar to [`DefaultMessageRouter`], but it always creates -/// full-length blinded paths, using the peer's [`NodeId`]. +/// non-compact blinded paths, using the peer's [`NodeId`]. It uses the same heuristics as +/// [`DefaultMessageRouter`] for deciding when to add additional dummy hops to the generated blinded +/// paths. /// /// This message router can only route to a directly connected [`Destination`]. 
/// /// # Privacy /// Creating [`BlindedMessagePath`]s may affect privacy since, if a suitable path cannot be found, /// it will create a one-hop path using the recipient as the introduction node if it is an announced /// node. Otherwise, there is no way to find a path to the introduction node in order to send a -/// message, and thus an `Err` is returned. +/// message, and thus an `Err` is returned. The impact of this may be somewhat muted when +/// additional dummy hops are added to the blinded path, but this protection is not complete. pub struct NodeIdMessageRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, ES: Deref> where L::Target: Logger, @@ -790,8 +816,11 @@ where fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>( &self, recipient: PublicKey, local_node_receive_key: ReceiveAuthKey, - context: MessageContext, peers: Vec<MessageForwardNode>, secp_ctx: &Secp256k1<T>, + context: MessageContext, mut peers: Vec<MessageForwardNode>, secp_ctx: &Secp256k1<T>, ) -> Result<Vec<BlindedMessagePath>, ()> { + for peer in peers.iter_mut() { + peer.short_channel_id = None; + } DefaultMessageRouter::create_blinded_paths_from_iter( &self.network_graph, recipient, @@ -800,7 +829,7 @@ where peers.into_iter(), &self.entropy_source, secp_ctx, - false, + true, ) } } From c589389f04a55a9de591f007a5bd86e4f5bdd6dd Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 5 Jan 2026 17:22:02 +0000 Subject: [PATCH 053/242] Always pad `BlindedMessagePath` hop data to a consistent length If we're building a blinded message path with extra dummy hops, we have to ensure we at least hide the length of the data in pre-final hops as otherwise the dummy hops are trivially obvious. Here we do so, taking an extra `bool` parameter to `BlindedMessagePath` constructors to decide whether to pad every hop to the existing `MESSAGE_PADDING_ROUND_OFF` or whether to only ensure that each non-final hop has an identical hop data length. In cases where the `DefaultMessageRouter` opts to use compact paths, it now also selects compact padding, whether short channel IDs are available or not. --- lightning-dns-resolver/src/lib.rs | 2 + lightning/src/blinded_path/message.rs | 92 +++++-- lightning/src/blinded_path/utils.rs | 9 +- lightning/src/offers/flow.rs | 1 + .../src/onion_message/functional_tests.rs | 233 +++++++++--------- lightning/src/onion_message/messenger.rs | 17 +- 6 files changed, 208 insertions(+), 146 deletions(-) diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index f5b1d53fc8a..765557db8df 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -236,6 +236,7 @@ mod test { recipient, local_node_receive_key, context, + false, &keys, secp_ctx, )]) @@ -345,6 +346,7 @@ mod test { payer_id, receive_key, query_context, + false, &*payer_keys, &secp_ctx, ); diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs index 8210d2dc007..84a42ff1be2 100644 --- a/lightning/src/blinded_path/message.rs +++ b/lightning/src/blinded_path/message.rs @@ -54,21 +54,38 @@ impl Readable for BlindedMessagePath { impl BlindedMessagePath { /// Create a one-hop blinded path for a message. + /// + /// `compact_padding` selects between space-inefficient padding which better hides contents and + /// a space-constrained padding which does very little to hide the contents, especially for the + /// last hop. It should only be set when the blinded path needs to be as compact as possible. 
pub fn one_hop<ES: Deref, T: secp256k1::Signing + secp256k1::Verification>( recipient_node_id: PublicKey, local_node_receive_key: ReceiveAuthKey, - context: MessageContext, entropy_source: ES, secp_ctx: &Secp256k1<T>, + context: MessageContext, compact_padding: bool, entropy_source: ES, + secp_ctx: &Secp256k1<T>, ) -> Self where ES::Target: EntropySource, { - Self::new(&[], recipient_node_id, local_node_receive_key, context, entropy_source, secp_ctx) + Self::new( + &[], + recipient_node_id, + local_node_receive_key, + context, + compact_padding, + entropy_source, + secp_ctx, + ) } /// Create a path for an onion message, to be forwarded along `node_pks`. + /// + /// `compact_padding` selects between space-inefficient padding which better hides contents and + /// a space-constrained padding which does very little to hide the contents, especially for the + /// last hop. It should only be set when the blinded path needs to be as compact as possible. pub fn new<ES: Deref, T: secp256k1::Signing + secp256k1::Verification>( intermediate_nodes: &[MessageForwardNode], recipient_node_id: PublicKey, - local_node_receive_key: ReceiveAuthKey, context: MessageContext, entropy_source: ES, - secp_ctx: &Secp256k1<T>, + local_node_receive_key: ReceiveAuthKey, context: MessageContext, compact_padding: bool, + entropy_source: ES, secp_ctx: &Secp256k1<T>, ) -> Self where ES::Target: EntropySource, { @@ -79,6 +96,7 @@ impl BlindedMessagePath { 0, local_node_receive_key, context, + compact_padding, entropy_source, secp_ctx, ) } /// Same as [`BlindedMessagePath::new`], but allows specifying a number of dummy hops. /// - /// Note: - /// At most [`MAX_DUMMY_HOPS_COUNT`] dummy hops can be added to the blinded path. + /// `compact_padding` selects between space-inefficient padding which better hides contents and + /// a space-constrained padding which does very little to hide the contents, especially for the + /// last hop. It should only be set when the blinded path needs to be as compact as possible. + /// + /// Note: At most [`MAX_DUMMY_HOPS_COUNT`] dummy hops can be added to the blinded path. 
pub fn new_with_dummy_hops<ES: Deref, T: secp256k1::Signing + secp256k1::Verification>( intermediate_nodes: &[MessageForwardNode], recipient_node_id: PublicKey, dummy_hop_count: usize, local_node_receive_key: ReceiveAuthKey, context: MessageContext, - entropy_source: ES, secp_ctx: &Secp256k1<T>, + compact_padding: bool, entropy_source: ES, secp_ctx: &Secp256k1<T>, ) -> Self where ES::Target: EntropySource, { @@ -114,6 +135,7 @@ impl BlindedMessagePath { context, &blinding_secret, local_node_receive_key, + compact_padding, ), }) } @@ -714,7 +736,7 @@ pub const MAX_DUMMY_HOPS_COUNT: usize = 10; pub(super) fn blinded_hops<T: secp256k1::Signing + secp256k1::Verification>( secp_ctx: &Secp256k1<T>, intermediate_nodes: &[MessageForwardNode], recipient_node_id: PublicKey, dummy_hop_count: usize, context: MessageContext, - session_priv: &SecretKey, local_node_receive_key: ReceiveAuthKey, + session_priv: &SecretKey, local_node_receive_key: ReceiveAuthKey, compact_padding: bool, ) -> Vec<BlindedHop> { let dummy_count = cmp::min(dummy_hop_count, MAX_DUMMY_HOPS_COUNT); let pks = intermediate_nodes .iter() .map(|node| (node.node_id, None)) .chain( core::iter::repeat((recipient_node_id, Some(local_node_receive_key))).take(dummy_count), ) .chain(core::iter::once((recipient_node_id, Some(local_node_receive_key)))); - let is_compact = intermediate_nodes.iter().any(|node| node.short_channel_id.is_some()); - let tlvs = pks + let intermediate_tlvs = pks .clone() .skip(1) // The first node's TLVs contains the next node's pubkey .zip(intermediate_nodes.iter().map(|node| node.short_channel_id)) .map(|((pubkey, _), scid)| match scid { Some(scid) => NextMessageHop::ShortChannelId(scid), None => NextMessageHop::NodeId(pubkey), }) .map(|next_hop| { ControlTlvs::Forward(ForwardTlvs { next_hop, next_blinding_override: None }) }) - .chain((0..dummy_count).map(|_| ControlTlvs::Dummy)) - .chain(core::iter::once(ControlTlvs::Receive(ReceiveTlvs { context: Some(context) }))); - - if is_compact { - let path = pks.zip(tlvs); - utils::construct_blinded_hops(secp_ctx, path, session_priv) + .chain((0..dummy_count).map(|_| ControlTlvs::Dummy)); + + let max_intermediate_len = + intermediate_tlvs.clone().map(|tlvs| tlvs.serialized_length()).max().unwrap_or(0); + let have_intermediate_one_byte_smaller = + intermediate_tlvs.clone().any(|tlvs| tlvs.serialized_length() == max_intermediate_len - 1); + + let round_off = if compact_padding { + // We can only pad by a minimum of two bytes (we can only go from no-TLV to a type + length + // byte). Thus, if there are any intermediate hops that need to be padded by exactly one + // byte, we have to instead pad everything by two. + if have_intermediate_one_byte_smaller { + max_intermediate_len + 2 + } else { + max_intermediate_len + } } else { - let path = - pks.zip(tlvs.map(|tlv| BlindedPathWithPadding { - tlvs: tlv, - round_off: MESSAGE_PADDING_ROUND_OFF, - })); - utils::construct_blinded_hops(secp_ctx, path, session_priv) - } + MESSAGE_PADDING_ROUND_OFF + }; + + let tlvs = intermediate_tlvs + .map(|tlvs| { + let res = BlindedPathWithPadding { tlvs, round_off }; + if compact_padding { + debug_assert_eq!(res.serialized_length(), max_intermediate_len); + } else { + // We don't currently ever push extra fields to intermediate hops, so they should + // never go over `MESSAGE_PADDING_ROUND_OFF`. 
+ debug_assert_eq!(res.serialized_length(), MESSAGE_PADDING_ROUND_OFF); + } + res + }) + .chain(core::iter::once(BlindedPathWithPadding { + tlvs: ControlTlvs::Receive(ReceiveTlvs { context: Some(context) }), + round_off: if compact_padding { 0 } else { MESSAGE_PADDING_ROUND_OFF }, + })); + + let path = pks.zip(tlvs); + utils::construct_blinded_hops(secp_ctx, path, session_priv) } diff --git a/lightning/src/blinded_path/utils.rs b/lightning/src/blinded_path/utils.rs index 8894f37ad33..339b4337eb3 100644 --- a/lightning/src/blinded_path/utils.rs +++ b/lightning/src/blinded_path/utils.rs @@ -256,9 +256,12 @@ impl Writeable for BlindedPathWithPadding { let tlv_length = self.tlvs.serialized_length(); let total_length = tlv_length + TLV_OVERHEAD; - let padding_length = total_length.div_ceil(self.round_off) * self.round_off - total_length; - - let padding = Some(BlindedPathPadding::new(padding_length)); + let padding = if self.round_off == 0 || tlv_length % self.round_off == 0 { + None + } else { + let length = total_length.div_ceil(self.round_off) * self.round_off - total_length; + Some(BlindedPathPadding::new(length)) + }; encode_tlv_stream!(writer, { (1, padding, option), diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index f9bd109b190..00ef0aca277 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -1319,6 +1319,7 @@ where num_dummy_hops, self.receive_auth_key, context, + false, &*entropy, &self.secp_ctx, ) diff --git a/lightning/src/onion_message/functional_tests.rs b/lightning/src/onion_message/functional_tests.rs index 605a81a4f95..75e2aaf3c5f 100644 --- a/lightning/src/onion_message/functional_tests.rs +++ b/lightning/src/onion_message/functional_tests.rs @@ -436,8 +436,9 @@ fn one_blinded_hop() { let context = MessageContext::Custom(Vec::new()); let entropy = &*nodes[1].entropy_source; let receive_key = nodes[1].messenger.node_signer.get_receive_auth_key(); + let node_id = nodes[1].node_id; let blinded_path = - BlindedMessagePath::new(&[], nodes[1].node_id, receive_key, context, entropy, &secp_ctx); + BlindedMessagePath::new(&[], node_id, receive_key, context, false, entropy, &secp_ctx); let destination = Destination::BlindedPath(blinded_path); let instructions = MessageSendInstructions::WithoutReplyPath { destination }; nodes[0].messenger.send_onion_message(test_msg, instructions).unwrap(); @@ -450,18 +451,15 @@ fn blinded_path_with_dummy_hops() { let nodes = create_nodes(2); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[1].entropy_source; - let receive_key = nodes[1].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new_with_dummy_hops( &[], nodes[1].node_id, TEST_DUMMY_HOP_COUNT, - receive_key, - context, - entropy, - &secp_ctx, + nodes[1].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[1].entropy_source, + &Secp256k1::new(), ); // Ensure that dummy hops are added to the blinded path. 
assert_eq!(blinded_path.blinded_hops().len(), 6); @@ -477,19 +475,16 @@ fn two_unblinded_two_blinded() { let nodes = create_nodes(5); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [MessageForwardNode { node_id: nodes[3].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[4].entropy_source; - let receive_key = nodes[4].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[4].node_id, - receive_key, - context, - entropy, - &secp_ctx, + nodes[4].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[4].entropy_source, + &Secp256k1::new(), ); let path = OnionMessagePath { intermediate_nodes: vec![nodes[1].node_id, nodes[2].node_id], @@ -507,21 +502,18 @@ fn three_blinded_hops() { let nodes = create_nodes(4); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [ MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[3].entropy_source; - let receive_key = nodes[3].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[3].node_id, - receive_key, - context, - entropy, - &secp_ctx, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[3].entropy_source, + &Secp256k1::new(), ); let destination = Destination::BlindedPath(blinded_path); let instructions = MessageSendInstructions::WithoutReplyPath { destination }; @@ -548,8 +540,9 @@ fn async_response_over_one_blinded_hop() { let context = MessageContext::Custom(Vec::new()); let entropy = &*nodes[1].entropy_source; let receive_key = nodes[1].messenger.node_signer.get_receive_auth_key(); + let node_id = nodes[1].node_id; let reply_path = - BlindedMessagePath::new(&[], nodes[1].node_id, receive_key, context, entropy, &secp_ctx); + BlindedMessagePath::new(&[], node_id, receive_key, context, false, entropy, &secp_ctx); // 4. Create a responder using the reply path for Alice. let responder = Some(Responder::new(reply_path)); @@ -590,7 +583,7 @@ fn async_response_with_reply_path_succeeds() { let entropy = &*bob.entropy_source; let receive_key = bob.messenger.node_signer.get_receive_auth_key(); let reply_path = - BlindedMessagePath::new(&[], bob.node_id, receive_key, context, entropy, &secp_ctx); + BlindedMessagePath::new(&[], bob.node_id, receive_key, context, false, entropy, &secp_ctx); // Alice asynchronously responds to Bob, expecting a response back from him. let responder = Responder::new(reply_path); @@ -632,7 +625,7 @@ fn async_response_with_reply_path_fails() { let entropy = &*bob.entropy_source; let receive_key = bob.messenger.node_signer.get_receive_auth_key(); let reply_path = - BlindedMessagePath::new(&[], bob.node_id, receive_key, context, entropy, &secp_ctx); + BlindedMessagePath::new(&[], bob.node_id, receive_key, context, false, entropy, &secp_ctx); // Alice tries to asynchronously respond to Bob, but fails because the nodes are unannounced and // disconnected. Thus, a reply path could not be created for the response. 
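For contrast with the tests above, which thread the new `compact_padding` argument through as `false`, a caller that wants a size-optimized path would pass `true` (a hedged sketch; the `recipient`, `receive_key`, `entropy`, and `secp_ctx` bindings are assumed to be in scope as in these tests):

    // Compact padding only equalizes intermediate-hop payload lengths rather
    // than rounding every hop up to MESSAGE_PADDING_ROUND_OFF.
    let compact_path = BlindedMessagePath::new(
        &[],                                // no intermediate hops
        recipient,
        receive_key,
        MessageContext::Custom(Vec::new()),
        true,                               // compact_padding
        entropy,
        &secp_ctx,
    );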
@@ -668,28 +661,26 @@ fn too_big_packet_error() { #[test] fn test_blinded_path_padding_for_full_length_path() { - // Check that for a full blinded path, all encrypted payload are padded to rounded-off length. + // Check that for a full blinded path without compact padding, all encrypted payload are padded + // to rounded-off length. let nodes = create_nodes(4); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [ MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, ]; - // Update the context to create a larger final receive TLVs, ensuring that - // the hop sizes vary before padding. - let context = MessageContext::Custom(vec![0u8; 42]); - let entropy = &*nodes[3].entropy_source; - let receive_key = nodes[3].messenger.node_signer.get_receive_auth_key(); + // Build with a larger context to create a larger final receive TLVs, ensuring that the hop + // sizes vary before padding. let blinded_path = BlindedMessagePath::new_with_dummy_hops( &intermediate_nodes, nodes[3].node_id, TEST_DUMMY_HOP_COUNT, - receive_key, - context, - entropy, - &secp_ctx, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(vec![0u8; 42]), + false, + &*nodes[3].entropy_source, + &Secp256k1::new(), ); assert!(is_padded(&blinded_path.blinded_hops(), MESSAGE_PADDING_ROUND_OFF)); @@ -703,32 +694,72 @@ fn test_blinded_path_padding_for_full_length_path() { } #[test] -fn test_blinded_path_no_padding_for_compact_path() { - // Check that for a compact blinded path, no padding is applied. +fn test_blinded_path_compact_padding() { + // Check that for a blinded path with non-SCID intermediate hops with compact padding, no extra + // padding is applied. let nodes = create_nodes(4); - let secp_ctx = Secp256k1::new(); - // Include some short_channel_id, so that MessageRouter uses this to create compact blinded paths. + let intermediate_nodes = [ + MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, + MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, + ]; + // Build with a larger context to create a larger final receive TLVs, ensuring that the hop + // sizes vary before padding. + let blinded_path = BlindedMessagePath::new_with_dummy_hops( + &intermediate_nodes, + nodes[3].node_id, + TEST_DUMMY_HOP_COUNT, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(vec![0u8; 42]), + true, + &*nodes[3].entropy_source, + &Secp256k1::new(), + ); + + let hops = blinded_path.blinded_hops(); + assert!(!is_padded(&hops, MESSAGE_PADDING_ROUND_OFF)); + assert_eq!(hops.len(), TEST_DUMMY_HOP_COUNT + 3); + for hop in hops.iter().take(TEST_DUMMY_HOP_COUNT + 2) { + assert_eq!(hops[0].encrypted_payload.len(), hop.encrypted_payload.len()); + } + // Check the actual encrypted payload lengths, which may change in the future but serves to + // ensure that this and test_compact_blinded_path_compact_padding, below, differ. + assert_eq!(hops[0].encrypted_payload.len(), 51); +} + +#[test] +fn test_compact_blinded_path_compact_padding() { + // Check that for a blinded path with SCID intermediate hops with compact padding, no extra + // padding is applied. 
+ let nodes = create_nodes(4); + + // Include some short_channel_id, so that MessageRouter uses this to create compact blinded paths let intermediate_nodes = [ MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: Some(24) }, MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: Some(25) }, ]; - // Update the context to create a larger final receive TLVs, ensuring that - // the hop sizes vary before padding. - let context = MessageContext::Custom(vec![0u8; 42]); - let entropy = &*nodes[3].entropy_source; - let receive_key = nodes[3].messenger.node_signer.get_receive_auth_key(); + // Build with a larger context to create a larger final receive TLVs, ensuring that the hop + // sizes vary before padding. let blinded_path = BlindedMessagePath::new_with_dummy_hops( &intermediate_nodes, nodes[3].node_id, TEST_DUMMY_HOP_COUNT, - receive_key, - context, - entropy, - &secp_ctx, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(vec![0u8; 42]), + true, + &*nodes[3].entropy_source, + &Secp256k1::new(), ); - assert!(!is_padded(&blinded_path.blinded_hops(), MESSAGE_PADDING_ROUND_OFF)); + let hops = blinded_path.blinded_hops(); + assert!(!is_padded(&hops, MESSAGE_PADDING_ROUND_OFF)); + assert_eq!(hops.len(), TEST_DUMMY_HOP_COUNT + 3); + for hop in hops.iter().take(TEST_DUMMY_HOP_COUNT + 2) { + assert_eq!(hops[0].encrypted_payload.len(), hop.encrypted_payload.len()); + } + // Check the actual encrypted payload lengths, which may change in the future but serves to + // ensure that this and test_blinded_path_compact_padding, above, differ. + assert_eq!(hops[0].encrypted_payload.len(), 26); } #[test] @@ -743,15 +774,13 @@ fn we_are_intro_node() { MessageForwardNode { node_id: nodes[0].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[2].entropy_source; - let receive_key = nodes[2].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, + nodes[2].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[2].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -764,15 +793,13 @@ fn we_are_intro_node() { // Try with a two-hop blinded path where we are the introduction node. 
let intermediate_nodes = [MessageForwardNode { node_id: nodes[0].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[1].entropy_source; - let receive_key = nodes[1].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[1].node_id, - receive_key, - context, - entropy, + nodes[1].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[1].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -790,19 +817,16 @@ fn invalid_blinded_path_error() { let nodes = create_nodes(3); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[2].entropy_source; - let receive_key = nodes[2].messenger.node_signer.get_receive_auth_key(); let mut blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, - &secp_ctx, + nodes[2].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[2].entropy_source, + &Secp256k1::new(), ); blinded_path.clear_blinded_hops(); let destination = Destination::BlindedPath(blinded_path); @@ -828,15 +852,13 @@ fn reply_path() { MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[0].entropy_source; - let receive_key = nodes[0].messenger.node_signer.get_receive_auth_key(); let reply_path = BlindedMessagePath::new( &intermediate_nodes, nodes[0].node_id, - receive_key, - context, - entropy, + nodes[0].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[0].entropy_source, &secp_ctx, ); nodes[0] @@ -855,15 +877,13 @@ fn reply_path() { MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[3].entropy_source; - let receive_key = nodes[3].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[3].node_id, - receive_key, - context, - entropy, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[3].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -871,15 +891,13 @@ fn reply_path() { MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[0].entropy_source; - let receive_key = nodes[0].messenger.node_signer.get_receive_auth_key(); let reply_path = BlindedMessagePath::new( &intermediate_nodes, nodes[0].node_id, - receive_key, - context, - entropy, + nodes[0].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[0].entropy_source, &secp_ctx, ); let instructions = MessageSendInstructions::WithSpecifiedReplyPath { destination, reply_path }; @@ -975,15 +993,13 @@ fn requests_peer_connection_for_buffered_messages() { let 
intermediate_nodes = [MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[0].entropy_source; - let receive_key = nodes[0].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, + nodes[0].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[0].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -1046,15 +1062,13 @@ fn drops_buffered_messages_waiting_for_peer_connection() { let intermediate_nodes = [MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[0].entropy_source; - let receive_key = nodes[0].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, + nodes[0].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[0].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -1107,19 +1121,16 @@ fn intercept_offline_peer_oms() { } let message = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[2].entropy_source; - let receive_key = nodes[2].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, - &secp_ctx, + nodes[2].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[2].entropy_source, + &Secp256k1::new(), ); let destination = Destination::BlindedPath(blinded_path); let instructions = MessageSendInstructions::WithoutReplyPath { destination }; diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index 7de55cd8185..a61abae414f 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -272,7 +272,7 @@ where /// ]; /// let context = MessageContext::Custom(Vec::new()); /// let receive_key = keys_manager.get_receive_auth_key(); -/// let blinded_path = BlindedMessagePath::new(&hops, your_node_id, receive_key, context, &keys_manager, &secp_ctx); +/// let blinded_path = BlindedMessagePath::new(&hops, your_node_id, receive_key, context, false, &keys_manager, &secp_ctx); /// /// // Send a custom onion message to a blinded path. /// let destination = Destination::BlindedPath(blinded_path); @@ -598,12 +598,12 @@ where let is_recipient_announced = network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)); - let (mut compact_paths, dummy_hopd_path_len) = match &context { + let (size_constrained, path_len_incl_dummys) = match &context { MessageContext::Offers(OffersContext::InvoiceRequest { .. }) | MessageContext::Offers(OffersContext::OutboundPaymentForRefund { .. }) => { - // When embedding blinded paths within BOLT 12 objects which are generally embedded - // in QR codes, we sadly need to be conservative about size, especially if the QR - // code ultimately also includes an on-chain address. 
+ // When including blinded paths within BOLT 12 objects that appear in QR codes, we + // sadly need to be conservative about size, especially if the QR code ultimately + // also includes an on-chain address. (true, QR_CODED_DUMMY_HOPS_PATH_LENGTH) }, MessageContext::Offers(OffersContext::StaticInvoiceRequested { .. }) => { @@ -621,9 +621,7 @@ where }, }; - if never_compact_path { - compact_paths = false; - } + let compact_paths = !never_compact_path && size_constrained; let has_one_peer = peers.len() == 1; let mut peer_info = peers @@ -653,7 +651,7 @@ where let build_path = |intermediate_hops: &[MessageForwardNode]| { // Calculate the dummy hops given the total hop count target (including the recipient). - let dummy_hops_count = dummy_hopd_path_len.saturating_sub(intermediate_hops.len() + 1); + let dummy_hops_count = path_len_incl_dummys.saturating_sub(intermediate_hops.len() + 1); BlindedMessagePath::new_with_dummy_hops( intermediate_hops, @@ -661,6 +659,7 @@ where dummy_hops_count, local_node_receive_key, context.clone(), + size_constrained, &**entropy_source, secp_ctx, ) From dc623d3c5245cd17ad25aa6ca8b2b9558af4323d Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 9 Dec 2025 21:16:35 +0000 Subject: [PATCH 054/242] Add additional documentation on when to use `NodeIdMessageRouter` --- lightning/src/onion_message/messenger.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index a61abae414f..dbeab3937d0 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -772,6 +772,9 @@ where /// [`DefaultMessageRouter`] for deciding when to add additional dummy hops to the generated blinded /// paths. /// +/// This may be useful in cases where you want a long-lived blinded path and anticipate channel(s) +/// may close, but connections to specific peers will remain stable. +/// /// This message router can only route to a directly connected [`Destination`]. /// /// # Privacy From d7b86e58a7511740694f20b7656f5510ed628b49 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 11 Dec 2025 20:57:35 +0000 Subject: [PATCH 055/242] Add a pending changelog note for breaking invreq blinded paths --- pending_changelog/4213.txt | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 pending_changelog/4213.txt diff --git a/pending_changelog/4213.txt b/pending_changelog/4213.txt new file mode 100644 index 00000000000..791edd47804 --- /dev/null +++ b/pending_changelog/4213.txt @@ -0,0 +1,5 @@ +Backwards compat +================ + + * Outbound payments which are awaiting a response to a BOLT 12 invoice request + will not be able to complete after upgrading to 0.3 (#4213). 
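As an aside on the padding change earlier in this series: the rule in the `utils.rs` hunk rounds each encrypted payload up to the next multiple of `round_off`, and a `round_off` of zero (the compact-path case) disables padding entirely. A minimal standalone sketch of that arithmetic — the function name and the 16-byte round-off below are illustrative, not LDK's actual constants:

    fn padding_len(total_length: usize, round_off: usize) -> usize {
        // A round_off of 0 means "no padding", matching the compact-path case.
        if round_off == 0 {
            return 0;
        }
        // Round total_length up to the next multiple of round_off.
        total_length.div_ceil(round_off) * round_off - total_length
    }

    fn main() {
        assert_eq!(padding_len(20, 16), 12); // 20 bytes pad up to 32
        assert_eq!(padding_len(32, 16), 0); // already a multiple, no pad
        assert_eq!(padding_len(33, 0), 0); // compact path: padding disabled
    }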
From cafbd56858ea969d2f3d01084c5ab2d72453bae9 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 5 Jan 2026 13:07:00 +0100 Subject: [PATCH 056/242] Add logger to monitor_updating_paused --- lightning/src/ln/channel.rs | 100 ++++++++++++++++++++++++----- lightning/src/ln/channelmanager.rs | 8 ++- 2 files changed, 91 insertions(+), 17 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 128091ccdd5..51475e188a5 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7503,6 +7503,7 @@ where Vec::new(), Vec::new(), Vec::new(), + logger, ); UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } }, @@ -7912,7 +7913,7 @@ where log_info!(logger, "Received initial commitment_signed from peer for channel {}", &self.context.channel_id()); - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new(), logger); self.context.interactive_tx_signing_session.as_mut().expect("signing session should be present").received_commitment_signed(); Ok(channel_monitor) } @@ -8016,7 +8017,15 @@ where .as_mut() .expect("Signing session must exist for negotiated pending splice") .received_commitment_signed(); - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); Ok(self.push_ret_blockable_mon_update(monitor_update)) } @@ -8328,6 +8337,7 @@ where Vec::new(), Vec::new(), Vec::new(), + logger, ); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8533,7 +8543,15 @@ where if update_fee.is_some() { "a fee update, " } else { "" }, update_add_count, update_fulfill_count, update_fail_count); - self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + true, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -8911,6 +8929,7 @@ where to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, + logger, ); return_with_htlcs_to_fail!(htlcs_to_fail); }, @@ -8956,6 +8975,7 @@ where to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, + logger, ); return_with_htlcs_to_fail!(htlcs_to_fail); } else { @@ -8969,6 +8989,7 @@ where to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, + logger, ); return_with_htlcs_to_fail!(htlcs_to_fail); } @@ -9369,12 +9390,17 @@ where /// [`ChannelManager`]: super::channelmanager::ChannelManager /// [`chain::Watch`]: crate::chain::Watch /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress - fn monitor_updating_paused( + fn monitor_updating_paused( &mut self, resend_raa: bool, resend_commitment: bool, resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, mut pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, - ) { + logger: &L, + ) where + L::Target: Logger, + { + log_trace!(logger, "Pausing channel monitor updates"); + self.context.monitor_pending_revoke_and_ack |= resend_raa; self.context.monitor_pending_commitment_signed |= resend_commitment; self.context.monitor_pending_channel_ready |= resend_channel_ready; @@ -10425,12 +10451,16 @@ where } } - pub fn shutdown( - &mut self, signer_provider: &SP, their_features: 
&InitFeatures, msg: &msgs::Shutdown, + pub fn shutdown( + &mut self, logger: &L, signer_provider: &SP, their_features: &InitFeatures, + msg: &msgs::Shutdown, ) -> Result< (Option, Option, Vec<(HTLCSource, PaymentHash)>), ChannelError, - > { + > + where + L::Target: Logger, + { if self.context.channel_state.is_peer_disconnected() { return Err(ChannelError::close( "Peer sent shutdown when we needed a channel_reestablish".to_owned(), @@ -10535,7 +10565,15 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -11292,7 +11330,15 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); let monitor_update = self.push_ret_blockable_mon_update(monitor_update); let announcement_sigs = @@ -13016,7 +13062,15 @@ where let can_add_htlc = send_res.map_err(|(_, msg)| ChannelError::Ignore(msg))?; if can_add_htlc { let monitor_update = self.build_commitment_no_status_check(logger); - self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + true, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); Ok(self.push_ret_blockable_mon_update(monitor_update)) } else { Ok(None) @@ -13042,13 +13096,19 @@ where /// Begins the shutdown process, getting a message for the remote peer and returning all /// holding cell HTLCs for payment failure. 
- pub fn get_shutdown( + pub fn get_shutdown( &mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option, override_shutdown_script: Option, + logger: &L, ) -> Result< (msgs::Shutdown, Option, Vec<(HTLCSource, PaymentHash)>), APIError, - > { + > + where + L::Target: Logger, + { + let logger = WithChannelContext::from(logger, &self.context, None); + if self.context.channel_state.is_local_stfu_sent() || self.context.channel_state.is_remote_stfu_sent() || self.context.channel_state.is_quiescent() @@ -13133,7 +13193,15 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + &&logger, + ); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -13743,7 +13811,7 @@ where let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some() || channel.context.signer_pending_channel_ready; - channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new(), logger); Ok((channel, channel_monitor)) } @@ -14030,7 +14098,7 @@ where }; let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some() || channel.context.signer_pending_channel_ready; - channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new(), logger); Ok((channel, funding_signed, channel_monitor)) } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f2e8fa70e4f..290995268b0 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4193,6 +4193,7 @@ where their_features, target_feerate_sats_per_1000_weight, override_shutdown_script, + &self.logger, )?; failed_htlcs = htlcs; @@ -11231,7 +11232,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let (shutdown, monitor_update_opt, htlcs) = try_channel_entry!( self, peer_state, - chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), + chan.shutdown( + &self.logger, + &self.signer_provider, + &peer_state.latest_features, + &msg + ), chan_entry ); dropped_htlcs = htlcs; From 88485d2409a266c677286b3b52aac591f0b39491 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 6 Jan 2026 13:31:02 +0100 Subject: [PATCH 057/242] Consume vectors in monitor_updating_paused Using extend is slightly cleaner because it doesn't require mut on the parameters. 
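A standalone illustration of the difference (not LDK code): `Vec::append` drains its source through a `&mut Vec`, so the source binding must be declared `mut`, while `Vec::extend` simply takes the source by value:

    fn via_extend(dest: &mut Vec<u32>, src: Vec<u32>) {
        // `extend` consumes `src` by value; no `mut` binding needed.
        dest.extend(src);
    }

    fn via_append(dest: &mut Vec<u32>, mut src: Vec<u32>) {
        // `append` drains through a `&mut`, forcing the `mut` binding and
        // leaving `src` empty (but still alive) afterwards.
        dest.append(&mut src);
    }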
--- lightning/src/ln/channel.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 51475e188a5..f2775b6350d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -9392,10 +9392,9 @@ where /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress fn monitor_updating_paused( &mut self, resend_raa: bool, resend_commitment: bool, resend_channel_ready: bool, - mut pending_forwards: Vec<(PendingHTLCInfo, u64)>, - mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, - mut pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, - logger: &L, + pending_forwards: Vec<(PendingHTLCInfo, u64)>, + pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, logger: &L, ) where L::Target: Logger, { @@ -9404,11 +9403,9 @@ where self.context.monitor_pending_revoke_and_ack |= resend_raa; self.context.monitor_pending_commitment_signed |= resend_commitment; self.context.monitor_pending_channel_ready |= resend_channel_ready; - self.context.monitor_pending_forwards.append(&mut pending_forwards); - self.context.monitor_pending_failures.append(&mut pending_fails); - self.context - .monitor_pending_finalized_fulfills - .append(&mut pending_finalized_claimed_htlcs); + self.context.monitor_pending_forwards.extend(pending_forwards); + self.context.monitor_pending_failures.extend(pending_fails); + self.context.monitor_pending_finalized_fulfills.extend(pending_finalized_claimed_htlcs); self.context.channel_state.set_monitor_update_in_progress(); } From 8ae40eed1b1cc3976bca76104e7a3288ab7280b3 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 5 Jan 2026 13:10:18 +0100 Subject: [PATCH 058/242] Rustfmt channel methods --- lightning/src/ln/channel.rs | 189 ++++++++++++++++++++++++++++-------- 1 file changed, 147 insertions(+), 42 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index f2775b6350d..3667e083f77 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7886,11 +7886,12 @@ where Ok(()) } - #[rustfmt::skip] pub fn initial_commitment_signed_v2( - &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, logger: &L + &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, + logger: &L, ) -> Result::EcdsaSigner>, ChannelError> - where L::Target: Logger + where + L::Target: Logger, { if let Some(signing_session) = self.context.interactive_tx_signing_session.as_ref() { if signing_session.has_received_tx_signatures() { @@ -7905,16 +7906,41 @@ where }; let holder_commitment_point = &mut self.holder_commitment_point.clone(); - self.context.assert_no_commitment_advancement(holder_commitment_point.next_transaction_number(), "initial commitment_signed"); + self.context.assert_no_commitment_advancement( + holder_commitment_point.next_transaction_number(), + "initial commitment_signed", + ); let (channel_monitor, _) = self.initial_commitment_signed( - self.context.channel_id(), msg.signature, holder_commitment_point, best_block, signer_provider, logger)?; + self.context.channel_id(), + msg.signature, + holder_commitment_point, + best_block, + signer_provider, + logger, + )?; self.holder_commitment_point = *holder_commitment_point; - log_info!(logger, "Received initial commitment_signed from peer for channel {}", &self.context.channel_id()); + log_info!( + logger, + 
"Received initial commitment_signed from peer for channel {}", + &self.context.channel_id() + ); - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new(), logger); - self.context.interactive_tx_signing_session.as_mut().expect("signing session should be present").received_commitment_signed(); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); + self.context + .interactive_tx_signing_session + .as_mut() + .expect("signing session should be present") + .received_commitment_signed(); Ok(channel_monitor) } @@ -13769,34 +13795,61 @@ where /// Handles a funding_signed message from the remote end. /// If this call is successful, broadcast the funding transaction (and not before!) - #[rustfmt::skip] pub fn funding_signed( - mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(FundedChannel, ChannelMonitor<::EcdsaSigner>), (OutboundV1Channel, ChannelError)> + mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, + logger: &L, + ) -> Result< + (FundedChannel, ChannelMonitor<::EcdsaSigner>), + (OutboundV1Channel, ChannelError), + > where - L::Target: Logger + L::Target: Logger, { if !self.funding.is_outbound() { - return Err((self, ChannelError::close("Received funding_signed for an inbound channel?".to_owned()))); + return Err(( + self, + ChannelError::close("Received funding_signed for an inbound channel?".to_owned()), + )); } if !matches!(self.context.channel_state, ChannelState::FundingNegotiated(_)) { - return Err((self, ChannelError::close("Received funding_signed in strange state!".to_owned()))); + return Err(( + self, + ChannelError::close("Received funding_signed in strange state!".to_owned()), + )); } - let mut holder_commitment_point = match self.unfunded_context.holder_commitment_point { - Some(point) => point, - None => return Err((self, ChannelError::close("Received funding_signed before our first commitment point was available".to_owned()))), - }; - self.context.assert_no_commitment_advancement(holder_commitment_point.next_transaction_number(), "funding_signed"); + let mut holder_commitment_point = + match self.unfunded_context.holder_commitment_point { + Some(point) => point, + None => return Err(( + self, + ChannelError::close( + "Received funding_signed before our first commitment point was available" + .to_owned(), + ), + )), + }; + self.context.assert_no_commitment_advancement( + holder_commitment_point.next_transaction_number(), + "funding_signed", + ); let (channel_monitor, _) = match self.initial_commitment_signed( - self.context.channel_id(), msg.signature, - &mut holder_commitment_point, best_block, signer_provider, logger + self.context.channel_id(), + msg.signature, + &mut holder_commitment_point, + best_block, + signer_provider, + logger, ) { Ok(channel_monitor) => channel_monitor, Err(err) => return Err((self, err)), }; - log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); + log_info!( + logger, + "Received funding_signed from peer for channel {}", + &self.context.channel_id() + ); let mut channel = FundedChannel { funding: self.funding, @@ -13808,7 +13861,15 @@ where let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some() || channel.context.signer_pending_channel_ready; - channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new(), logger); + channel.monitor_updating_paused( + false, + 
false, + need_channel_ready, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); Ok((channel, channel_monitor)) } @@ -14041,15 +14102,25 @@ where self.generate_accept_channel_message(logger) } - #[rustfmt::skip] pub fn funding_created( - mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(FundedChannel, Option, ChannelMonitor<::EcdsaSigner>), (Self, ChannelError)> + mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, + logger: &L, + ) -> Result< + ( + FundedChannel, + Option, + ChannelMonitor<::EcdsaSigner>, + ), + (Self, ChannelError), + > where - L::Target: Logger + L::Target: Logger, { if self.funding.is_outbound() { - return Err((self, ChannelError::close("Received funding_created for an outbound channel?".to_owned()))); + return Err(( + self, + ChannelError::close("Received funding_created for an outbound channel?".to_owned()), + )); } if !matches!( self.context.channel_state, ChannelState::NegotiatingFunding(flags) @@ -14058,31 +14129,57 @@ where // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT // remember the channel, so it's safe to just send an error_message here and drop the // channel. - return Err((self, ChannelError::close("Received funding_created after we got the channel!".to_owned()))); + return Err(( + self, + ChannelError::close( + "Received funding_created after we got the channel!".to_owned(), + ), + )); } - let mut holder_commitment_point = match self.unfunded_context.holder_commitment_point { - Some(point) => point, - None => return Err((self, ChannelError::close("Received funding_created before our first commitment point was available".to_owned()))), - }; - self.context.assert_no_commitment_advancement(holder_commitment_point.next_transaction_number(), "funding_created"); + let mut holder_commitment_point = + match self.unfunded_context.holder_commitment_point { + Some(point) => point, + None => return Err(( + self, + ChannelError::close( + "Received funding_created before our first commitment point was available" + .to_owned(), + ), + )), + }; + self.context.assert_no_commitment_advancement( + holder_commitment_point.next_transaction_number(), + "funding_created", + ); let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index }; self.funding.channel_transaction_parameters.funding_outpoint = Some(funding_txo); - let (channel_monitor, counterparty_initial_commitment_tx) = match self.initial_commitment_signed( - ChannelId::v1_from_funding_outpoint(funding_txo), msg.signature, - &mut holder_commitment_point, best_block, signer_provider, logger - ) { + let (channel_monitor, counterparty_initial_commitment_tx) = match self + .initial_commitment_signed( + ChannelId::v1_from_funding_outpoint(funding_txo), + msg.signature, + &mut holder_commitment_point, + best_block, + signer_provider, + logger, + ) { Ok(channel_monitor) => channel_monitor, Err(err) => return Err((self, err)), }; let funding_signed = self.context.get_funding_signed_msg( - &self.funding.channel_transaction_parameters, logger, counterparty_initial_commitment_tx + &self.funding.channel_transaction_parameters, + logger, + counterparty_initial_commitment_tx, ); - log_info!(logger, "{} funding_signed for peer for channel {}", - if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id()); + log_info!( + logger, + "{} funding_signed for peer for channel {}", + if funding_signed.is_some() { "Generated" } else { 
"Waiting for signature on" }, + &self.context.channel_id() + ); // Promote the channel to a full-fledged one now that we have updated the state and have a // `ChannelMonitor`. @@ -14095,7 +14192,15 @@ where }; let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some() || channel.context.signer_pending_channel_ready; - channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new(), logger); + channel.monitor_updating_paused( + false, + false, + need_channel_ready, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); Ok((channel, funding_signed, channel_monitor)) } From 51b5ef798f65fcd786719d31db7b89d87d259dfb Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 6 Jan 2026 13:36:47 +0100 Subject: [PATCH 059/242] Simplify error return patterns in channel.rs Extract error message strings into local variables before constructing ChannelError return tuples, reducing nesting and improving readability. --- lightning/src/ln/channel.rs | 72 +++++++++++++------------------------ 1 file changed, 25 insertions(+), 47 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3667e083f77..57f10207e17 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1914,13 +1914,8 @@ where .handle_tx_complete(msg) .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger))?, None => { - return Err(( - ChannelError::WarnAndDisconnect( - "Received unexpected interactive transaction negotiation message" - .to_owned(), - ), - None, - )) + let err = "Received unexpected interactive transaction negotiation message"; + return Err((ChannelError::WarnAndDisconnect(err.to_owned()), None)); }, }; @@ -13806,28 +13801,20 @@ where L::Target: Logger, { if !self.funding.is_outbound() { - return Err(( - self, - ChannelError::close("Received funding_signed for an inbound channel?".to_owned()), - )); + let err = "Received funding_signed for an inbound channel?"; + return Err((self, ChannelError::close(err.to_owned()))); } if !matches!(self.context.channel_state, ChannelState::FundingNegotiated(_)) { - return Err(( - self, - ChannelError::close("Received funding_signed in strange state!".to_owned()), - )); + let err = "Received funding_signed in strange state!"; + return Err((self, ChannelError::close(err.to_owned()))); } - let mut holder_commitment_point = - match self.unfunded_context.holder_commitment_point { - Some(point) => point, - None => return Err(( - self, - ChannelError::close( - "Received funding_signed before our first commitment point was available" - .to_owned(), - ), - )), - }; + let mut holder_commitment_point = match self.unfunded_context.holder_commitment_point { + Some(point) => point, + None => { + let err = "Received funding_signed before our first commitment point was available"; + return Err((self, ChannelError::close(err.to_owned()))); + }, + }; self.context.assert_no_commitment_advancement( holder_commitment_point.next_transaction_number(), "funding_signed", @@ -14117,10 +14104,8 @@ where L::Target: Logger, { if self.funding.is_outbound() { - return Err(( - self, - ChannelError::close("Received funding_created for an outbound channel?".to_owned()), - )); + let err = "Received funding_created for an outbound channel?"; + return Err((self, ChannelError::close(err.to_owned()))); } if !matches!( self.context.channel_state, ChannelState::NegotiatingFunding(flags) @@ -14129,24 +14114,17 @@ where // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT // remember 
the channel, so it's safe to just send an error_message here and drop the // channel. - return Err(( - self, - ChannelError::close( - "Received funding_created after we got the channel!".to_owned(), - ), - )); + let err = "Received funding_created after we got the channel!"; + return Err((self, ChannelError::close(err.to_owned()))); } - let mut holder_commitment_point = - match self.unfunded_context.holder_commitment_point { - Some(point) => point, - None => return Err(( - self, - ChannelError::close( - "Received funding_created before our first commitment point was available" - .to_owned(), - ), - )), - }; + let mut holder_commitment_point = match self.unfunded_context.holder_commitment_point { + Some(point) => point, + None => { + let err = + "Received funding_created before our first commitment point was available"; + return Err((self, ChannelError::close(err.to_owned()))); + }, + }; self.context.assert_no_commitment_advancement( holder_commitment_point.next_transaction_number(), "funding_created", From 5b211158f7f2d4e17e79271b2ba2b4cfb856ecd7 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 5 Jan 2026 11:31:23 +0100 Subject: [PATCH 060/242] Make test block connect style configurable for deterministic runs Tests previously selected a random block connect style to improve coverage. However, this randomness made test output non-deterministic and significantly cluttered diffs when comparing logs before and after changes. To address this, an LDK_TEST_CONNECT_STYLE environment variable is added to override the random selection and enable deterministic test runs. Note that broader coverage may be better achieved via targeted tests per connect style or a test matrix cycling through all styles, but this change focuses on improving reproducibility and debuggability. --- CONTRIBUTING.md | 14 +++++++++++++ lightning/src/ln/functional_test_utils.rs | 24 ++++++++++++++++++++++- 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1bc431a3110..e7825ac3f17 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -176,6 +176,20 @@ Fuzzing is heavily encouraged: you will find all related material under `fuzz/` Mutation testing is work-in-progress; any contribution there would be warmly welcomed. +### Environment Variables + +* `LDK_TEST_CONNECT_STYLE` - Override the random block connect style used in tests for deterministic runs. 
Valid values: + * `BEST_BLOCK_FIRST` + * `BEST_BLOCK_FIRST_SKIPPING_BLOCKS` + * `BEST_BLOCK_FIRST_REORGS_ONLY_TIP` + * `TRANSACTIONS_FIRST` + * `TRANSACTIONS_FIRST_SKIPPING_BLOCKS` + * `TRANSACTIONS_DUPLICATIVELY_FIRST_SKIPPING_BLOCKS` + * `HIGHLY_REDUNDANT_TRANSACTIONS_FIRST_SKIPPING_BLOCKS` + * `TRANSACTIONS_FIRST_REORGS_ONLY_TIP` + * `FULL_BLOCK_VIA_LISTEN` + * `FULL_BLOCK_DISCONNECTIONS_SKIPPING_VIA_LISTEN` + C/C++ Bindings -------------- diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e072deb6a97..e9cb13dbd2a 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -4563,7 +4563,29 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>( let mut nodes = Vec::new(); let chan_count = Rc::new(RefCell::new(0)); let payment_count = Rc::new(RefCell::new(0)); - let connect_style = Rc::new(RefCell::new(ConnectStyle::random_style())); + + let connect_style = Rc::new(RefCell::new(match std::env::var("LDK_TEST_CONNECT_STYLE") { + Ok(val) => match val.as_str() { + "BEST_BLOCK_FIRST" => ConnectStyle::BestBlockFirst, + "BEST_BLOCK_FIRST_SKIPPING_BLOCKS" => ConnectStyle::BestBlockFirstSkippingBlocks, + "BEST_BLOCK_FIRST_REORGS_ONLY_TIP" => ConnectStyle::BestBlockFirstReorgsOnlyTip, + "TRANSACTIONS_FIRST" => ConnectStyle::TransactionsFirst, + "TRANSACTIONS_FIRST_SKIPPING_BLOCKS" => ConnectStyle::TransactionsFirstSkippingBlocks, + "TRANSACTIONS_DUPLICATIVELY_FIRST_SKIPPING_BLOCKS" => { + ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks + }, + "HIGHLY_REDUNDANT_TRANSACTIONS_FIRST_SKIPPING_BLOCKS" => { + ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks + }, + "TRANSACTIONS_FIRST_REORGS_ONLY_TIP" => ConnectStyle::TransactionsFirstReorgsOnlyTip, + "FULL_BLOCK_VIA_LISTEN" => ConnectStyle::FullBlockViaListen, + "FULL_BLOCK_DISCONNECTIONS_SKIPPING_VIA_LISTEN" => { + ConnectStyle::FullBlockDisconnectionsSkippingViaListen + }, + _ => panic!("Unknown ConnectStyle '{}'", val), + }, + Err(_) => ConnectStyle::random_style(), + })); for i in 0..node_count { let dedicated_entropy = DedicatedEntropy(RandomBytes::new([i as u8; 32])); From 075bc349f3f0684c5ea0424c7030dcc949d3c698 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 7 Jan 2026 13:22:33 +0100 Subject: [PATCH 061/242] Make test hash map iteration order configurable for deterministic runs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a `RandomState` hasher implementation for tests that supports deterministic behavior via the `LDK_TEST_DETERMINISTIC_HASHES=1` environment variable. When set, hash maps use fixed keys ensuring consistent iteration order across test runs. By default, tests continue to use std's RandomState for random hashing, keeping test behavior close to production. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- CONTRIBUTING.md | 2 + lightning/src/util/hash_tables.rs | 67 ++++++++++++++++++++++++++++++- 2 files changed, 68 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e7825ac3f17..d837c873efa 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -190,6 +190,8 @@ welcomed. * `FULL_BLOCK_VIA_LISTEN` * `FULL_BLOCK_DISCONNECTIONS_SKIPPING_VIA_LISTEN` +* `LDK_TEST_DETERMINISTIC_HASHES` - When set to `1`, uses deterministic hash map iteration order in tests. This ensures consistent test output across runs, useful for comparing logs before and after changes. 
+
 C/C++ Bindings
 --------------
 
diff --git a/lightning/src/util/hash_tables.rs b/lightning/src/util/hash_tables.rs
index 00341d57b45..b6555975191 100644
--- a/lightning/src/util/hash_tables.rs
+++ b/lightning/src/util/hash_tables.rs
@@ -6,10 +6,75 @@ pub use hashbrown::hash_map;
 
 mod hashbrown_tables {
-	#[cfg(feature = "std")]
+	#[cfg(all(feature = "std", not(test)))]
 	mod hasher {
 		pub use std::collections::hash_map::RandomState;
 	}
+	#[cfg(all(feature = "std", test))]
+	mod hasher {
+		#![allow(deprecated)] // hash::SipHasher was deprecated in favor of something only in std.
+		use core::hash::{BuildHasher, Hasher};
+
+		/// A [`BuildHasher`] for tests that supports deterministic behavior via environment variable.
+		///
+		/// When `LDK_TEST_DETERMINISTIC_HASHES` is set, uses fixed keys for deterministic iteration.
+		/// Otherwise, delegates to std's RandomState for random hashing.
+		#[derive(Clone)]
+		pub enum RandomState {
+			Std(std::collections::hash_map::RandomState),
+			Deterministic,
+		}
+
+		impl RandomState {
+			pub fn new() -> RandomState {
+				if std::env::var("LDK_TEST_DETERMINISTIC_HASHES").map(|v| v == "1").unwrap_or(false)
+				{
+					RandomState::Deterministic
+				} else {
+					RandomState::Std(std::collections::hash_map::RandomState::new())
+				}
+			}
+		}
+
+		impl Default for RandomState {
+			fn default() -> RandomState {
+				RandomState::new()
+			}
+		}
+
+		/// A hasher wrapper that delegates to either std's DefaultHasher or a deterministic SipHasher.
+		pub enum RandomStateHasher {
+			Std(std::collections::hash_map::DefaultHasher),
+			Deterministic(core::hash::SipHasher),
+		}
+
+		impl Hasher for RandomStateHasher {
+			fn finish(&self) -> u64 {
+				match self {
+					RandomStateHasher::Std(h) => h.finish(),
+					RandomStateHasher::Deterministic(h) => h.finish(),
+				}
+			}
+			fn write(&mut self, bytes: &[u8]) {
+				match self {
+					RandomStateHasher::Std(h) => h.write(bytes),
+					RandomStateHasher::Deterministic(h) => h.write(bytes),
+				}
+			}
+		}
+
+		impl BuildHasher for RandomState {
+			type Hasher = RandomStateHasher;
+			fn build_hasher(&self) -> RandomStateHasher {
+				match self {
+					RandomState::Std(s) => RandomStateHasher::Std(s.build_hasher()),
+					RandomState::Deterministic => {
+						RandomStateHasher::Deterministic(core::hash::SipHasher::new_with_keys(0, 0))
+					},
+				}
+			}
+		}
+	}
 	#[cfg(not(feature = "std"))]
 	mod hasher {
 		#![allow(deprecated)] // hash::SipHasher was deprecated in favor of something only in std.

From 5fbbd41db171a41525705425fdcbeabbdb1cb8ac Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 8 Oct 2025 13:15:38 +0000
Subject: [PATCH 062/242] Parallelize `ChannelMonitor` loading from async
 `KVStore`s

Reading `ChannelMonitor`s on startup is one of the slowest parts of
LDK initialization. Now that we have an async `KVStore`, there's no
need for that: we can simply parallelize their loading, which we do
here.

Sadly, because Rust futures are pretty unergonomic, we have to add
some `unsafe {}` here, but arguing it's fine is relatively
straightforward.
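For readers unfamiliar with hand-rolled joining, the shape of the poller changed below is roughly the following — a self-contained sketch with illustrative names, which sidesteps the patch's `unsafe` by additionally bounding the output type with `Unpin`:

    use std::future::Future;
    use std::pin::Pin;
    use std::task::{Context, Poll};

    enum Slot<F: Future<Output = O> + Unpin, O> {
        Pending(F),
        Ready(O),
    }

    struct JoinAll<F: Future<Output = O> + Unpin, O> {
        slots: Vec<Slot<F, O>>,
    }

    impl<F: Future<Output = O> + Unpin, O: Unpin> Future for JoinAll<F, O> {
        type Output = Vec<O>;
        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Vec<O>> {
            // With these bounds `Self: Unpin`, so the pin can be stripped safely.
            let this = Pin::into_inner(self);
            let mut any_pending = false;
            for slot in this.slots.iter_mut() {
                if let Slot::Pending(fut) = slot {
                    // Each inner future is `Unpin`, so `Pin::new` suffices here.
                    match Pin::new(fut).poll(cx) {
                        Poll::Ready(out) => *slot = Slot::Ready(out),
                        Poll::Pending => any_pending = true,
                    }
                }
            }
            if any_pending {
                return Poll::Pending;
            }
            // Every slot is Ready; move the outputs out in order.
            let outputs = this
                .slots
                .drain(..)
                .map(|slot| match slot {
                    Slot::Ready(out) => out,
                    Slot::Pending(_) => unreachable!("checked above"),
                })
                .collect();
            Poll::Ready(outputs)
        }
    }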
---
 lightning/src/util/async_poll.rs | 25 +++++++++++++++----------
 lightning/src/util/persist.rs    | 17 ++++++++++++-----
 2 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs
index 9c2ca4c247f..931d2817e30 100644
--- a/lightning/src/util/async_poll.rs
+++ b/lightning/src/util/async_poll.rs
@@ -15,26 +15,31 @@ use core::marker::Unpin;
 use core::pin::Pin;
 use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
 
-pub(crate) enum ResultFuture<F: Future<Output = Result<(), E>>, E: Unpin> {
+pub(crate) enum ResultFuture<F: Future<Output = O> + Unpin, O> {
 	Pending(F),
-	Ready(Result<(), E>),
+	Ready(O),
 }
 
-pub(crate) struct MultiResultFuturePoller<F: Future<Output = Result<(), E>> + Unpin, E: Unpin> {
-	futures_state: Vec<ResultFuture<F, E>>,
+pub(crate) struct MultiResultFuturePoller<F: Future<Output = O> + Unpin, O> {
+	futures_state: Vec<ResultFuture<F, O>>,
 }
 
-impl<F: Future<Output = Result<(), E>> + Unpin, E: Unpin> MultiResultFuturePoller<F, E> {
-	pub fn new(futures_state: Vec<ResultFuture<F, E>>) -> Self {
+impl<F: Future<Output = O> + Unpin, O> MultiResultFuturePoller<F, O> {
+	pub fn new(futures_state: Vec<ResultFuture<F, O>>) -> Self {
 		Self { futures_state }
 	}
 }
 
-impl<F: Future<Output = Result<(), E>> + Unpin, E: Unpin> Future for MultiResultFuturePoller<F, E> {
-	type Output = Vec<Result<(), E>>;
-	fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Vec<Result<(), E>>> {
+impl<F: Future<Output = O> + Unpin, O> Future for MultiResultFuturePoller<F, O> {
+	type Output = Vec<O>;
+	fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Vec<O>> {
 		let mut have_pending_futures = false;
-		let futures_state = &mut self.get_mut().futures_state;
+		// SAFETY: While we are pinned, we can't get direct access to `futures_state` because we
+		// aren't `Unpin`. However, we don't actually need the `Pin` - we only use it below on the
+		// `Future` in the `ResultFuture::Pending` case, and the `Future` is bound by `Unpin`.
+		// Thus, the `Pin` is not actually used, and it's safe to bypass it and access the inner
+		// reference directly.
+		let futures_state = unsafe { &mut self.get_unchecked_mut().futures_state };
 		for state in futures_state.iter_mut() {
 			match state {
 				ResultFuture::Pending(ref mut fut) => match Pin::new(fut).poll(cx) {
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index e15209676e3..69b5c85a5ef 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -34,7 +34,9 @@ use crate::chain::transaction::OutPoint;
 use crate::ln::types::ChannelId;
 use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider};
 use crate::sync::Mutex;
-use crate::util::async_poll::{dummy_waker, MaybeSend, MaybeSync};
+use crate::util::async_poll::{
+	dummy_waker, MaybeSend, MaybeSync, MultiResultFuturePoller, ResultFuture,
+};
 use crate::util::logger::Logger;
 use crate::util::native_async::FutureSpawner;
 use crate::util::ser::{Readable, ReadableArgs, Writeable};
@@ -875,11 +877,16 @@ where
 		let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE;
 		let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE;
 		let monitor_list = self.0.kv_store.list(primary, secondary).await?;
-		let mut res = Vec::with_capacity(monitor_list.len());
+		let mut futures = Vec::with_capacity(monitor_list.len());
 		for monitor_key in monitor_list {
-			let result =
-				self.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await?;
-			if let Some(read_res) = result {
+			futures.push(ResultFuture::Pending(Box::pin(async move {
+				self.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await
+			})));
+		}
+		let future_results = MultiResultFuturePoller::new(futures).await;
+		let mut res = Vec::with_capacity(future_results.len());
+		for result in future_results {
+			if let Some(read_res) = result? {
 				res.push(read_res);
 			}
 		}

From b4eab2c7f95159521f1857121da663b676f5b1a0 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 9 Oct 2025 00:43:28 +0000
Subject: [PATCH 063/242] Allow `FutureSpawner` to return the result of the
 spawned future

`tokio::spawn` can be used either to spawn a forever-running
background task or to spawn a task which gets `poll`ed independently
and eventually returns a result which the callsite wants.

In LDK, we have only ever needed the first, and thus didn't bother
defining a return type for `FutureSpawner::spawn`. However, in the
next commit we'll start using `FutureSpawner` in a context where we
actually do want the spawned future's result. Thus, here, we add a
result output to `FutureSpawner::spawn`, mirroring the `tokio::spawn`
API.
---
 lightning-block-sync/src/gossip.rs |  10 ++-
 lightning/src/util/native_async.rs | 123 +++++++++++++++++++++++++++--
 lightning/src/util/persist.rs      |  13 ++-
 3 files changed, 133 insertions(+), 13 deletions(-)

diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs
index 00d321669ca..263fa4027ff 100644
--- a/lightning-block-sync/src/gossip.rs
+++ b/lightning-block-sync/src/gossip.rs
@@ -47,8 +47,12 @@ pub trait UtxoSource: BlockSource + 'static {
 pub struct TokioSpawner;
 #[cfg(feature = "tokio")]
 impl FutureSpawner for TokioSpawner {
-	fn spawn<T: Future<Output = ()> + Send + 'static>(&self, future: T) {
-		tokio::spawn(future);
+	type E = tokio::task::JoinError;
+	type SpawnedFutureResult<O: Send> = tokio::task::JoinHandle<O>;
+	fn spawn<O: Send + 'static, F: Future<Output = O> + Send + 'static>(
+		&self, future: F,
+	) -> Self::SpawnedFutureResult<O> {
+		tokio::spawn(future)
 	}
 }
 
@@ -254,7 +258,7 @@ where
 		let fut = res.clone();
 		let source = self.source.clone();
 		let block_cache = Arc::clone(&self.block_cache);
-		self.spawn.spawn(async move {
+		let _not_polled = self.spawn.spawn(async move {
 			let res = Self::retrieve_utxo(source, block_cache, scid).await;
 			fut.resolve(res);
 		});
diff --git a/lightning/src/util/native_async.rs b/lightning/src/util/native_async.rs
index 886146e976d..0c380f2b1d1 100644
--- a/lightning/src/util/native_async.rs
+++ b/lightning/src/util/native_async.rs
@@ -8,23 +8,44 @@ //! environment.
 
 #[cfg(all(test, feature = "std"))]
-use crate::sync::Mutex;
+use crate::sync::{Arc, Mutex};
 
 use crate::util::async_poll::{MaybeSend, MaybeSync};
 
+#[cfg(all(test, not(feature = "std")))]
+use alloc::rc::Rc;
+
 #[cfg(all(test, not(feature = "std")))]
 use core::cell::RefCell;
+#[cfg(test)]
+use core::convert::Infallible;
 use core::future::Future;
 #[cfg(test)]
 use core::pin::Pin;
+#[cfg(test)]
+use core::task::{Context, Poll};
 
-/// A generic trait which is able to spawn futures in the background.
+/// A generic trait which is able to spawn futures to be polled in the background.
+///
+/// When the spawned future completes, the returned [`Self::SpawnedFutureResult`] should resolve
+/// with the output of the spawned future.
+///
+/// Spawned futures must be polled independently in the background even if the returned
+/// [`Self::SpawnedFutureResult`] is dropped without being polled. This matches the semantics of
+/// `tokio::spawn`.
 ///
 /// This is not exported to bindings users as async is only supported in Rust.
 pub trait FutureSpawner: MaybeSend + MaybeSync + 'static {
+	/// The error type of [`Self::SpawnedFutureResult`]. This can be used to indicate that the
+	/// spawned future was cancelled or panicked.
+	type E;
+	/// The result of [`Self::spawn`], a future which completes when the spawned future completes.
+ type SpawnedFutureResult: Future> + Unpin; /// Spawns the given future as a background task. /// /// This method MUST NOT block on the given future immediately. - fn spawn + MaybeSend + 'static>(&self, future: T); + fn spawn + MaybeSend + 'static>( + &self, future: T, + ) -> Self::SpawnedFutureResult; } #[cfg(test)] @@ -39,6 +60,77 @@ pub(crate) struct FutureQueue(Mutex>>>); #[cfg(all(test, not(feature = "std")))] pub(crate) struct FutureQueue(RefCell>>>); +/// A simple future which can be completed later. Used to implement [`FutureQueue`]. +#[cfg(all(test, feature = "std"))] +pub struct FutureQueueCompletion(Arc>>); +#[cfg(all(test, not(feature = "std")))] +pub struct FutureQueueCompletion(Rc>>); + +#[cfg(all(test, feature = "std"))] +impl FutureQueueCompletion { + fn new() -> Self { + Self(Arc::new(Mutex::new(None))) + } + + fn complete(&self, o: O) { + *self.0.lock().unwrap() = Some(o); + } +} + +#[cfg(all(test, feature = "std"))] +impl Clone for FutureQueueCompletion { + fn clone(&self) -> Self { + #[cfg(all(test, feature = "std"))] + { + Self(Arc::clone(&self.0)) + } + #[cfg(all(test, not(feature = "std")))] + { + Self(Rc::clone(&self.0)) + } + } +} + +#[cfg(all(test, not(feature = "std")))] +impl FutureQueueCompletion { + fn new() -> Self { + Self(Rc::new(RefCell::new(None))) + } + + fn complete(&self, o: O) { + *self.0.borrow_mut() = Some(o); + } +} + +#[cfg(all(test, not(feature = "std")))] +impl Clone for FutureQueueCompletion { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +#[cfg(all(test, feature = "std"))] +impl Future for FutureQueueCompletion { + type Output = Result; + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + match Pin::into_inner(self).0.lock().unwrap().take() { + None => Poll::Pending, + Some(o) => Poll::Ready(Ok(o)), + } + } +} + +#[cfg(all(test, not(feature = "std")))] +impl Future for FutureQueueCompletion { + type Output = Result; + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + match Pin::into_inner(self).0.borrow_mut().take() { + None => Poll::Pending, + Some(o) => Poll::Ready(Ok(o)), + } + } +} + #[cfg(test)] impl FutureQueue { pub(crate) fn new() -> Self { @@ -74,7 +166,6 @@ impl FutureQueue { futures = self.0.borrow_mut(); } futures.retain_mut(|fut| { - use core::task::{Context, Poll}; let waker = crate::util::async_poll::dummy_waker(); match fut.as_mut().poll(&mut Context::from_waker(&waker)) { Poll::Ready(()) => false, @@ -86,7 +177,16 @@ impl FutureQueue { #[cfg(test)] impl FutureSpawner for FutureQueue { - fn spawn + MaybeSend + 'static>(&self, future: T) { + type E = Infallible; + type SpawnedFutureResult = FutureQueueCompletion; + fn spawn + MaybeSend + 'static>( + &self, f: F, + ) -> FutureQueueCompletion { + let completion = FutureQueueCompletion::new(); + let compl_ref = completion.clone(); + let future = async move { + compl_ref.complete(f.await); + }; #[cfg(feature = "std")] { self.0.lock().unwrap().push(Box::pin(future)); @@ -95,6 +195,7 @@ impl FutureSpawner for FutureQueue { { self.0.borrow_mut().push(Box::pin(future)); } + completion } } @@ -102,7 +203,16 @@ impl FutureSpawner for FutureQueue { impl + MaybeSend + MaybeSync + 'static> FutureSpawner for D { - fn spawn + MaybeSend + 'static>(&self, future: T) { + type E = Infallible; + type SpawnedFutureResult = FutureQueueCompletion; + fn spawn + MaybeSend + 'static>( + &self, f: F, + ) -> FutureQueueCompletion { + let completion = FutureQueueCompletion::new(); + let compl_ref = completion.clone(); + let future = async move { + 
+			compl_ref.complete(f.await);
+		};
 		#[cfg(feature = "std")]
 		{
 			self.0.lock().unwrap().push(Box::pin(future));
@@ -111,5 +221,6 @@ impl<D: Deref<Target = FutureQueue> + MaybeSend + MaybeSync + 'static
 		{
 			self.0.borrow_mut().push(Box::pin(future));
 		}
+		completion
 	}
 }
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index 69b5c85a5ef..ab4f761fb34 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -16,6 +16,7 @@ use alloc::sync::Arc;
 use bitcoin::hashes::hex::FromHex;
 use bitcoin::{BlockHash, Txid};
 
+use core::convert::Infallible;
 use core::future::Future;
 use core::mem;
 use core::ops::Deref;
@@ -491,7 +492,11 @@ where
 struct PanicingSpawner;
 impl FutureSpawner for PanicingSpawner {
-	fn spawn<T: Future<Output = ()> + MaybeSend + 'static>(&self, _: T) {
+	type E = Infallible;
+	type SpawnedFutureResult<O: MaybeSend + 'static> = Box<dyn Future<Output = Result<O, Infallible>> + Unpin>;
+	fn spawn<O: MaybeSend + 'static, T: Future<Output = O> + MaybeSend + 'static>(
+		&self, _: T,
+	) -> Self::SpawnedFutureResult<O> {
 		unreachable!();
 	}
 }
@@ -959,7 +964,7 @@ where
 		let future = inner.persist_new_channel(monitor_name, monitor);
 		let channel_id = monitor.channel_id();
 		let completion = (monitor.channel_id(), monitor.get_latest_update_id());
-		self.0.future_spawner.spawn(async move {
+		let _runs_free = self.0.future_spawner.spawn(async move {
 			match future.await {
 				Ok(()) => {
 					inner.async_completed_updates.lock().unwrap().push(completion);
@@ -991,7 +996,7 @@ where
 			None
 		};
 		let inner = Arc::clone(&self.0);
-		self.0.future_spawner.spawn(async move {
+		let _runs_free = self.0.future_spawner.spawn(async move {
 			match future.await {
 				Ok(()) => if let Some(completion) = completion {
 					inner.async_completed_updates.lock().unwrap().push(completion);
@@ -1009,7 +1014,7 @@ where
 	pub(crate) fn spawn_async_archive_persisted_channel(&self, monitor_name: MonitorName) {
 		let inner = Arc::clone(&self.0);
-		self.0.future_spawner.spawn(async move {
+		let _runs_free = self.0.future_spawner.spawn(async move {
 			inner.archive_persisted_channel(monitor_name).await;
 		});
 	}

From 18f7a8f50b99e837474700f5c0eb26726e0f9397 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Sun, 12 Oct 2025 23:00:36 +0000
Subject: [PATCH 064/242] Add an option to deserialize monitors in parallel in async load

`MonitorUpdatingPersister::read_all_channel_monitors_with_updates` was
made to do the IO operations in parallel in a previous commit, however
in practice this doesn't provide material parallelism for large routing
nodes. Because deserializing `ChannelMonitor`s is the bulk of the work
(when IO operations are sufficiently fast), we end up blocked in
single-threaded work nearly the entire time.

Here, we add an alternative option - a new
`read_all_channel_monitors_with_updates_parallel` method which uses the
`FutureSpawner` to cause the deserialization operations to proceed in
parallel.
---
 lightning/src/util/persist.rs | 59 +++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)

diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index ab4f761fb34..09d509bfd80 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -870,6 +870,14 @@ where
 
 	/// Reads all stored channel monitors, along with any stored updates for them.
 	///
+	/// While the reads themselves are performed in parallel, deserializing the
+ /// + /// If you can move this object into an `Arc`, consider using + /// [`Self::read_all_channel_monitors_with_updates_parallel`] to parallelize the CPU-bound + /// deserialization as well. + /// /// It is extremely important that your [`KVStore::read`] implementation uses the /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the /// documentation for [`MonitorUpdatingPersister`]. @@ -898,6 +906,57 @@ where Ok(res) } + /// Reads all stored channel monitors, along with any stored updates for them, in parallel. + /// + /// Because deserializing large [`ChannelMonitor`]s from forwarding nodes is often CPU-bound, + /// this version of [`Self::read_all_channel_monitors_with_updates`] uses the [`FutureSpawner`] + /// to parallelize deserialization as well as the IO operations. + /// + /// Because [`FutureSpawner`] requires that the spawned future be `'static` (matching `tokio` + /// and other multi-threaded runtime requirements), this method requires that `self` be an + /// `Arc` that can live for `'static` and be sent and accessed across threads. + /// + /// It is extremely important that your [`KVStore::read`] implementation uses the + /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the + /// documentation for [`MonitorUpdatingPersister`]. + pub async fn read_all_channel_monitors_with_updates_parallel( + self: &Arc, + ) -> Result< + Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + io::Error, + > + where + K: MaybeSend + MaybeSync + 'static, + L: MaybeSend + MaybeSync + 'static, + ES: MaybeSend + MaybeSync + 'static, + SP: MaybeSend + MaybeSync + 'static, + BI: MaybeSend + MaybeSync + 'static, + FE: MaybeSend + MaybeSync + 'static, + ::EcdsaSigner: MaybeSend, + { + let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; + let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; + let monitor_list = self.0.kv_store.list(primary, secondary).await?; + let mut futures = Vec::with_capacity(monitor_list.len()); + for monitor_key in monitor_list { + let us = Arc::clone(&self); + futures.push(ResultFuture::Pending(self.0.future_spawner.spawn(async move { + us.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await + }))); + } + let future_results = MultiResultFuturePoller::new(futures).await; + let mut res = Vec::with_capacity(future_results.len()); + for result in future_results { + match result { + Err(_) => return Err(io::Error::new(io::ErrorKind::Other, "Future was cancelled")), + Ok(Err(e)) => return Err(e), + Ok(Ok(Some(read_res))) => res.push(read_res), + Ok(Ok(None)) => {}, + } + } + Ok(res) + } + /// Read a single channel monitor, along with any stored updates for it. /// /// It is extremely important that your [`KVStoreSync::read`] implementation uses the From 7471d05d078a7c86f8f6fdbf53e16862b8a869f7 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 8 Oct 2025 14:38:03 +0000 Subject: [PATCH 065/242] Avoid a storage RTT when loading `ChannelMonitor`s without updates When reading `ChannelMonitor`s from a `MonitorUpdatingPersister` on startup, we have to make sure to load any `ChannelMonitorUpdate`s and re-apply them as well. For users of async persistence who don't have any `ChannelMonitorUpdate`s (e.g. because they set `maximum_pending_updates` to 0 or, in the future, we avoid persisting updates for small `ChannelMonitor`s), this means two round-trips to the storage backend, one to load the `ChannelMonitor` and one to try to read the next `ChannelMonitorUpdate` only to have it fail. 
Instead, here, we use `KVStore::list` to fetch the list of stored
`ChannelMonitorUpdate`s, which for async `KVStore` users allows us to
parallelize the update-list fetching and the `ChannelMonitor` loading
itself. Then we know exactly when to stop reading
`ChannelMonitorUpdate`s, including reading none if there are none to
read.

This also avoids relying on `KVStore::read` correctly returning
`NotFound` in order to discover when to stop reading
`ChannelMonitorUpdate`s.
---
 lightning/src/util/async_poll.rs | 69 ++++++++++++++++++++++++
 lightning/src/util/persist.rs    | 92 +++++++++++---------------------
 2 files changed, 99 insertions(+), 62 deletions(-)

diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs
index 931d2817e30..57df5b26cb0 100644
--- a/lightning/src/util/async_poll.rs
+++ b/lightning/src/util/async_poll.rs
@@ -20,6 +20,75 @@ pub(crate) enum ResultFuture<F: Future<Output = O> + Unpin, O> {
 	Ready(O),
 }
 
+pub(crate) struct TwoFutureJoiner<
+	AO,
+	BO,
+	AF: Future<Output = AO> + Unpin,
+	BF: Future<Output = BO> + Unpin,
+> {
+	a: Option<ResultFuture<AF, AO>>,
+	b: Option<ResultFuture<BF, BO>>,
+}
+
+impl<AO, BO, AF: Future<Output = AO> + Unpin, BF: Future<Output = BO> + Unpin>
+	TwoFutureJoiner<AO, BO, AF, BF>
+{
+	pub fn new(future_a: AF, future_b: BF) -> Self {
+		Self { a: Some(ResultFuture::Pending(future_a)), b: Some(ResultFuture::Pending(future_b)) }
+	}
+}
+
+impl<AO, BO, AF: Future<Output = AO> + Unpin, BF: Future<Output = BO> + Unpin> Future
+	for TwoFutureJoiner<AO, BO, AF, BF>
+{
+	type Output = (AO, BO);
+	fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<(AO, BO)> {
+		let mut have_pending_futures = false;
+		// SAFETY: While we are pinned, we can't get direct access to our internal state because we
+		// aren't `Unpin`. However, we don't actually need the `Pin` - we only use it below on the
+		// `Future` in the `ResultFuture::Pending` case, and the `Future` is bound by `Unpin`.
+		// Thus, the `Pin` is not actually used, and it's safe to bypass it and access the inner
+		// reference directly.
+		let state = unsafe { &mut self.get_unchecked_mut() };
+		macro_rules! poll_future {
+			($future: ident) => {
+				match state.$future {
+					Some(ResultFuture::Pending(ref mut fut)) => match Pin::new(fut).poll(cx) {
+						Poll::Ready(res) => {
+							state.$future = Some(ResultFuture::Ready(res));
+						},
+						Poll::Pending => {
+							have_pending_futures = true;
+						},
+					},
+					Some(ResultFuture::Ready(_)) => {},
+					None => {
+						debug_assert!(false, "Future polled after Ready");
+						return Poll::Pending;
+					},
+				}
+			};
+		}
+		poll_future!(a);
+		poll_future!(b);
+
+		if have_pending_futures {
+			Poll::Pending
+		} else {
+			Poll::Ready((
+				match state.a.take() {
+					Some(ResultFuture::Ready(a)) => a,
+					_ => unreachable!(),
+				},
+				match state.b.take() {
+					Some(ResultFuture::Ready(b)) => b,
+					_ => unreachable!(),
+				},
+			))
+		}
+	}
+}
+
 pub(crate) struct MultiResultFuturePoller<F: Future<Output = O> + Unpin, O> {
 	futures_state: Vec<ResultFuture<F, O>>,
 }
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index 09d509bfd80..3eedfc42938 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -36,7 +36,7 @@ use crate::ln::types::ChannelId;
 use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider};
 use crate::sync::Mutex;
 use crate::util::async_poll::{
-	dummy_waker, MaybeSend, MaybeSync, MultiResultFuturePoller, ResultFuture,
+	dummy_waker, MaybeSend, MaybeSync, MultiResultFuturePoller, ResultFuture, TwoFutureJoiner,
 };
 use crate::util::logger::Logger;
 use crate::util::native_async::FutureSpawner;
@@ -576,15 +576,6 @@ fn poll_sync_future<F: Future>(future: F) -> F::Output {
 /// list channel monitors themselves and load channels individually using
 /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
 ///
-/// ## EXTREMELY IMPORTANT
-///
-/// It is extremely important that your [`KVStoreSync::read`] implementation uses the
-/// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
-/// that circumstance (not when there is really a permissions error, for example). This is because
-/// neither channel monitor reading function lists updates. Instead, either reads the monitor, and
-/// using its stored `update_id`, synthesizes update storage keys, and tries them in sequence until
-/// one is not found. All _other_ errors will be bubbled up in the function's [`Result`].
-///
 /// # Pruning stale channel updates
 ///
 /// Stale updates are pruned when the consolidation threshold is reached according to `maximum_pending_updates`.
@@ -658,10 +649,6 @@ where
 	}
 
 	/// Reads all stored channel monitors, along with any stored updates for them.
-	///
-	/// It is extremely important that your [`KVStoreSync::read`] implementation uses the
-	/// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
-	/// documentation for [`MonitorUpdatingPersister`].
 	pub fn read_all_channel_monitors_with_updates(
 		&self,
 	) -> Result<
@@ -673,10 +660,6 @@
 
 	/// Read a single channel monitor, along with any stored updates for it.
 	///
-	/// It is extremely important that your [`KVStoreSync::read`] implementation uses the
-	/// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
-	/// documentation for [`MonitorUpdatingPersister`].
-	///
 	/// For `monitor_key`, channel storage keys can be the channel's funding [`OutPoint`], with an
 	/// underscore `_` between txid and index for v1 channels.
For example, given: /// @@ -877,10 +860,6 @@ where /// If you can move this object into an `Arc`, consider using /// [`Self::read_all_channel_monitors_with_updates_parallel`] to parallelize the CPU-bound /// deserialization as well. - /// - /// It is extremely important that your [`KVStore::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. pub async fn read_all_channel_monitors_with_updates( &self, ) -> Result< @@ -915,10 +894,6 @@ where /// Because [`FutureSpawner`] requires that the spawned future be `'static` (matching `tokio` /// and other multi-threaded runtime requirements), this method requires that `self` be an /// `Arc` that can live for `'static` and be sent and accessed across threads. - /// - /// It is extremely important that your [`KVStore::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. pub async fn read_all_channel_monitors_with_updates_parallel( self: &Arc, ) -> Result< @@ -959,10 +934,6 @@ where /// Read a single channel monitor, along with any stored updates for it. /// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. - /// /// For `monitor_key`, channel storage keys can be the channel's funding [`OutPoint`], with an /// underscore `_` between txid and index for v1 channels. For example, given: /// @@ -1121,40 +1092,37 @@ where io::Error, > { let monitor_name = MonitorName::from_str(monitor_key)?; - let read_res = self.maybe_read_monitor(&monitor_name, monitor_key).await?; - let (block_hash, monitor) = match read_res { + let read_future = pin!(self.maybe_read_monitor(&monitor_name, monitor_key)); + let list_future = pin!(self + .kv_store + .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_key)); + let (read_res, list_res) = TwoFutureJoiner::new(read_future, list_future).await; + let (block_hash, monitor) = match read_res? { Some(res) => res, None => return Ok(None), }; - let mut current_update_id = monitor.get_latest_update_id(); - // TODO: Parallelize this loop by speculatively reading a batch of updates - loop { - current_update_id = match current_update_id.checked_add(1) { - Some(next_update_id) => next_update_id, - None => break, - }; - let update_name = UpdateName::from(current_update_id); - let update = match self.read_monitor_update(monitor_key, &update_name).await { - Ok(update) => update, - Err(err) if err.kind() == io::ErrorKind::NotFound => { - // We can't find any more updates, so we are done. - break; - }, - Err(err) => return Err(err), - }; - - monitor - .update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger) - .map_err(|e| { - log_error!( - self.logger, - "Monitor update failed. 
monitor: {} update: {} reason: {:?}",
-					monitor_key,
-					update_name.as_str(),
-					e
-				);
-				io::Error::new(io::ErrorKind::Other, "Monitor update failed")
-			})?;
+		let current_update_id = monitor.get_latest_update_id();
+		let updates: Result<Vec<UpdateName>, _> =
+			list_res?.into_iter().map(|name| UpdateName::new(name)).collect();
+		let mut updates = updates?;
+		updates.sort_unstable();
+		// TODO: Parallelize this loop
+		for update_name in updates {
+			if update_name.0 > current_update_id {
+				let update = self.read_monitor_update(monitor_key, &update_name).await?;
+				monitor
+					.update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger)
+					.map_err(|e| {
+						log_error!(
+							self.logger,
+							"Monitor update failed. monitor: {} update: {} reason: {:?}",
+							monitor_key,
+							update_name.as_str(),
+							e
+						);
+						io::Error::new(io::ErrorKind::Other, "Monitor update failed")
+					})?;
+			}
 		}
 		Ok(Some((block_hash, monitor)))
 	}
@@ -1529,7 +1497,7 @@ impl core::fmt::Display for MonitorName {
 /// let monitor_name = "some_monitor_name";
 /// let storage_key = format!("channel_monitor_updates/{}/{}", monitor_name, update_name.as_str());
 /// ```
-#[derive(Debug)]
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
 pub struct UpdateName(pub u64, String);
 
 impl UpdateName {

From 6ef22babcc6f76a5bb999f46344727adf1cac18f Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 9 Oct 2025 13:11:07 +0000
Subject: [PATCH 066/242] Parallelize `ChannelMonitorUpdate` loading

When reading `ChannelMonitor`s from a `MonitorUpdatingPersister` on
startup, we have to make sure to load any `ChannelMonitorUpdate`s and
re-apply them as well. Now that we know which `ChannelMonitorUpdate`s
to load from `list`ing the entries from the `KVStore` we can
parallelize the reads themselves, which we do here.

Now, loading all `ChannelMonitor`s from an async `KVStore` requires only
three full RTTs - one to list the set of `ChannelMonitor`s, one to both
fetch the `ChannelMonitor` and list the set of `ChannelMonitorUpdate`s,
and one to fetch all the `ChannelMonitorUpdate`s (with the last one
skipped when there are no `ChannelMonitorUpdate`s to read).
---
 lightning/src/util/persist.rs | 38 +++++++++++++++++++----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index 3eedfc42938..2e1e8805d0a 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -1106,23 +1106,27 @@ where
 			list_res?.into_iter().map(|name| UpdateName::new(name)).collect();
 		let mut updates = updates?;
 		updates.sort_unstable();
-		// TODO: Parallelize this loop
-		for update_name in updates {
-			if update_name.0 > current_update_id {
-				let update = self.read_monitor_update(monitor_key, &update_name).await?;
-				monitor
-					.update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger)
-					.map_err(|e| {
-						log_error!(
-							self.logger,
-							"Monitor update failed.
monitor: {} update: {} reason: {:?}", - monitor_key, - update_name.as_str(), - e - ); - io::Error::new(io::ErrorKind::Other, "Monitor update failed") - })?; - } + let updates_to_load = updates.iter().filter(|update| update.0 > current_update_id); + let mut update_futures = Vec::with_capacity(updates_to_load.clone().count()); + for update_name in updates_to_load { + update_futures.push(ResultFuture::Pending(Box::pin(async move { + (update_name, self.read_monitor_update(monitor_key, update_name).await) + }))); + } + for (update_name, update_res) in MultiResultFuturePoller::new(update_futures).await { + let update = update_res?; + monitor + .update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger) + .map_err(|e| { + log_error!( + self.logger, + "Monitor update failed. monitor: {} update: {} reason: {:?}", + monitor_key, + update_name.as_str(), + e + ); + io::Error::new(io::ErrorKind::Other, "Monitor update failed") + })?; } Ok(Some((block_hash, monitor))) } From b858b4445304d75dc54dc9e3fa8cc165e4d14bcb Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 16 Dec 2025 09:59:49 +0200 Subject: [PATCH 067/242] util: add support for option with custom encoding --- lightning/src/util/ser_macros.rs | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index 647e7c77a6c..86b24e1b849 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -852,6 +852,9 @@ macro_rules! _init_tlv_based_struct_field { ($field: ident, (required_vec, encoding: ($fieldty: ty, $encoding: ident))) => { $crate::_init_tlv_based_struct_field!($field, required) }; + ($field: ident, (option, encoding: ($fieldty: ty, $encoding: ident))) => { + $crate::_init_tlv_based_struct_field!($field, option) + }; ($field: ident, optional_vec) => { $field.unwrap() }; @@ -1924,4 +1927,31 @@ mod tests { LengthReadable::read_from_fixed_length_buffer(&mut &encoded[..]).unwrap(); assert_eq!(decoded, instance); } + + #[test] + fn test_option_with_encoding() { + // Ensure that serializing an option with a specified encoding will survive a ser round + // trip for Some and None options. 
+		#[derive(PartialEq, Eq, Debug)]
+		struct MyCustomStruct {
+			tlv_field: Option<u64>,
+		}
+
+		impl_writeable_msg!(MyCustomStruct, {}, {
+			(1, tlv_field, (option, encoding: (u64, HighZeroBytesDroppedBigSize))),
+		});
+
+		for tlv_field in [None, Some(0u64), Some(255u64)] {
+			let instance = MyCustomStruct { tlv_field };
+			let encoded = instance.encode();
+			let decoded: MyCustomStruct =
+				LengthReadable::read_from_fixed_length_buffer(&mut &encoded[..]).unwrap();
+			assert_eq!(
+				decoded,
+				MyCustomStruct { tlv_field },
+				"option custom encoding failed for: {:?}",
+				tlv_field
+			);
+		}
+	}
 }

From 46aa1cf8532104808df190b216074971248d922a Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Thu, 23 Oct 2025 10:15:14 -0400
Subject: [PATCH 068/242] ln: add experimental accountable signal to update_add_htlc
---
 lightning/src/ln/blinded_payment_tests.rs   |   1 +
 lightning/src/ln/channel.rs                 |   1 +
 lightning/src/ln/functional_tests.rs        |   1 +
 lightning/src/ln/htlc_reserve_unit_tests.rs |   5 +
 lightning/src/ln/msgs.rs                    | 110 +++++++++++++++++++-
 lightning/src/ln/onion_payment.rs           |   1 +
 lightning/src/ln/payment_tests.rs           |   1 +
 7 files changed, 119 insertions(+), 1 deletion(-)

diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs
index 7941a81f61e..914f5360472 100644
--- a/lightning/src/ln/blinded_payment_tests.rs
+++ b/lightning/src/ln/blinded_payment_tests.rs
@@ -1526,6 +1526,7 @@ fn update_add_msg(
 		skimmed_fee_msat: None,
 		blinding_point,
 		hold_htlc: None,
+		accountable: None,
 	}
 }
 
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 128091ccdd5..6a05f15d3cf 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -9747,6 +9747,7 @@ where
 					skimmed_fee_msat: htlc.skimmed_fee_msat,
 					blinding_point: htlc.blinding_point,
 					hold_htlc: htlc.hold_htlc,
+					accountable: None,
 				});
 			}
 		}
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index e2963dbeb09..58ef44c4939 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -2270,6 +2270,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() {
 			skimmed_fee_msat: None,
 			blinding_point: None,
 			hold_htlc: None,
+			accountable: None,
 		};
 		nodes[0].node.handle_update_add_htlc(node_b_id, &update_add_htlc);
 	}
diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs
index 86c95721d47..4c4fbada7dd 100644
--- a/lightning/src/ln/htlc_reserve_unit_tests.rs
+++ b/lightning/src/ln/htlc_reserve_unit_tests.rs
@@ -839,6 +839,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option<UserConfig>, htlc_fails: bool) {
 		skimmed_fee_msat: None,
 		blinding_point: None,
 		hold_htlc: None,
+		accountable: None,
 	};
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &msg);
@@ -1082,6 +1083,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 		skimmed_fee_msat: None,
 		blinding_point: None,
 		hold_htlc: None,
+		accountable: None,
 	};
 
 	nodes[0].node.handle_update_add_htlc(node_b_id, &msg);
@@ -1266,6 +1268,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 		skimmed_fee_msat: None,
 		blinding_point: None,
 		hold_htlc: None,
+		accountable: None,
 	};
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &msg);
@@ -1650,6 +1653,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
 		skimmed_fee_msat: None,
 		blinding_point: None,
 		hold_htlc: None,
+		accountable: None,
 	};
 
 	for i in 0..50 {
@@ -2256,6 +2260,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) {
 		skimmed_fee_msat: None,
 		blinding_point: None,
 		hold_htlc: None,
+		accountable: None,
 	};
 
 	nodes[1].node.handle_update_add_htlc(node_a_id, &msg);
diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index f237d73e533..dd9c8ff7756 100644
--- a/lightning/src/ln/msgs.rs
+++ b/lightning/src/ln/msgs.rs
@@ -768,6 +768,45 @@ pub struct UpdateAddHTLC {
 	///
 	/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
 	pub hold_htlc: Option<()>,
+	/// An experimental field indicating whether the receiving node's reputation would be held
+	/// accountable for the timely resolution of the HTLC.
+	///
+	/// Note that this field is [`experimental`] so should not be used for forwarding decisions.
+	///
+	/// [`experimental`]: https://github.com/lightning/blips/blob/master/blip-0004.md
+	pub accountable: Option<bool>,
+}
+
+struct AccountableBool<T>(T);
+
+impl Writeable for AccountableBool<bool> {
+	#[inline]
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+		let wire_value = if self.0 { 7u8 } else { 0u8 };
+		writer.write_all(&[wire_value])
+	}
+}
+
+impl Readable for AccountableBool<bool> {
+	#[inline]
+	fn read<R: Read>(reader: &mut R) -> Result<AccountableBool<bool>, DecodeError> {
+		let mut buf = [0u8; 1];
+		reader.read_exact(&mut buf)?;
+		let bool_value = buf[0] == 7;
+		Ok(AccountableBool(bool_value))
+	}
+}
+
+impl From<bool> for AccountableBool<bool> {
+	fn from(val: bool) -> Self {
+		Self(val)
+	}
+}
+
+impl From<AccountableBool<bool>> for bool {
+	fn from(val: AccountableBool<bool>) -> Self {
+		val.0
+	}
 }
 
 /// An [`onion message`] to be sent to or received from a peer.
@@ -3375,6 +3414,7 @@ impl_writeable_msg!(UpdateAddHTLC, {
 	// TODO: currently we may fail to read the `ChannelManager` if we write a new even TLV in this message
 	// and then downgrade. Once this is fixed, update the type here to match BOLTs PR 989.
 	(75537, hold_htlc, option),
+	(106823, accountable, (option, encoding: (bool, AccountableBool))),
 });
 
 impl LengthReadable for OnionMessage {
@@ -4374,7 +4414,7 @@ mod tests {
 	};
 	use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
 	use crate::util::ser::{BigSize, Hostname, LengthReadable, Readable, ReadableArgs, Writeable};
-	use crate::util::test_utils;
+	use crate::util::test_utils::{self, pubkey};
 
 	use bitcoin::hex::DisplayHex;
 	use bitcoin::{Amount, ScriptBuf, Sequence, Transaction, TxIn, TxOut, Witness};
@@ -5874,6 +5914,7 @@ mod tests {
 			skimmed_fee_msat: None,
 			blinding_point: None,
 			hold_htlc: None,
+			accountable: None,
 		};
 		let encoded_value = update_add_htlc.encode();
 		let target_value =
>::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d32144668701144760101010101010101010101010101010101010101010101010101010101010101000c89d4ff031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap(); @@ -6761,4 +6802,71 @@ mod tests { .to_socket_addrs() .is_err()); } + + fn test_update_add_htlc() -> msgs::UpdateAddHTLC { + msgs::UpdateAddHTLC { + channel_id: ChannelId::from_bytes([2; 32]), + htlc_id: 42, + amount_msat: 1000, + payment_hash: PaymentHash([1; 32]), + cltv_expiry: 500000, + skimmed_fee_msat: None, + onion_routing_packet: msgs::OnionPacket { + version: 0, + public_key: Ok(pubkey(42)), + hop_data: [1; 20 * 65], + hmac: [2; 32], + }, + blinding_point: None, + hold_htlc: None, + accountable: None, + } + } + + #[test] + fn test_update_add_htlc_accountable_encoding() { + // Tests that accountable boolean 
values are written to the wire with correct u8 values.
+		for (bool_signal, wire_value) in [(Some(false), 0u8), (Some(true), 7u8)] {
+			let mut base_msg = test_update_add_htlc();
+			base_msg.accountable = bool_signal;
+			let encoded = base_msg.encode();
+			assert_eq!(
+				*encoded.last().unwrap(),
+				wire_value,
+				"wrong wire value for accountable={:?}",
+				bool_signal
+			);
+		}
+	}
+
+	fn do_test_htlc_accountable_from_u8(accountable_override: Option<u8>, expected: Option<bool>) {
+		// Tests custom encoding conversion of u8 wire values to appropriate boolean, manually
+		// writing to support values that we wouldn't encode ourselves but should be able to read.
+		let base_msg = test_update_add_htlc();
+		let mut encoded = base_msg.encode();
+		if let Some(value) = accountable_override {
+			encoded.extend_from_slice(&[0xfe, 0x00, 0x01, 0xa1, 0x47]);
+			encoded.push(1);
+			encoded.push(value);
+		}
+
+		let decoded: msgs::UpdateAddHTLC =
+			LengthReadable::read_from_fixed_length_buffer(&mut &encoded[..]).unwrap();
+
+		assert_eq!(
+			decoded.accountable, expected,
+			"accountable={:?} with override={:?} not eq to expected={:?}",
+			decoded.accountable, accountable_override, expected
+		);
+	}
+
+	#[test]
+	fn update_add_htlc_accountable_from_u8() {
+		// Tests that accountable signals encoded as a u8 are properly translated to a bool.
+		do_test_htlc_accountable_from_u8(None, None);
+		do_test_htlc_accountable_from_u8(Some(8), Some(false)); // 8 is an invalid value
+		do_test_htlc_accountable_from_u8(Some(7), Some(true));
+		do_test_htlc_accountable_from_u8(Some(3), Some(false));
+		do_test_htlc_accountable_from_u8(Some(0), Some(false));
+	}
 }
diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs
index 1abe4330a25..2b1b3a1876c 100644
--- a/lightning/src/ln/onion_payment.rs
+++ b/lightning/src/ln/onion_payment.rs
@@ -814,6 +814,7 @@ mod tests {
 			skimmed_fee_msat: None,
 			blinding_point: None,
 			hold_htlc: None,
+			accountable: None,
 		}
 	}
 
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index f9894fa8819..8f209c88e25 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -5103,6 +5103,7 @@ fn peel_payment_onion_custom_tlvs() {
 		onion_routing_packet,
 		blinding_point: None,
 		hold_htlc: None,
+		accountable: None,
 	};
 	let peeled_onion = crate::ln::onion_payment::peel_payment_onion(
 		&update_add,

From 413aa95ca6a9e1092f4e3a02ea5998b6dd8e0704 Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Thu, 23 Oct 2025 14:08:03 -0400
Subject: [PATCH 069/242] ln: add incoming_accountable to PendingHTLCInfo

Persist as a bool so that we don't need to use Option<bool> when we will
just inevitably unwrap_or(false) the field. This means that we won't be
able to distinguish between an incoming htlc that has no TLV set, and
one that has the TLV set with a false value in it. We accept this loss
of information for the sake of simplicity in the codebase.
---
 lightning/src/ln/channelmanager.rs | 14 ++++++++++----
 lightning/src/ln/onion_payment.rs  |  7 +++++--
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index f3399ff8787..6284dedfa4b 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -427,6 +427,9 @@ pub struct PendingHTLCInfo {
 	/// This is used to allow LSPs to take fees as a part of payments, without the sender having to
 	/// shoulder them.
 	pub skimmed_fee_msat: Option<u64>,
+	/// An experimental field indicating whether our node's reputation would be held accountable
+	/// for the timely resolution of the received HTLC.
+	pub incoming_accountable: bool,
 }
 
 #[derive(Clone, Debug)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug
@@ -5249,7 +5252,7 @@ where
 				let current_height: u32 = self.best_block.read().unwrap().height;
 				create_recv_pending_htlc_info(decoded_hop, shared_secret, msg.payment_hash,
 					msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
-					current_height)
+					msg.accountable.unwrap_or(false), current_height)
 			},
 			onion_utils::Hop::Forward { .. } | onion_utils::Hop::BlindedForward { .. } => {
 				create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt)
@@ -7375,6 +7378,7 @@ where
 						payment_hash,
 						outgoing_amt_msat,
 						outgoing_cltv_value,
+						incoming_accountable,
 						..
 					},
 			} = payment;
@@ -7473,6 +7477,7 @@ where
 							Some(phantom_shared_secret),
 							false,
 							None,
+							incoming_accountable,
 							current_height,
 						);
 						match create_res {
@@ -16248,6 +16253,7 @@ impl_writeable_tlv_based!(PendingHTLCInfo, {
 	(8, outgoing_cltv_value, required),
 	(9, incoming_amt_msat, option),
 	(10, skimmed_fee_msat, option),
+	(11, incoming_accountable, (default_value, false)),
 });
 
 impl Writeable for HTLCFailureMsg {
@@ -19837,7 +19843,7 @@ mod tests {
 		if let Err(crate::ln::channelmanager::InboundHTLCErr { reason, .. }) =
 			create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
 				sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
-				current_height)
+				false, current_height)
 		{
 			assert_eq!(reason, LocalHTLCFailureReason::FinalIncorrectHTLCAmount);
 		} else { panic!(); }
@@ -19860,7 +19866,7 @@
 		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
 		assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
 			sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
-			current_height).is_ok());
+			false, current_height).is_ok());
 	}
 
 	#[test]
@@ -19885,7 +19891,7 @@
 				custom_tlvs: Vec::new(),
 			},
 			shared_secret: SharedSecret::from_bytes([0; 32]),
-		}, [0; 32], PaymentHash([0; 32]), 100, TEST_FINAL_CLTV + 1, None, true, None, current_height);
+		}, [0; 32], PaymentHash([0; 32]), 100, TEST_FINAL_CLTV + 1, None, true, None, false, current_height);
 
 	// Should not return an error as this condition:
 	// https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334
diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs
index 2b1b3a1876c..6c841f5e17e 100644
--- a/lightning/src/ln/onion_payment.rs
+++ b/lightning/src/ln/onion_payment.rs
@@ -267,6 +267,7 @@ pub(super) fn create_fwd_pending_htlc_info(
 		outgoing_amt_msat: amt_to_forward,
 		outgoing_cltv_value,
 		skimmed_fee_msat: None,
+		incoming_accountable: msg.accountable.unwrap_or(false),
 	})
 }
 
@@ -274,7 +275,7 @@ pub(super) fn create_recv_pending_htlc_info(
 	hop_data: onion_utils::Hop, shared_secret: [u8; 32], payment_hash: PaymentHash,
 	amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool,
-	counterparty_skimmed_fee_msat: Option<u64>, current_height: u32
+	counterparty_skimmed_fee_msat: Option<u64>, incoming_accountable: bool, current_height: u32
 ) -> Result<PendingHTLCInfo, InboundHTLCErr> {
 	let (
 		payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, onion_cltv_expiry,
@@ -456,6 +457,7 @@ pub(super) fn create_recv_pending_htlc_info(
outgoing_amt_msat: onion_amt_msat, outgoing_cltv_value: onion_cltv_expiry, skimmed_fee_msat: counterparty_skimmed_fee_msat, + incoming_accountable, }) } @@ -520,7 +522,8 @@ where let shared_secret = hop.shared_secret().secret_bytes(); create_recv_pending_htlc_info( hop, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, - None, allow_skimmed_fees, msg.skimmed_fee_msat, cur_height, + None, allow_skimmed_fees, msg.skimmed_fee_msat, + msg.accountable.unwrap_or(false), cur_height, )? } }) From d084a9da4f53deb74b1f7541c7d98622ff41efd2 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 23 Oct 2025 10:48:24 -0400 Subject: [PATCH 070/242] ln: add accountable signal to HTLCUpdateAwaitingACK::AddHTLC --- lightning/src/ln/channel.rs | 37 ++++++++++++++++++++++++++---- lightning/src/ln/channelmanager.rs | 3 +++ 2 files changed, 36 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 6a05f15d3cf..93b79a8d02d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -469,6 +469,7 @@ enum HTLCUpdateAwaitingACK { skimmed_fee_msat: Option, blinding_point: Option, hold_htlc: Option<()>, + accountable: bool, }, ClaimHTLC { payment_preimage: PaymentPreimage, @@ -8405,7 +8406,7 @@ where skimmed_fee_msat, blinding_point, hold_htlc, - .. + accountable, } => { match self.send_htlc( amount_msat, @@ -8417,6 +8418,7 @@ where skimmed_fee_msat, blinding_point, hold_htlc.is_some(), + accountable, fee_estimator, logger, ) { @@ -12593,7 +12595,8 @@ where pub fn queue_add_htlc( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option, - blinding_point: Option, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + blinding_point: Option, accountable: bool, + fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result<(), (LocalHTLCFailureReason, String)> where F::Target: FeeEstimator, @@ -12610,6 +12613,7 @@ where blinding_point, // This method is only called for forwarded HTLCs, which are never held at the next hop false, + accountable, fee_estimator, logger, ) @@ -12641,7 +12645,7 @@ where &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, skimmed_fee_msat: Option, blinding_point: Option, hold_htlc: bool, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + accountable: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result where F::Target: FeeEstimator, @@ -12723,6 +12727,7 @@ where skimmed_fee_msat, blinding_point, hold_htlc: hold_htlc.then(|| ()), + accountable, }); return Ok(false); } @@ -12994,7 +12999,8 @@ where pub fn send_htlc_and_commit( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option, - hold_htlc: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + hold_htlc: bool, accountable: bool, fee_estimator: &LowerBoundedFeeEstimator, + logger: &L, ) -> Result, ChannelError> where F::Target: FeeEstimator, @@ -13010,6 +13016,7 @@ where skimmed_fee_msat, None, hold_htlc, + accountable, fee_estimator, logger, ); @@ -14679,6 +14686,8 @@ where Vec::with_capacity(holding_cell_htlc_update_count); let mut holding_cell_held_htlc_flags: Vec> = Vec::with_capacity(holding_cell_htlc_update_count); + let mut holding_cell_accountable_flags: Vec = + 
Vec::with_capacity(holding_cell_htlc_update_count); // Vec of (htlc_id, failure_code, sha256_of_onion) let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new(); (holding_cell_htlc_update_count as u64).write(writer)?; @@ -14693,6 +14702,7 @@ where blinding_point, skimmed_fee_msat, hold_htlc, + accountable, } => { 0u8.write(writer)?; amount_msat.write(writer)?; @@ -14704,6 +14714,7 @@ where holding_cell_skimmed_fees.push(skimmed_fee_msat); holding_cell_blinding_points.push(blinding_point); holding_cell_held_htlc_flags.push(hold_htlc); + holding_cell_accountable_flags.push(accountable); }, &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, @@ -14965,6 +14976,7 @@ where (71, holder_commitment_point_previous_revoked, option), // Added in 0.3 (73, holder_commitment_point_last_revoked, option), // Added in 0.3 (75, inbound_committed_update_adds, optional_vec), + (77, holding_cell_accountable_flags, optional_vec), // Added in 0.3 }); Ok(()) @@ -15151,6 +15163,7 @@ where skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: false, }, 1 => HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: Readable::read(reader)?, @@ -15353,6 +15366,7 @@ where let mut pending_outbound_held_htlc_flags_opt: Option>> = None; let mut holding_cell_held_htlc_flags_opt: Option>> = None; let mut inbound_committed_update_adds_opt: Option>> = None; + let mut holding_cell_accountable: Option> = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -15403,6 +15417,7 @@ where (71, holder_commitment_point_previous_revoked_opt, option), // Added in 0.3 (73, holder_commitment_point_last_revoked_opt, option), // Added in 0.3 (75, inbound_committed_update_adds_opt, optional_vec), + (77, holding_cell_accountable, optional_vec), // Added in 0.3 }); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); @@ -15538,6 +15553,19 @@ where } } + if let Some(accountable_htlcs) = holding_cell_accountable { + let mut iter = accountable_htlcs.into_iter(); + for htlc in holding_cell_htlc_updates.iter_mut() { + if let HTLCUpdateAwaitingACK::AddHTLC { ref mut accountable, .. } = htlc { + *accountable = iter.next().ok_or(DecodeError::InvalidValue)?; + } + } + // We expect all accountable HTLC signals to be consumed above + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } + } + if let Some(attribution_data_list) = removed_htlc_attribution_data { let mut removed_htlcs = pending_inbound_htlcs.iter_mut().filter_map(|status| { if let InboundHTLCState::LocalRemoved(reason) = &mut status.state { @@ -16622,6 +16650,7 @@ mod tests { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: false, }; let dummy_holding_cell_claim_htlc = |attribution_data| HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: PaymentPreimage([42; 32]), diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 6284dedfa4b..26255b9a833 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5479,6 +5479,7 @@ where onion_packet, None, hold_htlc_at_next_hop, + false, // Not accountable by default for sender. &self.fee_estimator, &&logger, ); @@ -7587,6 +7588,7 @@ where outgoing_cltv_value, routing, skimmed_fee_msat, + incoming_accountable, .. }, .. 
@@ -7687,6 +7689,7 @@ where onion_packet.clone(), *skimmed_fee_msat, next_blinding_point, + *incoming_accountable, &self.fee_estimator, &&logger, ) { From d3b58e3b9e8b2c4c5e212571d12d4b936732ed7f Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 8 Jan 2026 11:40:09 -0500 Subject: [PATCH 071/242] ln: add accountable signal to OutboundHTLCOutput --- lightning/src/ln/channel.rs | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 93b79a8d02d..7a17c79a409 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -451,6 +451,7 @@ struct OutboundHTLCOutput { skimmed_fee_msat: Option, send_timestamp: Option, hold_htlc: Option<()>, + accountable: bool, } /// See AwaitingRemoteRevoke ChannelState for more info @@ -9749,7 +9750,7 @@ where skimmed_fee_msat: htlc.skimmed_fee_msat, blinding_point: htlc.blinding_point, hold_htlc: htlc.hold_htlc, - accountable: None, + accountable: Some(htlc.accountable), }); } } @@ -12750,6 +12751,7 @@ where skimmed_fee_msat, send_timestamp, hold_htlc: hold_htlc.then(|| ()), + accountable, }); self.context.next_holder_htlc_id += 1; @@ -14632,6 +14634,7 @@ where let mut pending_outbound_skimmed_fees: Vec> = Vec::new(); let mut pending_outbound_blinding_points: Vec> = Vec::new(); let mut pending_outbound_held_htlc_flags: Vec> = Vec::new(); + let mut pending_outbound_accountable: Vec = Vec::new(); (self.context.pending_outbound_htlcs.len() as u64).write(writer)?; for htlc in self.context.pending_outbound_htlcs.iter() { @@ -14675,6 +14678,7 @@ where pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat); pending_outbound_blinding_points.push(htlc.blinding_point); pending_outbound_held_htlc_flags.push(htlc.hold_htlc); + pending_outbound_accountable.push(htlc.accountable); } let holding_cell_htlc_update_count = self.context.holding_cell_htlc_updates.len(); @@ -14977,6 +14981,7 @@ where (73, holder_commitment_point_last_revoked, option), // Added in 0.3 (75, inbound_committed_update_adds, optional_vec), (77, holding_cell_accountable_flags, optional_vec), // Added in 0.3 + (79, pending_outbound_accountable, optional_vec), // Added in 0.3 }); Ok(()) @@ -15144,6 +15149,7 @@ where blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); } @@ -15367,6 +15373,7 @@ where let mut holding_cell_held_htlc_flags_opt: Option>> = None; let mut inbound_committed_update_adds_opt: Option>> = None; let mut holding_cell_accountable: Option> = None; + let mut pending_outbound_accountable: Option> = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -15418,6 +15425,7 @@ where (73, holder_commitment_point_last_revoked_opt, option), // Added in 0.3 (75, inbound_committed_update_adds_opt, optional_vec), (77, holding_cell_accountable, optional_vec), // Added in 0.3 + (79, pending_outbound_accountable, optional_vec), // Added in 0.3 }); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); @@ -15565,7 +15573,16 @@ where return Err(DecodeError::InvalidValue); } } - + if let Some(accountable_htlcs) = pending_outbound_accountable { + let mut iter = accountable_htlcs.into_iter(); + for htlc in pending_outbound_htlcs.iter_mut() { + htlc.accountable = iter.next().ok_or(DecodeError::InvalidValue)?; + } + // We expect all accountable HTLC signals to be consumed above + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } + } if let Some(attribution_data_list) = 
removed_htlc_attribution_data { let mut removed_htlcs = pending_inbound_htlcs.iter_mut().filter_map(|status| { if let InboundHTLCState::LocalRemoved(reason) = &mut status.state { @@ -16169,6 +16186,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass @@ -16624,6 +16642,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }; let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10]; for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() { @@ -17022,6 +17041,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); let payment_preimage_3 = @@ -17037,6 +17057,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); let payment_preimage_4 = @@ -17452,6 +17473,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput { @@ -17465,6 +17487,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8", @@ -17706,6 +17729,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }), ); @@ -17769,6 +17793,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }), ); @@ -17851,6 +17876,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, } }), ); @@ -17907,6 +17933,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }), ); @@ -17943,6 +17970,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }, ), ); @@ -17980,6 +18008,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }, ), ); @@ -18017,6 +18046,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }, ), ); @@ -18077,6 +18107,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }), ); From 7c97f3c08738851e66d3daa229a4a31de8656817 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 16 Dec 2025 10:55:14 -0500 Subject: [PATCH 072/242] Remove unnecessary update_add clone on Channel ser --- lightning/src/ln/channel.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 57f10207e17..306d3c9f6ef 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -14713,7 +14713,7 @@ where } } let mut removed_htlc_attribution_data: Vec<&Option> = Vec::new(); - let mut inbound_committed_update_adds: Vec> = Vec::new(); + let mut inbound_committed_update_adds: Vec<&Option> = Vec::new(); (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { @@ -14735,7 +14735,7 @@ where }, &InboundHTLCState::Committed { ref update_add_htlc_opt } => { 3u8.write(writer)?; - inbound_committed_update_adds.push(update_add_htlc_opt.clone()); + 
inbound_committed_update_adds.push(update_add_htlc_opt); }, &InboundHTLCState::LocalRemoved(ref removal_reason) => { 4u8.write(writer)?; From 351bd3cec970f43916b27f923d7b63ef97e83f54 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 12 Nov 2025 16:31:57 -0500 Subject: [PATCH 073/242] ln/test: add test coverage for accountable signal propagation --- lightning/src/ln/accountable_tests.rs | 102 ++++++++++++++++++++++++++ lightning/src/ln/mod.rs | 2 + 2 files changed, 104 insertions(+) create mode 100644 lightning/src/ln/accountable_tests.rs diff --git a/lightning/src/ln/accountable_tests.rs b/lightning/src/ln/accountable_tests.rs new file mode 100644 index 00000000000..442186b376a --- /dev/null +++ b/lightning/src/ln/accountable_tests.rs @@ -0,0 +1,102 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Tests for verifying the correct relay of accountable signals between nodes. + +use crate::ln::channelmanager::{ + HTLCForwardInfo, PaymentId, PendingAddHTLCInfo, PendingHTLCInfo, RecipientOnionFields, Retry, +}; +use crate::ln::functional_test_utils::*; +use crate::ln::msgs::ChannelMessageHandler; +use crate::routing::router::{PaymentParameters, RouteParameters}; + +fn test_accountable_forwarding_with_override( + override_accountable: Option, expected_forwarded: bool, +) { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let _chan_ab = create_announced_chan_between_nodes(&nodes, 0, 1); + let _chan_bc = create_announced_chan_between_nodes(&nodes, 1, 2); + + let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); + let route_params = RouteParameters::from_payment_params_and_value( + PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV), + 100_000, + ); + let onion_fields = RecipientOnionFields::secret_only(payment_secret); + let payment_id = PaymentId(payment_hash.0); + nodes[0] + .node + .send_payment(payment_hash, onion_fields, payment_id, route_params, Retry::Attempts(0)) + .unwrap(); + check_added_monitors(&nodes[0], 1); + + let updates_ab = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + assert_eq!(updates_ab.update_add_htlcs.len(), 1); + let mut htlc_ab = updates_ab.update_add_htlcs[0].clone(); + assert_eq!(htlc_ab.accountable, Some(false)); + + // Override accountable value if requested + if let Some(override_value) = override_accountable { + htlc_ab.accountable = Some(override_value); + } + + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &htlc_ab); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_ab.commitment_signed, false, false); + expect_and_process_pending_htlcs(&nodes[1], false); + check_added_monitors(&nodes[1], 1); + + let updates_bc = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); + assert_eq!(updates_bc.update_add_htlcs.len(), 1); + let htlc_bc = &updates_bc.update_add_htlcs[0]; + assert_eq!( + htlc_bc.accountable, + Some(expected_forwarded), + "B -> C should have accountable = {:?}", + expected_forwarded + ); + + nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), htlc_bc); + 
do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_bc.commitment_signed, false, false); + + // Accountable signal is not surfaced in PaymentClaimable, so we do our next-best and check + // that the received htlcs that will be processed has the signal set as we expect. We manually + // process pending update adds so that we can access the htlc in forward_htlcs. + nodes[2].node.test_process_pending_update_add_htlcs(); + { + let fwds_lock = nodes[2].node.forward_htlcs.lock().unwrap(); + let recvs = fwds_lock.get(&0).unwrap(); + assert_eq!(recvs.len(), 1); + match recvs[0] { + HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + forward_info: PendingHTLCInfo { incoming_accountable, .. }, + .. + }) => { + assert_eq!(incoming_accountable, expected_forwarded) + }, + _ => panic!("Unexpected forward"), + } + } + + expect_and_process_pending_htlcs(&nodes[2], false); + check_added_monitors(&nodes[2], 0); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, 100_000); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); +} + +#[test] +fn test_accountable_signal() { + // Tests forwarding of accountable signal for various incoming signal values. + test_accountable_forwarding_with_override(None, false); + test_accountable_forwarding_with_override(Some(true), true); + test_accountable_forwarding_with_override(Some(false), false); +} diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index 04aa8181b92..e782fee92f6 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -52,6 +52,8 @@ pub(crate) mod interactivetxs; // without the node parameter being mut. This is incorrect, and thus newer rustcs will complain // about an unnecessary mut. Thus, we silence the unused_mut warning in two test modules below. +#[cfg(test)] +mod accountable_tests; #[cfg(test)] #[allow(unused_mut)] mod async_payments_tests; From 6578b88dafe08c507a58809876dc907969eeafa1 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Wed, 10 Dec 2025 20:43:02 +0000 Subject: [PATCH 074/242] Clarify splicing feature flag requirements --- lightning/src/ln/channelmanager.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f2e8fa70e4f..b0ed9af7992 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4854,6 +4854,12 @@ where /// the channel. This will spend the channel's funding transaction output, effectively replacing /// it with a new one. /// + /// # Required Feature Flags + /// + /// Initiating a splice requires that the channel counterparty supports splicing. Any + /// channel (no matter the type) can be spliced, as long as the counterparty is currently + /// connected. + /// /// # Arguments /// /// Provide a `contribution` to determine if value is spliced in or out. The splice initiator is From 2a1273be7f00a03d6a663816059c9b5818e1174c Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 5 Jan 2026 14:04:19 -0500 Subject: [PATCH 075/242] Move is_chan_closed check into loop Necessary for the next commit and makes it easier to read. 
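In schematic form (a simplified sketch of the shape of this refactor, not the
actual `ChannelManager` code; `Source` is a stand-in for `HTLCSource`):

	// A stand-in for HTLCSource, reduced to two variants.
	enum Source {
		PreviousHop(u64),
		OutboundRoute(u64),
	}

	// Before: the closed-channel check gated the entire loop, so neither
	// variant was handled while the channel remained open.
	fn process_before(is_channel_closed: bool, htlcs: &[Source]) {
		if is_channel_closed {
			for _htlc in htlcs { /* handle both variants */ }
		}
	}

	// After: the check moves into the one arm that needs it, so the other
	// arm can run even when the channel is still open (which the next
	// commit relies on).
	fn process_after(is_channel_closed: bool, htlcs: &[Source]) {
		for htlc in htlcs {
			match htlc {
				Source::PreviousHop(_) => {
					if !is_channel_closed {
						continue;
					}
					// forwarded-HTLC cleanup, only relevant for closed channels
				},
				Source::OutboundRoute(_) => {
					// outbound-payment handling
				},
			}
		}
	}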
--- lightning/src/ln/channelmanager.rs | 285 +++++++++++++++-------------- 1 file changed, 147 insertions(+), 138 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 0b6cc07738a..b774467215b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18005,156 +18005,165 @@ where is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); } - if is_channel_closed { - for (htlc_source, (htlc, preimage_opt)) in - monitor.get_all_current_outbound_htlcs() - { - let logger = WithChannelMonitor::from( - &args.logger, - monitor, - Some(htlc.payment_hash), - ); - let htlc_id = SentHTLCId::from_source(&htlc_source); - match htlc_source { - HTLCSource::PreviousHopData(prev_hop_data) => { - let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint - && info.prev_htlc_id == prev_hop_data.htlc_id - }; - // The ChannelMonitor is now responsible for this HTLC's - // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs`, - // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not - // persisted after the monitor was when forwarding the payment. - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &args.logger, - ); - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs_legacy, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &args.logger, - ); - forward_htlcs_legacy.retain(|_, forwards| { - forwards.retain(|forward| { - if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - false - } else { true } + for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() + { + let logger = + WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); + let htlc_id = SentHTLCId::from_source(&htlc_source); + match htlc_source { + HTLCSource::PreviousHopData(prev_hop_data) => { + let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { + info.prev_funding_outpoint == prev_hop_data.outpoint + && info.prev_htlc_id == prev_hop_data.htlc_id + }; + if !is_channel_closed { + continue; + } + // The ChannelMonitor is now responsible for this HTLC's + // failure/success and will let us know what its outcome is. If we + // still have an entry for this HTLC in `forward_htlcs`, + // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not + // persisted after the monitor was when forwarding the payment. 
+ dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, + ); + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs_legacy, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, + ); + forward_htlcs_legacy.retain(|_, forwards| { + forwards.retain(|forward| { + if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", + &htlc.payment_hash, &monitor.channel_id()); + false } else { true } - }); - !forwards.is_empty() - }); - pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - pending_events_read.retain(|(event, _)| { - if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { - intercepted_id != ev_id - } else { true } - }); - false } else { true } }); - }, - HTLCSource::OutboundRoute { - payment_id, - session_priv, - path, - bolt12_invoice, - .. - } => { - if let Some(preimage) = preimage_opt { - let pending_events = Mutex::new(pending_events_read); - let update = PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id, - }; - let mut compl_action = Some( - EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) - ); - pending_outbounds.claim_htlc( - payment_id, - preimage, - bolt12_invoice, - session_priv, - path, - true, - &mut compl_action, - &pending_events, - ); - // If the completion action was not consumed, then there was no - // payment to claim, and we need to tell the `ChannelMonitor` - // we don't need to hear about the HTLC again, at least as long - // as the PaymentSent event isn't still sitting around in our - // event queue. - let have_action = if compl_action.is_some() { - let pending_events = pending_events.lock().unwrap(); - pending_events.iter().any(|(_, act)| *act == compl_action) - } else { - false - }; - if !have_action && compl_action.is_some() { - let mut peer_state = per_peer_state - .get(&counterparty_node_id) - .map(|state| state.lock().unwrap()) - .expect("Channels originating a preimage must have peer state"); - let update_id = peer_state - .closed_channel_monitor_update_ids - .get_mut(channel_id) - .expect("Channels originating a preimage must have a monitor"); - // Note that for channels closed pre-0.1, the latest - // update_id is `u64::MAX`. - *update_id = update_id.saturating_add(1); - - pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: monitor.get_counterparty_node_id(), + !forwards.is_empty() + }); + pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", + &htlc.payment_hash, &monitor.channel_id()); + pending_events_read.retain(|(event, _)| { + if let Event::HTLCIntercepted { intercept_id: ev_id, .. 
} = event { + intercepted_id != ev_id + } else { true } + }); + false + } else { true } + }); + }, + HTLCSource::OutboundRoute { + payment_id, + session_priv, + path, + bolt12_invoice, + .. + } => { + if !is_channel_closed { + continue; + } + if let Some(preimage) = preimage_opt { + let pending_events = Mutex::new(pending_events_read); + let update = PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id, + }; + let mut compl_action = Some( + EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) + ); + pending_outbounds.claim_htlc( + payment_id, + preimage, + bolt12_invoice, + session_priv, + path, + true, + &mut compl_action, + &pending_events, + ); + // If the completion action was not consumed, then there was no + // payment to claim, and we need to tell the `ChannelMonitor` + // we don't need to hear about the HTLC again, at least as long + // as the PaymentSent event isn't still sitting around in our + // event queue. + let have_action = if compl_action.is_some() { + let pending_events = pending_events.lock().unwrap(); + pending_events.iter().any(|(_, act)| *act == compl_action) + } else { + false + }; + if !have_action && compl_action.is_some() { + let mut peer_state = per_peer_state + .get(&counterparty_node_id) + .map(|state| state.lock().unwrap()) + .expect( + "Channels originating a preimage must have peer state", + ); + let update_id = peer_state + .closed_channel_monitor_update_ids + .get_mut(channel_id) + .expect( + "Channels originating a preimage must have a monitor", + ); + // Note that for channels closed pre-0.1, the latest + // update_id is `u64::MAX`. + *update_id = update_id.saturating_add(1); + + pending_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: monitor + .get_counterparty_node_id(), funding_txo: monitor.get_funding_txo(), channel_id: monitor.channel_id(), update: ChannelMonitorUpdate { update_id: *update_id, channel_id: Some(monitor.channel_id()), - updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { - htlc: htlc_id, - }], + updates: vec![ + ChannelMonitorUpdateStep::ReleasePaymentComplete { + htlc: htlc_id, + }, + ], }, - }); - } - pending_events_read = pending_events.into_inner().unwrap(); + }, + ); } - }, - } + pending_events_read = pending_events.into_inner().unwrap(); + } + }, } - for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { - log_info!( - args.logger, - "Failing HTLC with payment hash {} as it was resolved on-chain.", - payment_hash - ); - let completion_action = Some(PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id: SentHTLCId::from_source(&htlc_source), - }); + } + for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { + log_info!( + args.logger, + "Failing HTLC with payment hash {} as it was resolved on-chain.", + payment_hash + ); + let completion_action = Some(PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id: SentHTLCId::from_source(&htlc_source), + }); - failed_htlcs.push(( - htlc_source, - payment_hash, - monitor.get_counterparty_node_id(), - monitor.channel_id(), - LocalHTLCFailureReason::OnChainTimeout, - 
completion_action, - )); - } + failed_htlcs.push(( + htlc_source, + payment_hash, + monitor.get_counterparty_node_id(), + monitor.channel_id(), + LocalHTLCFailureReason::OnChainTimeout, + completion_action, + )); } // Whether the downstream channel was closed or not, try to re-apply any payment From 55f89112341c750b55fe331130f4f1599d446e26 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 12 Dec 2025 16:41:55 -0500 Subject: [PATCH 076/242] Don't double-forward HTLCs in rebuilt update_adds map We recently began reconstructing ChannelManager::decode_update_add_htlcs on startup, using data present in the Channels. However, we failed to prune HTLCs from this rebuilt map if a given HTLC was already forwarded to the outbound edge (we pruned correctly if the outbound edge was a closed channel, but not otherwise). Here we fix this bug that would have caused us to double-forward inbound HTLC forwards. --- lightning/src/ln/channelmanager.rs | 16 +++++---- lightning/src/ln/reload_tests.rs | 58 ++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b774467215b..7d145e0f266 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18016,6 +18016,16 @@ where info.prev_funding_outpoint == prev_hop_data.outpoint && info.prev_htlc_id == prev_hop_data.htlc_id }; + // We always add all inbound committed HTLCs to `decode_update_add_htlcs` in the above + // loop, but we need to prune from those added HTLCs if they were already forwarded to + // the outbound edge. Otherwise, we'll double-forward. + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC already forwarded to the outbound edge", + &args.logger, + ); + if !is_channel_closed { continue; } @@ -18024,12 +18034,6 @@ where // still have an entry for this HTLC in `forward_htlcs`, // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not // persisted after the monitor was when forwarding the payment. - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &args.logger, - ); dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs_legacy, &prev_hop_data, diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index d143082821d..a38262e6952 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1258,6 +1258,64 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } +#[test] +fn test_manager_persisted_post_outbound_edge_forward() { + // Test that we will not double-forward an HTLC after restart if it has already been forwarded to + // the outbound edge, which was previously broken. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + + // Lock in the HTLC from node_a <> node_b. 
+ let amt_msat = 5000; + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + // Add the HTLC to the outbound edge, node_b <> node_c. + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 1); + + // Disconnect peers and reload the forwarding node_b. + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + + let node_b_encoded = nodes[1].node.encode(); + let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); + reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + + reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0])); + let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]); + args_b_c.send_channel_ready = (true, true); + args_b_c.send_announcement_sigs = (true, true); + args_b_c.pending_htlc_adds = (0, 1); + // While reconnecting, we re-send node_b's outbound update_add and commit the HTLC to the b<>c + // channel. + reconnect_nodes(args_b_c); + + // Ensure node_b won't double-forward the outbound HTLC (this was previously broken). + nodes[1].node.process_pending_htlc_forwards(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Claim the HTLC backwards to node_a. + expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); + let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage)); + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} + #[test] fn test_reload_partial_funding_batch() { let chanmon_cfgs = create_chanmon_cfgs(3); From 03882bda9f728a4e6167fde9ffbd8a9c281e2945 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 16 Dec 2025 11:09:08 -0500 Subject: [PATCH 077/242] Optimize dedup_decode_update_add_htlcs No need to iterate through all entries in the map, we can instead pull out the specific entry that we want. 
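As a minimal sketch of the optimization (keys and values reduced to plain integers here; the real map is keyed by SCID alias and holds `UpdateAddHTLC`s), a `retain` over every entry is replaced by a lookup of the single entry that can match, removing the key once its list drains:

    use std::collections::hash_map::Entry;
    use std::collections::HashMap;

    // Before: `retain` walks every entry in the map even though at most
    // one key can match `scid`.
    fn dedup_scan_all(map: &mut HashMap<u64, Vec<u64>>, scid: u64, htlc_id: u64) {
        map.retain(|key, ids| {
            ids.retain(|id| !(*key == scid && *id == htlc_id));
            !ids.is_empty()
        });
    }

    // After: pull out the one entry that can match and prune only it,
    // dropping the key when its list becomes empty.
    fn dedup_one_entry(map: &mut HashMap<u64, Vec<u64>>, scid: u64, htlc_id: u64) {
        if let Entry::Occupied(mut entry) = map.entry(scid) {
            entry.get_mut().retain(|id| *id != htlc_id);
            if entry.get().is_empty() {
                entry.remove();
            }
        }
    }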
--- lightning/src/ln/channelmanager.rs | 46 ++++++++++++++++-------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 7d145e0f266..1728cedc492 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17106,28 +17106,32 @@ fn dedup_decode_update_add_htlcs( ) where L::Target: Logger, { - decode_update_add_htlcs.retain(|src_outb_alias, update_add_htlcs| { - update_add_htlcs.retain(|update_add| { - let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias - && update_add.htlc_id == prev_hop_data.htlc_id; - if matches { - let logger = WithContext::from( - logger, - prev_hop_data.counterparty_node_id, - Some(update_add.channel_id), - Some(update_add.payment_hash), - ); - log_info!( - logger, - "Removing pending to-decode HTLC with id {}: {}", - update_add.htlc_id, - removal_reason - ); + match decode_update_add_htlcs.entry(prev_hop_data.prev_outbound_scid_alias) { + hash_map::Entry::Occupied(mut update_add_htlcs) => { + update_add_htlcs.get_mut().retain(|update_add| { + let matches = update_add.htlc_id == prev_hop_data.htlc_id; + if matches { + let logger = WithContext::from( + logger, + prev_hop_data.counterparty_node_id, + Some(update_add.channel_id), + Some(update_add.payment_hash), + ); + log_info!( + logger, + "Removing pending to-decode HTLC with id {}: {}", + update_add.htlc_id, + removal_reason + ); + } + !matches + }); + if update_add_htlcs.get().is_empty() { + update_add_htlcs.remove(); } - !matches - }); - !update_add_htlcs.is_empty() - }); + }, + _ => {}, + } } // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the From e76159ac0442c810c2a8efbb58c75c749d5cebc1 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 16 Dec 2025 16:52:58 -0500 Subject: [PATCH 078/242] Prefer legacy forward maps on manager read We are working on removing the requirement of regularly persisting the ChannelManager, and as a result began reconstructing the manager's forwards maps from Channel data on startup in a recent PR, see cb398f6b761edde6b45fcda93a01c564cb49a13c and parent commits. At the time, we implemented ChannelManager::read to prefer to use the newly reconstructed maps, partly to ensure we have test coverage of the new maps' usage. This resulted in a lot of code that would deduplicate HTLCs that were present in the old maps to avoid redundant HTLC handling/duplicate forwards, adding extra complexity. Instead, always use the old maps in prod, but randomly use the newly reconstructed maps in testing, to exercise the new codepaths (see reconstruct_manager_from_monitors in ChannelManager::read). --- CONTRIBUTING.md | 6 + lightning/src/ln/channelmanager.rs | 281 +++++++++++------------------ 2 files changed, 113 insertions(+), 174 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d837c873efa..ad25fb10558 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -192,6 +192,12 @@ welcomed. * `LDK_TEST_DETERMINISTIC_HASHES` - When set to `1`, uses deterministic hash map iteration order in tests. This ensures consistent test output across runs, useful for comparing logs before and after changes. +* `LDK_TEST_REBUILD_MGR_FROM_MONITORS` - If set to `1`, on test node reload the `ChannelManager`'s + HTLC set will be reconstructed from `Channel{Monitor}` persisted data. If `0`, test nodes will be + reloaded from persisted `ChannelManager` data using legacy code paths. 
This ensures consistent + test output across runs, useful for comparing logs before and after changes, since otherwise the + codepath used on reload is chosen randomly. + C/C++ Bindings -------------- diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1728cedc492..92ab422acd9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -11747,6 +11747,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if !new_intercept_events.is_empty() { let mut events = self.pending_events.lock().unwrap(); + // It's possible we processed this intercept forward, generated an event, then re-processed + // it here after restart, in which case the intercept event should not be pushed + // redundantly. + new_intercept_events.retain(|ev| !events.contains(ev)); events.append(&mut new_intercept_events); } } @@ -17484,9 +17488,9 @@ where const MAX_ALLOC_SIZE: usize = 1024 * 64; let forward_htlcs_count: u64 = Readable::read(reader)?; - // This map is read but may no longer be used because we'll attempt to rebuild the set of HTLC - // forwards from the `Channel{Monitor}`s instead, as a step towards removing the requirement of - // regularly persisting the `ChannelManager`. + // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of + // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from + // `Channel{Monitor}` data. See `reconstruct_manager_from_monitors` usage below. let mut forward_htlcs_legacy: HashMap<u64, Vec<HTLCForwardInfo>> = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); for _ in 0..forward_htlcs_count { @@ -17587,9 +17591,9 @@ }; } - // Some maps are read but may no longer be used because we attempt to rebuild the pending HTLC - // set from the `Channel{Monitor}`s instead, as a step towards removing the requirement of - // regularly persisting the `ChannelManager`. + // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of + // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from + // `Channel{Monitor}` data. See `reconstruct_manager_from_monitors` below. let mut pending_intercepted_htlcs_legacy: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = None; let mut decode_update_add_htlcs_legacy: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = @@ -17930,6 +17934,36 @@ where pending_background_events.push(new_event); } + // In LDK 0.2 and below, the `ChannelManager` would track all payments and HTLCs internally and + // persist that state, relying on it being up-to-date on restart. Newer versions are moving + // towards reducing this reliance on regular persistence of the `ChannelManager`, and instead + // reconstruct HTLC/payment state based on `Channel{Monitor}` data if + // `reconstruct_manager_from_monitors` is set below. Currently it is only set in tests, randomly + // to ensure the legacy codepaths also have test coverage. 
+ #[cfg(not(test))] + let reconstruct_manager_from_monitors = false; + #[cfg(test)] + let reconstruct_manager_from_monitors = { + use core::hash::{BuildHasher, Hasher}; + + match std::env::var("LDK_TEST_REBUILD_MGR_FROM_MONITORS") { + Ok(val) => match val.as_str() { + "1" => true, + "0" => false, + _ => panic!("LDK_TEST_REBUILD_MGR_FROM_MONITORS must be 0 or 1, got: {}", val), + }, + Err(_) => { + let rand_val = + std::collections::hash_map::RandomState::new().build_hasher().finish(); + if rand_val % 2 == 0 { + true + } else { + false + } + }, + } + }; + // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we // should ensure we try them again on the inbound edge. We put them here and do so after we // have a fully-constructed `ChannelManager` at the end. @@ -17954,18 +17988,20 @@ where let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if let Some(chan) = peer_state.channel_by_id.get(channel_id) { - if let Some(funded_chan) = chan.as_funded() { - let inbound_committed_update_adds = - funded_chan.get_inbound_committed_update_adds(); - if !inbound_committed_update_adds.is_empty() { - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel`, as part of removing the requirement to regularly persist the - // `ChannelManager`. - decode_update_add_htlcs.insert( - funded_chan.context.outbound_scid_alias(), - inbound_committed_update_adds, - ); + if reconstruct_manager_from_monitors { + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + let inbound_committed_update_adds = + funded_chan.get_inbound_committed_update_adds(); + if !inbound_committed_update_adds.is_empty() { + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel`, as part of removing the requirement to regularly persist the + // `ChannelManager`. + decode_update_add_htlcs.insert( + funded_chan.context.outbound_scid_alias(), + inbound_committed_update_adds, + ); + } } } } @@ -18020,17 +18056,20 @@ where info.prev_funding_outpoint == prev_hop_data.outpoint && info.prev_htlc_id == prev_hop_data.htlc_id }; - // We always add all inbound committed HTLCs to `decode_update_add_htlcs` in the above - // loop, but we need to prune from those added HTLCs if they were already forwarded to - // the outbound edge. Otherwise, we'll double-forward. - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - &prev_hop_data, - "HTLC already forwarded to the outbound edge", - &args.logger, - ); + // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed + // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from + // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, + // we'll double-forward. + if reconstruct_manager_from_monitors { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC already forwarded to the outbound edge", + &args.logger, + ); + } - if !is_channel_closed { + if !is_channel_closed || reconstruct_manager_from_monitors { continue; } // The ChannelMonitor is now responsible for this HTLC's @@ -18539,99 +18578,55 @@ where } } - // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. - // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. 
- for (src, _, _, _, _, _) in failed_htlcs.iter() { - if let HTLCSource::PreviousHopData(prev_hop_data) = src { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was failed backwards during manager read", - &args.logger, - ); - } - } - - // See above comment on `failed_htlcs`. - for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { - for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was already decoded and marked as a claimable payment", - &args.logger, - ); - } - } - - // Remove HTLCs from `forward_htlcs` if they are also present in `decode_update_add_htlcs`. - // - // In the future, the full set of pending HTLCs will be pulled from `Channel{Monitor}` data and - // placed in `ChannelManager::decode_update_add_htlcs` on read, to be handled on the next call - // to `process_pending_htlc_forwards`. This is part of a larger effort to remove the requirement - // of regularly persisting the `ChannelManager`. The new pipeline is supported for HTLC forwards - // received on LDK 0.3+ but not <= 0.2, so prune non-legacy HTLCs from `forward_htlcs`. - forward_htlcs_legacy.retain(|scid, pending_fwds| { - for fwd in pending_fwds { - let (prev_scid, prev_htlc_id) = match fwd { - HTLCForwardInfo::AddHTLC(htlc) => { - (htlc.prev_outbound_scid_alias, htlc.prev_htlc_id) - }, - HTLCForwardInfo::FailHTLC { htlc_id, .. } - | HTLCForwardInfo::FailMalformedHTLC { htlc_id, .. } => (*scid, *htlc_id), - }; - if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { - if pending_update_adds - .iter() - .any(|update_add| update_add.htlc_id == prev_htlc_id) - { - return false; - } + if reconstruct_manager_from_monitors { + // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. + // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. + for (src, _, _, _, _, _) in failed_htlcs.iter() { + if let HTLCSource::PreviousHopData(prev_hop_data) = src { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was failed backwards during manager read", + &args.logger, + ); } } - true - }); - // Remove intercepted HTLC forwards if they are also present in `decode_update_add_htlcs`. See - // the above comment. - pending_intercepted_htlcs_legacy.retain(|id, fwd| { - let prev_scid = fwd.prev_outbound_scid_alias; - if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { - if pending_update_adds - .iter() - .any(|update_add| update_add.htlc_id == fwd.prev_htlc_id) - { - pending_events_read.retain( - |(ev, _)| !matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), + + // See above comment on `failed_htlcs`. 
+ for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, ); - return false; } } + } + + let (decode_update_add_htlcs, forward_htlcs, pending_intercepted_htlcs) = + if reconstruct_manager_from_monitors { + (decode_update_add_htlcs, new_hash_map(), new_hash_map()) + } else { + ( + decode_update_add_htlcs_legacy, + forward_htlcs_legacy, + pending_intercepted_htlcs_legacy, + ) + }; + + // If we have a pending intercept HTLC present but no corresponding event, add that now rather + // than relying on the user having persisted the event prior to shutdown. + for (id, fwd) in pending_intercepted_htlcs.iter() { if !pending_events_read.iter().any( |(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), ) { - match create_htlc_intercepted_event(*id, &fwd) { + match create_htlc_intercepted_event(*id, fwd) { Ok(ev) => pending_events_read.push_back((ev, None)), Err(()) => debug_assert!(false), } } - true - }); - // Add legacy update_adds that were received on LDK <= 0.2 that are not present in the - // `decode_update_add_htlcs` map that was rebuilt from `Channel{Monitor}` data, see above - // comment. - for (scid, legacy_update_adds) in decode_update_add_htlcs_legacy.drain() { - match decode_update_add_htlcs.entry(scid) { - hash_map::Entry::Occupied(mut update_adds) => { - for legacy_update_add in legacy_update_adds { - if !update_adds.get().contains(&legacy_update_add) { - update_adds.get_mut().push(legacy_update_add); - } - } - }, - hash_map::Entry::Vacant(entry) => { - entry.insert(legacy_update_adds); - }, - } } let best_block = BestBlock::new(best_block_hash, best_block_height); @@ -18660,9 +18655,9 @@ where inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy), + pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs), - forward_htlcs: Mutex::new(forward_htlcs_legacy), + forward_htlcs: Mutex::new(forward_htlcs), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, @@ -18998,12 +18993,11 @@ where mod tests { use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; use crate::ln::channelmanager::{ - create_recv_pending_htlc_info, inbound_payment, HTLCForwardInfo, InterceptId, PaymentId, + create_recv_pending_htlc_info, inbound_payment, InterceptId, PaymentId, RecipientOnionFields, }; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; - use crate::ln::onion_utils::AttributionData; use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::Retry; use crate::ln::types::ChannelId; @@ -19013,7 +19007,6 @@ mod tests { use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::util::config::{ChannelConfig, ChannelConfigUpdate}; use crate::util::errors::APIError; - use crate::util::ser::Writeable; use crate::util::test_utils; use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; @@ -20071,66 +20064,6 @@ mod tests { check_spends!(txn[0], funding_tx); } } - - #[test] - #[rustfmt::skip] - fn test_malformed_forward_htlcs_ser() { - // Ensure that 
`HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly. - let chanmon_cfg = create_chanmon_cfgs(1); - let node_cfg = create_node_cfgs(1, &chanmon_cfg); - let persister; - let chain_monitor; - let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]); - let deserialized_chanmgr; - let mut nodes = create_network(1, &node_cfg, &chanmgrs); - - let dummy_failed_htlc = |htlc_id| { - HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42], attribution_data: Some(AttributionData::new()) } } - }; - let dummy_malformed_htlc = |htlc_id| { - HTLCForwardInfo::FailMalformedHTLC { - htlc_id, - failure_code: LocalHTLCFailureReason::InvalidOnionPayload.failure_code(), - sha256_of_onion: [0; 32], - } - }; - - let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| { - if htlc_id % 2 == 0 { - dummy_failed_htlc(htlc_id) - } else { - dummy_malformed_htlc(htlc_id) - } - }).collect(); - - let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| { - if htlc_id % 2 == 1 { - dummy_failed_htlc(htlc_id) - } else { - dummy_malformed_htlc(htlc_id) - } - }).collect(); - - - let (scid_1, scid_2) = (42, 43); - let mut forward_htlcs = new_hash_map(); - forward_htlcs.insert(scid_1, dummy_htlcs_1.clone()); - forward_htlcs.insert(scid_2, dummy_htlcs_2.clone()); - - let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap(); - *chanmgr_fwd_htlcs = forward_htlcs.clone(); - core::mem::drop(chanmgr_fwd_htlcs); - - reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr); - - let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap(); - for scid in [scid_1, scid_2].iter() { - let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap(); - assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs); - } - assert!(deserialized_fwd_htlcs.is_empty()); - core::mem::drop(deserialized_fwd_htlcs); - } } #[cfg(ldk_bench)] From 5a4912c1fce451f7ee4958a33ab419e1bcf75e55 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 12 Jan 2026 16:14:54 -0500 Subject: [PATCH 079/242] Update outdated comment due to var renames --- lightning/src/ln/channelmanager.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 92ab422acd9..bc55c4e7afd 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18074,9 +18074,10 @@ where } // The ChannelMonitor is now responsible for this HTLC's // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs`, - // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not - // persisted after the monitor was when forwarding the payment. + // still have an entry for this HTLC in `forward_htlcs_legacy`, + // `pending_intercepted_htlcs_legacy`, or + // `decode_update_add_htlcs_legacy`, we were apparently not persisted + // after the monitor was when forwarding the payment. From b6b622f0c49e389d3cd0e6609fd444c502c2b0aa Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 19 Dec 2025 09:51:24 +0100 Subject: [PATCH 080/242] Convert internal update handling fns to methods A pure code move, except for the context logger, which is now instantiated when needed in update_channel_monitor. 
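Reduced to a toy example (the struct and its field are invented for illustration; the real change moves free functions generic over a channel-manager reference into inherent methods on `ChannelManager`), the refactor looks like this:

    struct Manager {
        name: String,
    }

    // Before: a free function that threads the manager through a parameter.
    fn update_completed(cm: &Manager, ok: bool) -> bool {
        if !ok {
            println!("{}: update still in flight", cm.name);
        }
        ok
    }

    impl Manager {
        // After: the same logic as a method. Callers drop the extra argument,
        // and context such as a logger can be built from `self` right where
        // it is needed.
        fn update_completed(&self, ok: bool) -> bool {
            if !ok {
                println!("{}: update still in flight", self.name);
            }
            ok
        }
    }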
--- lightning/src/ln/channelmanager.rs | 190 +++++++++++++++-------------- 1 file changed, 98 insertions(+), 92 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f2e8fa70e4f..80ee0e243f7 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3370,42 +3370,10 @@ macro_rules! handle_monitor_update_completion { }}; } -/// Returns whether the monitor update is completed, `false` if the update is in-progress. -fn handle_monitor_update_res( - cm: &CM, update_res: ChannelMonitorUpdateStatus, logger: LG, -) -> bool { - debug_assert!(cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire)); - match update_res { - ChannelMonitorUpdateStatus::UnrecoverableError => { - let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down."; - log_error!(logger, "{}", err_str); - panic!("{}", err_str); - }, - ChannelMonitorUpdateStatus::InProgress => { - #[cfg(not(any(test, feature = "_externalize_tests")))] - if cm.get_cm().monitor_update_type.swap(1, Ordering::Relaxed) == 2 { - panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); - } - log_debug!( - logger, - "ChannelMonitor update in flight, holding messages until the update completes.", - ); - false - }, - ChannelMonitorUpdateStatus::Completed => { - #[cfg(not(any(test, feature = "_externalize_tests")))] - if cm.get_cm().monitor_update_type.swap(2, Ordering::Relaxed) == 1 { - panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); - } - true - }, - } -} - macro_rules! handle_initial_monitor { ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - let update_completed = handle_monitor_update_res($self, $update_res, logger); + let update_completed = $self.handle_monitor_update_res($update_res, logger); if update_completed { handle_monitor_update_completion!( $self, @@ -3418,69 +3386,17 @@ macro_rules! handle_initial_monitor { }; } -fn handle_new_monitor_update_internal( - cm: &CM, - in_flight_monitor_updates: &mut BTreeMap)>, - channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey, - new_update: ChannelMonitorUpdate, logger: LG, -) -> (bool, bool) { - let in_flight_updates = &mut in_flight_monitor_updates - .entry(channel_id) - .or_insert_with(|| (funding_txo, Vec::new())) - .1; - // During startup, we push monitor updates as background events through to here in - // order to replay updates that were in-flight when we shut down. Thus, we have to - // filter for uniqueness here. - let update_idx = - in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| { - in_flight_updates.push(new_update); - in_flight_updates.len() - 1 - }); - - if cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire) { - let update_res = - cm.get_cm().chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]); - let update_completed = handle_monitor_update_res(cm, update_res, logger); - if update_completed { - let _ = in_flight_updates.remove(update_idx); - } - (update_completed, update_completed && in_flight_updates.is_empty()) - } else { - // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we - // fail to persist it. 
This is a fairly safe assumption, however, since anything we do - // during the startup sequence should be replayed exactly if we immediately crash. - let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, - funding_txo, - channel_id, - update: in_flight_updates[update_idx].clone(), - }; - // We want to track the in-flight update both in `in_flight_monitor_updates` and in - // `pending_background_events` to avoid a race condition during - // `pending_background_events` processing where we complete one - // `ChannelMonitorUpdate` (but there are more pending as background events) but we - // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to - // run post-completion actions. - // We could work around that with some effort, but its simpler to just track updates - // twice. - cm.get_cm().pending_background_events.lock().unwrap().push(event); - (false, false) - } -} - macro_rules! handle_post_close_monitor_update { ( $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr ) => {{ - let (update_completed, all_updates_complete) = handle_new_monitor_update_internal( - $self, + let (update_completed, all_updates_complete) = $self.update_channel_monitor( &mut $peer_state.in_flight_monitor_updates, $channel_id, $funding_txo, $counterparty_node_id, $update, - WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None), ); if all_updates_complete { let update_actions = $peer_state @@ -3510,14 +3426,12 @@ macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller { ( $self: ident, $funding_txo: expr, $update: expr, $in_flight_monitor_updates: expr, $chan_context: expr ) => {{ - let (update_completed, _all_updates_complete) = handle_new_monitor_update_internal( - $self, + let (update_completed, _all_updates_complete) = $self.update_channel_monitor( $in_flight_monitor_updates, $chan_context.channel_id(), $funding_txo, $chan_context.get_counterparty_node_id(), $update, - WithChannelContext::from(&$self.logger, &$chan_context, None), ); update_completed }}; @@ -3528,14 +3442,12 @@ macro_rules! handle_new_monitor_update { $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr ) => {{ - let (update_completed, all_updates_complete) = handle_new_monitor_update_internal( - $self, + let (update_completed, all_updates_complete) = $self.update_channel_monitor( &mut $peer_state.in_flight_monitor_updates, $chan.context.channel_id(), $funding_txo, $chan.context.get_counterparty_node_id(), $update, - WithChannelContext::from(&$self.logger, &$chan.context, None), ); if all_updates_complete { handle_monitor_update_completion!( @@ -9795,6 +9707,100 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } + /// Applies a [`ChannelMonitorUpdate`] to the channel monitor. + /// + /// Monitor updates must be applied while holding the same lock under which they were generated + /// to ensure correct ordering. However, completion handling requires releasing those locks. + /// This method applies the update immediately (while locks are held) and returns whether the + /// update completed, allowing the caller to handle completion separately after releasing locks. 
+ /// + /// Returns a tuple of `(update_completed, all_updates_completed)`: + /// - `update_completed`: whether this specific monitor update finished persisting + /// - `all_updates_completed`: whether all in-flight updates for this channel are now complete + fn update_channel_monitor( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey, + new_update: ChannelMonitorUpdate, + ) -> (bool, bool) { + let in_flight_updates = &mut in_flight_monitor_updates + .entry(channel_id) + .or_insert_with(|| (funding_txo, Vec::new())) + .1; + // During startup, we push monitor updates as background events through to here in + // order to replay updates that were in-flight when we shut down. Thus, we have to + // filter for uniqueness here. + let update_idx = + in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| { + in_flight_updates.push(new_update); + in_flight_updates.len() - 1 + }); + + if self.background_events_processed_since_startup.load(Ordering::Acquire) { + let update_res = + self.chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]); + let logger = + WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None); + let update_completed = self.handle_monitor_update_res(update_res, logger); + if update_completed { + let _ = in_flight_updates.remove(update_idx); + } + (update_completed, update_completed && in_flight_updates.is_empty()) + } else { + // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we + // fail to persist it. This is a fairly safe assumption, however, since anything we do + // during the startup sequence should be replayed exactly if we immediately crash. + let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo, + channel_id, + update: in_flight_updates[update_idx].clone(), + }; + // We want to track the in-flight update both in `in_flight_monitor_updates` and in + // `pending_background_events` to avoid a race condition during + // `pending_background_events` processing where we complete one + // `ChannelMonitorUpdate` (but there are more pending as background events) but we + // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to + // run post-completion actions. + // We could work around that with some effort, but its simpler to just track updates + // twice. + self.pending_background_events.lock().unwrap().push(event); + (false, false) + } + } + + /// Returns whether the monitor update is completed, `false` if the update is in-progress. + fn handle_monitor_update_res( + &self, update_res: ChannelMonitorUpdateStatus, logger: LG, + ) -> bool { + debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire)); + match update_res { + ChannelMonitorUpdateStatus::UnrecoverableError => { + let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down."; + log_error!(logger, "{}", err_str); + panic!("{}", err_str); + }, + ChannelMonitorUpdateStatus::InProgress => { + #[cfg(not(any(test, feature = "_externalize_tests")))] + if self.monitor_update_type.swap(1, Ordering::Relaxed) == 2 { + panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); + } + log_debug!( + logger, + "ChannelMonitor update in flight, holding messages until the update completes.", + ); + false + }, + ChannelMonitorUpdateStatus::Completed => { + #[cfg(not(any(test, feature = "_externalize_tests")))] + if self.monitor_update_type.swap(2, Ordering::Relaxed) == 1 { + panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); + } + true + }, + } + } + /// Handles a channel reentering a functional state, either due to reconnect or a monitor /// update completion. #[rustfmt::skip] From b540cd6ae1831b1286ee437c07df4d47ffe04572 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 8 Jan 2026 10:31:49 +0100 Subject: [PATCH 081/242] Refactor monitor update completion into helper methods Extract the monitor update completion logic from the handle_monitor_update_completion macro into two new helper methods: - try_resume_channel_post_monitor_update: Attempts to resume a channel after a monitor update completes while locks are still held - handle_post_monitor_update_chan_resume: Completes channel resumption after locks have been released This refactoring improves code organization by separating the locked phase (which may resume the channel) from the unlocked phase (which processes remaining work). The macro is now a thin wrapper that calls these two methods with proper lock management. The new PostMonitorUpdateChanResume enum captures the result of the resume attempt, containing any remaining work to process after locks are released. --- lightning/src/ln/channelmanager.rs | 255 +++++++++++++++++++---------- 1 file changed, 170 insertions(+), 85 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 80ee0e243f7..69b435404d3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1366,6 +1366,25 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, }, ); +/// Result of attempting to resume a channel after a monitor update completes while locks are held. +/// Contains remaining work to be processed after locks are released. +#[must_use] +enum PostMonitorUpdateChanResume { + /// Channel still has blocked monitor updates pending. Contains only update actions to process. + Blocked { update_actions: Vec }, + /// Channel was fully unblocked and has been resumed. Contains remaining data to process. + Unblocked { + channel_id: ChannelId, + counterparty_node_id: PublicKey, + unbroadcasted_batch_funding_txid: Option, + update_actions: Vec, + htlc_forwards: Option, + decode_update_add_htlcs: Option<(u64, Vec)>, + finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, + failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + }, +} + #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct PaymentCompleteUpdate { counterparty_node_id: PublicKey, @@ -3280,93 +3299,18 @@ macro_rules! emit_initial_channel_ready_event { /// Requires that the in-flight monitor update set for this channel is empty! macro_rules! 
handle_monitor_update_completion { ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => {{ - let chan_id = $chan.context.channel_id(); - let outbound_alias = $chan.context().outbound_scid_alias(); - let cp_node_id = $chan.context.get_counterparty_node_id(); - - #[cfg(debug_assertions)] - { - let in_flight_updates = $peer_state.in_flight_monitor_updates.get(&chan_id); - assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true)); - assert!($chan.is_awaiting_monitor_update()); - } - - let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - - let update_actions = - $peer_state.monitor_update_blocked_actions.remove(&chan_id).unwrap_or(Vec::new()); - - if $chan.blocked_monitor_updates_pending() != 0 { - mem::drop($peer_state_lock); - mem::drop($per_peer_state_lock); - - log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked"); - $self.handle_monitor_update_completion_actions(update_actions); - } else { - log_debug!(logger, "Channel is open and awaiting update, resuming it"); - let updates = $chan.monitor_updating_restored( - &&logger, - &$self.node_signer, - $self.chain_hash, - &*$self.config.read().unwrap(), - $self.best_block.read().unwrap().height, - |htlc_id| { - $self.path_for_release_held_htlc(htlc_id, outbound_alias, &chan_id, &cp_node_id) - }, - ); - let channel_update = if updates.channel_ready.is_some() - && $chan.context.is_usable() - && $peer_state.is_connected - { - // We only send a channel_update in the case where we are just now sending a - // channel_ready and the channel is in a usable state. We may re-send a - // channel_update later through the announcement_signatures process for public - // channels, but there's no reason not to just inform our counterparty of our fees - // now. - if let Ok((msg, _, _)) = $self.get_channel_update_for_unicast($chan) { - Some(MessageSendEvent::SendChannelUpdate { node_id: cp_node_id, msg }) - } else { - None - } - } else { - None - }; - - let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption( - &mut $peer_state.pending_msg_events, - $chan, - updates.raa, - updates.commitment_update, - updates.commitment_order, - updates.accepted_htlcs, - updates.pending_update_adds, - updates.funding_broadcastable, - updates.channel_ready, - updates.announcement_sigs, - updates.tx_signatures, - None, - updates.channel_ready_order, - ); - if let Some(upd) = channel_update { - $peer_state.pending_msg_events.push(upd); - } + let completion_data = $self.try_resume_channel_post_monitor_update( + &mut $peer_state.in_flight_monitor_updates, + &mut $peer_state.monitor_update_blocked_actions, + &mut $peer_state.pending_msg_events, + $peer_state.is_connected, + $chan, + ); - let unbroadcasted_batch_funding_txid = - $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding); - core::mem::drop($peer_state_lock); - core::mem::drop($per_peer_state_lock); + mem::drop($peer_state_lock); + mem::drop($per_peer_state_lock); - $self.post_monitor_update_unlock( - chan_id, - cp_node_id, - unbroadcasted_batch_funding_txid, - update_actions, - htlc_forwards, - decode_update_add_htlcs, - updates.finalized_claimed_htlcs, - updates.failed_htlcs, - ); - } + $self.handle_post_monitor_update_chan_resume(completion_data); }}; } @@ -9801,6 +9745,147 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } } + /// Attempts to resume a channel after a monitor update completes, while locks are still held. + /// + /// If the channel has no more blocked monitor updates, this resumes normal operation by + /// calling [`Self::handle_channel_resumption`] and returns the remaining work to process + /// after locks are released. If blocked updates remain, only the update actions are returned. + /// + /// Note: This method takes individual fields from [`PeerState`] rather than the whole struct + /// to avoid borrow checker issues when the channel is borrowed from `peer_state.channel_by_id`. + fn try_resume_channel_post_monitor_update( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec, + >, + pending_msg_events: &mut Vec, is_connected: bool, + chan: &mut FundedChannel, + ) -> PostMonitorUpdateChanResume { + let chan_id = chan.context.channel_id(); + let outbound_alias = chan.context.outbound_scid_alias(); + let counterparty_node_id = chan.context.get_counterparty_node_id(); + + #[cfg(debug_assertions)] + { + let in_flight_updates = in_flight_monitor_updates.get(&chan_id); + assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true)); + assert!(chan.is_awaiting_monitor_update()); + } + + let logger = WithChannelContext::from(&self.logger, &chan.context, None); + + let update_actions = monitor_update_blocked_actions.remove(&chan_id).unwrap_or(Vec::new()); + + if chan.blocked_monitor_updates_pending() != 0 { + log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked"); + PostMonitorUpdateChanResume::Blocked { update_actions } + } else { + log_debug!(logger, "Channel is open and awaiting update, resuming it"); + let updates = chan.monitor_updating_restored( + &&logger, + &self.node_signer, + self.chain_hash, + &*self.config.read().unwrap(), + self.best_block.read().unwrap().height, + |htlc_id| { + self.path_for_release_held_htlc( + htlc_id, + outbound_alias, + &chan_id, + &counterparty_node_id, + ) + }, + ); + let channel_update = if updates.channel_ready.is_some() + && chan.context.is_usable() + && is_connected + { + if let Ok((msg, _, _)) = self.get_channel_update_for_unicast(chan) { + Some(MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id, msg }) + } else { + None + } + } else { + None + }; + + let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( + pending_msg_events, + chan, + updates.raa, + updates.commitment_update, + updates.commitment_order, + updates.accepted_htlcs, + updates.pending_update_adds, + updates.funding_broadcastable, + updates.channel_ready, + updates.announcement_sigs, + updates.tx_signatures, + None, + updates.channel_ready_order, + ); + if let Some(upd) = channel_update { + pending_msg_events.push(upd); + } + + let unbroadcasted_batch_funding_txid = + chan.context.unbroadcasted_batch_funding_txid(&chan.funding); + + PostMonitorUpdateChanResume::Unblocked { + channel_id: chan_id, + counterparty_node_id, + unbroadcasted_batch_funding_txid, + update_actions, + htlc_forwards, + decode_update_add_htlcs, + finalized_claimed_htlcs: updates.finalized_claimed_htlcs, + failed_htlcs: updates.failed_htlcs, + } + } + } + + /// Completes channel resumption after locks have been released. 
+ /// + /// Processes the [`PostMonitorUpdateChanResume`] returned by + /// [`Self::try_resume_channel_post_monitor_update`], handling update actions and any + /// remaining work that requires locks to be released (e.g., forwarding HTLCs, failing HTLCs). + fn handle_post_monitor_update_chan_resume(&self, data: PostMonitorUpdateChanResume) { + debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); + #[cfg(debug_assertions)] + for (_, peer) in self.per_peer_state.read().unwrap().iter() { + debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread); + } + + match data { + PostMonitorUpdateChanResume::Blocked { update_actions } => { + self.handle_monitor_update_completion_actions(update_actions); + }, + PostMonitorUpdateChanResume::Unblocked { + channel_id, + counterparty_node_id, + unbroadcasted_batch_funding_txid, + update_actions, + htlc_forwards, + decode_update_add_htlcs, + finalized_claimed_htlcs, + failed_htlcs, + } => { + self.post_monitor_update_unlock( + channel_id, + counterparty_node_id, + unbroadcasted_batch_funding_txid, + update_actions, + htlc_forwards, + decode_update_add_htlcs, + finalized_claimed_htlcs, + failed_htlcs, + ); + }, + } + } + /// Handles a channel reentering a functional state, either due to reconnect or a monitor /// update completion. #[rustfmt::skip] From d89a1af5acb2a0513ddd77932e9374f506ec2003 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 8 Jan 2026 12:02:09 +0100 Subject: [PATCH 082/242] Inline handle_monitor_update_completion! macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Expand the macro at its three call sites and remove the macro definition, as it no longer provides significant abstraction benefit after refactoring the core logic into helper methods. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 70 +++++++++++++++--------------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 69b435404d3..1747bdfb07f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3291,41 +3291,23 @@ macro_rules! emit_initial_channel_ready_event { }; } -/// Handles the completion steps for when a [`ChannelMonitorUpdate`] is applied to a live channel. -/// -/// You should not add new direct calls to this, generally, rather rely on -/// `handle_new_monitor_update` or [`ChannelManager::channel_monitor_updated`] to call it for you. -/// -/// Requires that the in-flight monitor update set for this channel is empty! -macro_rules! handle_monitor_update_completion { - ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => {{ - let completion_data = $self.try_resume_channel_post_monitor_update( - &mut $peer_state.in_flight_monitor_updates, - &mut $peer_state.monitor_update_blocked_actions, - &mut $peer_state.pending_msg_events, - $peer_state.is_connected, - $chan, - ); - - mem::drop($peer_state_lock); - mem::drop($per_peer_state_lock); - - $self.handle_post_monitor_update_chan_resume(completion_data); - }}; -} - macro_rules! 
handle_initial_monitor { ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); let update_completed = $self.handle_monitor_update_res($update_res, logger); if update_completed { - handle_monitor_update_completion!( - $self, - $peer_state_lock, - $peer_state, - $per_peer_state_lock, - $chan + let completion_data = $self.try_resume_channel_post_monitor_update( + &mut $peer_state.in_flight_monitor_updates, + &mut $peer_state.monitor_update_blocked_actions, + &mut $peer_state.pending_msg_events, + $peer_state.is_connected, + $chan, ); + + mem::drop($peer_state_lock); + mem::drop($per_peer_state_lock); + + $self.handle_post_monitor_update_chan_resume(completion_data); } }; } @@ -3394,13 +3376,18 @@ macro_rules! handle_new_monitor_update { $update, ); if all_updates_complete { - handle_monitor_update_completion!( - $self, - $peer_state_lock, - $peer_state, - $per_peer_state_lock, - $chan + let completion_data = $self.try_resume_channel_post_monitor_update( + &mut $peer_state.in_flight_monitor_updates, + &mut $peer_state.monitor_update_blocked_actions, + &mut $peer_state.pending_msg_events, + $peer_state.is_connected, + $chan, ); + + mem::drop($peer_state_lock); + mem::drop($per_peer_state_lock); + + $self.handle_post_monitor_update_chan_resume(completion_data); } update_completed }}; @@ -10146,7 +10133,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ .and_then(Channel::as_funded_mut) { if chan.is_awaiting_monitor_update() { - handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + let completion_data = self.try_resume_channel_post_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + ); + + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + + self.handle_post_monitor_update_chan_resume(completion_data); } else { log_trace!(logger, "Channel is open but not awaiting update"); } From a9656dd41ee02b2f8a2c7a64ae47db920de3d48d Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 8 Jan 2026 10:49:40 +0100 Subject: [PATCH 083/242] Remove handle_new_monitor_update_locked_actions_handled_by_caller macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 41 ++++++------------------------ 1 file changed, 8 insertions(+), 33 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1747bdfb07f..26a45e5b604 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3338,31 +3338,6 @@ macro_rules! handle_post_close_monitor_update { update_completed }}; } - -/// Handles a new monitor update without dropping peer_state locks and calling -/// [`ChannelManager::handle_monitor_update_completion_actions`] if the monitor update completed -/// synchronously. -/// -/// Useful because monitor updates need to be handled in the same mutex where the channel generated -/// them (otherwise they can end up getting applied out-of-order) but it's not always possible to -/// drop the aforementioned peer state locks at a given callsite. 
In this situation, use this macro -/// to apply the monitor update immediately and handle the monitor update completion actions at a -/// later time. -macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller { - ( - $self: ident, $funding_txo: expr, $update: expr, $in_flight_monitor_updates: expr, $chan_context: expr - ) => {{ - let (update_completed, _all_updates_complete) = $self.update_channel_monitor( - $in_flight_monitor_updates, - $chan_context.channel_id(), - $funding_txo, - $chan_context.get_counterparty_node_id(), - $update, - ); - update_completed - }}; -} - macro_rules! handle_new_monitor_update { ( $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, @@ -4546,12 +4521,12 @@ where log_error!(logger, "Closed channel due to close-required error: {}", msg); if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { - handle_new_monitor_update_locked_actions_handled_by_caller!( - self, + self.update_channel_monitor( + in_flight_monitor_updates, + chan.context.channel_id(), funding_txo, + chan.context.get_counterparty_node_id(), update, - in_flight_monitor_updates, - chan.context ); } // If there's a possibility that we need to generate further monitor updates for this @@ -14853,12 +14828,12 @@ where insert_short_channel_id!(short_to_chan_info, funded_channel); if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update_locked_actions_handled_by_caller!( - self, + self.update_channel_monitor( + &mut peer_state.in_flight_monitor_updates, + funded_channel.context.channel_id(), funding_txo, + funded_channel.context.get_counterparty_node_id(), monitor_update, - &mut peer_state.in_flight_monitor_updates, - funded_channel.context ); to_process_monitor_update_actions.push(( counterparty_node_id, channel_id From d0cc60f8af88caf0b2e25f7fe38d2fd6131b83be Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 8 Jan 2026 10:56:10 +0100 Subject: [PATCH 084/242] Remove handle_initial_monitor macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert the handle_initial_monitor! macro to a method that returns optional completion data, allowing callers to release locks before processing the completion. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 96 ++++++++++++++++++++---------- 1 file changed, 65 insertions(+), 31 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 26a45e5b604..655ee11e83e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3291,27 +3291,6 @@ macro_rules! emit_initial_channel_ready_event { }; } -macro_rules! handle_initial_monitor { - ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { - let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - let update_completed = $self.handle_monitor_update_res($update_res, logger); - if update_completed { - let completion_data = $self.try_resume_channel_post_monitor_update( - &mut $peer_state.in_flight_monitor_updates, - &mut $peer_state.monitor_update_blocked_actions, - &mut $peer_state.pending_msg_events, - $peer_state.is_connected, - $chan, - ); - - mem::drop($peer_state_lock); - mem::drop($per_peer_state_lock); - - $self.handle_post_monitor_update_chan_resume(completion_data); - } - }; -} - macro_rules! 
handle_post_close_monitor_update { ( $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, @@ -9707,6 +9686,36 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } + /// Handles the initial monitor persistence, optionally returning data to process after locks + /// are released. + /// + /// Note: This method takes individual fields from `PeerState` rather than the whole struct + /// to avoid borrow checker issues when the channel is borrowed from `peer_state.channel_by_id`. + fn handle_initial_monitor( + &self, + in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec<MonitorUpdateCompletionAction>, + >, + pending_msg_events: &mut Vec<MessageSendEvent>, is_connected: bool, + chan: &mut FundedChannel<SP>, update_res: ChannelMonitorUpdateStatus, + ) -> Option<PostMonitorUpdateChanResume> { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); + let update_completed = self.handle_monitor_update_res(update_res, logger); + if update_completed { + Some(self.try_resume_channel_post_monitor_update( + in_flight_monitor_updates, + monitor_update_blocked_actions, + pending_msg_events, + is_connected, + chan, + )) + } else { + None + } + } + /// Attempts to resume a channel after a monitor update completes, while locks are still held. /// /// If the channel has no more blocked monitor updates, this resumes normal operation by @@ -10735,14 +10744,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } if let Some(funded_chan) = e.insert(Channel::from(chan)).as_funded_mut() { - handle_initial_monitor!( - self, + if let Some(data) = self.handle_initial_monitor( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + funded_chan, persist_state, - peer_state_lock, - peer_state, - per_peer_state, - funded_chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } else { unreachable!("This must be a funded channel as we just inserted it."); } @@ -10905,7 +10918,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }) { Ok((funded_chan, persist_status)) => { - handle_initial_monitor!(self, persist_status, peer_state_lock, peer_state, per_peer_state, funded_chan); + if let Some(data) = self.handle_initial_monitor( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + funded_chan, + persist_status, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } Ok(()) }, Err(e) => try_channel_entry!(self, peer_state, Err(e), chan_entry), @@ -11611,8 +11635,18 @@ This indicates a bug inside LDK.
Please report this error at https://github.com/ if let Some(monitor) = monitor_opt { let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { - handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state, - per_peer_state, chan); + if let Some(data) = self.handle_initial_monitor( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + persist_state, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } else { let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated"); From d0dc4f0ac98c07519c1051e68dc94122b76ee780 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 8 Jan 2026 11:01:47 +0100 Subject: [PATCH 085/242] Remove handle_post_close_monitor_update macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert the handle_post_close_monitor_update! macro to a method that returns optional completion actions, allowing callers to release locks before processing the completion. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 105 ++++++++++++++++------------- 1 file changed, 60 insertions(+), 45 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 655ee11e83e..76da3819532 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3291,32 +3291,6 @@ macro_rules! emit_initial_channel_ready_event { }; } -macro_rules! handle_post_close_monitor_update { - ( - $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, - $per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr - ) => {{ - let (update_completed, all_updates_complete) = $self.update_channel_monitor( - &mut $peer_state.in_flight_monitor_updates, - $channel_id, - $funding_txo, - $counterparty_node_id, - $update, - ); - if all_updates_complete { - let update_actions = $peer_state - .monitor_update_blocked_actions - .remove(&$channel_id) - .unwrap_or(Vec::new()); - - mem::drop($peer_state_lock); - mem::drop($per_peer_state_lock); - - $self.handle_monitor_update_completion_actions(update_actions); - } - update_completed - }}; -} macro_rules! handle_new_monitor_update { ( $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, @@ -4151,10 +4125,18 @@ where hash_map::Entry::Vacant(_) => {}, } - handle_post_close_monitor_update!( - self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, - counterparty_node_id, channel_id - ); + if let Some(actions) = self.handle_post_close_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + funding_txo, + monitor_update, + counterparty_node_id, + channel_id, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(actions); + } } /// When a channel is removed, two things need to happen: @@ -9120,16 +9102,18 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ .push(action); } - handle_post_close_monitor_update!( - self, + if let Some(actions) = self.handle_post_close_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, prev_hop.funding_txo, preimage_update, - peer_state_lock, - peer_state, - per_peer_state, prev_hop.counterparty_node_id, - chan_id + chan_id, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(actions); + } } fn finalize_claims(&self, sources: Vec<(HTLCSource, Option<AttributionData>)>) { @@ -9654,6 +9638,34 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } + /// Handles a monitor update for a closed channel, optionally returning the completion actions + /// to process after locks are released. + /// + /// Returns `Some` if all in-flight updates are complete. + fn handle_post_close_monitor_update( + &self, + in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec<MonitorUpdateCompletionAction>, + >, + funding_txo: OutPoint, update: ChannelMonitorUpdate, counterparty_node_id: PublicKey, + channel_id: ChannelId, + ) -> Option<Vec<MonitorUpdateCompletionAction>> { + let (_update_completed, all_updates_complete) = self.update_channel_monitor( + in_flight_monitor_updates, + channel_id, + funding_txo, + counterparty_node_id, + update, + ); + if all_updates_complete { + Some(monitor_update_blocked_actions.remove(&channel_id).unwrap_or(Vec::new())) + } else { + None + } + } + /// Returns whether the monitor update is completed, `false` if the update is in-progress. fn handle_monitor_update_res<LG: Deref>( &self, update_res: ChannelMonitorUpdateStatus, logger: LG, @@ -14083,10 +14095,11 @@ where }, ) => { let per_peer_state = self.per_peer_state.read().unwrap(); - let mut peer_state = per_peer_state + let mut peer_state_lock = per_peer_state .get(&counterparty_node_id) .map(|state| state.lock().unwrap()) .expect("Channels originating a payment resolution must have peer state"); + let peer_state = &mut *peer_state_lock; let update_id = peer_state .closed_channel_monitor_update_ids .get_mut(&channel_id) @@ -14113,16 +14126,18 @@ where }; self.pending_background_events.lock().unwrap().push(event); } else { - handle_post_close_monitor_update!( - self, + if let Some(actions) = self.handle_post_close_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, channel_funding_outpoint, update, - peer_state, - peer_state, - per_peer_state, counterparty_node_id, - channel_id + channel_id, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(actions); + } } }, } From a160bbfda3a3e5351eaca2ea831afbf0c96bc0ba Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 8 Jan 2026 11:10:44 +0100 Subject: [PATCH 086/242] Remove handle_new_monitor_update macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert the handle_new_monitor_update macro to methods and update all call sites. Adds handle_new_monitor_update and handle_new_monitor_update_with_status methods that return completion data for processing after locks are released. Also removes handle_monitor_update_completion macro as it's no longer needed.
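The two-phase shape shared by these helpers is easier to see in miniature. A minimal sketch with stand-in types (a hypothetical `Manager`/`CompletionData`, not the real LDK signatures): completion work is captured as plain data while the per-peer lock is held, and processed only after the lock is dropped, since completion actions may themselves need to re-take it.

```rust
use std::mem;
use std::sync::Mutex;

// Stand-ins for the real types; in LDK the captured data is
// `PostMonitorUpdateChanResume` and the lock guards a `PeerState`.
struct PeerState {
    blocked_actions: Vec<String>,
}

struct CompletionData {
    actions: Vec<String>,
}

struct Manager {
    peer_state: Mutex<PeerState>,
}

impl Manager {
    fn handle_update(&self) {
        // Phase 1: apply the update and capture follow-up work under the lock.
        let mut peer_state = self.peer_state.lock().unwrap();
        let completion_data =
            CompletionData { actions: mem::take(&mut peer_state.blocked_actions) };
        mem::drop(peer_state); // mirrors the explicit `mem::drop` calls above

        // Phase 2: run the captured work without the lock held, so each step is
        // free to re-acquire it (or take other locks) without deadlocking.
        for action in completion_data.actions {
            let _relock = self.peer_state.lock().unwrap();
            println!("completing: {action}");
        }
    }
}

fn main() {
    let mgr = Manager {
        peer_state: Mutex::new(PeerState { blocked_actions: vec!["send RAA".to_owned()] }),
    };
    mgr.handle_update();
}
```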
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 290 ++++++++++++++++++++--------- 1 file changed, 203 insertions(+), 87 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 76da3819532..ce5e88e4a7f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3291,36 +3291,6 @@ macro_rules! emit_initial_channel_ready_event { }; } -macro_rules! handle_new_monitor_update { - ( - $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, - $per_peer_state_lock: expr, $chan: expr - ) => {{ - let (update_completed, all_updates_complete) = $self.update_channel_monitor( - &mut $peer_state.in_flight_monitor_updates, - $chan.context.channel_id(), - $funding_txo, - $chan.context.get_counterparty_node_id(), - $update, - ); - if all_updates_complete { - let completion_data = $self.try_resume_channel_post_monitor_update( - &mut $peer_state.in_flight_monitor_updates, - &mut $peer_state.monitor_update_blocked_actions, - &mut $peer_state.pending_msg_events, - $peer_state.is_connected, - $chan, - ); - - mem::drop($peer_state_lock); - mem::drop($per_peer_state_lock); - - $self.handle_post_monitor_update_chan_resume(completion_data); - } - update_completed - }}; -} - fn convert_channel_err_internal< Close: FnOnce(ClosureReason, &str) -> (ShutdownResult, Option<(msgs::ChannelUpdate, NodeId, NodeId)>), >( @@ -3982,15 +3952,19 @@ where // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update_opt.take() { - handle_new_monitor_update!( - self, + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } } else { let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; @@ -4115,8 +4089,19 @@ where match peer_state.channel_by_id.entry(channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { - handle_new_monitor_update!(self, funding_txo, - monitor_update, peer_state_lock, peer_state, per_peer_state, chan); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo, + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } return; } else { debug_assert!(false, "We shouldn't have an update for a non-funded channel"); @@ -5260,16 +5245,22 @@ where ); match break_channel_entry!(self, peer_state, send_res, chan_entry) { Some(monitor_update) => { - let ok = handle_new_monitor_update!( - self, - funding_txo, - monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); - if !ok { + let (update_completed, completion_data) = self + .handle_new_monitor_update_with_status( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo, + 
monitor_update, + ); + if let Some(data) = completion_data { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } + if !update_completed { // Note that MonitorUpdateInProgress here indicates (per function // docs) that we will resend the commitment update once monitor // updating completes. Therefore, we must return an error @@ -8933,15 +8924,19 @@ where .or_insert_with(Vec::new) .push(raa_blocker); } - handle_new_monitor_update!( - self, + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, prev_hop.funding_txo, monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } }, UpdateFulfillCommitFetch::DuplicateClaim {} => { let (action_opt, raa_blocker_opt) = completion_action(None, true); @@ -9728,6 +9723,73 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } + /// Applies a new monitor update and attempts to resume the channel if all updates are complete. + /// + /// Returns [`PostMonitorUpdateChanResume`] if all in-flight updates are complete, which should + /// be passed to [`Self::handle_post_monitor_update_chan_resume`] after releasing locks. + /// + /// Note: This method takes individual fields from [`PeerState`] rather than the whole struct + /// to avoid borrow checker issues when the channel is borrowed from `peer_state.channel_by_id`. + fn handle_new_monitor_update( + &self, + in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec<MonitorUpdateCompletionAction>, + >, + pending_msg_events: &mut Vec<MessageSendEvent>, is_connected: bool, + chan: &mut FundedChannel<SP>, funding_txo: OutPoint, update: ChannelMonitorUpdate, + ) -> Option<PostMonitorUpdateChanResume> { + self.handle_new_monitor_update_with_status( + in_flight_monitor_updates, + monitor_update_blocked_actions, + pending_msg_events, + is_connected, + chan, + funding_txo, + update, + ) + .1 + } + + /// Like [`Self::handle_new_monitor_update`], but also returns whether this specific update + /// completed (as opposed to being in-progress). + fn handle_new_monitor_update_with_status( + &self, + in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec<MonitorUpdateCompletionAction>, + >, + pending_msg_events: &mut Vec<MessageSendEvent>, is_connected: bool, + chan: &mut FundedChannel<SP>, funding_txo: OutPoint, update: ChannelMonitorUpdate, + ) -> (bool, Option<PostMonitorUpdateChanResume>) { + let chan_id = chan.context.channel_id(); + let counterparty_node_id = chan.context.get_counterparty_node_id(); + + let (update_completed, all_updates_complete) = self.update_channel_monitor( + in_flight_monitor_updates, + chan_id, + funding_txo, + counterparty_node_id, + update, + ); + + let completion_data = if all_updates_complete { + Some(self.try_resume_channel_post_monitor_update( + in_flight_monitor_updates, + monitor_update_blocked_actions, + pending_msg_events, + is_connected, + chan, + )) + } else { + None + }; + + (update_completed, completion_data) + } + /// Attempts to resume a channel after a monitor update completes, while locks are still held. /// /// If the channel has no more blocked monitor updates, this resumes normal operation by @@ -11347,15 +11409,19 @@ This indicates a bug inside LDK.
Please report this error at https://github.com/ } // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!( - self, + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } }, None => { @@ -11668,8 +11734,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ try_channel_entry!(self, peer_state, Err(err), chan_entry) } } else if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock, - peer_state, per_peer_state, chan); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo.unwrap(), + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } } Ok(()) @@ -11699,10 +11776,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ); if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!( - self, funding_txo.unwrap(), monitor_update, peer_state_lock, peer_state, - per_peer_state, chan - ); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo.unwrap(), + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } } Ok(()) @@ -11939,8 +12025,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(monitor_update) = monitor_update_opt { let funding_txo = funding_txo_opt .expect("Funding outpoint must have been set for RAA handling to succeed"); - handle_new_monitor_update!(self, funding_txo, monitor_update, - peer_state_lock, peer_state, per_peer_state, chan); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo, + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } (htlcs_to_fail, static_invoices) } else { @@ -12418,15 +12515,19 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } } if let Some(monitor_update) = splice_promotion.monitor_update { - handle_new_monitor_update!( - self, + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, splice_promotion.funding_txo, monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } } } else { @@ -12614,15 +12715,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(monitor_update) = monitor_opt { has_monitor_update = true; - handle_new_monitor_update!( - self, + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, funding_txo.unwrap(), monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } continue 'peer_loop; } } @@ -14051,8 +14156,19 @@ where if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() { log_debug!(logger, "Unlocking monitor updating and updating monitor", ); - handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update, - peer_state_lck, peer_state, per_peer_state, chan); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + channel_funding_outpoint, + monitor_update, + ) { + mem::drop(peer_state_lck); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } if further_update_exists { // If there are more `ChannelMonitorUpdate`s to process, restart at the // top of the loop. From b524b9bbbcbddac8a2896fe929611bc0ad1e23a4 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 14 Jan 2026 01:26:21 +0000 Subject: [PATCH 087/242] Remove spurious debug assertion added in 0.2 In 20877b3e229ffedee9483e2b021fdcb98c7a378a we added a `debug_assert`ion to validate that if we call `maybe_free_holding_cell_htlcs` and it doesn't manage to generate a new commitment (implying `!can_generate_new_commitment()`) that we don't have any HTLCs to fail, but there was no reason for that, and it's reachable. Here we simply remove the spurious debug assertion and add a test that exercises it.
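The state the removed assertion denied is easier to see in miniature. A rough sketch with hypothetical names (`Htlc` and a flat dust budget, not the real `maybe_free_holding_cell_htlcs` logic): an HTLC-add popped from the holding cell can fail at that point, e.g. over the dust-exposure limit, leaving HTLCs to fail back even though nothing was added and no new commitment will be generated.

```rust
// Hypothetical miniature of the holding-cell free path described above.
struct Htlc {
    amount_msat: u64,
}

/// Pops holding-cell HTLC-adds, failing any that no longer fit the dust budget.
/// Returns whether a new commitment would be generated, plus the HTLCs to fail.
fn free_holding_cell(holding_cell: &mut Vec<Htlc>, dust_budget_msat: u64) -> (bool, Vec<Htlc>) {
    let mut htlcs_to_fail = Vec::new();
    let mut added = 0usize;
    let mut used_msat = 0u64;
    for htlc in holding_cell.drain(..) {
        if used_msat + htlc.amount_msat > dust_budget_msat {
            // The add fails at release time (dust exposure grew in the meantime)...
            htlcs_to_fail.push(htlc);
        } else {
            used_msat += htlc.amount_msat;
            added += 1;
        }
    }
    // ...so `htlcs_to_fail` can be non-empty with no adds and no new commitment,
    // which is exactly the combination the removed `debug_assert` rejected.
    (added > 0, htlcs_to_fail)
}

fn main() {
    let mut cell = vec![Htlc { amount_msat: 500_000 }];
    let (generates_commitment, failed) = free_holding_cell(&mut cell, 0);
    assert!(!generates_commitment);
    assert_eq!(failed.len(), 1);
}
```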
--- lightning/src/ln/channel.rs | 1 - lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/ln/functional_tests.rs | 151 ++++++++++++++++++++++ 3 files changed, 153 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index d22359935fa..659735cc0a2 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8981,7 +8981,6 @@ where update_fail_htlcs.len() + update_fail_malformed_htlcs.len(), &self.context.channel_id); } else { - debug_assert!(htlcs_to_fail.is_empty()); let reason = if self.context.channel_state.is_local_stfu_sent() { "exits quiescence" } else if self.context.channel_state.is_monitor_update_in_progress() { diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e9cb13dbd2a..00fbca201cd 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -971,7 +971,7 @@ pub fn get_revoke_commit_msgs<CM: AChannelManager, H: NodeHolder<CM = CM>>( assert_eq!(node_id, recipient); (*msg).clone() }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event: {events:?}"), }, match events[1] { MessageSendEvent::UpdateHTLCs { ref node_id, ref channel_id, ref updates } => { @@ -984,7 +984,7 @@ pub fn get_revoke_commit_msgs<CM: AChannelManager, H: NodeHolder<CM = CM>>( assert!(updates.commitment_signed.iter().all(|cs| cs.channel_id == *channel_id)); updates.commitment_signed.clone() }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event: {events:?}"), }, ) } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 58ef44c4939..fcb348c690d 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -9900,3 +9900,154 @@ pub fn test_multi_post_event_actions() { do_test_multi_post_event_actions(true); do_test_multi_post_event_actions(false); } + +#[xtest(feature = "_externalize_tests")] +pub fn test_dust_exposure_holding_cell_assertion() { + // Test that we properly move forward if we pop an HTLC-add from the holding cell but fail to + // add it to the channel. In 0.2 this caused a (harmless in prod) debug assertion failure. We + // try to ensure that this won't happen by checking that an HTLC will be able to be added + // before we add it to the holding cell, so getting into this state takes a bit of work. + // + // Here we accomplish this by using the dust exposure limit. This has the unique feature that + // node C can increase node B's dust exposure on the B <-> C channel without B doing anything. + // To exploit this, we get node B one HTLC away from being over-exposed to dust, give it one + // more HTLC in the holding cell, then have node C add an HTLC. By the time the holding-cell + // HTLC is released we are at max-dust-exposure and will fail it.
+ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + + // Configure nodes with specific dust limits + let mut config = test_default_channel_config(); + // Use a fixed dust exposure limit to make the test simpler + const DUST_HTLC_VALUE_MSAT: u64 = 500_000; + config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FixedLimitMsat(5_000_000); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + + let configs = [Some(config.clone()), Some(config.clone()), Some(config.clone())]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + // Create channels: A <-> B <-> C + create_announced_chan_between_nodes(&nodes, 0, 1); + let bc_chan_id = create_announced_chan_between_nodes(&nodes, 1, 2).2; + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 10_000_000); + + // Send multiple dust HTLCs from B to C to approach the dust limit (including transaction fees) + for _ in 0..4 { + route_payment(&nodes[1], &[&nodes[2]], DUST_HTLC_VALUE_MSAT); + } + + // At this point we shouldn't be over the dust limit, and should still be able to send HTLCs. + let bs_chans = nodes[1].node.list_channels(); + let bc_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert_eq!( + bc_chan.next_outbound_htlc_minimum_msat, + config.channel_handshake_config.our_htlc_minimum_msat + ); + + // Add a further HTLC from B to C, but don't deliver the send messages. + // After this we'll only have the ability to add one more HTLC, but by not delivering the send + // messages (leaving B waiting on C's RAA) the next HTLC will go into B's holding cell. + let (route_bc, payment_hash_bc, _payment_preimage_bc, payment_secret_bc) = + get_route_and_payment_hash!(nodes[1], nodes[2], DUST_HTLC_VALUE_MSAT); + let onion_bc = RecipientOnionFields::secret_only(payment_secret_bc); + let id = PaymentId(payment_hash_bc.0); + nodes[1].node.send_payment_with_route(route_bc, payment_hash_bc, onion_bc, id).unwrap(); + check_added_monitors(&nodes[1], 1); + let send_bc = SendEvent::from_node(&nodes[1]); + + let bs_chans = nodes[1].node.list_channels(); + let bc_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert_eq!( + bc_chan.next_outbound_htlc_minimum_msat, + config.channel_handshake_config.our_htlc_minimum_msat + ); + + // Forward an additional HTLC from A through B to C. This will go in B's holding cell for node + // C as it is waiting on a response to the above messages. 
+ let payment_params_ac = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + let (route_ac, payment_hash_cell, _, payment_secret_ac) = + get_route_and_payment_hash!(nodes[0], nodes[2], payment_params_ac, DUST_HTLC_VALUE_MSAT); + let onion_ac = RecipientOnionFields::secret_only(payment_secret_ac); + let id = PaymentId(payment_hash_cell.0); + nodes[0].node.send_payment_with_route(route_ac, payment_hash_cell, onion_ac, id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let send_ab = SendEvent::from_node(&nodes[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_ab.msgs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &send_ab.commitment_msg, false, true); + + // At this point when we process pending forwards the HTLC will go into the holding cell and no + // further messages will be generated. Node B will also be at its maximum dust exposure and + // will refuse to send any dust HTLCs (when it includes the holding cell HTLC). + expect_and_process_pending_htlcs(&nodes[1], false); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let bs_chans = nodes[1].node.list_channels(); + let bc_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert!(bc_chan.next_outbound_htlc_minimum_msat > DUST_HTLC_VALUE_MSAT); + + // Send an additional HTLC from C to B. This will make B unable to forward the HTLC already in + // its holding cell as it would be over-exposed to dust. + let (route_cb, payment_hash_cb, payment_preimage_cb, payment_secret_cb) = + get_route_and_payment_hash!(nodes[2], nodes[1], DUST_HTLC_VALUE_MSAT); + let onion_cb = RecipientOnionFields::secret_only(payment_secret_cb); + let id = PaymentId(payment_hash_cb.0); + nodes[2].node.send_payment_with_route(route_cb, payment_hash_cb, onion_cb, id).unwrap(); + check_added_monitors(&nodes[2], 1); + + // Now deliver all the messages and make sure that the HTLC is failed-back. 
+ let send_event_cb = SendEvent::from_node(&nodes[2]); + nodes[1].node.handle_update_add_htlc(node_c_id, &send_event_cb.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &send_event_cb.commitment_msg); + check_added_monitors(&nodes[1], 1); + + nodes[2].node.handle_update_add_htlc(node_b_id, &send_bc.msgs[0]); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_bc.commitment_msg); + check_added_monitors(&nodes[2], 1); + + let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_raa); + check_added_monitors(&nodes[1], 1); + let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &node_c_id); + + // When we delivered the RAA above, we attempted (and failed) to add the HTLC to the channel, + // causing it to be ready to fail-back, which we do here: + let next_hop = + HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: bc_chan_id }; + expect_htlc_forwarding_fails(&nodes[1], &[next_hop]); + check_added_monitors(&nodes[1], 1); + fail_payment_along_path(&[&nodes[0], &nodes[1]]); + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[0], payment_hash_cell, false, conditions); + + nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_raa); + check_added_monitors(&nodes[2], 1); + let cs_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); + + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); + check_added_monitors(&nodes[2], 1); + let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); + + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &cs_cs.commitment_signed); + check_added_monitors(&nodes[1], 1); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); + + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_raa); + check_added_monitors(&nodes[1], 1); + expect_and_process_pending_htlcs(&nodes[1], false); + expect_payment_claimable!(nodes[1], payment_hash_cb, payment_secret_cb, DUST_HTLC_VALUE_MSAT); + + nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_raa); + check_added_monitors(&nodes[2], 1); + + // Now that everything has settled, make sure the channels still work with a simple claim. + claim_payment(&nodes[2], &[&nodes[1]], payment_preimage_cb); +} From 20e8526552442389c0a3c5680ca58b9bda98e100 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 14 Jan 2026 19:30:07 +0100 Subject: [PATCH 088/242] Rename update_channel_monitor to handle_new_monitor_update_locked_actions_handled_by_caller The previous name didn't capture that this function is unusual and requires the caller to handle post-update actions manually. The new name makes it clear that callers are responsible for handling locked actions themselves, reducing the risk of misuse. 
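Roughly, the contract the new name encodes looks like the following sketch (stand-in types and a heavily simplified signature, not the actual method): the function applies the update while the caller still holds its locks and deliberately leaves the blocked completion actions alone, so draining and running them after the locks are released is the caller's job.

```rust
// Stand-ins for the real `ChannelMonitorUpdate` and completion-action types.
struct Update;

struct PeerState {
    in_flight: Vec<Update>,
    blocked_actions: Vec<String>,
}

/// Applies the update under the caller's locks; `blocked_actions` is untouched.
fn handle_new_monitor_update_locked_actions_handled_by_caller(
    peer_state: &mut PeerState, update: Update,
) -> bool {
    peer_state.in_flight.push(update);
    true // in this sketch the update always persists synchronously
}

fn main() {
    let mut peer_state = PeerState {
        in_flight: Vec::new(),
        blocked_actions: vec!["forward HTLC".to_owned()],
    };
    let completed =
        handle_new_monitor_update_locked_actions_handled_by_caller(&mut peer_state, Update);
    assert!(completed);
    // The caller's half of the contract, once its peer-state lock is dropped:
    for action in peer_state.blocked_actions.drain(..) {
        println!("caller completes: {action}");
    }
}
```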
Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 36 ++++++++++++++++-------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ce5e88e4a7f..54f2cf458d6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4467,7 +4467,7 @@ where log_error!(logger, "Closed channel due to close-required error: {}", msg); if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { - self.update_channel_monitor( + self.handle_new_monitor_update_locked_actions_handled_by_caller( in_flight_monitor_updates, chan.context.channel_id(), funding_txo, @@ -9581,7 +9581,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// Returns a tuple of `(update_completed, all_updates_completed)`: /// - `update_completed`: whether this specific monitor update finished persisting /// - `all_updates_completed`: whether all in-flight updates for this channel are now complete - fn update_channel_monitor( + fn handle_new_monitor_update_locked_actions_handled_by_caller( &self, in_flight_monitor_updates: &mut BTreeMap)>, channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey, @@ -9647,13 +9647,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ funding_txo: OutPoint, update: ChannelMonitorUpdate, counterparty_node_id: PublicKey, channel_id: ChannelId, ) -> Option> { - let (_update_completed, all_updates_complete) = self.update_channel_monitor( - in_flight_monitor_updates, - channel_id, - funding_txo, - counterparty_node_id, - update, - ); + let (_update_completed, all_updates_complete) = self + .handle_new_monitor_update_locked_actions_handled_by_caller( + in_flight_monitor_updates, + channel_id, + funding_txo, + counterparty_node_id, + update, + ); if all_updates_complete { Some(monitor_update_blocked_actions.remove(&channel_id).unwrap_or(Vec::new())) } else { @@ -9767,13 +9768,14 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let chan_id = chan.context.channel_id(); let counterparty_node_id = chan.context.get_counterparty_node_id(); - let (update_completed, all_updates_complete) = self.update_channel_monitor( - in_flight_monitor_updates, - chan_id, - funding_txo, - counterparty_node_id, - update, - ); + let (update_completed, all_updates_complete) = self + .handle_new_monitor_update_locked_actions_handled_by_caller( + in_flight_monitor_updates, + chan_id, + funding_txo, + counterparty_node_id, + update, + ); let completion_data = if all_updates_complete { Some(self.try_resume_channel_post_monitor_update( @@ -14993,7 +14995,7 @@ where insert_short_channel_id!(short_to_chan_info, funded_channel); if let Some(monitor_update) = monitor_update_opt { - self.update_channel_monitor( + self.handle_new_monitor_update_locked_actions_handled_by_caller( &mut peer_state.in_flight_monitor_updates, funded_channel.context.channel_id(), funding_txo, From bf6017da28dd1687ef01442687aa17e8ec16b314 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 12 Jan 2026 09:29:33 +0100 Subject: [PATCH 089/242] `NetworkGraph`: Update node and channel count estimates for Jan 2026 --- lightning/src/routing/gossip.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index 040a28cddae..ea72d97d7fd 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -1772,13 +1772,15 @@ where } } -// In Jan, 2025 there were about 49K channels. -// We over-allocate by a bit because 20% more is better than the double we get if we're slightly -// too low -const CHAN_COUNT_ESTIMATE: usize = 60_000; -// In Jan, 2025 there were about 15K nodes -// We over-allocate by a bit because 33% more is better than the double we get if we're slightly -// too low +/// In Jan, 2026 there were about 54K channels. +/// +/// We over-allocate by a bit because ~15% more is better than the double we get if we're slightly +/// too low. +const CHAN_COUNT_ESTIMATE: usize = 63_000; +/// In Jan, 2026 there were about 17K nodes +/// +/// We over-allocate by a bit because 15% more is better than the double we get if we're slightly +/// too low. const NODE_COUNT_ESTIMATE: usize = 20_000; impl NetworkGraph From 9859bb93c4ca4cf95a6ef14892b1e69295c1dfe3 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 15 Jan 2026 12:50:41 +0100 Subject: [PATCH 090/242] fuzz: document -D flag for faster development builds --- fuzz/README.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/fuzz/README.md b/fuzz/README.md index 4b6e0d12457..cfdab4940bc 100644 --- a/fuzz/README.md +++ b/fuzz/README.md @@ -68,6 +68,19 @@ cargo +nightly fuzz run --features "libfuzzer_fuzz" msg_ping_target Note: If you encounter a `SIGKILL` during run/build check for OOM in kernel logs and consider increasing RAM size for VM. +##### Fast builds for development + +The default build uses LTO and single codegen unit, which is slow. For faster iteration during +development, use the `-D` (dev) flag: + +```shell +cargo +nightly fuzz run --features "libfuzzer_fuzz" -D msg_ping_target +``` + +The `-D` flag builds in development mode with faster compilation (still has optimizations via +`opt-level = 1`). The first build will be slow as it rebuilds the standard library with +sanitizer instrumentation, but subsequent builds will be fast. 
+ If you wish to just generate fuzzing binary executables for `libFuzzer` and not run them: ```shell cargo +nightly fuzz build --features "libfuzzer_fuzz" msg_ping_target From 1f2e903825afbcedc34f9886317ddadcf1d1ceba Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 15 Jan 2026 12:50:46 +0100 Subject: [PATCH 091/242] fuzz: support initial async monitor persistence in chanmon_consistency Read the first byte of fuzz input to determine initial monitor styles for each node (bit 0 = node A, bit 1 = node B, bit 2 = node C). When set, the node starts with InProgress persistence mode from the beginning. This allows fuzzing the async persistence path during initial channel creation, not just after reload. The make_channel macro now completes pending monitor updates after watch_channel calls to allow the channel handshake to proceed. Co-Authored-By: Claude Opus 4.5 --- fuzz/src/chanmon_consistency.rs | 69 +++++++++++++++++++++++++-------- 1 file changed, 53 insertions(+), 16 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index aca232471d6..03d170b1bc0 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -299,8 +299,10 @@ impl chain::Watch for TestChainMonitor { persisted_monitor: ser.0, pending_monitors: Vec::new(), }, - Ok(chain::ChannelMonitorUpdateStatus::InProgress) => { - panic!("The test currently doesn't test initial-persistence via the async pipeline") + Ok(chain::ChannelMonitorUpdateStatus::InProgress) => LatestMonitorState { + persisted_monitor_id: monitor_id, + persisted_monitor: Vec::new(), + pending_monitors: vec![(monitor_id, ser.0)], }, Ok(chain::ChannelMonitorUpdateStatus::UnrecoverableError) => panic!(), Err(()) => panic!(), @@ -706,6 +708,26 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let broadcast = Arc::new(TestBroadcaster {}); let router = FuzzRouter {}; + // Read initial monitor styles from fuzz input (1 byte: 2 bits per node) + let initial_mon_styles = if !data.is_empty() { data[0] } else { 0 }; + let mon_style = [ + RefCell::new(if initial_mon_styles & 0b01 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }), + RefCell::new(if initial_mon_styles & 0b10 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }), + RefCell::new(if initial_mon_styles & 0b100 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }), + ]; + macro_rules! make_node { ($node_id: expr, $fee_estimator: expr) => {{ let logger: Arc = @@ -725,7 +747,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister { - update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed), + update_ret: Mutex::new(mon_style[$node_id as usize].borrow().clone()), }), Arc::clone(&keys_manager), )); @@ -762,9 +784,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }}; } - let default_mon_style = RefCell::new(ChannelMonitorUpdateStatus::Completed); - let mon_style = [default_mon_style.clone(), default_mon_style.clone(), default_mon_style]; - let reload_node = |ser: &Vec, node_id: u8, old_monitors: &TestChainMonitor, @@ -860,8 +879,21 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }; let mut channel_txn = Vec::new(); + macro_rules! 
complete_all_pending_monitor_updates { + ($monitor: expr) => {{ + for (channel_id, state) in $monitor.latest_monitors.lock().unwrap().iter_mut() { + for (id, data) in state.pending_monitors.drain(..) { + $monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); + if id >= state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } + }}; + } macro_rules! make_channel { - ($source: expr, $dest: expr, $dest_keys_manager: expr, $chan_id: expr) => {{ + ($source: expr, $dest: expr, $source_monitor: expr, $dest_monitor: expr, $dest_keys_manager: expr, $chan_id: expr) => {{ let init_dest = Init { features: $dest.init_features(), networks: None, @@ -965,12 +997,14 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }; $dest.handle_funding_created($source.get_our_node_id(), &funding_created); + // Complete any pending monitor updates for dest after watch_channel + complete_all_pending_monitor_updates!($dest_monitor); - let funding_signed = { + let (funding_signed, channel_id) = { let events = $dest.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] { - msg.clone() + (msg.clone(), msg.channel_id.clone()) } else { panic!("Wrong event type"); } @@ -984,19 +1018,22 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } $source.handle_funding_signed($dest.get_our_node_id(), &funding_signed); + // Complete any pending monitor updates for source after watch_channel + complete_all_pending_monitor_updates!($source_monitor); + let events = $source.get_and_clear_pending_events(); assert_eq!(events.len(), 1); - let channel_id = if let events::Event::ChannelPending { + if let events::Event::ChannelPending { ref counterparty_node_id, - ref channel_id, + channel_id: ref event_channel_id, .. } = events[0] { assert_eq!(counterparty_node_id, &$dest.get_our_node_id()); - channel_id.clone() + assert_eq!(*event_channel_id, channel_id); } else { panic!("Wrong event type"); - }; + } channel_id }}; @@ -1087,8 +1124,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut nodes = [node_a, node_b, node_c]; - let chan_1_id = make_channel!(nodes[0], nodes[1], keys_manager_b, 0); - let chan_2_id = make_channel!(nodes[1], nodes[2], keys_manager_c, 1); + let chan_1_id = make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 0); + let chan_2_id = make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 1); for node in nodes.iter() { confirm_txn!(node); @@ -1124,7 +1161,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }}; } - let mut read_pos = 0; + let mut read_pos = 1; // First byte was consumed for initial mon_style macro_rules! get_slice { ($len: expr) => {{ let slice_len = $len as usize; From 429a55aac07ead77b9643549dc690e9c7c1817ff Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 12 Jan 2026 09:32:56 +0100 Subject: [PATCH 092/242] `NetworkGraph`: Determine pre-allocations using actual numbers when reading When reading a persisted network graph, we previously pre-allocated our default node/channels estimate count for the respective `IndexedMap` capacities. However, this might unnecessarily allocate memory on reading, for example if we have an (almost) empty network graph for one reason or another. As we have the actual counts of persisted nodes and channels available, we here simply opt to allocate these numbers (plus 15%). 
This will also ensure that our pre-allocations will stay up-to-date over time as the network grows or shrinks. --- lightning/src/routing/gossip.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index ea72d97d7fd..42eab6dc382 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -1682,9 +1682,17 @@ where fn read(reader: &mut R, logger: L) -> Result<NetworkGraph<L>, DecodeError> { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); + const MAX_CHAN_COUNT_LIMIT: usize = 100_000_000; + const MAX_NODE_COUNT_LIMIT: usize = 10_000_000; + let chain_hash: ChainHash = Readable::read(reader)?; let channels_count: u64 = Readable::read(reader)?; - let mut channels = IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE); + // Pre-allocate 115% of the known channel count to avoid unnecessary reallocations. + let channels_map_capacity = (channels_count as u128 * 115 / 100) + .try_into() + .map(|v: usize| v.min(MAX_CHAN_COUNT_LIMIT)) + .map_err(|_| DecodeError::InvalidValue)?; + let mut channels = IndexedMap::with_capacity(channels_map_capacity); for _ in 0..channels_count { let chan_id: u64 = Readable::read(reader)?; let chan_info: ChannelInfo = Readable::read(reader)?; @@ -1696,7 +1704,12 @@ where if nodes_count > u32::max_value() as u64 / 2 { return Err(DecodeError::InvalidValue); } - let mut nodes = IndexedMap::with_capacity(NODE_COUNT_ESTIMATE); + // Pre-allocate 115% of the known node count to avoid unnecessary reallocations. + let nodes_map_capacity: usize = (nodes_count as u128 * 115 / 100) + .try_into() + .map(|v: usize| v.min(MAX_NODE_COUNT_LIMIT)) + .map_err(|_| DecodeError::InvalidValue)?; + let mut nodes = IndexedMap::with_capacity(nodes_map_capacity); for i in 0..nodes_count { let node_id = Readable::read(reader)?; let mut node_info: NodeInfo = Readable::read(reader)?; From 39ca7cb1d6e80b9d2ed297185f67ab65f2e310ee Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 14 Jan 2026 13:46:15 +0100 Subject: [PATCH 093/242] `NetworkGraph`: Only pre-allocate memory on mainnet Previously, we'd always pre-allocate memory for the node and channel maps based on mainnet numbers, even if we're on another network like `Regtest`. Here, we only apply the estimates if we're actually on `Network::Bitcoin`, which should reduce the `NetworkGraph`'s memory footprint considerably in tests. --- lightning/src/routing/gossip.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index 42eab6dc382..534bebe7618 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -1802,12 +1802,18 @@ where { /// Creates a new, empty, network graph.
pub fn new(network: Network, logger: L) -> NetworkGraph { + let (node_map_cap, chan_map_cap) = if matches!(network, Network::Bitcoin) { + (NODE_COUNT_ESTIMATE, CHAN_COUNT_ESTIMATE) + } else { + (0, 0) + }; + Self { secp_ctx: Secp256k1::verification_only(), chain_hash: ChainHash::using_genesis_block(network), logger, - channels: RwLock::new(IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE)), - nodes: RwLock::new(IndexedMap::with_capacity(NODE_COUNT_ESTIMATE)), + channels: RwLock::new(IndexedMap::with_capacity(chan_map_cap)), + nodes: RwLock::new(IndexedMap::with_capacity(node_map_cap)), next_node_counter: AtomicUsize::new(0), removed_node_counters: Mutex::new(Vec::new()), last_rapid_gossip_sync_timestamp: Mutex::new(None), From 94078ca6db14db72fad816137ba280a1711967a0 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Tue, 2 Dec 2025 15:02:32 -0600 Subject: [PATCH 094/242] Use struct instead of enum for SpliceContribution When adding support for mixed splice-in and splice-out, the contribution amount will need to be computed based on the splice-in and splice-out values. Rather than add a third variant to SpliceContribution, which could have an invalid contribution amount, use a more general struct that can represent splice-in, splice-out, and mixed. Constructors are provided for the typical splice-in and splice-out case whereas support for the mixed case will be added in an independent change. --- fuzz/src/chanmon_consistency.rs | 68 +++---- .../src/upgrade_downgrade_tests.rs | 10 +- lightning/src/ln/funding.rs | 93 ++++----- lightning/src/ln/splicing_tests.rs | 187 ++++++++---------- 4 files changed, 153 insertions(+), 205 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index aca232471d6..ba3fc9077e8 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1860,11 +1860,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0xa0 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap(); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(10_000), - inputs: vec![input], - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_a.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[0].splice_channel( &chan_a_id, @@ -1882,11 +1879,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0xa1 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 1).unwrap(); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(10_000), - inputs: vec![input], - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[1].splice_channel( &chan_a_id, @@ -1904,11 +1898,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0xa2 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap(); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(10_000), - inputs: vec![input], - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[1].splice_channel( &chan_b_id, @@ -1926,11 +1917,8 @@ pub fn do_test(data: &[u8], 
underlying_out: Out, anchors: bool) { }, 0xa3 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 1).unwrap(); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(10_000), - inputs: vec![input], - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_c.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[2].splice_channel( &chan_b_id, @@ -1958,12 +1946,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .map(|chan| chan.outbound_capacity_msat) .unwrap(); if outbound_capacity_msat >= 20_000_000 { - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: coinbase_tx.output[0].script_pubkey.clone(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[0].script_pubkey.clone(), + }]); let funding_feerate_sat_per_kw = fee_est_a.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[0].splice_channel( @@ -1989,12 +1975,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .map(|chan| chan.outbound_capacity_msat) .unwrap(); if outbound_capacity_msat >= 20_000_000 { - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), + }]); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[1].splice_channel( @@ -2020,12 +2004,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .map(|chan| chan.outbound_capacity_msat) .unwrap(); if outbound_capacity_msat >= 20_000_000 { - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), + }]); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[1].splice_channel( @@ -2051,12 +2033,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .map(|chan| chan.outbound_capacity_msat) .unwrap(); if outbound_capacity_msat >= 20_000_000 { - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: coinbase_tx.output[2].script_pubkey.clone(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[2].script_pubkey.clone(), + }]); let funding_feerate_sat_per_kw = fee_est_c.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[2].splice_channel( diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index 19c50e870de..8df670321be 100644 --- 
a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -451,12 +451,10 @@ fn do_test_0_1_htlc_forward_after_splice(fail_htlc: bool) { reconnect_b_c_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_b_c_args); - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); let splice_tx = splice_channel(&nodes[0], &nodes[1], ChannelId(chan_id_bytes_a), contribution); for node in nodes.iter() { mine_transaction(node, &splice_tx); diff --git a/lightning/src/ln/funding.rs b/lightning/src/ln/funding.rs index f80b2b6daea..b7f8740f737 100644 --- a/lightning/src/ln/funding.rs +++ b/lightning/src/ln/funding.rs @@ -20,69 +20,62 @@ use crate::sign::{P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT}; /// The components of a splice's funding transaction that are contributed by one party. #[derive(Debug, Clone)] -pub enum SpliceContribution { - /// When funds are added to a channel. - SpliceIn { - /// The amount to contribute to the splice. - value: Amount, - - /// The inputs included in the splice's funding transaction to meet the contributed amount - /// plus fees. Any excess amount will be sent to a change output. - inputs: Vec, - - /// An optional change output script. This will be used if needed or, when not set, - /// generated using [`SignerProvider::get_destination_script`]. - /// - /// [`SignerProvider::get_destination_script`]: crate::sign::SignerProvider::get_destination_script - change_script: Option, - }, - /// When funds are removed from a channel. - SpliceOut { - /// The outputs to include in the splice's funding transaction. The total value of all - /// outputs plus fees will be the amount that is removed. - outputs: Vec, - }, +pub struct SpliceContribution { + /// The amount to contribute to the splice. + value: SignedAmount, + + /// The inputs included in the splice's funding transaction to meet the contributed amount + /// plus fees. Any excess amount will be sent to a change output. + inputs: Vec, + + /// The outputs to include in the splice's funding transaction. The total value of all + /// outputs plus fees will be the amount that is removed. + outputs: Vec, + + /// An optional change output script. This will be used if needed or, when not set, + /// generated using [`SignerProvider::get_destination_script`]. + /// + /// [`SignerProvider::get_destination_script`]: crate::sign::SignerProvider::get_destination_script + change_script: Option, } impl SpliceContribution { + /// Creates a contribution for when funds are only added to a channel. + pub fn splice_in( + value: Amount, inputs: Vec, change_script: Option, + ) -> Self { + let value_added = value.to_signed().unwrap_or(SignedAmount::MAX); + + Self { value: value_added, inputs, outputs: vec![], change_script } + } + + /// Creates a contribution for when funds are only removed from a channel. + pub fn splice_out(outputs: Vec) -> Self { + let value_removed = outputs + .iter() + .map(|txout| txout.value) + .sum::() + .to_signed() + .unwrap_or(SignedAmount::MAX); + + Self { value: -value_removed, inputs: vec![], outputs, change_script: None } + } + pub(super) fn value(&self) -> SignedAmount { - match self { - SpliceContribution::SpliceIn { value, .. 
} => { - value.to_signed().unwrap_or(SignedAmount::MAX) - }, - SpliceContribution::SpliceOut { outputs } => { - let value_removed = outputs - .iter() - .map(|txout| txout.value) - .sum::() - .to_signed() - .unwrap_or(SignedAmount::MAX); - -value_removed - }, - } + self.value } pub(super) fn inputs(&self) -> &[FundingTxInput] { - match self { - SpliceContribution::SpliceIn { inputs, .. } => &inputs[..], - SpliceContribution::SpliceOut { .. } => &[], - } + &self.inputs[..] } pub(super) fn outputs(&self) -> &[TxOut] { - match self { - SpliceContribution::SpliceIn { .. } => &[], - SpliceContribution::SpliceOut { outputs } => &outputs[..], - } + &self.outputs[..] } pub(super) fn into_tx_parts(self) -> (Vec, Vec, Option) { - match self { - SpliceContribution::SpliceIn { inputs, change_script, .. } => { - (inputs, vec![], change_script) - }, - SpliceContribution::SpliceOut { outputs } => (vec![], outputs, None), - } + let SpliceContribution { value: _, inputs, outputs, change_script } = self; + (inputs, outputs, change_script) } } diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index a05c0bd92d8..5ffdafd1813 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -47,11 +47,7 @@ fn test_splicing_not_supported_api_error() { let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); - let bs_contribution = SpliceContribution::SpliceIn { - value: Amount::ZERO, - inputs: Vec::new(), - change_script: None, - }; + let bs_contribution = SpliceContribution::splice_in(Amount::ZERO, Vec::new(), None); let res = nodes[1].node.splice_channel( &channel_id, @@ -113,11 +109,8 @@ fn test_v1_splice_in_negative_insufficient_inputs() { let funding_inputs = create_dual_funding_utxos_with_prev_txs(&nodes[0], &[extra_splice_funding_input_sats]); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_sats), - inputs: funding_inputs, - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(splice_in_sats), funding_inputs, None); // Initiate splice-in, with insufficient input contribution let res = nodes[0].node.splice_channel( @@ -490,12 +483,10 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { let (_, _, channel_id, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 50_000_000); - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); nodes[0] .node .splice_channel( @@ -748,12 +739,10 @@ fn test_config_reject_inbound_splices() { let (_, _, channel_id, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 50_000_000); - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); nodes[0] .node .splice_channel( @@ -811,14 +800,14 @@ fn test_splice_in() { let coinbase_tx1 = provide_anchor_reserves(&nodes); let coinbase_tx2 = provide_anchor_reserves(&nodes); - let initiator_contribution = 
SpliceContribution::SpliceIn { - value: Amount::from_sat(initial_channel_value_sat * 2), - inputs: vec![ + let initiator_contribution = SpliceContribution::splice_in( + Amount::from_sat(initial_channel_value_sat * 2), + vec![ FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), ], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); mine_transaction(&nodes[0], &splice_tx); @@ -850,18 +839,16 @@ fn test_splice_out() { let _ = send_payment(&nodes[0], &[&nodes[1]], 100_000); - let initiator_contribution = SpliceContribution::SpliceOut { - outputs: vec![ - TxOut { - value: Amount::from_sat(initial_channel_value_sat / 4), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::from_sat(initial_channel_value_sat / 4), - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; + let initiator_contribution = SpliceContribution::splice_out(vec![ + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ]); let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); mine_transaction(&nodes[0], &splice_tx); @@ -919,11 +906,11 @@ fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: let payment_amount = 1_000_000; let (preimage1, payment_hash1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_amount); let splice_in_amount = initial_channel_capacity / 2; - let initiator_contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let initiator_contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); let (preimage2, payment_hash2, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_amount); let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS; @@ -1117,18 +1104,16 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { route_payment(&nodes[0], &[&nodes[1]], 1_000_000); // Negotiate the splice up until the nodes exchange `tx_complete`. 
- let initiator_contribution = SpliceContribution::SpliceOut { - outputs: vec![ - TxOut { - value: Amount::from_sat(initial_channel_value_sat / 4), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::from_sat(initial_channel_value_sat / 4), - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; + let initiator_contribution = SpliceContribution::splice_out(vec![ + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ]); let initial_commit_sig_for_acceptor = negotiate_splice_tx(&nodes[0], &nodes[1], channel_id, initiator_contribution); assert_eq!(initial_commit_sig_for_acceptor.htlc_signatures.len(), 1); @@ -1405,12 +1390,10 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { nodes[1].node.peer_disconnected(node_id_0); let splice_out_sat = initial_channel_value_sat / 4; - let node_0_contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(splice_out_sat), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let node_0_contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(splice_out_sat), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); nodes[0] .node .splice_channel( @@ -1423,12 +1406,10 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { .unwrap(); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - let node_1_contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(splice_out_sat), - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }], - }; + let node_1_contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(splice_out_sat), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }]); nodes[1] .node .splice_channel( @@ -1681,11 +1662,11 @@ fn disconnect_on_unexpected_interactive_tx_message() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Complete interactive-tx construction, but fail by having the acceptor send a duplicate // tx_complete instead of commitment_signed. 
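// A minimal sketch of the sign convention the constructors used above encode. The
// `input` and `change_script` bindings and the amounts are hypothetical, and
// `value()` is the crate-internal accessor introduced in this patch:
//
//     let add = SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None);
//     assert_eq!(add.value(), SignedAmount::from_sat(10_000));
//
//     let remove = SpliceContribution::splice_out(vec![TxOut {
//         value: Amount::from_sat(7_500),
//         script_pubkey: change_script,
//     }]);
//     assert_eq!(remove.value(), SignedAmount::from_sat(-7_500));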
@@ -1721,11 +1702,11 @@ fn fail_splice_on_interactive_tx_error() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Fail during interactive-tx construction by having the acceptor echo back tx_add_input instead // of sending tx_complete. The failure occurs because the serial id will have the wrong parity. @@ -1827,11 +1808,11 @@ fn fail_splice_on_tx_abort() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Fail during interactive-tx construction by having the acceptor send tx_abort instead of // tx_complete. @@ -1881,11 +1862,11 @@ fn fail_splice_on_channel_close() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Close the channel before completion of interactive-tx construction. let _ = complete_splice_handshake(initiator, acceptor, channel_id, contribution.clone()); @@ -1932,11 +1913,11 @@ fn fail_quiescent_action_on_channel_close() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Close the channel before completion of STFU handshake. initiator @@ -2025,23 +2006,19 @@ fn do_test_splice_with_inflight_htlc_forward_and_resolution(expire_scid_pre_forw // Splice both channels, lock them, and connect enough blocks to trigger the legacy SCID pruning // logic while the HTLC is still pending. 
- let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); let splice_tx_0_1 = splice_channel(&nodes[0], &nodes[1], channel_id_0_1, contribution); for node in &nodes { mine_transaction(node, &splice_tx_0_1); } - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }]); let splice_tx_1_2 = splice_channel(&nodes[1], &nodes[2], channel_id_1_2, contribution); for node in &nodes { mine_transaction(node, &splice_tx_1_2); From 76e73a4f676b781a71dd9b3a4355171a154ac06b Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 4 Dec 2025 14:52:10 -0600 Subject: [PATCH 095/242] Use Amount in calculate_change_output_value --- lightning/src/ln/channel.rs | 6 ++--- lightning/src/ln/interactivetxs.rs | 40 ++++++++++++++++++------------ 2 files changed, 27 insertions(+), 19 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 659735cc0a2..a1c48b6cc21 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -6707,12 +6707,12 @@ impl FundingNegotiationContext { }, } }; - let mut change_output = - TxOut { value: Amount::from_sat(change_value), script_pubkey: change_script }; + let mut change_output = TxOut { value: change_value, script_pubkey: change_script }; let change_output_weight = get_output_weight(&change_output.script_pubkey).to_wu(); let change_output_fee = fee_for_weight(self.funding_feerate_sat_per_1000_weight, change_output_weight); - let change_value_decreased_with_fee = change_value.saturating_sub(change_output_fee); + let change_value_decreased_with_fee = + change_value.to_sat().saturating_sub(change_output_fee); // Check dust limit again if change_value_decreased_with_fee > context.holder_dust_limit_satoshis { change_output.value = Amount::from_sat(change_value_decreased_with_fee); diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index 4340aad420a..1ab9c6c68ee 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -2337,22 +2337,23 @@ impl InteractiveTxConstructor { pub(super) fn calculate_change_output_value( context: &FundingNegotiationContext, is_splice: bool, shared_output_funding_script: &ScriptBuf, change_output_dust_limit: u64, -) -> Result, AbortReason> { +) -> Result, AbortReason> { assert!(context.our_funding_contribution > SignedAmount::ZERO); - let our_funding_contribution_satoshis = context.our_funding_contribution.to_sat() as u64; + let our_funding_contribution = context.our_funding_contribution.to_unsigned().unwrap(); - let mut total_input_satoshis = 0u64; + let mut total_input_value = Amount::ZERO; let mut our_funding_inputs_weight = 0u64; for FundingTxInput { utxo, .. 
} in context.our_funding_inputs.iter() { - total_input_satoshis = total_input_satoshis.saturating_add(utxo.output.value.to_sat()); + total_input_value = total_input_value.checked_add(utxo.output.value).unwrap_or(Amount::MAX); let weight = BASE_INPUT_WEIGHT + utxo.satisfaction_weight; our_funding_inputs_weight = our_funding_inputs_weight.saturating_add(weight); } let funding_outputs = &context.our_funding_outputs; - let total_output_satoshis = - funding_outputs.iter().fold(0u64, |total, out| total.saturating_add(out.value.to_sat())); + let total_output_value = funding_outputs + .iter() + .fold(Amount::ZERO, |total, out| total.checked_add(out.value).unwrap_or(Amount::MAX)); let our_funding_outputs_weight = funding_outputs.iter().fold(0u64, |weight, out| { weight.saturating_add(get_output_weight(&out.script_pubkey).to_wu()) }); @@ -2376,15 +2377,22 @@ pub(super) fn calculate_change_output_value( } } - let fees_sats = fee_for_weight(context.funding_feerate_sat_per_1000_weight, weight); - let net_total_less_fees = - total_input_satoshis.saturating_sub(total_output_satoshis).saturating_sub(fees_sats); - if net_total_less_fees < our_funding_contribution_satoshis { + let contributed_fees = + Amount::from_sat(fee_for_weight(context.funding_feerate_sat_per_1000_weight, weight)); + let net_total_less_fees = total_input_value + .checked_sub(total_output_value) + .unwrap_or(Amount::ZERO) + .checked_sub(contributed_fees) + .unwrap_or(Amount::ZERO); + if net_total_less_fees < our_funding_contribution { // Not enough to cover contribution plus fees return Err(AbortReason::InsufficientFees); } - let remaining_value = net_total_less_fees.saturating_sub(our_funding_contribution_satoshis); - if remaining_value < change_output_dust_limit { + + let remaining_value = net_total_less_fees + .checked_sub(our_funding_contribution) + .expect("remaining_value should not be negative"); + if remaining_value.to_sat() < change_output_dust_limit { // Enough to cover contribution plus fees, but leftover is below dust limit; no change Ok(None) } else { @@ -3440,14 +3448,14 @@ mod tests { total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some((gross_change - fees - common_fees).to_sat())), + Ok(Some(gross_change - fees - common_fees)), ); // There is leftover for change, without common fees let context = FundingNegotiationContext { is_initiator: false, ..context }; assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some((gross_change - fees).to_sat())), + Ok(Some(gross_change - fees)), ); // Insufficient inputs, no leftover @@ -3482,7 +3490,7 @@ mod tests { total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 100), - Ok(Some((gross_change - fees).to_sat())), + Ok(Some(gross_change - fees)), ); // Larger fee, smaller change @@ -3496,7 +3504,7 @@ mod tests { total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some((gross_change - fees * 3 - common_fees * 3).to_sat())), + Ok(Some(gross_change - fees * 3 - common_fees * 3)), ); } From e58cfbcdd100575c0f998069f01b072d39accb5c Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 4 Dec 2025 17:18:33 -0600 Subject: [PATCH 096/242] Check change value in test_splice_in --- 
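A note on the arithmetic the new assertion pins down: the change output must carry whatever remains of the spent inputs after deducting the splice-in contribution and the fee paid by the initiator. A minimal sketch of that computation (the 2 BTC input total and the 321-sat fee are the values observed in this particular test, not derived constants):

    use bitcoin::Amount;

    let total_inputs = Amount::ONE_BTC * 2; // the test spends two 1 BTC coinbase outputs
    let added_value = Amount::from_sat(200_000); // hypothetical splice-in amount
    let fees = Amount::from_sat(321); // fee observed at the test's feerate

    // Anything not contributed to the channel or paid as fee must appear on the
    // caller-provided change script.
    let expected_change = total_inputs - added_value - fees;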
lightning/src/ln/splicing_tests.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index 5ffdafd1813..58a81bb2a36 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -29,8 +29,9 @@ use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::test_channel_signer::SignerOp; +use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; -use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut}; +use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut, WPubkeyHash}; #[test] fn test_splicing_not_supported_api_error() { @@ -800,16 +801,27 @@ fn test_splice_in() { let coinbase_tx1 = provide_anchor_reserves(&nodes); let coinbase_tx2 = provide_anchor_reserves(&nodes); + + let added_value = Amount::from_sat(initial_channel_value_sat * 2); + let change_script = ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()); + let fees = Amount::from_sat(321); + let initiator_contribution = SpliceContribution::splice_in( - Amount::from_sat(initial_channel_value_sat * 2), + added_value, vec![ FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), ], - Some(nodes[0].wallet_source.get_change_script().unwrap()), + Some(change_script.clone()), ); let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); + let expected_change = Amount::ONE_BTC * 2 - added_value - fees; + assert_eq!( + splice_tx.output.iter().find(|txout| txout.script_pubkey == change_script).unwrap().value, + expected_change, + ); + mine_transaction(&nodes[0], &splice_tx); mine_transaction(&nodes[1], &splice_tx); From 900ffda4792121d8dcceaf64f3fc722d8506d5f2 Mon Sep 17 00:00:00 2001 From: shaavan Date: Thu, 9 Oct 2025 23:00:40 +0530 Subject: [PATCH 097/242] Introduce Dummy BlindedPaymentTlv Dummy BlindedPaymentTlvs is an empty TLV inserted immediately before the actual ReceiveTlvs in a blinded path. Receivers treat these dummy hops as real hops, which prevents timing-based attacks. Allowing arbitrary dummy hops before the final ReceiveTlvs obscures the recipient's true position in the route and makes it harder for an onlooker to infer the destination, strengthening recipient privacy. --- lightning/src/blinded_path/payment.rs | 103 ++++++++++++++++++++------ lightning/src/ln/channelmanager.rs | 14 ++++ lightning/src/ln/msgs.rs | 27 ++++++- lightning/src/ln/onion_payment.rs | 16 ++++ lightning/src/ln/onion_utils.rs | 12 +++ 5 files changed, 149 insertions(+), 23 deletions(-) diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index 13ade222f5b..549eb38c38f 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -328,6 +328,37 @@ pub struct TrampolineForwardTlvs { pub next_blinding_override: Option, } +/// TLVs carried by a dummy hop within a blinded payment path. +/// +/// Dummy hops do not correspond to real forwarding decisions, but are processed +/// identically to real hops at the protocol level. The TLVs contained here define +/// the relay requirements and constraints that must be satisfied for the payment +/// to continue through this hop. +/// +/// By enforcing realistic relay semantics on dummy hops, the payment path remains +/// indistinguishable from a fully real route with respect to fees, CLTV deltas, and +/// validation behavior. 
+#[derive(Clone, Copy)] +pub struct DummyTlvs { + /// Relay requirements (fees and CLTV delta) that must be satisfied when + /// processing this dummy hop. + pub payment_relay: PaymentRelay, + /// Constraints that apply to the payment when relaying over this dummy hop. + pub payment_constraints: PaymentConstraints, +} + +impl Default for DummyTlvs { + fn default() -> Self { + let payment_relay = + PaymentRelay { cltv_expiry_delta: 0, fee_proportional_millionths: 0, fee_base_msat: 0 }; + + let payment_constraints = + PaymentConstraints { max_cltv_expiry: u32::MAX, htlc_minimum_msat: 0 }; + + Self { payment_relay, payment_constraints } + } +} + /// Data to construct a [`BlindedHop`] for receiving a payment. This payload is custom to LDK and /// may not be valid if received by another lightning implementation. #[derive(Clone, Debug)] @@ -346,6 +377,8 @@ pub struct ReceiveTlvs { pub(crate) enum BlindedPaymentTlvs { /// This blinded payment data is for a forwarding node. Forward(ForwardTlvs), + /// This blinded payment data is dummy and is to be peeled by the receiving node. + Dummy(DummyTlvs), /// This blinded payment data is for the receiving node. Receive(ReceiveTlvs), } @@ -363,13 +396,14 @@ pub(crate) enum BlindedTrampolineTlvs { // Used to include forward and receive TLVs in the same iterator for encoding. enum BlindedPaymentTlvsRef<'a> { Forward(&'a ForwardTlvs), + Dummy(&'a DummyTlvs), Receive(&'a ReceiveTlvs), } /// Parameters for relaying over a given [`BlindedHop`]. /// /// [`BlindedHop`]: crate::blinded_path::BlindedHop -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq)] pub struct PaymentRelay { /// Number of blocks subtracted from an incoming HTLC's `cltv_expiry` for this [`BlindedHop`]. pub cltv_expiry_delta: u16, @@ -383,7 +417,7 @@ pub struct PaymentRelay { /// Constraints for relaying over a given [`BlindedHop`]. /// /// [`BlindedHop`]: crate::blinded_path::BlindedHop -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq)] pub struct PaymentConstraints { /// The maximum total CLTV that is acceptable when relaying a payment over this [`BlindedHop`]. pub max_cltv_expiry: u32, @@ -512,6 +546,17 @@ impl Writeable for TrampolineForwardTlvs { } } +impl Writeable for DummyTlvs { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + encode_tlv_stream!(w, { + (10, self.payment_relay, required), + (12, self.payment_constraints, required), + (65539, (), required), + }); + Ok(()) + } +} + // Note: The `authentication` TLV field was removed in LDK v0.3 following // the introduction of `ReceiveAuthKey`-based authentication for inbound // `BlindedPaymentPath`s.
Because we do not support receiving to those @@ -532,6 +577,7 @@ impl<'a> Writeable for BlindedPaymentTlvsRef<'a> { fn write(&self, w: &mut W) -> Result<(), io::Error> { match self { Self::Forward(tlvs) => tlvs.write(w)?, + Self::Dummy(tlvs) => tlvs.write(w)?, Self::Receive(tlvs) => tlvs.write(w)?, } Ok(()) @@ -552,28 +598,41 @@ impl Readable for BlindedPaymentTlvs { (14, features, (option, encoding: (BlindedHopFeatures, WithoutLength))), (65536, payment_secret, option), (65537, payment_context, option), + (65539, is_dummy, option) }); - if let Some(short_channel_id) = scid { - if payment_secret.is_some() { - return Err(DecodeError::InvalidValue); - } - Ok(BlindedPaymentTlvs::Forward(ForwardTlvs { - short_channel_id, - payment_relay: payment_relay.ok_or(DecodeError::InvalidValue)?, - payment_constraints: payment_constraints.0.unwrap(), - next_blinding_override, - features: features.unwrap_or_else(BlindedHopFeatures::empty), - })) - } else { - if payment_relay.is_some() || features.is_some() { - return Err(DecodeError::InvalidValue); - } - Ok(BlindedPaymentTlvs::Receive(ReceiveTlvs { - payment_secret: payment_secret.ok_or(DecodeError::InvalidValue)?, - payment_constraints: payment_constraints.0.unwrap(), - payment_context: payment_context.ok_or(DecodeError::InvalidValue)?, - })) + match ( + scid, + next_blinding_override, + payment_relay, + features, + payment_secret, + payment_context, + is_dummy, + ) { + (Some(short_channel_id), next_override, Some(relay), features, None, None, None) => { + Ok(BlindedPaymentTlvs::Forward(ForwardTlvs { + short_channel_id, + payment_relay: relay, + payment_constraints: payment_constraints.0.unwrap(), + next_blinding_override: next_override, + features: features.unwrap_or_else(BlindedHopFeatures::empty), + })) + }, + (None, None, None, None, Some(secret), Some(context), None) => { + Ok(BlindedPaymentTlvs::Receive(ReceiveTlvs { + payment_secret: secret, + payment_constraints: payment_constraints.0.unwrap(), + payment_context: context, + })) + }, + (None, None, Some(relay), None, None, None, Some(())) => { + Ok(BlindedPaymentTlvs::Dummy(DummyTlvs { + payment_relay: relay, + payment_constraints: payment_constraints.0.unwrap(), + })) + }, + _ => return Err(DecodeError::InvalidValue), } } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 72585d69f80..aef57a66612 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5105,6 +5105,20 @@ where onion_utils::Hop::Forward { .. } | onion_utils::Hop::BlindedForward { .. } => { create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) }, + onion_utils::Hop::Dummy { .. } => { + debug_assert!( + false, + "Reached unreachable dummy-hop HTLC. Dummy hops are peeled in \ + `process_pending_update_add_htlcs`, and the resulting HTLC is \ + re-enqueued for processing. Hitting this means the peel-and-requeue \ + step was missed." + ); + return Err(InboundHTLCErr { + msg: "Failed to decode update add htlc onion", + reason: LocalHTLCFailureReason::InvalidOnionPayload, + err_data: Vec::new(), + }) + }, onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. 
} => { create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) }, diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 8e230fab1d9..1a7d52ebca8 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -32,7 +32,7 @@ use bitcoin::secp256k1::PublicKey; use bitcoin::{secp256k1, Transaction, Witness}; use crate::blinded_path::message::BlindedMessagePath; -use crate::blinded_path::payment::{BlindedPaymentTlvs, ForwardTlvs, ReceiveTlvs}; +use crate::blinded_path::payment::{BlindedPaymentTlvs, DummyTlvs, ForwardTlvs, ReceiveTlvs}; use crate::blinded_path::payment::{BlindedTrampolineTlvs, TrampolineForwardTlvs}; use crate::ln::onion_utils; use crate::ln::types::ChannelId; @@ -2336,6 +2336,11 @@ mod fuzzy_internal_msgs { pub intro_node_blinding_point: Option, pub next_blinding_override: Option, } + pub struct InboundOnionDummyPayload { + pub payment_relay: PaymentRelay, + pub payment_constraints: PaymentConstraints, + pub intro_node_blinding_point: Option, + } pub struct InboundOnionBlindedReceivePayload { pub sender_intended_htlc_amt_msat: u64, pub total_msat: u64, @@ -2355,6 +2360,7 @@ mod fuzzy_internal_msgs { Receive(InboundOnionReceivePayload), BlindedForward(InboundOnionBlindedForwardPayload), BlindedReceive(InboundOnionBlindedReceivePayload), + Dummy(InboundOnionDummyPayload), } pub struct InboundTrampolineForwardPayload { @@ -3694,6 +3700,25 @@ where next_blinding_override, })) }, + ChaChaDualPolyReadAdapter { + readable: + BlindedPaymentTlvs::Dummy(DummyTlvs { payment_relay, payment_constraints }), + used_aad, + } => { + if amt.is_some() + || cltv_value.is_some() || total_msat.is_some() + || keysend_preimage.is_some() + || invoice_request.is_some() + || !used_aad + { + return Err(DecodeError::InvalidValue); + } + Ok(Self::Dummy(InboundOnionDummyPayload { + payment_relay, + payment_constraints, + intro_node_blinding_point, + })) + }, ChaChaDualPolyReadAdapter { readable: BlindedPaymentTlvs::Receive(receive_tlvs), used_aad, diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index 1abe4330a25..c1d07f70486 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -149,6 +149,14 @@ pub(super) fn create_fwd_pending_htlc_info( (RoutingInfo::Direct { short_channel_id, new_packet_bytes, next_hop_hmac }, amt_to_forward, outgoing_cltv_value, intro_node_blinding_point, next_blinding_override) }, + onion_utils::Hop::Dummy { .. } => { + debug_assert!(false, "Dummy hop should have been peeled earlier"); + return Err(InboundHTLCErr { + msg: "Dummy Hop OnionHopData provided for us as an intermediary node", + reason: LocalHTLCFailureReason::InvalidOnionPayload, + err_data: Vec::new(), + }) + }, onion_utils::Hop::Receive { .. } | onion_utils::Hop::BlindedReceive { .. } => return Err(InboundHTLCErr { msg: "Final Node OnionHopData provided for us as an intermediary node", @@ -364,6 +372,14 @@ pub(super) fn create_recv_pending_htlc_info( msg: "Got blinded non final data with an HMAC of 0", }) }, + onion_utils::Hop::Dummy { .. } => { + debug_assert!(false, "Dummy hop should have been peeled earlier"); + return Err(InboundHTLCErr { + reason: LocalHTLCFailureReason::InvalidOnionBlinding, + err_data: vec![0; 32], + msg: "Got blinded non final data with an HMAC of 0", + }) + } onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. 
} => { return Err(InboundHTLCErr { reason: LocalHTLCFailureReason::InvalidOnionPayload, diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 18aa43e27c6..7e879542eef 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -2223,6 +2223,17 @@ pub(crate) enum Hop { /// Bytes of the onion packet we're forwarding. new_packet_bytes: [u8; ONION_DATA_LEN], }, + /// This onion payload is dummy, and needs to be peeled by us. + Dummy { + /// The TLVs for this dummy hop. + dummy_hop_data: msgs::InboundOnionDummyPayload, + /// Shared secret for decrypting the next-hop public key. + shared_secret: SharedSecret, + /// HMAC of the next hop's onion packet. + next_hop_hmac: [u8; 32], + /// Onion packet bytes after this dummy layer is peeled. + new_packet_bytes: [u8; ONION_DATA_LEN], + }, /// This onion payload was for us, not for forwarding to a next-hop. Contains information for /// verifying the incoming payment. Receive { @@ -2277,6 +2288,7 @@ impl Hop { match self { Hop::Forward { shared_secret, .. } => shared_secret, Hop::BlindedForward { shared_secret, .. } => shared_secret, + Hop::Dummy { shared_secret, .. } => shared_secret, Hop::TrampolineForward { outer_shared_secret, .. } => outer_shared_secret, Hop::TrampolineBlindedForward { outer_shared_secret, .. } => outer_shared_secret, Hop::Receive { shared_secret, .. } => shared_secret, From cad88af1784cf63f7da04bed3860c81ad2222293 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 17 Jan 2026 02:01:33 +0000 Subject: [PATCH 098/242] Free holding cells immediately rather than in message sending I noted in review for an unrelated PR that adding more per-channel logic in `ChannelManager::get_and_clear_pending_msg_events` really sucks for our performance, especially if it ends up hitting a sync monitor persistence. This made me wonder how far we actually are from not needing the `check_free_holding_cells` call that's currently there. Turns out, at least according to our functional test coverage, the answer is "not very far". Thus, here we drop it in favor of consistently calling a new util method on channels that might have the ability to release holding cell updates in the same lock where they change state, rather than waiting until `get_and_clear_pending_msg_events`. We still process async monitor events in `get_and_clear_pending_msg_events`, which can lead to channel (and monitor) updates, but that should only be the case for async persist applications, which then are likely to have fast `ChannelMonitorUpdate` in-line handling logic (cause it's async). --- lightning/src/ln/chanmon_update_fail_tests.rs | 25 +- lightning/src/ln/channelmanager.rs | 240 +++++++++++------- 2 files changed, 159 insertions(+), 106 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 57f0ca87d45..ff499d049d4 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -2784,17 +2784,13 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { } // If we finish updating the monitor, we should free the holding cell right away (this did - // not occur prior to #756). + // not occur prior to #756). This should result in a new monitor update.
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (mon_id, _) = get_latest_mon_update_id(&nodes[0], chan_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, mon_id); expect_payment_claimed!(nodes[0], payment_hash_0, 100_000); - - // New outbound messages should be generated immediately upon a call to - // get_and_clear_pending_msg_events (but not before). - check_added_monitors(&nodes[0], 0); - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); check_added_monitors(&nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); // Deliver the pending in-flight CS @@ -3556,12 +3552,10 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode } // The event processing should release the last RAA update. - check_added_monitors(&nodes[1], 1); - - // When we fetch the next update the message getter will generate the next update for nodes[2], - // generating a further monitor update. + // It should also generate the next update for nodes[2]. + check_added_monitors(&nodes[1], 2); let mut bs_htlc_fulfill = get_htlc_update_msgs(&nodes[1], &node_c_id); - check_added_monitors(&nodes[1], 1); + check_added_monitors(&nodes[1], 0); nodes[2] .node @@ -5142,13 +5136,12 @@ fn test_mpp_claim_to_holding_cell() { nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(chan_4_id, latest_id).unwrap(); // Once we process monitor events (in this case by checking for the `PaymentClaimed` event, the // RAA monitor update blocked above will be released. + // At the same time, the RAA monitor update completion will allow the C <-> D channel to + // generate its fulfill update. expect_payment_claimed!(nodes[3], paymnt_hash_1, 500_000); - check_added_monitors(&nodes[3], 1); - - // After the RAA monitor update completes, the C <-> D channel will be able to generate its - // fulfill updates as well. + check_added_monitors(&nodes[3], 2); let mut c_claim = get_htlc_update_msgs(&nodes[3], &node_c_id); - check_added_monitors(&nodes[3], 1); + check_added_monitors(&nodes[3], 0); // Finally, clear all the pending payments. let path = [&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]]; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1ab87a70c72..778897bc292 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3223,6 +3223,14 @@ pub struct PhantomRouteHints { pub real_node_pubkey: PublicKey, } +/// The return type of [`ChannelManager::check_free_peer_holding_cells`] +type FreeHoldingCellsResult = Vec<( + ChannelId, + PublicKey, + Option, + Vec<(HTLCSource, PaymentHash)>, +)>; + macro_rules! insert_short_channel_id { ($short_to_chan_info: ident, $channel: expr) => {{ if let Some(real_scid) = $channel.funding.get_short_channel_id() { @@ -3430,6 +3438,7 @@ macro_rules! 
process_events_body { } if !post_event_actions.is_empty() { + let _read_guard = $self.total_consistency_lock.read().unwrap(); $self.handle_post_event_actions(post_event_actions); // If we had some actions, go around again as we may have more events now processed_all_events = false; @@ -7115,10 +7124,6 @@ where } self.forward_htlcs(&mut phantom_receives); - // Freeing the holding cell here is relatively redundant - in practice we'll do it when we - // next get a `get_and_clear_pending_msg_events` call, but some tests rely on it, and it's - // nice to do the work now if we can rather than while we're trying to get messages in the - // network stack. if self.check_free_holding_cells() { should_persist = NotifyOption::DoPersist; } @@ -8316,10 +8321,21 @@ where self.check_refresh_async_receive_offer_cache(true); - // Technically we don't need to do this here, but if we have holding cell entries in a - // channel that need freeing, it's better to do that here and block a background task - // than block the message queueing pipeline. if self.check_free_holding_cells() { + // While we try to ensure we clear holding cells immediately, it's possible we miss + // one somewhere. Thus, it's useful to try regularly to ensure even if something + // gets stuck it's only for a minute or so. Still, good to panic here in debug to + // ensure we discover the missing free. + // Note that in cases where we had a fee update in the loop above, we expect to + // need to free holding cells now, thus we only report an error if `should_persist` + // has not been updated to `DoPersist`. + if should_persist != NotifyOption::DoPersist { + debug_assert!(false, "Holding cells are cleared immediately"); + log_error!( + self.logger, + "Holding cells were freed in last-ditch cleanup. Please report this (performance) bug." + ); + } should_persist = NotifyOption::DoPersist; } @@ -10199,10 +10215,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ chan, ); + let holding_cell_res = self.check_free_peer_holding_cells(peer_state); + mem::drop(peer_state_lock); mem::drop(per_peer_state); self.handle_post_monitor_update_chan_resume(completion_data); + self.handle_holding_cell_free_result(holding_cell_res); } else { log_trace!(logger, "Channel is open but not awaiting update"); } @@ -12246,7 +12265,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ #[rustfmt::skip] fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { - let (inferred_splice_locked, need_lnd_workaround) = { + let (inferred_splice_locked, need_lnd_workaround, holding_cell_res) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -12307,7 +12326,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ peer_state.pending_msg_events.push(upd); } - (responses.inferred_splice_locked, need_lnd_workaround) + let holding_cell_res = self.check_free_peer_holding_cells(peer_state); + (responses.inferred_splice_locked, need_lnd_workaround, holding_cell_res) } else { return try_channel_entry!(self, peer_state, Err(ChannelError::close( "Got a channel_reestablish message for an unfunded channel!".into())), chan_entry); }; @@ -12350,6 +12370,8 @@ This indicates a bug inside LDK.
Please report this error at https://github.com/ } }; + self.handle_holding_cell_free_result(holding_cell_res); + if let Some(channel_ready_msg) = need_lnd_workaround { self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?; } @@ -12686,70 +12708,83 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ has_pending_monitor_events } + fn handle_holding_cell_free_result(&self, result: FreeHoldingCellsResult) { + debug_assert_ne!( + self.total_consistency_lock.held_by_thread(), + LockHeldState::NotHeldByThread + ); + for (chan_id, cp_node_id, post_update_data, failed_htlcs) in result { + if let Some(data) = post_update_data { + self.handle_post_monitor_update_chan_resume(data); + } + + self.fail_holding_cell_htlcs(failed_htlcs, chan_id, &cp_node_id); + self.needs_persist_flag.store(true, Ordering::Release); + self.event_persist_notifier.notify(); + } + } + + /// Frees all holding cells in all the channels for a peer. + /// + /// Includes elements in the returned Vec only for channels which changed (implying persistence + /// is required). + #[must_use] + fn check_free_peer_holding_cells( + &self, peer_state: &mut PeerState, + ) -> FreeHoldingCellsResult { + debug_assert_ne!( + self.total_consistency_lock.held_by_thread(), + LockHeldState::NotHeldByThread + ); + + let mut updates = Vec::new(); + let funded_chan_iter = peer_state + .channel_by_id + .iter_mut() + .filter_map(|(chan_id, chan)| chan.as_funded_mut().map(|chan| (chan_id, chan))); + for (chan_id, chan) in funded_chan_iter { + let (monitor_opt, holding_cell_failed_htlcs) = chan.maybe_free_holding_cell_htlcs( + &self.fee_estimator, + &&WithChannelContext::from(&self.logger, &chan.context, None), + ); + if monitor_opt.is_some() || !holding_cell_failed_htlcs.is_empty() { + let update_res = monitor_opt + .map(|monitor_update| { + self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + chan.funding.get_funding_txo().unwrap(), + monitor_update, + ) + }) + .flatten(); + let cp_node_id = chan.context.get_counterparty_node_id(); + updates.push((*chan_id, cp_node_id, update_res, holding_cell_failed_htlcs)); + } + } + updates + } + /// Check the holding cell in each channel and free any pending HTLCs in them if possible. /// Returns whether there were any updates such as if pending HTLCs were freed or a monitor /// update was applied. fn check_free_holding_cells(&self) -> bool { - let mut has_monitor_update = false; - let mut failed_htlcs = Vec::new(); + let mut unlocked_results = Vec::new(); - // Walk our list of channels and find any that need to update. Note that when we do find an - // update, if it includes actions that must be taken afterwards, we have to drop the - // per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we - // manage to go through all our peers without finding a single channel to update. 
- 'peer_loop: loop { + { let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { - 'chan_loop: loop { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state: &mut PeerState<_> = &mut *peer_state_lock; - for (channel_id, chan) in - peer_state.channel_by_id.iter_mut().filter_map(|(chan_id, chan)| { - chan.as_funded_mut().map(|chan| (chan_id, chan)) - }) { - let counterparty_node_id = chan.context.get_counterparty_node_id(); - let funding_txo = chan.funding.get_funding_txo(); - let (monitor_opt, holding_cell_failed_htlcs) = chan - .maybe_free_holding_cell_htlcs( - &self.fee_estimator, - &&WithChannelContext::from(&self.logger, &chan.context, None), - ); - if !holding_cell_failed_htlcs.is_empty() { - failed_htlcs.push(( - holding_cell_failed_htlcs, - *channel_id, - counterparty_node_id, - )); - } - if let Some(monitor_update) = monitor_opt { - has_monitor_update = true; - - if let Some(data) = self.handle_new_monitor_update( - &mut peer_state.in_flight_monitor_updates, - &mut peer_state.monitor_update_blocked_actions, - &mut peer_state.pending_msg_events, - peer_state.is_connected, - chan, - funding_txo.unwrap(), - monitor_update, - ) { - mem::drop(peer_state_lock); - mem::drop(per_peer_state); - self.handle_post_monitor_update_chan_resume(data); - } - continue 'peer_loop; - } - } - break 'chan_loop; - } + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state: &mut PeerState<_> = &mut *peer_state_lock; + unlocked_results.append(&mut self.check_free_peer_holding_cells(peer_state)); } - break 'peer_loop; } - let has_update = has_monitor_update || !failed_htlcs.is_empty(); - for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) { - self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id); - } + let has_update = !unlocked_results.is_empty(); + self.handle_holding_cell_free_result(unlocked_results); has_update } @@ -13081,27 +13116,32 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ #[cfg(any(test, fuzzing))] #[rustfmt::skip] pub fn exit_quiescence(&self, counterparty_node_id: &PublicKey, channel_id: &ChannelId) -> Result { - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { - err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") - })?; - let mut peer_state = peer_state_mutex.lock().unwrap(); - let initiator = match peer_state.channel_by_id.entry(*channel_id) { - hash_map::Entry::Occupied(mut chan_entry) => { - if let Some(chan) = chan_entry.get_mut().as_funded_mut() { - chan.exit_quiescence() - } else { - return Err(APIError::APIMisuseError { - err: format!("Unfunded channel {} cannot be quiescent", channel_id), - }) - } - }, - hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id), - }), + let _read_guard = self.total_consistency_lock.read().unwrap(); + + let initiator = { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex = per_peer_state.get(counterparty_node_id) + .ok_or_else(|| APIError::ChannelUnavailable { + err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") + })?; + let mut peer_state = peer_state_mutex.lock().unwrap(); + match peer_state.channel_by_id.entry(*channel_id) { + hash_map::Entry::Occupied(mut chan_entry) => { + if let Some(chan) = chan_entry.get_mut().as_funded_mut() { + chan.exit_quiescence() + } else { + return Err(APIError::APIMisuseError { + err: format!("Unfunded channel {} cannot be quiescent", channel_id), + }) + } + }, + hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { + err: format!("Channel with id {} not found for the passed counterparty node_id {}", + channel_id, counterparty_node_id), + }), + } }; + self.check_free_holding_cells(); Ok(initiator) } @@ -14165,7 +14205,7 @@ where if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() { log_debug!(logger, "Unlocking monitor updating and updating monitor", ); - if let Some(data) = self.handle_new_monitor_update( + let post_update_data = self.handle_new_monitor_update( &mut peer_state.in_flight_monitor_updates, &mut peer_state.monitor_update_blocked_actions, &mut peer_state.pending_msg_events, @@ -14173,11 +14213,18 @@ where chan, channel_funding_outpoint, monitor_update, - ) { - mem::drop(peer_state_lck); - mem::drop(per_peer_state); + ); + let holding_cell_res = self.check_free_peer_holding_cells(peer_state); + + mem::drop(peer_state_lck); + mem::drop(per_peer_state); + + if let Some(data) = post_update_data { self.handle_post_monitor_update_chan_resume(data); } + + self.handle_holding_cell_free_result(holding_cell_res); + if further_update_exists { // If there are more `ChannelMonitorUpdate`s to process, restart at the // top of the loop. @@ -14596,19 +14643,32 @@ where PersistenceNotifierGuard::optionally_notify(self, || { let mut result = NotifyOption::SkipPersistNoEvents; + // This method is quite performance-sensitive. Not only is it called very often, but it + // *is* the critical path between generating a message for a peer and giving it to the + // `PeerManager` to send. 
Thus, we should avoid adding any more logic here than we + // need, especially anything that might end up causing I/O (like a + // `ChannelMonitorUpdate`)! + // TODO: This behavior should be documented. It's unintuitive that we query // ChannelMonitors when clearing other events. if self.process_pending_monitor_events() { result = NotifyOption::DoPersist; } - if self.check_free_holding_cells() { - result = NotifyOption::DoPersist; - } if self.maybe_generate_initial_closing_signed() { result = NotifyOption::DoPersist; } + #[cfg(test)] + if self.check_free_holding_cells() { + // In tests, we want to ensure that we never forget to free holding cells + // immediately, so we check it here. + // Note that we can't turn this on for `debug_assertions` because there's a race in + // (at least) the fee-update logic in `timer_tick_occurred` which can lead to us + // freeing holding cells here while it's running. + debug_assert!(false, "Holding cells should always be auto-free'd"); + } + // Quiescence is an in-memory protocol, so we don't have to persist because of it. self.maybe_send_stfu(); From 923949bcc0d8eb6b185043ee150641a6ed5b7ddb Mon Sep 17 00:00:00 2001 From: shaavan Date: Thu, 9 Oct 2025 23:09:50 +0530 Subject: [PATCH 099/242] Introduce Dummy Hop support in Blinded Path Constructor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a new constructor for blinded paths that allows specifying the number of dummy hops. This enables users to insert arbitrary hops before the real destination, enhancing privacy by making it harder to infer the sender–receiver distance or identify the final destination. Lays the groundwork for future use of dummy hops in blinded path construction. --- lightning/src/blinded_path/payment.rs | 119 ++++++++++++++++++++++++-- 1 file changed, 112 insertions(+), 7 deletions(-) diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index 549eb38c38f..f0bf3f92c3e 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -121,6 +121,61 @@ impl BlindedPaymentPath { local_node_receive_key: ReceiveAuthKey, payee_tlvs: ReceiveTlvs, htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, secp_ctx: &Secp256k1, ) -> Result + where + ES::Target: EntropySource, + { + BlindedPaymentPath::new_inner( + intermediate_nodes, + payee_node_id, + local_node_receive_key, + &[], + payee_tlvs, + htlc_maximum_msat, + min_final_cltv_expiry_delta, + entropy_source, + secp_ctx, + ) + } + + /// Same as [`BlindedPaymentPath::new`], but allows specifying a number of dummy hops. + /// + /// Dummy TLVs allow callers to override the payment relay values used for dummy hops. + /// Any additional fees introduced by these dummy hops are ultimately paid to the final + /// recipient as part of the total amount. + /// + /// This improves privacy by making path-length analysis based on fee and CLTV delta + /// values less reliable. + /// + /// TODO: Add end-to-end tests validating fee aggregation, CLTV deltas, and + /// HTLC bounds when dummy hops are present, before exposing this API publicly.
+ pub(crate) fn new_with_dummy_hops( + intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, + dummy_tlvs: &[DummyTlvs], local_node_receive_key: ReceiveAuthKey, payee_tlvs: ReceiveTlvs, + htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, + secp_ctx: &Secp256k1, + ) -> Result + where + ES::Target: EntropySource, + { + BlindedPaymentPath::new_inner( + intermediate_nodes, + payee_node_id, + local_node_receive_key, + dummy_tlvs, + payee_tlvs, + htlc_maximum_msat, + min_final_cltv_expiry_delta, + entropy_source, + secp_ctx, + ) + } + + fn new_inner( + intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, + local_node_receive_key: ReceiveAuthKey, dummy_tlvs: &[DummyTlvs], payee_tlvs: ReceiveTlvs, + htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, + secp_ctx: &Secp256k1, + ) -> Result where ES::Target: EntropySource, { @@ -133,6 +188,7 @@ impl BlindedPaymentPath { let blinded_payinfo = compute_payinfo( intermediate_nodes, + dummy_tlvs, &payee_tlvs, htlc_maximum_msat, min_final_cltv_expiry_delta, @@ -145,6 +201,7 @@ impl BlindedPaymentPath { secp_ctx, intermediate_nodes, payee_node_id, + dummy_tlvs, payee_tlvs, &blinding_secret, local_node_receive_key, @@ -394,6 +451,7 @@ pub(crate) enum BlindedTrampolineTlvs { } // Used to include forward and receive TLVs in the same iterator for encoding. +#[derive(Clone)] enum BlindedPaymentTlvsRef<'a> { Forward(&'a ForwardTlvs), Dummy(&'a DummyTlvs), @@ -679,21 +737,46 @@ pub(crate) const PAYMENT_PADDING_ROUND_OFF: usize = 30; /// Construct blinded payment hops for the given `intermediate_nodes` and payee info. pub(super) fn blinded_hops( secp_ctx: &Secp256k1, intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, - payee_tlvs: ReceiveTlvs, session_priv: &SecretKey, local_node_receive_key: ReceiveAuthKey, + dummy_tlvs: &[DummyTlvs], payee_tlvs: ReceiveTlvs, session_priv: &SecretKey, + local_node_receive_key: ReceiveAuthKey, ) -> Vec { let pks = intermediate_nodes .iter() .map(|node| (node.node_id, None)) + .chain(dummy_tlvs.iter().map(|_| (payee_node_id, Some(local_node_receive_key)))) .chain(core::iter::once((payee_node_id, Some(local_node_receive_key)))); let tlvs = intermediate_nodes .iter() .map(|node| BlindedPaymentTlvsRef::Forward(&node.tlvs)) + .chain(dummy_tlvs.iter().map(|tlvs| BlindedPaymentTlvsRef::Dummy(tlvs))) .chain(core::iter::once(BlindedPaymentTlvsRef::Receive(&payee_tlvs))); let path = pks.zip( tlvs.map(|tlv| BlindedPathWithPadding { tlvs: tlv, round_off: PAYMENT_PADDING_ROUND_OFF }), ); + // Debug invariant: all non-final hops must have identical serialized size. 
+ #[cfg(debug_assertions)] + { + let mut iter = path.clone(); + if let Some((_, first)) = iter.next() { + let remaining = iter.clone().count(); // includes intermediate + final + + // At least one intermediate hop + if remaining > 1 { + let expected = first.serialized_length(); + + // skip final hop: take(remaining - 1) + for (_, hop) in iter.take(remaining - 1) { + debug_assert!( + hop.serialized_length() == expected, + "All intermediate blinded hops must have identical serialized size" + ); + } + } + } + } + utils::construct_blinded_hops(secp_ctx, path, session_priv) } @@ -753,14 +836,22 @@ where } pub(super) fn compute_payinfo( - intermediate_nodes: &[PaymentForwardNode], payee_tlvs: &ReceiveTlvs, + intermediate_nodes: &[PaymentForwardNode], dummy_tlvs: &[DummyTlvs], payee_tlvs: &ReceiveTlvs, payee_htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, ) -> Result { - let (aggregated_base_fee, aggregated_prop_fee) = - compute_aggregated_base_prop_fee(intermediate_nodes.iter().map(|node| RoutingFees { + let routing_fees = intermediate_nodes + .iter() + .map(|node| RoutingFees { base_msat: node.tlvs.payment_relay.fee_base_msat, proportional_millionths: node.tlvs.payment_relay.fee_proportional_millionths, - }))?; + }) + .chain(dummy_tlvs.iter().map(|tlvs| RoutingFees { + base_msat: tlvs.payment_relay.fee_base_msat, + proportional_millionths: tlvs.payment_relay.fee_proportional_millionths, + })); + + let (aggregated_base_fee, aggregated_prop_fee) = + compute_aggregated_base_prop_fee(routing_fees)?; let mut htlc_minimum_msat: u64 = 1; let mut htlc_maximum_msat: u64 = 21_000_000 * 100_000_000 * 1_000; // Total bitcoin supply @@ -789,6 +880,16 @@ pub(super) fn compute_payinfo( ) .ok_or(())?; // If underflow occurs, we cannot send to this hop without exceeding their max } + for dummy_tlvs in dummy_tlvs.iter() { + cltv_expiry_delta = + cltv_expiry_delta.checked_add(dummy_tlvs.payment_relay.cltv_expiry_delta).ok_or(())?; + + htlc_minimum_msat = amt_to_forward_msat( + core::cmp::max(dummy_tlvs.payment_constraints.htlc_minimum_msat, htlc_minimum_msat), + &dummy_tlvs.payment_relay, + ) + .unwrap_or(1); // If underflow occurs, we definitely reached this node's min + } htlc_minimum_msat = core::cmp::max(payee_tlvs.payment_constraints.htlc_minimum_msat, htlc_minimum_msat); htlc_maximum_msat = core::cmp::min(payee_htlc_maximum_msat, htlc_maximum_msat); @@ -933,7 +1034,7 @@ mod tests { }; let htlc_maximum_msat = 100_000; let blinded_payinfo = - super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat, 12) + super::compute_payinfo(&intermediate_nodes[..], &[], &recv_tlvs, htlc_maximum_msat, 12) .unwrap(); assert_eq!(blinded_payinfo.fee_base_msat, 201); assert_eq!(blinded_payinfo.fee_proportional_millionths, 1001); @@ -950,7 +1051,7 @@ mod tests { payment_context: PaymentContext::Bolt12Refund(Bolt12RefundContext {}), }; let blinded_payinfo = - super::compute_payinfo(&[], &recv_tlvs, 4242, TEST_FINAL_CLTV as u16).unwrap(); + super::compute_payinfo(&[], &[], &recv_tlvs, 4242, TEST_FINAL_CLTV as u16).unwrap(); assert_eq!(blinded_payinfo.fee_base_msat, 0); assert_eq!(blinded_payinfo.fee_proportional_millionths, 0); assert_eq!(blinded_payinfo.cltv_expiry_delta, TEST_FINAL_CLTV as u16); @@ -1009,6 +1110,7 @@ mod tests { let htlc_maximum_msat = 100_000; let blinded_payinfo = super::compute_payinfo( &intermediate_nodes[..], + &[], &recv_tlvs, htlc_maximum_msat, TEST_FINAL_CLTV as u16, @@ -1068,6 +1170,7 @@ mod tests { let htlc_minimum_msat = 3798; assert!(super::compute_payinfo( 
&intermediate_nodes[..], + &[], &recv_tlvs, htlc_minimum_msat - 1, TEST_FINAL_CLTV as u16 @@ -1077,6 +1180,7 @@ mod tests { let htlc_maximum_msat = htlc_minimum_msat + 1; let blinded_payinfo = super::compute_payinfo( &intermediate_nodes[..], + &[], &recv_tlvs, htlc_maximum_msat, TEST_FINAL_CLTV as u16, @@ -1137,6 +1241,7 @@ mod tests { let blinded_payinfo = super::compute_payinfo( &intermediate_nodes[..], + &[], &recv_tlvs, 10_000, TEST_FINAL_CLTV as u16, From d2def54da241d4e740d19ee210f024adeb6cac53 Mon Sep 17 00:00:00 2001 From: shaavan Date: Mon, 20 Oct 2025 17:00:05 +0530 Subject: [PATCH 100/242] Introduce Payment Dummy Hop parsing mechanism --- lightning/src/blinded_path/payment.rs | 54 +++++++++++----------- lightning/src/ln/channelmanager.rs | 57 +++++++++++++++++++++++- lightning/src/ln/onion_payment.rs | 43 +++++++++++++++++- lightning/src/ln/onion_utils.rs | 64 ++++++++++++++++++++++++++- 4 files changed, 189 insertions(+), 29 deletions(-) diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index f0bf3f92c3e..b68be811cb4 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -33,7 +33,6 @@ use crate::util::ser::{ Writeable, Writer, }; -use core::mem; use core::ops::Deref; #[allow(unused_imports)] @@ -248,28 +247,31 @@ impl BlindedPaymentPath { NL::Target: NodeIdLookUp, T: secp256k1::Signing + secp256k1::Verification, { - match self.decrypt_intro_payload::(node_signer) { - Ok(( - BlindedPaymentTlvs::Forward(ForwardTlvs { short_channel_id, .. }), - control_tlvs_ss, - )) => { - let next_node_id = match node_id_lookup.next_node_id(short_channel_id) { - Some(node_id) => node_id, - None => return Err(()), - }; - let mut new_blinding_point = onion_utils::next_hop_pubkey( - secp_ctx, - self.inner_path.blinding_point, - control_tlvs_ss.as_ref(), - ) - .map_err(|_| ())?; - mem::swap(&mut self.inner_path.blinding_point, &mut new_blinding_point); - self.inner_path.introduction_node = IntroductionNode::NodeId(next_node_id); - self.inner_path.blinded_hops.remove(0); - Ok(()) - }, - _ => Err(()), - } + let (next_node_id, control_tlvs_ss) = + match self.decrypt_intro_payload::(node_signer).map_err(|_| ())? { + (BlindedPaymentTlvs::Forward(ForwardTlvs { short_channel_id, .. 
}), ss) => { + let node_id = node_id_lookup.next_node_id(short_channel_id).ok_or(())?; + (node_id, ss) + }, + (BlindedPaymentTlvs::Dummy(_), ss) => { + let node_id = node_signer.get_node_id(Recipient::Node)?; + (node_id, ss) + }, + _ => return Err(()), + }; + + let new_blinding_point = onion_utils::next_hop_pubkey( + secp_ctx, + self.inner_path.blinding_point, + control_tlvs_ss.as_ref(), + ) + .map_err(|_| ())?; + + self.inner_path.blinding_point = new_blinding_point; + self.inner_path.introduction_node = IntroductionNode::NodeId(next_node_id); + self.inner_path.blinded_hops.remove(0); + + Ok(()) } pub(crate) fn decrypt_intro_payload( @@ -291,9 +293,9 @@ impl BlindedPaymentPath { .map_err(|_| ())?; match (&readable, used_aad) { - (BlindedPaymentTlvs::Forward(_), false) | (BlindedPaymentTlvs::Receive(_), true) => { - Ok((readable, control_tlvs_ss)) - }, + (BlindedPaymentTlvs::Forward(_), false) + | (BlindedPaymentTlvs::Dummy(_), true) + | (BlindedPaymentTlvs::Receive(_), true) => Ok((readable, control_tlvs_ss)), _ => Err(()), } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index aef57a66612..b55b1798d45 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4974,6 +4974,11 @@ where ) -> Result<(), LocalHTLCFailureReason> { let outgoing_scid = match next_packet_details.outgoing_connector { HopConnector::ShortChannelId(scid) => scid, + HopConnector::Dummy => { + // Dummy hops are only used for path padding and must not reach HTLC processing. + debug_assert!(false, "Dummy hop reached HTLC handling."); + return Err(LocalHTLCFailureReason::InvalidOnionPayload); + } HopConnector::Trampoline(_) => { return Err(LocalHTLCFailureReason::InvalidTrampolineForward); } @@ -6878,6 +6883,7 @@ where fn process_pending_update_add_htlcs(&self) -> bool { let mut should_persist = false; let mut decode_update_add_htlcs = new_hash_map(); + let mut dummy_update_add_htlcs = new_hash_map(); mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap()); let get_htlc_failure_type = |outgoing_scid_opt: Option, payment_hash: PaymentHash| { @@ -6941,7 +6947,36 @@ where &*self.logger, &self.secp_ctx, ) { - Ok(decoded_onion) => decoded_onion, + Ok(decoded_onion) => match decoded_onion { + ( + onion_utils::Hop::Dummy { + dummy_hop_data, + next_hop_hmac, + new_packet_bytes, + .. + }, + Some(next_packet_details), + ) => { + let new_update_add_htlc = + onion_utils::peel_dummy_hop_update_add_htlc( + update_add_htlc, + dummy_hop_data, + next_hop_hmac, + new_packet_bytes, + next_packet_details, + &*self.node_signer, + &self.secp_ctx, + ); + + dummy_update_add_htlcs + .entry(incoming_scid_alias) + .or_insert_with(Vec::new) + .push(new_update_add_htlc); + + continue; + }, + _ => decoded_onion, + }, Err((htlc_fail, reason)) => { let failure_type = HTLCHandlingFailureType::InvalidOnion; @@ -6954,6 +6989,13 @@ where let outgoing_scid_opt = next_packet_details_opt.as_ref().and_then(|d| match d.outgoing_connector { HopConnector::ShortChannelId(scid) => Some(scid), + HopConnector::Dummy => { + debug_assert!( + false, + "Dummy hops must never be processed at this stage." + ); + None + }, HopConnector::Trampoline(_) => None, }); let shared_secret = next_hop.shared_secret().secret_bytes(); @@ -7097,6 +7139,19 @@ where )); } } + + // Merge peeled dummy HTLCs into the existing decode queue so they can be + // processed in the next iteration. We avoid replacing the whole queue + // (e.g. 
via mem::swap) because other threads may have enqueued new HTLCs + // meanwhile; merging preserves everything safely. + if !dummy_update_add_htlcs.is_empty() { + let mut decode_update_add_htlc_source = self.decode_update_add_htlcs.lock().unwrap(); + + for (incoming_scid_alias, htlcs) in dummy_update_add_htlcs.into_iter() { + decode_update_add_htlc_source.entry(incoming_scid_alias).or_default().extend(htlcs); + } + } + should_persist } diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index c1d07f70486..9e8672ab48c 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -494,7 +494,7 @@ where L::Target: Logger, { let (hop, next_packet_details_opt) = - decode_incoming_update_add_htlc_onion(msg, node_signer, logger, secp_ctx + decode_incoming_update_add_htlc_onion(msg, &*node_signer, &*logger, secp_ctx ).map_err(|(msg, failure_reason)| { let (reason, err_data) = match msg { HTLCFailureMsg::Malformed(_) => (failure_reason, Vec::new()), @@ -532,6 +532,29 @@ where // onion here and check it. create_fwd_pending_htlc_info(msg, hop, shared_secret.secret_bytes(), Some(next_packet_pubkey))? }, + onion_utils::Hop::Dummy { dummy_hop_data, next_hop_hmac, new_packet_bytes, .. } => { + let next_packet_details = match next_packet_details_opt { + Some(next_packet_details) => next_packet_details, + // Dummy Hops should always include the next hop details + None => return Err(InboundHTLCErr { + msg: "Failed to decode update add htlc onion", + reason: LocalHTLCFailureReason::InvalidOnionPayload, + err_data: Vec::new(), + }), + }; + + let new_update_add_htlc = onion_utils::peel_dummy_hop_update_add_htlc( + msg, + dummy_hop_data, + next_hop_hmac, + new_packet_bytes, + next_packet_details, + &*node_signer, + secp_ctx + ); + + peel_payment_onion(&new_update_add_htlc, node_signer, logger, secp_ctx, cur_height, allow_skimmed_fees)? + }, _ => { let shared_secret = hop.shared_secret().secret_bytes(); create_recv_pending_htlc_info( @@ -545,6 +568,8 @@ where pub(super) enum HopConnector { // scid-based routing ShortChannelId(u64), + // Dummy hop for path padding + Dummy, // Trampoline-based routing #[allow(unused)] Trampoline(PublicKey), @@ -649,6 +674,22 @@ where outgoing_cltv_value }) } + onion_utils::Hop::Dummy { dummy_hop_data: msgs::InboundOnionDummyPayload { ref payment_relay, ref payment_constraints, .. }, shared_secret, .. } => { + let (amt_to_forward, outgoing_cltv_value) = match check_blinded_forward( + msg.amount_msat, msg.cltv_expiry, &payment_relay, &payment_constraints, &BlindedHopFeatures::empty() + ) { + Ok((amt, cltv)) => (amt, cltv), + Err(()) => { + return encode_relay_error("Underflow calculating outbound amount or cltv value for blinded forward", + LocalHTLCFailureReason::InvalidOnionBlinding, shared_secret.secret_bytes(), None, &[0; 32]); + } + }; + + let next_packet_pubkey = onion_utils::next_hop_pubkey(secp_ctx, + msg.onion_routing_packet.public_key.unwrap(), &shared_secret.secret_bytes()); + + Some(NextPacketDetails { next_packet_pubkey, outgoing_connector: HopConnector::Dummy, outgoing_amt_msat: amt_to_forward, outgoing_cltv_value }) + } onion_utils::Hop::TrampolineForward { next_trampoline_hop_data: msgs::InboundTrampolineForwardPayload { amt_to_forward, outgoing_cltv_value, next_trampoline }, trampoline_shared_secret, incoming_trampoline_public_key, .. 
} => { let next_trampoline_packet_pubkey = onion_utils::next_hop_pubkey(secp_ctx, incoming_trampoline_public_key, &trampoline_shared_secret.secret_bytes()); diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 7e879542eef..b82c60a896d 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -14,7 +14,8 @@ use crate::crypto::streams::ChaChaReader; use crate::events::HTLCHandlingFailureReason; use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS; use crate::ln::channelmanager::{HTLCSource, RecipientOnionFields}; -use crate::ln::msgs::{self, DecodeError}; +use crate::ln::msgs::{self, DecodeError, InboundOnionDummyPayload, OnionPacket, UpdateAddHTLC}; +use crate::ln::onion_payment::{HopConnector, NextPacketDetails}; use crate::offers::invoice_request::InvoiceRequest; use crate::routing::gossip::NetworkUpdate; use crate::routing::router::{BlindedTail, Path, RouteHop, RouteParameters, TrampolineHop}; @@ -2356,6 +2357,12 @@ where new_packet_bytes, }) }, + msgs::InboundOnionPayload::Dummy(dummy_hop_data) => Ok(Hop::Dummy { + dummy_hop_data, + shared_secret, + next_hop_hmac, + new_packet_bytes, + }), _ => { if blinding_point.is_some() { return Err(OnionDecodeErr::Malformed { @@ -2533,6 +2540,61 @@ where } } +/// Peels a single dummy hop from an inbound `UpdateAddHTLC` by reconstructing the next +/// onion packet and HTLC state. +/// +/// This helper is used when processing dummy hops in a blinded path. Dummy hops are not +/// forwarded on the network; instead, their onion layer is removed locally and a new +/// `UpdateAddHTLC` is constructed with the next onion packet and updated amount/CLTV +/// values. +/// +/// This function performs no validation and does not enqueue or forward the HTLC. +/// It only reconstructs the next `UpdateAddHTLC` for further local processing. +pub(super) fn peel_dummy_hop_update_add_htlc( + msg: &UpdateAddHTLC, dummy_hop_data: InboundOnionDummyPayload, next_hop_hmac: [u8; 32], + new_packet_bytes: [u8; ONION_DATA_LEN], next_packet_details: NextPacketDetails, + node_signer: NS, secp_ctx: &Secp256k1, +) -> UpdateAddHTLC +where + NS::Target: NodeSigner, +{ + let NextPacketDetails { + next_packet_pubkey, + outgoing_amt_msat, + outgoing_connector, + outgoing_cltv_value, + } = next_packet_details; + + debug_assert!( + matches!(outgoing_connector, HopConnector::Dummy), + "Dummy hop must always map to HopConnector::Dummy" + ); + + let next_blinding_point = dummy_hop_data + .intro_node_blinding_point + .or(msg.blinding_point) + .and_then(|blinding_point| { + let ss = node_signer.ecdh(Recipient::Node, &blinding_point, None).ok()?.secret_bytes(); + + next_hop_pubkey(secp_ctx, blinding_point, &ss).ok() + }); + + let new_onion_packet = OnionPacket { + version: 0, + public_key: next_packet_pubkey, + hop_data: new_packet_bytes, + hmac: next_hop_hmac, + }; + + UpdateAddHTLC { + onion_routing_packet: new_onion_packet, + blinding_point: next_blinding_point, + amount_msat: outgoing_amt_msat, + cltv_expiry: outgoing_cltv_value, + ..msg.clone() + } +} + /// Build a payment onion, returning the first hop msat and cltv values as well. /// `cur_block_height` should be set to the best known block height + 1. pub fn create_payment_onion( From 6abf36e1366115d499821f039bbfb86336ce0b34 Mon Sep 17 00:00:00 2001 From: shaavan Date: Tue, 21 Oct 2025 18:58:53 +0530 Subject: [PATCH 101/242] Update PaymentPath and ClaimAlongRoute arguments Upcoming commits will need the ability to specify whether a blinded path contains dummy hops.
This change adds that support to the testing framework ahead of time, so later tests can express dummy-hop scenarios explicitly. --- lightning/src/ln/functional_test_utils.rs | 62 +++++++++++++++++++++-- lightning/src/ln/offers_tests.rs | 48 +++++++++++++----- 2 files changed, 93 insertions(+), 17 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index ff33d7508b5..6d607eb2771 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -10,6 +10,7 @@ //! A bunch of useful utilities for building networks of nodes and exchanging messages between //! nodes for functional tests. +use crate::blinded_path::payment::DummyTlvs; use crate::chain::channelmonitor::ChannelMonitor; use crate::chain::transaction::OutPoint; use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; @@ -3435,6 +3436,7 @@ fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { pub struct PassAlongPathArgs<'a, 'b, 'c, 'd> { pub origin_node: &'a Node<'b, 'c, 'd>, pub expected_path: &'a [&'a Node<'b, 'c, 'd>], + pub dummy_tlvs: Vec, pub recv_value: u64, pub payment_hash: PaymentHash, pub payment_secret: Option, @@ -3456,6 +3458,7 @@ impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> { Self { origin_node, expected_path, + dummy_tlvs: vec![], recv_value, payment_hash, payment_secret: None, @@ -3503,12 +3506,17 @@ impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> { self.expected_failure = Some(failure); self } + pub fn with_dummy_tlvs(mut self, dummy_tlvs: &[DummyTlvs]) -> Self { + self.dummy_tlvs = dummy_tlvs.to_vec(); + self + } } pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option { let PassAlongPathArgs { origin_node, expected_path, + dummy_tlvs, recv_value, payment_hash: our_payment_hash, payment_secret: our_payment_secret, @@ -3543,6 +3551,16 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option node.node.process_pending_htlc_forwards(); } + if is_last_hop { + // At the final hop, the incoming packet contains N dummy-hop layers + // before the real HTLC. Each call to `process_pending_htlc_forwards` + // strips exactly one dummy layer, so we call it N times. + for _ in 0..dummy_tlvs.len() { + assert!(node.node.needs_pending_htlc_processing()); + node.node.process_pending_htlc_forwards(); + } + } + if is_last_hop && clear_recipient_events { let events_2 = node.node.get_and_clear_pending_events(); if payment_claimable_expected { @@ -3755,6 +3773,29 @@ pub struct ClaimAlongRouteArgs<'a, 'b, 'c, 'd> { pub origin_node: &'a Node<'b, 'c, 'd>, pub expected_paths: &'a [&'a [&'a Node<'b, 'c, 'd>]], pub expected_extra_fees: Vec, + /// A one-off adjustment used only in tests to account for an existing + /// fee-handling trade-off in LDK. + /// + /// When the payer is the introduction node of a blinded path, LDK does not + /// subtract the forward fee for the `payer -> next_hop` channel + /// (see [`BlindedPaymentPath::advance_path_by_one`]). This keeps the fee + /// logic simpler at the cost of a small, intentional overpayment. + /// + /// In the simple two-hop case (payer as introduction node → payee), + /// this overpayment has historically been avoided by simply not charging + /// the payer the forward fee, since the payer knows there is only + /// a single hop after them. 
+ /// + /// However, with the introduction of dummy hops in LDK v0.3, even a + /// two-node real path (payer as introduction node → payee) may appear as a + /// multi-hop blinded path. This makes the existing overpayment surface in + /// tests. + /// + /// Until the fee-handling trade-off is revisited, this field allows tests + /// to compensate for that expected difference. + /// + /// [`BlindedPaymentPath::advance_path_by_one`]: crate::blinded_path::payment::BlindedPaymentPath::advance_path_by_one + pub expected_extra_total_fees_msat: u64, pub expected_min_htlc_overpay: Vec, pub skip_last: bool, pub payment_preimage: PaymentPreimage, @@ -3778,6 +3819,7 @@ impl<'a, 'b, 'c, 'd> ClaimAlongRouteArgs<'a, 'b, 'c, 'd> { origin_node, expected_paths, expected_extra_fees: vec![0; expected_paths.len()], + expected_extra_total_fees_msat: 0, expected_min_htlc_overpay: vec![0; expected_paths.len()], skip_last: false, payment_preimage, @@ -3793,6 +3835,10 @@ impl<'a, 'b, 'c, 'd> ClaimAlongRouteArgs<'a, 'b, 'c, 'd> { self.expected_extra_fees = extra_fees; self } + pub fn with_expected_extra_total_fees_msat(mut self, extra_total_fees: u64) -> Self { + self.expected_extra_total_fees_msat = extra_total_fees; + self + } pub fn with_expected_min_htlc_overpay(mut self, extra_fees: Vec) -> Self { self.expected_min_htlc_overpay = extra_fees; self @@ -4060,13 +4106,21 @@ pub fn pass_claimed_payment_along_route_from_ev( expected_total_fee_msat } + pub fn claim_payment_along_route( args: ClaimAlongRouteArgs, ) -> (Option, Vec) { - let origin_node = args.origin_node; - let payment_preimage = args.payment_preimage; - let skip_last = args.skip_last; - let expected_total_fee_msat = do_claim_payment_along_route(args); + let ClaimAlongRouteArgs { + origin_node, + payment_preimage, + skip_last, + expected_extra_total_fees_msat, + .. + } = args; + + let expected_total_fee_msat = + do_claim_payment_along_route(args) + expected_extra_total_fees_msat; + if !skip_last { expect_payment_sent!(origin_node, payment_preimage, Some(expected_total_fee_msat)) } else { diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 4c53aefe58d..0b2d5b86add 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -185,7 +185,20 @@ fn route_bolt12_payment<'a, 'b, 'c>( fn claim_bolt12_payment<'a, 'b, 'c>( node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], expected_payment_context: PaymentContext, invoice: &Bolt12Invoice ) { - let recipient = &path[path.len() - 1]; + claim_bolt12_payment_with_extra_fees( + node, + path, + expected_payment_context, + invoice, + None, + ) +} + +fn claim_bolt12_payment_with_extra_fees<'a, 'b, 'c>( + node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], expected_payment_context: PaymentContext, invoice: &Bolt12Invoice, + expected_extra_fees_msat: Option, +) { + let recipient = path.last().expect("Empty path?"); let payment_purpose = match get_event!(recipient, Event::PaymentClaimable) { Event::PaymentClaimable { purpose, .. } => purpose, _ => panic!("No Event::PaymentClaimable"), @@ -194,20 +207,29 @@ fn claim_bolt12_payment<'a, 'b, 'c>( Some(preimage) => preimage, None => panic!("No preimage in Event::PaymentClaimable"), }; - match payment_purpose { - PaymentPurpose::Bolt12OfferPayment { payment_context, .. } => { - assert_eq!(PaymentContext::Bolt12Offer(payment_context), expected_payment_context); - }, - PaymentPurpose::Bolt12RefundPayment { payment_context, .. 
} => { - assert_eq!(PaymentContext::Bolt12Refund(payment_context), expected_payment_context); - }, + let context = match payment_purpose { + PaymentPurpose::Bolt12OfferPayment { payment_context, .. } => + PaymentContext::Bolt12Offer(payment_context), + PaymentPurpose::Bolt12RefundPayment { payment_context, .. } => + PaymentContext::Bolt12Refund(payment_context), _ => panic!("Unexpected payment purpose: {:?}", payment_purpose), - } - if let Some(inv) = claim_payment(node, path, payment_preimage) { - assert_eq!(inv, PaidBolt12Invoice::Bolt12Invoice(invoice.to_owned())); - } else { - panic!("Expected PaidInvoice::Bolt12Invoice"); }; + + assert_eq!(context, expected_payment_context); + + let expected_paths = [path]; + let mut args = ClaimAlongRouteArgs::new( + node, + &expected_paths, + payment_preimage, + ); + + if let Some(extra) = expected_extra_fees_msat { + args = args.with_expected_extra_total_fees_msat(extra); + } + + let (inv, _) = claim_payment_along_route(args); + assert_eq!(inv, Some(PaidBolt12Invoice::Bolt12Invoice(invoice.clone()))); } fn extract_offer_nonce<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, message: &OnionMessage) -> Nonce { From 5981c172cbba5c3f3400923c1c143771fe0fc6b5 Mon Sep 17 00:00:00 2001 From: shaavan Date: Tue, 18 Nov 2025 23:26:03 +0530 Subject: [PATCH 102/242] Introduce payment dummy hops in DefaultRouter --- lightning-dns-resolver/src/lib.rs | 7 +++ lightning/src/ln/async_payments_tests.rs | 65 ++++++++++++++++-------- lightning/src/ln/offers_tests.rs | 35 +++++++++---- lightning/src/routing/router.rs | 19 ++++--- 4 files changed, 89 insertions(+), 37 deletions(-) diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index 125d4316d12..b7f429de6c8 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -175,6 +175,7 @@ mod test { use lightning::onion_message::messenger::{ AOnionMessenger, Destination, MessageRouter, OnionMessagePath, OnionMessenger, }; + use lightning::routing::router::DEFAULT_PAYMENT_DUMMY_HOPS; use lightning::sign::{KeysManager, NodeSigner, ReceiveAuthKey, Recipient}; use lightning::types::features::InitFeatures; use lightning::types::payment::PaymentHash; @@ -419,6 +420,12 @@ mod test { let updates = get_htlc_update_msgs(&nodes[0], &payee_id); nodes[1].node.handle_update_add_htlc(payer_id, &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + for _ in 0..DEFAULT_PAYMENT_DUMMY_HOPS { + assert!(nodes[1].node.needs_pending_htlc_processing()); + nodes[1].node.process_pending_htlc_forwards(); + } + expect_and_process_pending_htlcs(&nodes[1], false); let claimable_events = nodes[1].node.get_and_clear_pending_events(); diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 8e7fbdf94fd..a485e77f76b 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -10,8 +10,8 @@ use crate::blinded_path::message::{ BlindedMessagePath, MessageContext, NextMessageHop, OffersContext, }; -use crate::blinded_path::payment::PaymentContext; use crate::blinded_path::payment::{AsyncBolt12OfferContext, BlindedPaymentTlvs}; +use crate::blinded_path::payment::{DummyTlvs, PaymentContext}; use crate::chain::channelmonitor::{HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::events::{ Event, EventsProvider, HTLCHandlingFailureReason, HTLCHandlingFailureType, PaidBolt12Invoice, @@ -55,7 +55,7 @@ use crate::onion_message::messenger::{ use 
crate::onion_message::offers::OffersMessage; use crate::onion_message::packet::ParsedOnionMessageContents; use crate::prelude::*; -use crate::routing::router::{Payee, PaymentParameters}; +use crate::routing::router::{Payee, PaymentParameters, DEFAULT_PAYMENT_DUMMY_HOPS}; use crate::sign::NodeSigner; use crate::sync::Mutex; use crate::types::features::Bolt12InvoiceFeatures; @@ -984,7 +984,8 @@ fn ignore_duplicate_invoice() { check_added_monitors!(sender, 1); let route: &[&[&Node]] = &[&[always_online_node, async_recipient]]; - let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let (res, _) = @@ -1063,7 +1064,8 @@ fn ignore_duplicate_invoice() { check_added_monitors!(sender, 1); let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) - .without_clearing_recipient_events(); + .without_clearing_recipient_events() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); let payment_preimage = match get_event!(async_recipient, Event::PaymentClaimable) { @@ -1138,7 +1140,8 @@ fn async_receive_flow_success() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; - let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let (res, _) = @@ -1375,11 +1378,13 @@ fn async_receive_mpp() { }; let args = PassAlongPathArgs::new(&nodes[0], expected_route[0], amt_msat, payment_hash, ev) - .without_claimable_event(); + .without_claimable_event() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); - let args = PassAlongPathArgs::new(&nodes[0], expected_route[1], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], expected_route[1], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = match claimable_ev { Event::PaymentClaimable { @@ -1497,7 +1502,8 @@ fn amount_doesnt_match_invreq() { let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .without_claimable_event() - .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); // Modify the invoice request stored in our outbounds to be the correct one, to make sure the @@ -1521,7 +1527,8 @@ fn amount_doesnt_match_invreq() { ev, MessageSendEvent::UpdateHTLCs { ref updates, .. 
} if updates.update_add_htlcs.len() == 1)); check_added_monitors!(nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[2], &nodes[3]]]; - let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, keysend_preimage)); @@ -1712,7 +1719,8 @@ fn invalid_async_receive_with_retry( let payment_hash = extract_payment_hash(&ev); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; - let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); // Fail the HTLC backwards to enable us to more easily modify the now-Retryable outbound to test @@ -1739,7 +1747,8 @@ fn invalid_async_receive_with_retry( let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .without_claimable_event() - .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], true); @@ -1751,7 +1760,8 @@ fn invalid_async_receive_with_retry( let mut ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); check_added_monitors!(nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; - let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, keysend_preimage)); @@ -1858,6 +1868,13 @@ fn expired_static_invoice_payment_path() { blinded_path .advance_path_by_one(&nodes[1].keys_manager, &nodes[1].node, &secp_ctx) .unwrap(); + + for _ in 0..DEFAULT_PAYMENT_DUMMY_HOPS { + blinded_path + .advance_path_by_one(&nodes[2].keys_manager, &nodes[2].node, &secp_ctx) + .unwrap(); + } + match blinded_path.decrypt_intro_payload(&nodes[2].keys_manager).unwrap().0 { BlindedPaymentTlvs::Receive(tlvs) => tlvs.payment_constraints.max_cltv_expiry, _ => panic!(), @@ -1920,7 +1937,8 @@ fn expired_static_invoice_payment_path() { let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .without_claimable_event() - .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], false); nodes[2].logger.assert_log_contains( @@ -2363,7 +2381,8 @@ fn refresh_static_invoices_for_used_offers() { check_added_monitors!(sender, 1); let route: &[&[&Node]] = 
&[&[server, recipient]]; - let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let res = claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage)); @@ -2697,7 +2716,8 @@ fn invoice_server_is_not_channel_peer() { check_added_monitors!(sender, 1); let route: &[&[&Node]] = &[&[forwarding_node, recipient]]; - let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let res = claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage)); @@ -2936,7 +2956,8 @@ fn async_payment_e2e() { check_added_monitors!(sender_lsp, 1); let path: &[&Node] = &[invoice_server, recipient]; - let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let route: &[&[&Node]] = &[&[sender_lsp, invoice_server, recipient]]; @@ -3173,7 +3194,8 @@ fn intercepted_hold_htlc() { check_added_monitors!(lsp, 1); let path: &[&Node] = &[recipient]; - let args = PassAlongPathArgs::new(lsp, path, amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(lsp, path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let route: &[&[&Node]] = &[&[lsp, recipient]]; @@ -3276,7 +3298,8 @@ fn async_payment_mpp() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); let args = PassAlongPathArgs::new(lsp_a, expected_path, amt_msat, payment_hash, ev) - .without_claimable_event(); + .without_claimable_event() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); lsp_b.node.process_pending_htlc_forwards(); @@ -3284,7 +3307,8 @@ fn async_payment_mpp() { let mut events = lsp_b.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); - let args = PassAlongPathArgs::new(lsp_b, expected_path, amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(lsp_b, expected_path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = match claimable_ev { @@ -3420,7 +3444,8 @@ fn release_htlc_races_htlc_onion_decode() { check_added_monitors!(sender_lsp, 1); let path: &[&Node] = &[invoice_server, recipient]; - let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let route: &[&[&Node]] = &[&[sender_lsp, invoice_server, 
recipient]]; diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 0b2d5b86add..cd24c2de7ef 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -47,7 +47,7 @@ use bitcoin::secp256k1::{PublicKey, Secp256k1}; use core::time::Duration; use crate::blinded_path::IntroductionNode; use crate::blinded_path::message::BlindedMessagePath; -use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentContext}; +use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, DummyTlvs, PaymentContext}; use crate::blinded_path::message::OffersContext; use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaidBolt12Invoice, PaymentFailureReason, PaymentPurpose}; use crate::ln::channelmanager::{Bolt12PaymentError, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry, self}; @@ -63,7 +63,7 @@ use crate::offers::parse::Bolt12SemanticError; use crate::onion_message::messenger::{DefaultMessageRouter, Destination, MessageSendInstructions, NodeIdMessageRouter, NullMessageRouter, PeeledOnion, PADDED_PATH_LENGTH}; use crate::onion_message::offers::OffersMessage; use crate::routing::gossip::{NodeAlias, NodeId}; -use crate::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; +use crate::routing::router::{DEFAULT_PAYMENT_DUMMY_HOPS, PaymentParameters, RouteParameters, RouteParametersConfig}; use crate::sign::{NodeSigner, Recipient}; use crate::util::ser::Writeable; @@ -178,7 +178,8 @@ fn route_bolt12_payment<'a, 'b, 'c>( let amount_msats = invoice.amount_msats(); let payment_hash = invoice.payment_hash(); let args = PassAlongPathArgs::new(node, path, amount_msats, payment_hash, ev) - .without_clearing_recipient_events(); + .without_clearing_recipient_events() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); } @@ -1432,7 +1433,20 @@ fn creates_offer_with_blinded_path_using_unannounced_introduction_node() { route_bolt12_payment(bob, &[alice], &invoice); expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id); - claim_bolt12_payment(bob, &[alice], payment_context, &invoice); + // When the payer is the introduction node of a blinded path, LDK doesn't + // subtract the forward fee for the `payer -> next_hop` channel (see + // `BlindedPaymentPath::advance_path_by_one`). This keeps fee logic simple, + // at the cost of a small, intentional overpayment. + // + // In the old two-hop case (payer as introduction node → payee), this never + // surfaced because the payer simply wasn’t charged the forward fee. + // + // With dummy hops in LDK v0.3, even a real two-node path can appear as a + // longer blinded route, so the overpayment shows up in tests. + // + // Until the fee-handling trade-off is revisited, we pass an expected extra + // fee here so tests can compensate for it. 
+ claim_bolt12_payment_with_extra_fees(bob, &[alice], payment_context, &invoice, Some(1000)); expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id); } @@ -2444,12 +2458,13 @@ fn rejects_keysend_to_non_static_invoice_path() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(payment_preimage) - .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_malformed_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_malformed_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); - expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new()); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); } #[test] @@ -2508,12 +2523,14 @@ fn no_double_pay_with_stale_channelmanager() { let ev = remove_first_msg_event_to_node(&bob_id, &mut events); let args = PassAlongPathArgs::new(&nodes[0], expected_route[0], amt_msat, payment_hash, ev) - .without_clearing_recipient_events(); + .without_clearing_recipient_events() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); let ev = remove_first_msg_event_to_node(&bob_id, &mut events); let args = PassAlongPathArgs::new(&nodes[0], expected_route[0], amt_msat, payment_hash, ev) - .without_clearing_recipient_events(); + .without_clearing_recipient_events() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); expect_recent_payment!(nodes[0], RecentPaymentDetails::Pending, payment_id); diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index c06e5174263..cb1b3d0fc1d 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -13,8 +13,8 @@ use bitcoin::secp256k1::{self, PublicKey, Secp256k1}; use lightning_invoice::Bolt11Invoice; use crate::blinded_path::payment::{ - BlindedPaymentPath, ForwardTlvs, PaymentConstraints, PaymentForwardNode, PaymentRelay, - ReceiveTlvs, + BlindedPaymentPath, DummyTlvs, ForwardTlvs, PaymentConstraints, PaymentForwardNode, + PaymentRelay, ReceiveTlvs, }; use crate::blinded_path::{BlindedHop, Direction, IntroductionNode}; use crate::crypto::chacha20::ChaCha20; @@ -74,6 +74,9 @@ pub struct DefaultRouter< score_params: SP, } +/// The number of dummy hops included in [`BlindedPaymentPath`]s created by [`DefaultRouter`]. 
+pub const DEFAULT_PAYMENT_DUMMY_HOPS: usize = 3; + impl< G: Deref>, L: Deref, @@ -198,9 +201,9 @@ where }) }) .map(|forward_node| { - BlindedPaymentPath::new( - &[forward_node], recipient, local_node_receive_key, tlvs.clone(), u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, - &*self.entropy_source, secp_ctx + BlindedPaymentPath::new_with_dummy_hops( + &[forward_node], recipient, &[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS], + local_node_receive_key, tlvs.clone(), u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &*self.entropy_source, secp_ctx ) }) .take(MAX_PAYMENT_PATHS) @@ -210,9 +213,9 @@ where Ok(paths) if !paths.is_empty() => Ok(paths), _ => { if network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)) { - BlindedPaymentPath::new( - &[], recipient, local_node_receive_key, tlvs, u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &*self.entropy_source, - secp_ctx + BlindedPaymentPath::new_with_dummy_hops( + &[], recipient, &[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS], + local_node_receive_key, tlvs, u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &*self.entropy_source, secp_ctx ).map(|path| vec![path]) } else { Err(()) From 2a34be1929d9ca16ab6829f589763ce1107e0d48 Mon Sep 17 00:00:00 2001 From: shaavan Date: Tue, 21 Oct 2025 19:10:53 +0530 Subject: [PATCH 103/242] Introduce Blinded Payment Dummy Path test --- lightning/src/ln/blinded_payment_tests.rs | 70 ++++++++++++++++++++++- 1 file changed, 68 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index a902cfebd12..3f7f36f454a 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -8,8 +8,8 @@ // licenses. use crate::blinded_path::payment::{ - BlindedPaymentPath, Bolt12RefundContext, ForwardTlvs, PaymentConstraints, PaymentContext, - PaymentForwardNode, PaymentRelay, ReceiveTlvs, PAYMENT_PADDING_ROUND_OFF, + BlindedPaymentPath, Bolt12RefundContext, DummyTlvs, ForwardTlvs, PaymentConstraints, + PaymentContext, PaymentForwardNode, PaymentRelay, ReceiveTlvs, PAYMENT_PADDING_ROUND_OFF, }; use crate::blinded_path::utils::is_padded; use crate::blinded_path::{self, BlindedHop}; @@ -196,6 +196,72 @@ fn do_one_hop_blinded_path(success: bool) { } } +#[test] +fn one_hop_blinded_path_with_dummy_hops() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let chan_upd = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0).0.contents; + + let amt_msat = 5000; + let (payment_preimage, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[1], Some(amt_msat), None); + let payee_tlvs = ReceiveTlvs { + payment_secret, + payment_constraints: PaymentConstraints { + max_cltv_expiry: u32::max_value(), + htlc_minimum_msat: chan_upd.htlc_minimum_msat, + }, + payment_context: PaymentContext::Bolt12Refund(Bolt12RefundContext {}), + }; + let receive_auth_key = chanmon_cfgs[1].keys_manager.get_receive_auth_key(); + let dummy_tlvs = [DummyTlvs::default(); 2]; + + let mut secp_ctx = Secp256k1::new(); + let blinded_path = BlindedPaymentPath::new_with_dummy_hops( + &[], + nodes[1].node.get_our_node_id(), + &dummy_tlvs, + receive_auth_key, + payee_tlvs, + u64::MAX, + TEST_FINAL_CLTV as u16, + &chanmon_cfgs[1].keys_manager, + &secp_ctx, + ) + .unwrap(); + + let route_params = RouteParameters::from_payment_params_and_value( + 
PaymentParameters::blinded(vec![blinded_path]), + amt_msat, + ); + nodes[0] + .node + .send_payment( + payment_hash, + RecipientOnionFields::spontaneous_empty(), + PaymentId(payment_hash.0), + route_params, + Retry::Attempts(0), + ) + .unwrap(); + check_added_monitors(&nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + + let path = &[&nodes[1]]; + let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&dummy_tlvs) + .with_payment_secret(payment_secret); + + do_pass_along_path(args); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); +} + #[test] #[rustfmt::skip] fn mpp_to_one_hop_blinded_path() { From 105521ff5ebd942f6cabe918852c888d18246ea3 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Mon, 12 Jan 2026 05:15:49 +0000 Subject: [PATCH 104/242] net-tokio: add `fn tor_connect_outbound` Routes `fn connect_outbound` through Tor. This uses a unique stream isolation parameter for each connection: the hex-encoding of 32 random bytes sourced from the `entropy_source` parameter. --- .github/workflows/build.yml | 16 +++ Cargo.toml | 1 + lightning-net-tokio/Cargo.toml | 2 +- lightning-net-tokio/src/lib.rs | 219 ++++++++++++++++++++++++++++++++- 4 files changed, 236 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2658ff454e9..6ae6d83ddd3 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -320,3 +320,19 @@ jobs: run: cargo fmt --check - name: Run rustfmt checks on lightning-tests run: cd lightning-tests && cargo fmt --check + tor-connect: + runs-on: ubuntu-latest + env: + TOOLCHAIN: 1.75.0 + steps: + - name: Checkout source code + uses: actions/checkout@v4 + - name: Install tor + run: | + sudo apt install -y tor + - name: Install Rust ${{ env.TOOLCHAIN }} toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }} + - name: Test tor connections using lightning-net-tokio + run: | + TOR_PROXY="127.0.0.1:9050" RUSTFLAGS="--cfg=tor" cargo test --verbose --color always -p lightning-net-tokio diff --git a/Cargo.toml b/Cargo.toml index a0895fe1641..1eb7b572d8b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,4 +67,5 @@ check-cfg = [ "cfg(require_route_graph_test)", "cfg(simple_close)", "cfg(peer_storage)", + "cfg(tor)", ] diff --git a/lightning-net-tokio/Cargo.toml b/lightning-net-tokio/Cargo.toml index 6c45f40e3c8..af4845b7397 100644 --- a/lightning-net-tokio/Cargo.toml +++ b/lightning-net-tokio/Cargo.toml @@ -19,7 +19,7 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] bitcoin = "0.32.2" lightning = { version = "0.3.0", path = "../lightning" } -tokio = { version = "1.35", features = [ "rt", "sync", "net", "time" ] } +tokio = { version = "1.35", features = [ "rt", "sync", "net", "time", "io-util" ] } [dev-dependencies] tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] } diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 75886ebfb5f..27d309f2c18 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -37,6 +37,7 @@ use lightning::ln::msgs::SocketAddress; use lightning::ln::peer_handler; use lightning::ln::peer_handler::APeerManager; use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait; +use 
lightning::sign::EntropySource; use std::future::Future; use std::hash::Hash; @@ -51,6 +52,9 @@ use std::time::Duration; static ID_COUNTER: AtomicU64 = AtomicU64::new(0); +const CONNECT_OUTBOUND_TIMEOUT: u64 = 10; +const TOR_CONNECT_OUTBOUND_TIMEOUT: u64 = 30; + // We only need to select over multiple futures in one place, and taking on the full `tokio/macros` // dependency tree in order to do so (which has broken our MSRV before) is excessive. Instead, we // define a trivial two- and three- select macro with the specific types we need and just use that. @@ -462,13 +466,169 @@ where PM::Target: APeerManager, { let connect_fut = async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }; - if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), connect_fut).await { + if let Ok(Ok(stream)) = + time::timeout(Duration::from_secs(CONNECT_OUTBOUND_TIMEOUT), connect_fut).await + { + Some(setup_outbound(peer_manager, their_node_id, stream)) + } else { + None + } +} + +/// Routes [`connect_outbound`] through Tor. Implements stream isolation for each connection +/// using a stream isolation parameter sourced from [`EntropySource::get_secure_random_bytes`]. +/// +/// Returns a future (as the fn is async) that yields another future, see [`connect_outbound`] for +/// details on this return value. +pub async fn tor_connect_outbound( + peer_manager: PM, their_node_id: PublicKey, addr: SocketAddress, tor_proxy_addr: SocketAddr, + entropy_source: ES, +) -> Option> +where + PM::Target: APeerManager, + ES::Target: EntropySource, +{ + let connect_fut = async { + tor_connect(addr, tor_proxy_addr, entropy_source).await.map(|s| s.into_std().unwrap()) + }; + if let Ok(Ok(stream)) = + time::timeout(Duration::from_secs(TOR_CONNECT_OUTBOUND_TIMEOUT), connect_fut).await + { Some(setup_outbound(peer_manager, their_node_id, stream)) } else { None } } +async fn tor_connect( + addr: SocketAddress, tor_proxy_addr: SocketAddr, entropy_source: ES, +) -> Result +where + ES::Target: EntropySource, +{ + use std::io::Write; + use tokio::io::AsyncReadExt; + + const IPV4_ADDR_LEN: usize = 4; + const IPV6_ADDR_LEN: usize = 16; + const HOSTNAME_MAX_LEN: usize = u8::MAX as usize; + + // Constants defined in RFC 1928 and RFC 1929 + const VERSION: u8 = 5; + const NMETHODS: u8 = 1; + const USERNAME_PASSWORD_AUTH: u8 = 2; + const METHOD_SELECT_REPLY_LEN: usize = 2; + const USERNAME_PASSWORD_VERSION: u8 = 1; + const USERNAME_PASSWORD_REPLY_LEN: usize = 2; + const CMD_CONNECT: u8 = 1; + const RSV: u8 = 0; + const ATYP_IPV4: u8 = 1; + const ATYP_DOMAINNAME: u8 = 3; + const ATYP_IPV6: u8 = 4; + const SUCCESS: u8 = 0; + + // Tor extensions, see https://spec.torproject.org/socks-extensions.html for further details + const USERNAME: &[u8] = b"0"; + const USERNAME_LEN: usize = USERNAME.len(); + const PASSWORD_ENTROPY_LEN: usize = 32; + // We encode the password as a hex string on the wire. RFC 1929 allows arbitrary byte sequences but we choose to be conservative. 
+ const PASSWORD_LEN: usize = PASSWORD_ENTROPY_LEN * 2; + + const USERNAME_PASSWORD_REQUEST_LEN: usize = + 1 /* VER */ + 1 /* ULEN */ + USERNAME_LEN + 1 /* PLEN */ + PASSWORD_LEN; + const SOCKS5_REQUEST_MAX_LEN: usize = 1 /* VER */ + 1 /* CMD */ + 1 /* RSV */ + 1 /* ATYP */ + + 1 /* HOSTNAME len */ + HOSTNAME_MAX_LEN /* HOSTNAME */ + 2 /* PORT */; + const SOCKS5_REPLY_HEADER_LEN: usize = 1 /* VER */ + 1 /* REP */ + 1 /* RSV */ + 1 /* ATYP */; + + let method_selection_request = [VERSION, NMETHODS, USERNAME_PASSWORD_AUTH]; + let mut tcp_stream = TcpStream::connect(&tor_proxy_addr).await.map_err(|_| ())?; + tokio::io::AsyncWriteExt::write_all(&mut tcp_stream, &method_selection_request) + .await + .map_err(|_| ())?; + + let mut method_selection_reply = [0u8; METHOD_SELECT_REPLY_LEN]; + tcp_stream.read_exact(&mut method_selection_reply).await.map_err(|_| ())?; + if method_selection_reply != [VERSION, USERNAME_PASSWORD_AUTH] { + return Err(()); + } + + let password: [u8; PASSWORD_ENTROPY_LEN] = entropy_source.get_secure_random_bytes(); + let mut username_password_request = [0u8; USERNAME_PASSWORD_REQUEST_LEN]; + let mut stream = &mut username_password_request[..]; + stream.write_all(&[USERNAME_PASSWORD_VERSION, USERNAME_LEN as u8]).unwrap(); + stream.write_all(USERNAME).unwrap(); + stream.write_all(&[PASSWORD_LEN as u8]).unwrap(); + // Encode the password as a hex string even if RFC 1929 allows arbitrary sequences + for byte in password { + write!(stream, "{:02x}", byte).unwrap(); + } + debug_assert!(stream.is_empty()); + tokio::io::AsyncWriteExt::write_all(&mut tcp_stream, &username_password_request) + .await + .map_err(|_| ())?; + + let mut username_password_reply = [0u8; USERNAME_PASSWORD_REPLY_LEN]; + tcp_stream.read_exact(&mut username_password_reply).await.map_err(|_| ())?; + if username_password_reply != [USERNAME_PASSWORD_VERSION, SUCCESS] { + return Err(()); + } + + let mut socks5_request = [0u8; SOCKS5_REQUEST_MAX_LEN]; + let mut stream = &mut socks5_request[..]; + stream.write_all(&[VERSION, CMD_CONNECT, RSV]).unwrap(); + match addr { + SocketAddress::TcpIpV4 { addr, port } => { + stream.write_all(&[ATYP_IPV4]).unwrap(); + stream.write_all(&addr).unwrap(); + stream.write_all(&port.to_be_bytes()).unwrap(); + }, + SocketAddress::TcpIpV6 { addr, port } => { + stream.write_all(&[ATYP_IPV6]).unwrap(); + stream.write_all(&addr).unwrap(); + stream.write_all(&port.to_be_bytes()).unwrap(); + }, + ref onion_v3 @ SocketAddress::OnionV3 { port, .. } => { + let onion_v3_url = onion_v3.to_string(); + let hostname = onion_v3_url.split_once(':').ok_or(())?.0.as_bytes(); + stream.write_all(&[ATYP_DOMAINNAME, hostname.len() as u8]).unwrap(); + stream.write_all(hostname).unwrap(); + stream.write_all(&port.to_be_bytes()).unwrap(); + }, + SocketAddress::Hostname { hostname, port } => { + stream.write_all(&[ATYP_DOMAINNAME, hostname.len()]).unwrap(); + stream.write_all(hostname.as_bytes()).unwrap(); + stream.write_all(&port.to_be_bytes()).unwrap(); + }, + SocketAddress::OnionV2 { .. 
} => return Err(()),
+	};
+	let bytes_remaining = stream.len();
+	tokio::io::AsyncWriteExt::write_all(
+		&mut tcp_stream,
+		&socks5_request[..socks5_request.len() - bytes_remaining],
+	)
+	.await
+	.map_err(|_| ())?;
+
+	let mut socks5_reply_header = [0u8; SOCKS5_REPLY_HEADER_LEN];
+	tcp_stream.read_exact(&mut socks5_reply_header).await.map_err(|_| ())?;
+	if socks5_reply_header[..3] != [VERSION, SUCCESS, RSV] {
+		return Err(());
+	}
+	match socks5_reply_header[3] {
+		ATYP_IPV4 => tcp_stream.read_exact(&mut [0u8; IPV4_ADDR_LEN]).await.map_err(|_| ())?,
+		ATYP_DOMAINNAME => {
+			let hostname_len = tcp_stream.read_u8().await.map_err(|_| ())? as usize;
+			let mut hostname_buffer = [0u8; HOSTNAME_MAX_LEN];
+			tcp_stream.read_exact(&mut hostname_buffer[..hostname_len]).await.map_err(|_| ())?
+		},
+		ATYP_IPV6 => tcp_stream.read_exact(&mut [0u8; IPV6_ADDR_LEN]).await.map_err(|_| ())?,
+		_ => return Err(()),
+	};
+	tcp_stream.read_u16().await.map_err(|_| ())?;
+
+	Ok(tcp_stream)
+}
+
 const SOCK_WAKER_VTABLE: task::RawWakerVTable = task::RawWakerVTable::new(
 	clone_socket_waker,
 	wake_socket_waker,
@@ -941,4 +1101,61 @@ mod tests {
 	async fn unthreaded_race_disconnect_accept() {
 		race_disconnect_accept().await;
 	}
+
+	#[cfg(tor)]
+	#[tokio::test]
+	async fn test_tor_connect() {
+		use super::tor_connect;
+		use lightning::sign::EntropySource;
+		use std::net::SocketAddr;
+
+		// Set TOR_PROXY=127.0.0.1:9050
+		let tor_proxy_addr: SocketAddr = std::env!("TOR_PROXY").parse().unwrap();
+
+		struct TestEntropySource;
+
+		impl EntropySource for TestEntropySource {
+			fn get_secure_random_bytes(&self) -> [u8; 32] {
+				[0xffu8; 32]
+			}
+		}
+
+		let entropy_source = TestEntropySource;
+
+		// Success cases
+
+		for addr_str in [
+			// google.com
+			"142.250.189.196:80",
+			// google.com
+			"[2607:f8b0:4005:813::2004]:80",
+			// torproject.org
+			"torproject.org:80",
+			// torproject.org
+			"2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion:80",
+		] {
+			let addr: SocketAddress = addr_str.parse().unwrap();
+			let tcp_stream = tor_connect(addr, tor_proxy_addr, &entropy_source).await.unwrap();
+			assert_eq!(
+				tcp_stream.try_read(&mut [0u8; 1]).unwrap_err().kind(),
+				std::io::ErrorKind::WouldBlock
+			);
+		}
+
+		// Failure cases
+
+		for addr_str in [
+			// google.com, with some invalid port
+			"142.250.189.196:1234",
+			// google.com, with some invalid port
+			"[2607:f8b0:4005:813::2004]:1234",
+			// torproject.org, with some invalid port
+			"torproject.org:1234",
+			// torproject.org, with a typo
+			"3gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion:80",
+		] {
+			let addr: SocketAddress = addr_str.parse().unwrap();
+			assert!(tor_connect(addr, tor_proxy_addr, &entropy_source).await.is_err());
+		}
+	}
 }

From 8eb9e70be37e50d64769b3b893bbab7af173b662 Mon Sep 17 00:00:00 2001
From: Jeffrey Czyz
Date: Thu, 4 Dec 2025 11:25:01 -0600
Subject: [PATCH 105/242] Mixed mode splicing

Some splicing use cases require splicing in and out simultaneously in
the same splice transaction. Add support for such splices using the
funding inputs to pay the appropriate fees just like the splice-in case,
as opposed to using the channel value like the splice-out case.

This requires using the contributed input value when checking if the
inputs are sufficient to cover fees, not the net contributed value. The
latter may be negative in the net splice-out case.
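As a minimal sketch of that check (plain satoshi integers rather than the
`Amount`/`SignedAmount` types used below; the fee figures are the
non-`grind_signatures` expectations from the new tests):

    // Fee sufficiency is checked against the value the contributed inputs
    // add, never against the net contribution, which can be negative when
    // more is spliced out than in.
    fn inputs_cover_contribution_plus_fees(
        total_input_sats: u64, value_added_sats: u64, estimated_fee_sats: u64,
    ) -> Result<(), String> {
        let needed = value_added_sats
            .checked_add(estimated_fee_sats)
            .ok_or("contribution plus fee estimate exceeds the total bitcoin supply")?;
        if total_input_sats < needed {
            return Err(format!("have {total_input_sats} sats of inputs, need {needed}"));
        }
        Ok(())
    }

    fn main() {
        // Net splice-out: 300_000 sats of inputs add 220_000 sats while
        // 400_000 sats are spliced out. The net value is negative, yet the
        // inputs still cover the added value plus a low-feerate fee estimate...
        assert!(inputs_cover_contribution_plus_fees(300_000, 220_000, 2_532).is_ok());
        // ...but not the ~113_940 sat estimate at 90_000 sats per kiloweight.
        assert!(inputs_cover_contribution_plus_fees(300_000, 220_000, 113_940).is_err());
    }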
---
 lightning/src/ln/channel.rs        | 148 ++++++++++++++------
 lightning/src/ln/funding.rs        |  42 ++++++--
 lightning/src/ln/interactivetxs.rs |  23 ++--
 lightning/src/ln/splicing_tests.rs | 164 +++++++++++++++++++++++++++++
 4 files changed, 321 insertions(+), 56 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index a1c48b6cc21..fd780da8d91 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -6501,8 +6501,7 @@ fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satos
 fn check_splice_contribution_sufficient(
 	contribution: &SpliceContribution, is_initiator: bool, funding_feerate: FeeRate,
 ) -> Result<SignedAmount, String> {
-	let contribution_amount = contribution.value();
-	if contribution_amount < SignedAmount::ZERO {
+	if contribution.inputs().is_empty() {
 		let estimated_fee = Amount::from_sat(estimate_v2_funding_transaction_fee(
 			contribution.inputs(),
 			contribution.outputs(),
@@ -6511,20 +6510,25 @@ fn check_splice_contribution_sufficient(
 			funding_feerate.to_sat_per_kwu() as u32,
 		));
 
+		let contribution_amount = contribution.net_value();
 		contribution_amount
 			.checked_sub(
 				estimated_fee.to_signed().expect("fees should never exceed Amount::MAX_MONEY"),
 			)
-			.ok_or(format!("Our {contribution_amount} contribution plus the fee estimate exceeds the total bitcoin supply"))
+			.ok_or(format!(
+				"{} splice-out amount plus {estimated_fee} fee estimate exceeds the total bitcoin supply",
+				contribution_amount.unsigned_abs(),
+			))
 	} else {
 		check_v2_funding_inputs_sufficient(
-			contribution_amount.to_sat(),
+			contribution.value_added(),
 			contribution.inputs(),
+			contribution.outputs(),
 			is_initiator,
 			true,
 			funding_feerate.to_sat_per_kwu() as u32,
 		)
-		.map(|_| contribution_amount)
+		.map(|_| contribution.net_value())
 	}
 }
 
@@ -6583,16 +6587,16 @@ fn estimate_v2_funding_transaction_fee(
 /// Returns estimated (partial) fees as additional information
 #[rustfmt::skip]
 fn check_v2_funding_inputs_sufficient(
-	contribution_amount: i64, funding_inputs: &[FundingTxInput], is_initiator: bool,
-	is_splice: bool, funding_feerate_sat_per_1000_weight: u32,
-) -> Result<u64, String> {
-	let estimated_fee = estimate_v2_funding_transaction_fee(
-		funding_inputs, &[], is_initiator, is_splice, funding_feerate_sat_per_1000_weight,
-	);
-
-	let mut total_input_sats = 0u64;
+	contributed_input_value: Amount, funding_inputs: &[FundingTxInput], outputs: &[TxOut],
+	is_initiator: bool, is_splice: bool, funding_feerate_sat_per_1000_weight: u32,
) -> Result<Amount, String> {
+	let estimated_fee = Amount::from_sat(estimate_v2_funding_transaction_fee(
+		funding_inputs, outputs, is_initiator, is_splice, funding_feerate_sat_per_1000_weight,
+	));
+
+	let mut total_input_value = Amount::ZERO;
 	for FundingTxInput { utxo, .. } in funding_inputs.iter() {
-		total_input_sats = total_input_sats.checked_add(utxo.output.value.to_sat())
+		total_input_value = total_input_value.checked_add(utxo.output.value)
 			.ok_or("Sum of input values is greater than the total bitcoin supply")?;
 	}
 
@@ -6607,13 +6611,11 @@ fn check_v2_funding_inputs_sufficient(
 	// TODO(splicing): refine check including the fact whether a change will be added or not.
 	// Can be done once dual funding preparation is included.
 
-	let minimal_input_amount_needed = contribution_amount.checked_add(estimated_fee as i64)
-		.ok_or(format!("Our {contribution_amount} contribution plus the fee estimate exceeds the total bitcoin supply"))?;
-	if i64::try_from(total_input_sats).map_err(|_| "Sum of input values is greater than the total bitcoin supply")?
- < minimal_input_amount_needed - { + let minimal_input_amount_needed = contributed_input_value.checked_add(estimated_fee) + .ok_or(format!("{contributed_input_value} contribution plus {estimated_fee} fee estimate exceeds the total bitcoin supply"))?; + if total_input_value < minimal_input_amount_needed { Err(format!( - "Total input amount {total_input_sats} is lower than needed for contribution {contribution_amount}, considering fees of {estimated_fee}. Need more inputs.", + "Total input amount {total_input_value} is lower than needed for splice-in contribution {contributed_input_value}, considering fees of {estimated_fee}. Need more inputs.", )) } else { Ok(estimated_fee) @@ -6679,7 +6681,7 @@ impl FundingNegotiationContext { }; // Optionally add change output - let change_value_opt = if self.our_funding_contribution > SignedAmount::ZERO { + let change_value_opt = if !self.our_funding_inputs.is_empty() { match calculate_change_output_value( &self, self.shared_funding_input.is_some(), @@ -12070,7 +12072,7 @@ where }); } - let our_funding_contribution = contribution.value(); + let our_funding_contribution = contribution.net_value(); if our_funding_contribution == SignedAmount::ZERO { return Err(APIError::APIMisuseError { err: format!( @@ -18525,6 +18527,13 @@ mod tests { FundingTxInput::new_p2wpkh(prevtx, 0).unwrap() } + fn funding_output_sats(output_value_sats: u64) -> TxOut { + TxOut { + value: Amount::from_sat(output_value_sats), + script_pubkey: ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()), + } + } + #[test] #[rustfmt::skip] fn test_check_v2_funding_inputs_sufficient() { @@ -18535,16 +18544,83 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 2278 } else { 2284 }; assert_eq!( check_v2_funding_inputs_sufficient( - 220_000, + Amount::from_sat(220_000), + &[ + funding_input_sats(200_000), + funding_input_sats(100_000), + ], + &[], + true, + true, + 2000, + ).unwrap(), + Amount::from_sat(expected_fee), + ); + } + + // Net splice-in + { + let expected_fee = if cfg!(feature = "grind_signatures") { 2526 } else { 2532 }; + assert_eq!( + check_v2_funding_inputs_sufficient( + Amount::from_sat(220_000), + &[ + funding_input_sats(200_000), + funding_input_sats(100_000), + ], + &[ + funding_output_sats(200_000), + ], + true, + true, + 2000, + ).unwrap(), + Amount::from_sat(expected_fee), + ); + } + + // Net splice-out + { + let expected_fee = if cfg!(feature = "grind_signatures") { 2526 } else { 2532 }; + assert_eq!( + check_v2_funding_inputs_sufficient( + Amount::from_sat(220_000), &[ funding_input_sats(200_000), funding_input_sats(100_000), ], + &[ + funding_output_sats(400_000), + ], true, true, 2000, ).unwrap(), - expected_fee, + Amount::from_sat(expected_fee), + ); + } + + // Net splice-out, inputs insufficient to cover fees + { + let expected_fee = if cfg!(feature = "grind_signatures") { 113670 } else { 113940 }; + assert_eq!( + check_v2_funding_inputs_sufficient( + Amount::from_sat(220_000), + &[ + funding_input_sats(200_000), + funding_input_sats(100_000), + ], + &[ + funding_output_sats(400_000), + ], + true, + true, + 90000, + ), + Err(format!( + "Total input amount 0.00300000 BTC is lower than needed for splice-in contribution 0.00220000 BTC, considering fees of {}. 
Need more inputs.", + Amount::from_sat(expected_fee), + )), ); } @@ -18553,17 +18629,18 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 1736 } else { 1740 }; assert_eq!( check_v2_funding_inputs_sufficient( - 220_000, + Amount::from_sat(220_000), &[ funding_input_sats(100_000), ], + &[], true, true, 2000, ), Err(format!( - "Total input amount 100000 is lower than needed for contribution 220000, considering fees of {}. Need more inputs.", - expected_fee, + "Total input amount 0.00100000 BTC is lower than needed for splice-in contribution 0.00220000 BTC, considering fees of {}. Need more inputs.", + Amount::from_sat(expected_fee), )), ); } @@ -18573,16 +18650,17 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 2278 } else { 2284 }; assert_eq!( check_v2_funding_inputs_sufficient( - (300_000 - expected_fee - 20) as i64, + Amount::from_sat(300_000 - expected_fee - 20), &[ funding_input_sats(200_000), funding_input_sats(100_000), ], + &[], true, true, 2000, ).unwrap(), - expected_fee, + Amount::from_sat(expected_fee), ); } @@ -18591,18 +18669,19 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 2506 } else { 2513 }; assert_eq!( check_v2_funding_inputs_sufficient( - 298032, + Amount::from_sat(298032), &[ funding_input_sats(200_000), funding_input_sats(100_000), ], + &[], true, true, 2200, ), Err(format!( - "Total input amount 300000 is lower than needed for contribution 298032, considering fees of {}. Need more inputs.", - expected_fee + "Total input amount 0.00300000 BTC is lower than needed for splice-in contribution 0.00298032 BTC, considering fees of {}. Need more inputs.", + Amount::from_sat(expected_fee), )), ); } @@ -18612,16 +18691,17 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 1084 } else { 1088 }; assert_eq!( check_v2_funding_inputs_sufficient( - (300_000 - expected_fee - 20) as i64, + Amount::from_sat(300_000 - expected_fee - 20), &[ funding_input_sats(200_000), funding_input_sats(100_000), ], + &[], false, false, 2000, ).unwrap(), - expected_fee, + Amount::from_sat(expected_fee), ); } } diff --git a/lightning/src/ln/funding.rs b/lightning/src/ln/funding.rs index b7f8740f737..8092a0e4451 100644 --- a/lightning/src/ln/funding.rs +++ b/lightning/src/ln/funding.rs @@ -21,8 +21,10 @@ use crate::sign::{P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT}; /// The components of a splice's funding transaction that are contributed by one party. #[derive(Debug, Clone)] pub struct SpliceContribution { - /// The amount to contribute to the splice. - value: SignedAmount, + /// The amount from [`inputs`] to contribute to the splice. + /// + /// [`inputs`]: Self::inputs + value_added: Amount, /// The inputs included in the splice's funding transaction to meet the contributed amount /// plus fees. Any excess amount will be sent to a change output. @@ -42,27 +44,45 @@ pub struct SpliceContribution { impl SpliceContribution { /// Creates a contribution for when funds are only added to a channel. pub fn splice_in( - value: Amount, inputs: Vec, change_script: Option, + value_added: Amount, inputs: Vec, change_script: Option, ) -> Self { - let value_added = value.to_signed().unwrap_or(SignedAmount::MAX); - - Self { value: value_added, inputs, outputs: vec![], change_script } + Self { value_added, inputs, outputs: vec![], change_script } } /// Creates a contribution for when funds are only removed from a channel. 
pub fn splice_out(outputs: Vec<TxOut>) -> Self {
-		let value_removed = outputs
+		Self { value_added: Amount::ZERO, inputs: vec![], outputs, change_script: None }
+	}
+
+	/// Creates a contribution for when funds are both added to and removed from a channel.
+	///
+	/// Note that `value_added` represents the value added by `inputs` but should not account for
+	/// value removed by `outputs`. The net value contributed can be obtained by calling
+	/// [`SpliceContribution::net_value`].
+	pub fn splice_in_and_out(
+		value_added: Amount, inputs: Vec<FundingTxInput>, outputs: Vec<TxOut>,
+		change_script: Option<ScriptBuf>,
+	) -> Self {
+		Self { value_added, inputs, outputs, change_script }
+	}
+
+	/// The net value contributed to a channel by the splice. If negative, more value will be
+	/// spliced out than spliced in.
+	pub fn net_value(&self) -> SignedAmount {
+		let value_added = self.value_added.to_signed().unwrap_or(SignedAmount::MAX);
+		let value_removed = self
+			.outputs
 			.iter()
 			.map(|txout| txout.value)
 			.sum::<Amount>()
 			.to_signed()
 			.unwrap_or(SignedAmount::MAX);
 
-		Self { value: -value_removed, inputs: vec![], outputs, change_script: None }
+		value_added - value_removed
 	}
 
-	pub(super) fn value(&self) -> SignedAmount {
-		self.value
+	pub(super) fn value_added(&self) -> Amount {
+		self.value_added
 	}
 
 	pub(super) fn inputs(&self) -> &[FundingTxInput] {
@@ -74,7 +94,7 @@ impl SpliceContribution {
 	pub(super) fn into_tx_parts(self) -> (Vec<FundingTxInput>, Vec<TxOut>, Option<ScriptBuf>) {
-		let SpliceContribution { value: _, inputs, outputs, change_script } = self;
+		let SpliceContribution { value_added: _, inputs, outputs, change_script } = self;
 		(inputs, outputs, change_script)
 	}
 }
diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs
index 1ab9c6c68ee..7ed829886c6 100644
--- a/lightning/src/ln/interactivetxs.rs
+++ b/lightning/src/ln/interactivetxs.rs
@@ -2338,9 +2338,6 @@ pub(super) fn calculate_change_output_value(
 	context: &FundingNegotiationContext, is_splice: bool,
 	shared_output_funding_script: &ScriptBuf, change_output_dust_limit: u64,
) -> Result<Option<TxOut>, AbortReason> {
-	assert!(context.our_funding_contribution > SignedAmount::ZERO);
-	let our_funding_contribution = context.our_funding_contribution.to_unsigned().unwrap();
-
 	let mut total_input_value = Amount::ZERO;
 	let mut our_funding_inputs_weight = 0u64;
 	for FundingTxInput { utxo, .. 
} in context.our_funding_inputs.iter() { @@ -2354,6 +2351,7 @@ pub(super) fn calculate_change_output_value( let total_output_value = funding_outputs .iter() .fold(Amount::ZERO, |total, out| total.checked_add(out.value).unwrap_or(Amount::MAX)); + let our_funding_outputs_weight = funding_outputs.iter().fold(0u64, |weight, out| { weight.saturating_add(get_output_weight(&out.script_pubkey).to_wu()) }); @@ -2379,18 +2377,21 @@ pub(super) fn calculate_change_output_value( let contributed_fees = Amount::from_sat(fee_for_weight(context.funding_feerate_sat_per_1000_weight, weight)); - let net_total_less_fees = total_input_value - .checked_sub(total_output_value) - .unwrap_or(Amount::ZERO) - .checked_sub(contributed_fees) - .unwrap_or(Amount::ZERO); - if net_total_less_fees < our_funding_contribution { + + let contributed_input_value = + context.our_funding_contribution + total_output_value.to_signed().unwrap(); + assert!(contributed_input_value > SignedAmount::ZERO); + let contributed_input_value = contributed_input_value.unsigned_abs(); + + let total_input_value_less_fees = + total_input_value.checked_sub(contributed_fees).unwrap_or(Amount::ZERO); + if total_input_value_less_fees < contributed_input_value { // Not enough to cover contribution plus fees return Err(AbortReason::InsufficientFees); } - let remaining_value = net_total_less_fees - .checked_sub(our_funding_contribution) + let remaining_value = total_input_value_less_fees + .checked_sub(contributed_input_value) .expect("remaining_value should not be negative"); if remaining_value.to_sat() < change_output_dust_limit { // Enough to cover contribution plus fees, but leftover is below dust limit; no change diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index 58a81bb2a36..db6680d963c 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -877,6 +877,170 @@ fn test_splice_out() { let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); } +#[test] +fn test_splice_in_and_out() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut config = test_default_channel_config(); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let initial_channel_value_sat = 100_000; + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_value_sat, 0); + + let _ = send_payment(&nodes[0], &[&nodes[1]], 100_000); + + let coinbase_tx1 = provide_anchor_reserves(&nodes); + let coinbase_tx2 = provide_anchor_reserves(&nodes); + + // Contribute a net negative value, with fees taken from the contributed inputs and the + // remaining value sent to change + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + let added_value = Amount::from_sat(htlc_limit_msat / 1000); + let removed_value = added_value * 2; + let change_script = ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()); + let fees = if cfg!(feature = "grind_signatures") { + Amount::from_sat(383) + } else { + Amount::from_sat(384) + }; + + assert!(htlc_limit_msat > initial_channel_value_sat / 2 * 1000); + + let initiator_contribution = SpliceContribution::splice_in_and_out( + added_value, + vec![ + FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), + FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), + ], + 
vec![ + TxOut { + value: removed_value / 2, + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: removed_value / 2, + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ], + Some(change_script.clone()), + ); + + let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); + let expected_change = Amount::ONE_BTC * 2 - added_value - fees; + assert_eq!( + splice_tx.output.iter().find(|txout| txout.script_pubkey == change_script).unwrap().value, + expected_change, + ); + + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + assert!(htlc_limit_msat < added_value.to_sat() * 1000); + let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); + + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); + + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + assert!(htlc_limit_msat < added_value.to_sat() * 1000); + let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); + + let coinbase_tx1 = provide_anchor_reserves(&nodes); + let coinbase_tx2 = provide_anchor_reserves(&nodes); + + // Contribute a net positive value, with fees taken from the contributed inputs and the + // remaining value sent to change + let added_value = Amount::from_sat(initial_channel_value_sat * 2); + let removed_value = added_value / 2; + let change_script = ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()); + let fees = if cfg!(feature = "grind_signatures") { + Amount::from_sat(383) + } else { + Amount::from_sat(384) + }; + + let initiator_contribution = SpliceContribution::splice_in_and_out( + added_value, + vec![ + FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), + FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), + ], + vec![ + TxOut { + value: removed_value / 2, + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: removed_value / 2, + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ], + Some(change_script.clone()), + ); + + let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); + let expected_change = Amount::ONE_BTC * 2 - added_value - fees; + assert_eq!( + splice_tx.output.iter().find(|txout| txout.script_pubkey == change_script).unwrap().value, + expected_change, + ); + + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + assert_eq!(htlc_limit_msat, 0); + + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); + + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + assert!(htlc_limit_msat > initial_channel_value_sat / 2 * 1000); + let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); + + let coinbase_tx1 = provide_anchor_reserves(&nodes); + let coinbase_tx2 = provide_anchor_reserves(&nodes); + + // Fail adding a net contribution value of zero + let added_value = Amount::from_sat(initial_channel_value_sat * 2); + let removed_value = added_value; + let change_script = ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()); + + let initiator_contribution = SpliceContribution::splice_in_and_out( + added_value, + vec![ + FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), + FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), + ], + vec![ + TxOut { + value: 
removed_value / 2,
+				script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(),
+			},
+			TxOut {
+				value: removed_value / 2,
+				script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(),
+			},
+		],
+		Some(change_script),
+	);
+
+	assert_eq!(
+		nodes[0].node.splice_channel(
+			&channel_id,
+			&nodes[1].node.get_our_node_id(),
+			initiator_contribution,
+			FEERATE_FLOOR_SATS_PER_KW,
+			None,
+		),
+		Err(APIError::APIMisuseError {
+			err: format!("Channel {} cannot be spliced; contribution cannot be zero", channel_id),
+		}),
+	);
+}
+
 #[cfg(test)]
 #[derive(PartialEq)]
 enum SpliceStatus {

From 8bdd1faf007c57e672e3f95f3ec4a00477c1e5c8 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 21 Jan 2026 15:43:33 +0000
Subject: [PATCH 106/242] Add `total_consistency_lock` check in
 `handle_post_event_actions`

We expect callers of `handle_post_event_actions` to hold a read lock on
`total_consistency_lock`, and found that we forgot it in
`process_pending_events` until recently. Here we add a relevant
assertion to avoid such issues in the future.
---
 lightning/src/ln/channelmanager.rs | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index af395154760..dafeffe98bf 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -3438,6 +3438,10 @@ macro_rules! process_events_body {
 			}
 
 			if !post_event_actions.is_empty() {
+				// `handle_post_event_actions` may update channel state, so take the total
+				// consistency lock now similarly to other callers of `handle_post_event_actions`.
+				// Note that if it needs to wake the background processor for event handling or
+				// persistence it will do so directly.
 				let _read_guard = $self.total_consistency_lock.read().unwrap();
 				$self.handle_post_event_actions(post_event_actions);
 				// If we had some actions, go around again as we may have more events now
@@ -14315,6 +14319,10 @@ where
 	}
 
 	fn handle_post_event_actions<I: IntoIterator<Item = EventCompletionAction>>(&self, actions: I) {
+		debug_assert_ne!(
+			self.total_consistency_lock.held_by_thread(),
+			LockHeldState::NotHeldByThread
+		);
 		for action in actions.into_iter() {
 			match action {
 				EventCompletionAction::ReleaseRAAChannelMonitorUpdate {

From 24c61fa562085879b393d548553d5ad176045b61 Mon Sep 17 00:00:00 2001
From: Joost Jager
Date: Mon, 19 Jan 2026 10:26:04 +0100
Subject: [PATCH 107/242] fuzz: consolidate payment counters into single
 counter

Replace the separate `p_id: u8` (for payment hash generation) and
`p_idx: u64` (for payment IDs) with a single `p_ctr: u64` counter.
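For illustration, the consolidated scheme boils down to the following
standalone sketch (assuming only the `bitcoin` hashes API; the real logic
lives in the send helpers in the diff below):

    use bitcoin::hashes::{sha256::Hash as Sha256, Hash};

    // One counter drives both values: only its low byte feeds the payment
    // hash (so hashes repeat every 256 payments), while all eight counter
    // bytes keep each PaymentId unique.
    fn next_payment_ids(p_ctr: &mut u64) -> ([u8; 32], [u8; 32]) {
        *p_ctr += 1;
        let payment_hash = Sha256::hash(&[*p_ctr as u8]).to_byte_array();
        let mut payment_id = [0u8; 32];
        payment_id[0..8].copy_from_slice(&p_ctr.to_ne_bytes());
        (payment_hash, payment_id)
    }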
Co-Authored-By: Claude Opus 4.5 Co-Authored-By: Matt Corallo --- fuzz/src/chanmon_consistency.rs | 190 ++++++++++++++------------------ 1 file changed, 82 insertions(+), 108 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 03d170b1bc0..e5783b3bfba 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -539,43 +539,40 @@ type ChanMan<'a> = ChannelManager< #[inline] fn get_payment_secret_hash( - dest: &ChanMan, payment_id: &mut u8, + dest: &ChanMan, payment_ctr: &mut u64, ) -> Option<(PaymentSecret, PaymentHash)> { let mut payment_hash; for _ in 0..256 { - payment_hash = PaymentHash(Sha256::hash(&[*payment_id; 1]).to_byte_array()); + *payment_ctr += 1; + payment_hash = PaymentHash(Sha256::hash(&[*payment_ctr as u8]).to_byte_array()); if let Ok(payment_secret) = dest.create_inbound_payment_for_hash(payment_hash, None, 3600, None) { return Some((payment_secret, payment_hash)); } - *payment_id = payment_id.wrapping_add(1); } None } #[inline] fn send_noret( - source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, - payment_idx: &mut u64, + source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_ctr: &mut u64, ) { - send_payment(source, dest, dest_chan_id, amt, payment_id, payment_idx); + send_payment(source, dest, dest_chan_id, amt, payment_ctr); } #[inline] fn send_payment( - source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, - payment_idx: &mut u64, + source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_ctr: &mut u64, ) -> bool { let (payment_secret, payment_hash) = - if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { + if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_ctr) { (secret, hash) } else { return true; }; let mut payment_id = [0; 32]; - payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes()); - *payment_idx += 1; + payment_id[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); let (min_value_sendable, max_value_sendable) = source .list_usable_channels() .iter() @@ -620,34 +617,24 @@ fn send_payment( #[inline] fn send_hop_noret( source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, - amt: u64, payment_id: &mut u8, payment_idx: &mut u64, + amt: u64, payment_ctr: &mut u64, ) { - send_hop_payment( - source, - middle, - middle_chan_id, - dest, - dest_chan_id, - amt, - payment_id, - payment_idx, - ); + send_hop_payment(source, middle, middle_chan_id, dest, dest_chan_id, amt, payment_ctr); } #[inline] fn send_hop_payment( source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, - amt: u64, payment_id: &mut u8, payment_idx: &mut u64, + amt: u64, payment_ctr: &mut u64, ) -> bool { let (payment_secret, payment_hash) = - if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { + if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_ctr) { (secret, hash) } else { return true; }; let mut payment_id = [0; 32]; - payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes()); - *payment_idx += 1; + payment_id[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); let (min_value_sendable, max_value_sendable) = source .list_usable_channels() .iter() @@ -1138,8 +1125,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap(); let chan_b_id = nodes[2].list_usable_channels()[0].channel_id; - let mut p_id: 
u8 = 0; - let mut p_idx: u64 = 0; + let mut p_ctr: u64 = 0; let mut chan_a_disconnected = false; let mut chan_b_disconnected = false; @@ -1762,93 +1748,85 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0x27 => process_ev_noret!(2, false), // 1/10th the channel size: - 0x30 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_id, &mut p_idx), - 0x31 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx), - 0x32 => send_noret(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx), - 0x33 => send_noret(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx), + 0x30 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_ctr), + 0x31 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_ctr), + 0x32 => send_noret(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_ctr), + 0x33 => send_noret(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_ctr), 0x34 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx, + &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut p_ctr, ), 0x35 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx, + &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut p_ctr, ), - 0x38 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut p_id, &mut p_idx), - 0x39 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx), - 0x3a => send_noret(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx), - 0x3b => send_noret(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut p_id, &mut p_idx), + 0x38 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut p_ctr), + 0x39 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut p_ctr), + 0x3a => send_noret(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut p_ctr), + 0x3b => send_noret(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut p_ctr), 0x3c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx, + &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut p_ctr, ), 0x3d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx, + &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut p_ctr, ), - 0x40 => send_noret(&nodes[0], &nodes[1], chan_a, 100_000, &mut p_id, &mut p_idx), - 0x41 => send_noret(&nodes[1], &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx), - 0x42 => send_noret(&nodes[1], &nodes[2], chan_b, 100_000, &mut p_id, &mut p_idx), - 0x43 => send_noret(&nodes[2], &nodes[1], chan_b, 100_000, &mut p_id, &mut p_idx), - 0x44 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut p_id, &mut p_idx, - ), - 0x45 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx, - ), + 0x40 => send_noret(&nodes[0], &nodes[1], chan_a, 100_000, &mut p_ctr), + 0x41 => send_noret(&nodes[1], &nodes[0], chan_a, 100_000, &mut p_ctr), + 0x42 => send_noret(&nodes[1], &nodes[2], chan_b, 100_000, &mut p_ctr), + 0x43 => send_noret(&nodes[2], &nodes[1], chan_b, 100_000, &mut p_ctr), + 0x44 => { + send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut p_ctr) + }, + 0x45 => { + send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut p_ctr) + }, - 0x48 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000, &mut p_id, &mut p_idx), - 0x49 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000, &mut p_id, &mut 
p_idx), - 0x4a => send_noret(&nodes[1], &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx), - 0x4b => send_noret(&nodes[2], &nodes[1], chan_b, 10_000, &mut p_id, &mut p_idx), - 0x4c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx, - ), - 0x4d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut p_id, &mut p_idx, - ), + 0x48 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000, &mut p_ctr), + 0x49 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000, &mut p_ctr), + 0x4a => send_noret(&nodes[1], &nodes[2], chan_b, 10_000, &mut p_ctr), + 0x4b => send_noret(&nodes[2], &nodes[1], chan_b, 10_000, &mut p_ctr), + 0x4c => { + send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut p_ctr) + }, + 0x4d => { + send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut p_ctr) + }, - 0x50 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000, &mut p_id, &mut p_idx), - 0x51 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx), - 0x52 => send_noret(&nodes[1], &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx), - 0x53 => send_noret(&nodes[2], &nodes[1], chan_b, 1_000, &mut p_id, &mut p_idx), - 0x54 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx, - ), - 0x55 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx, - ), + 0x50 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000, &mut p_ctr), + 0x51 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000, &mut p_ctr), + 0x52 => send_noret(&nodes[1], &nodes[2], chan_b, 1_000, &mut p_ctr), + 0x53 => send_noret(&nodes[2], &nodes[1], chan_b, 1_000, &mut p_ctr), + 0x54 => { + send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut p_ctr) + }, + 0x55 => { + send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut p_ctr) + }, - 0x58 => send_noret(&nodes[0], &nodes[1], chan_a, 100, &mut p_id, &mut p_idx), - 0x59 => send_noret(&nodes[1], &nodes[0], chan_a, 100, &mut p_id, &mut p_idx), - 0x5a => send_noret(&nodes[1], &nodes[2], chan_b, 100, &mut p_id, &mut p_idx), - 0x5b => send_noret(&nodes[2], &nodes[1], chan_b, 100, &mut p_id, &mut p_idx), - 0x5c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut p_id, &mut p_idx, - ), - 0x5d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut p_id, &mut p_idx, - ), + 0x58 => send_noret(&nodes[0], &nodes[1], chan_a, 100, &mut p_ctr), + 0x59 => send_noret(&nodes[1], &nodes[0], chan_a, 100, &mut p_ctr), + 0x5a => send_noret(&nodes[1], &nodes[2], chan_b, 100, &mut p_ctr), + 0x5b => send_noret(&nodes[2], &nodes[1], chan_b, 100, &mut p_ctr), + 0x5c => { + send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut p_ctr) + }, + 0x5d => { + send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut p_ctr) + }, - 0x60 => send_noret(&nodes[0], &nodes[1], chan_a, 10, &mut p_id, &mut p_idx), - 0x61 => send_noret(&nodes[1], &nodes[0], chan_a, 10, &mut p_id, &mut p_idx), - 0x62 => send_noret(&nodes[1], &nodes[2], chan_b, 10, &mut p_id, &mut p_idx), - 0x63 => send_noret(&nodes[2], &nodes[1], chan_b, 10, &mut p_id, &mut p_idx), - 0x64 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut p_id, &mut p_idx, - ), - 0x65 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut p_id, &mut p_idx, - ), + 0x60 => send_noret(&nodes[0], &nodes[1], chan_a, 10, &mut p_ctr), + 
0x61 => send_noret(&nodes[1], &nodes[0], chan_a, 10, &mut p_ctr), + 0x62 => send_noret(&nodes[1], &nodes[2], chan_b, 10, &mut p_ctr), + 0x63 => send_noret(&nodes[2], &nodes[1], chan_b, 10, &mut p_ctr), + 0x64 => send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut p_ctr), + 0x65 => send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut p_ctr), - 0x68 => send_noret(&nodes[0], &nodes[1], chan_a, 1, &mut p_id, &mut p_idx), - 0x69 => send_noret(&nodes[1], &nodes[0], chan_a, 1, &mut p_id, &mut p_idx), - 0x6a => send_noret(&nodes[1], &nodes[2], chan_b, 1, &mut p_id, &mut p_idx), - 0x6b => send_noret(&nodes[2], &nodes[1], chan_b, 1, &mut p_id, &mut p_idx), - 0x6c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut p_id, &mut p_idx, - ), - 0x6d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut p_id, &mut p_idx, - ), + 0x68 => send_noret(&nodes[0], &nodes[1], chan_a, 1, &mut p_ctr), + 0x69 => send_noret(&nodes[1], &nodes[0], chan_a, 1, &mut p_ctr), + 0x6a => send_noret(&nodes[1], &nodes[2], chan_b, 1, &mut p_ctr), + 0x6b => send_noret(&nodes[2], &nodes[1], chan_b, 1, &mut p_ctr), + 0x6c => send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut p_ctr), + 0x6d => send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut p_ctr), 0x80 => { let mut max_feerate = last_htlc_clear_fee_a; @@ -2280,16 +2258,12 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { // Finally, make sure that at least one end of each channel can make a substantial payment assert!( - send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_id, &mut p_idx) - || send_payment( - &nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx - ) + send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_ctr) + || send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_ctr) ); assert!( - send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx) - || send_payment( - &nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx - ) + send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_ctr) + || send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_ctr) ); last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire); From 7d51892254d29b6ab0ad1c7dc8b802d95c4f6611 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 19 Jan 2026 10:26:42 +0100 Subject: [PATCH 108/242] fuzz: simplify get_payment_secret_hash return type Change `get_payment_secret_hash` to return `(PaymentSecret, PaymentHash)` directly instead of `Option<...>`. The function calls `create_inbound_payment_for_hash` with `min_value_msat=None` and `min_final_cltv_expiry=None`, which cannot fail. Remove the retry loop and use `.expect()` since the call will always succeed with these parameters. 
Co-Authored-By: Claude Opus 4.5 Co-Authored-By: Matt Corallo --- fuzz/src/chanmon_consistency.rs | 35 +++++++++------------------------ 1 file changed, 9 insertions(+), 26 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index e5783b3bfba..d3d38a99dea 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -538,20 +538,13 @@ type ChanMan<'a> = ChannelManager< >; #[inline] -fn get_payment_secret_hash( - dest: &ChanMan, payment_ctr: &mut u64, -) -> Option<(PaymentSecret, PaymentHash)> { - let mut payment_hash; - for _ in 0..256 { - *payment_ctr += 1; - payment_hash = PaymentHash(Sha256::hash(&[*payment_ctr as u8]).to_byte_array()); - if let Ok(payment_secret) = - dest.create_inbound_payment_for_hash(payment_hash, None, 3600, None) - { - return Some((payment_secret, payment_hash)); - } - } - None +fn get_payment_secret_hash(dest: &ChanMan, payment_ctr: &mut u64) -> (PaymentSecret, PaymentHash) { + *payment_ctr += 1; + let payment_hash = PaymentHash(Sha256::hash(&[*payment_ctr as u8]).to_byte_array()); + let payment_secret = dest + .create_inbound_payment_for_hash(payment_hash, None, 3600, None) + .expect("create_inbound_payment_for_hash failed"); + (payment_secret, payment_hash) } #[inline] @@ -565,12 +558,7 @@ fn send_noret( fn send_payment( source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_ctr: &mut u64, ) -> bool { - let (payment_secret, payment_hash) = - if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_ctr) { - (secret, hash) - } else { - return true; - }; + let (payment_secret, payment_hash) = get_payment_secret_hash(dest, payment_ctr); let mut payment_id = [0; 32]; payment_id[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); let (min_value_sendable, max_value_sendable) = source @@ -627,12 +615,7 @@ fn send_hop_payment( source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_ctr: &mut u64, ) -> bool { - let (payment_secret, payment_hash) = - if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_ctr) { - (secret, hash) - } else { - return true; - }; + let (payment_secret, payment_hash) = get_payment_secret_hash(dest, payment_ctr); let mut payment_id = [0; 32]; payment_id[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); let (min_value_sendable, max_value_sendable) = source From 151762c83ece0e3b1906dded00dae48b1db931bb Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 19 Jan 2026 10:27:10 +0100 Subject: [PATCH 109/242] fuzz: rename chan_id to scid in hop payment functions Rename `middle_chan_id` to `middle_scid` and `dest_chan_id` to `dest_scid` in `send_hop_noret` and `send_hop_payment`. These parameters are short channel IDs (SCIDs), not channel IDs, so the rename improves clarity. 
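To make the distinction concrete, the two identifiers are unrelated values
(a hypothetical decoder following the BOLT 7 encoding, not code from this
patch): an SCID packs the funding transaction's on-chain coordinates into a
u64, while a `channel_id` is a 32-byte value that exists before the funding
transaction even confirms.

    // BOLT 7 short_channel_id layout: 3 bytes block height, 3 bytes
    // transaction index within that block, 2 bytes funding output index.
    fn scid_parts(scid: u64) -> (u32, u32, u16) {
        let block_height = (scid >> 40) as u32;
        let tx_index = ((scid >> 16) & 0xff_ffff) as u32;
        let output_index = (scid & 0xffff) as u16;
        (block_height, tx_index, output_index)
    }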
Co-Authored-By: Claude Opus 4.5 Co-Authored-By: Matt Corallo --- fuzz/src/chanmon_consistency.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index d3d38a99dea..de83f8065ea 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -604,15 +604,15 @@ fn send_payment( #[inline] fn send_hop_noret( - source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, + source: &ChanMan, middle: &ChanMan, middle_scid: u64, dest: &ChanMan, dest_scid: u64, amt: u64, payment_ctr: &mut u64, ) { - send_hop_payment(source, middle, middle_chan_id, dest, dest_chan_id, amt, payment_ctr); + send_hop_payment(source, middle, middle_scid, dest, dest_scid, amt, payment_ctr); } #[inline] fn send_hop_payment( - source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, + source: &ChanMan, middle: &ChanMan, middle_scid: u64, dest: &ChanMan, dest_scid: u64, amt: u64, payment_ctr: &mut u64, ) -> bool { let (payment_secret, payment_hash) = get_payment_secret_hash(dest, payment_ctr); @@ -621,7 +621,7 @@ fn send_hop_payment( let (min_value_sendable, max_value_sendable) = source .list_usable_channels() .iter() - .find(|chan| chan.short_channel_id == Some(middle_chan_id)) + .find(|chan| chan.short_channel_id == Some(middle_scid)) .map(|chan| (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat)) .unwrap_or((0, 0)); let first_hop_fee = 50_000; @@ -635,7 +635,7 @@ fn send_hop_payment( RouteHop { pubkey: middle.get_our_node_id(), node_features: middle.node_features(), - short_channel_id: middle_chan_id, + short_channel_id: middle_scid, channel_features: middle.channel_features(), fee_msat: first_hop_fee, cltv_expiry_delta: 100, @@ -644,7 +644,7 @@ fn send_hop_payment( RouteHop { pubkey: dest.get_our_node_id(), node_features: dest.node_features(), - short_channel_id: dest_chan_id, + short_channel_id: dest_scid, channel_features: dest.channel_features(), fee_msat: amt, cltv_expiry_delta: 200, From ecf502a768466e2df5ecfde30bbca48819925f4c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 19 Jan 2026 12:44:21 +0100 Subject: [PATCH 110/242] fuzz: add transaction broadcast assertions Add transaction tracking to TestBroadcaster and verify no unexpected broadcasts occur during normal fuzzing operation: - Store all broadcast transactions in TestBroadcaster - Clear the broadcast set after initial channel opens - Assert in test_return! 
that no transactions were broadcast

Co-Authored-By: Claude Opus 4.5
---
 fuzz/src/chanmon_consistency.rs | 31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index de83f8065ea..7cc7986c1cd 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -155,9 +155,15 @@ impl MessageRouter for FuzzRouter {
 	}
 }
 
-pub struct TestBroadcaster {}
+pub struct TestBroadcaster {
+	txn_broadcasted: RefCell<Vec<Transaction>>,
+}
 impl BroadcasterInterface for TestBroadcaster {
-	fn broadcast_transactions(&self, _txs: &[&Transaction]) {}
+	fn broadcast_transactions(&self, txs: &[&Transaction]) {
+		for tx in txs {
+			self.txn_broadcasted.borrow_mut().push((*tx).clone());
+		}
+	}
 }
 
 pub struct VecWriter(pub Vec<u8>);
@@ -334,7 +340,7 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
 				deserialized_monitor
 					.update_monitor(
 						update,
-						&&TestBroadcaster {},
+						&&TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) },
 						&&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) },
 						&self.logger,
 					)
@@ -604,16 +610,16 @@ fn send_payment(
 
 #[inline]
 fn send_hop_noret(
-	source: &ChanMan, middle: &ChanMan, middle_scid: u64, dest: &ChanMan, dest_scid: u64,
-	amt: u64, payment_ctr: &mut u64,
+	source: &ChanMan, middle: &ChanMan, middle_scid: u64, dest: &ChanMan, dest_scid: u64, amt: u64,
+	payment_ctr: &mut u64,
 ) {
 	send_hop_payment(source, middle, middle_scid, dest, dest_scid, amt, payment_ctr);
 }
 
 #[inline]
 fn send_hop_payment(
-	source: &ChanMan, middle: &ChanMan, middle_scid: u64, dest: &ChanMan, dest_scid: u64,
-	amt: u64, payment_ctr: &mut u64,
+	source: &ChanMan, middle: &ChanMan, middle_scid: u64, dest: &ChanMan, dest_scid: u64, amt: u64,
+	payment_ctr: &mut u64,
 ) -> bool {
 	let (payment_secret, payment_hash) = get_payment_secret_hash(dest, payment_ctr);
 	let mut payment_id = [0; 32];
@@ -675,7 +681,7 @@ fn send_hop_payment(
 #[inline]
 pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 	let out = SearchingOutput::new(underlying_out);
-	let broadcast = Arc::new(TestBroadcaster {});
+	let broadcast = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) });
 	let router = FuzzRouter {};
 
 	// Read initial monitor styles from fuzz input (1 byte: 2 bits per node)
@@ -1097,6 +1103,10 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 	let chan_1_id = make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 0);
 	let chan_2_id = make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 1);
 
+	// Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions
+	// during normal operation in `test_return`.
+	broadcast.txn_broadcasted.borrow_mut().clear();
+
 	for node in nodes.iter() {
 		confirm_txn!(node);
 	}
@@ -1126,6 +1136,11 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 			assert_eq!(nodes[0].list_channels().len(), 1);
 			assert_eq!(nodes[1].list_channels().len(), 2);
 			assert_eq!(nodes[2].list_channels().len(), 1);
+
+			// At no point should we have broadcasted any transactions after the initial channel
+			// opens.
+			assert!(broadcast.txn_broadcasted.borrow().is_empty());
+
 			return;
 		}};
 	}

From 0a48663f10c6f59fa9b3b29cb06db1b14c3ab494 Mon Sep 17 00:00:00 2001
From: Joost Jager
Date: Mon, 19 Jan 2026 10:29:08 +0100
Subject: [PATCH 111/242] fuzz: convert send helpers to closures with node
 indices

Replace the standalone `send_noret` and `send_hop_noret` functions with
closures defined inside the main loop. 
This allows them to capture the `nodes` array and use simple node indices (0, 1, 2) instead of passing `&nodes[x]` references at each call site. The `send_payment` and `send_hop_payment` functions are modified to take pre-computed `PaymentSecret`, `PaymentHash`, and `PaymentId` parameters, with the closures handling the credential generation. This centralizes where `PaymentId` is constructed, which will make it easier to add payment tracking in a future change. Co-Authored-By: Claude Opus 4.5 Co-Authored-By: Matt Corallo --- fuzz/src/chanmon_consistency.rs | 199 +++++++++++++++----------------- 1 file changed, 91 insertions(+), 108 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 7cc7986c1cd..1bcb6697a86 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -553,20 +553,11 @@ fn get_payment_secret_hash(dest: &ChanMan, payment_ctr: &mut u64) -> (PaymentSec (payment_secret, payment_hash) } -#[inline] -fn send_noret( - source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_ctr: &mut u64, -) { - send_payment(source, dest, dest_chan_id, amt, payment_ctr); -} - #[inline] fn send_payment( - source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_ctr: &mut u64, + source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_secret: PaymentSecret, + payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - let (payment_secret, payment_hash) = get_payment_secret_hash(dest, payment_ctr); - let mut payment_id = [0; 32]; - payment_id[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); let (min_value_sendable, max_value_sendable) = source .list_usable_channels() .iter() @@ -593,7 +584,6 @@ fn send_payment( route_params: Some(route_params.clone()), }; let onion = RecipientOnionFields::secret_only(payment_secret); - let payment_id = PaymentId(payment_id); let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); match res { Err(err) => { @@ -608,22 +598,11 @@ fn send_payment( } } -#[inline] -fn send_hop_noret( - source: &ChanMan, middle: &ChanMan, middle_scid: u64, dest: &ChanMan, dest_scid: u64, amt: u64, - payment_ctr: &mut u64, -) { - send_hop_payment(source, middle, middle_scid, dest, dest_scid, amt, payment_ctr); -} - #[inline] fn send_hop_payment( source: &ChanMan, middle: &ChanMan, middle_scid: u64, dest: &ChanMan, dest_scid: u64, amt: u64, - payment_ctr: &mut u64, + payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - let (payment_secret, payment_hash) = get_payment_secret_hash(dest, payment_ctr); - let mut payment_id = [0; 32]; - payment_id[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); let (min_value_sendable, max_value_sendable) = source .list_usable_channels() .iter() @@ -662,7 +641,6 @@ fn send_hop_payment( route_params: Some(route_params.clone()), }; let onion = RecipientOnionFields::secret_only(payment_secret); - let payment_id = PaymentId(payment_id); let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); match res { Err(err) => { @@ -1634,6 +1612,35 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }; + let send = + |source_idx: usize, dest_idx: usize, dest_chan_id, amt, payment_ctr: &mut u64| { + let source = &nodes[source_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); + send_payment(source, dest, 
dest_chan_id, amt, secret, hash, id) + }; + let send_noret = |source_idx, dest_idx, dest_chan_id, amt, payment_ctr: &mut u64| { + send(source_idx, dest_idx, dest_chan_id, amt, payment_ctr); + }; + + let send_hop_noret = |source_idx: usize, + middle_idx: usize, + middle_scid: u64, + dest_idx: usize, + dest_scid: u64, + amt: u64, + payment_ctr: &mut u64| { + let source = &nodes[source_idx]; + let middle = &nodes[middle_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); + send_hop_payment(source, middle, middle_scid, dest, dest_scid, amt, secret, hash, id); + }; + let v = get_slice!(1)[0]; out.locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes()); match v { @@ -1746,85 +1753,61 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0x27 => process_ev_noret!(2, false), // 1/10th the channel size: - 0x30 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_ctr), - 0x31 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_ctr), - 0x32 => send_noret(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_ctr), - 0x33 => send_noret(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_ctr), - 0x34 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut p_ctr, - ), - 0x35 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut p_ctr, - ), - - 0x38 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut p_ctr), - 0x39 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut p_ctr), - 0x3a => send_noret(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut p_ctr), - 0x3b => send_noret(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut p_ctr), - 0x3c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut p_ctr, - ), - 0x3d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut p_ctr, - ), - - 0x40 => send_noret(&nodes[0], &nodes[1], chan_a, 100_000, &mut p_ctr), - 0x41 => send_noret(&nodes[1], &nodes[0], chan_a, 100_000, &mut p_ctr), - 0x42 => send_noret(&nodes[1], &nodes[2], chan_b, 100_000, &mut p_ctr), - 0x43 => send_noret(&nodes[2], &nodes[1], chan_b, 100_000, &mut p_ctr), - 0x44 => { - send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut p_ctr) - }, - 0x45 => { - send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut p_ctr) - }, - - 0x48 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000, &mut p_ctr), - 0x49 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000, &mut p_ctr), - 0x4a => send_noret(&nodes[1], &nodes[2], chan_b, 10_000, &mut p_ctr), - 0x4b => send_noret(&nodes[2], &nodes[1], chan_b, 10_000, &mut p_ctr), - 0x4c => { - send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut p_ctr) - }, - 0x4d => { - send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut p_ctr) - }, - - 0x50 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000, &mut p_ctr), - 0x51 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000, &mut p_ctr), - 0x52 => send_noret(&nodes[1], &nodes[2], chan_b, 1_000, &mut p_ctr), - 0x53 => send_noret(&nodes[2], &nodes[1], chan_b, 1_000, &mut p_ctr), - 0x54 => { - send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut p_ctr) - }, - 0x55 => { - send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut p_ctr) - }, - - 0x58 => send_noret(&nodes[0], 
&nodes[1], chan_a, 100, &mut p_ctr), - 0x59 => send_noret(&nodes[1], &nodes[0], chan_a, 100, &mut p_ctr), - 0x5a => send_noret(&nodes[1], &nodes[2], chan_b, 100, &mut p_ctr), - 0x5b => send_noret(&nodes[2], &nodes[1], chan_b, 100, &mut p_ctr), - 0x5c => { - send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut p_ctr) - }, - 0x5d => { - send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut p_ctr) - }, - - 0x60 => send_noret(&nodes[0], &nodes[1], chan_a, 10, &mut p_ctr), - 0x61 => send_noret(&nodes[1], &nodes[0], chan_a, 10, &mut p_ctr), - 0x62 => send_noret(&nodes[1], &nodes[2], chan_b, 10, &mut p_ctr), - 0x63 => send_noret(&nodes[2], &nodes[1], chan_b, 10, &mut p_ctr), - 0x64 => send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut p_ctr), - 0x65 => send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut p_ctr), - - 0x68 => send_noret(&nodes[0], &nodes[1], chan_a, 1, &mut p_ctr), - 0x69 => send_noret(&nodes[1], &nodes[0], chan_a, 1, &mut p_ctr), - 0x6a => send_noret(&nodes[1], &nodes[2], chan_b, 1, &mut p_ctr), - 0x6b => send_noret(&nodes[2], &nodes[1], chan_b, 1, &mut p_ctr), - 0x6c => send_hop_noret(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut p_ctr), - 0x6d => send_hop_noret(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut p_ctr), + 0x30 => send_noret(0, 1, chan_a, 10_000_000, &mut p_ctr), + 0x31 => send_noret(1, 0, chan_a, 10_000_000, &mut p_ctr), + 0x32 => send_noret(1, 2, chan_b, 10_000_000, &mut p_ctr), + 0x33 => send_noret(2, 1, chan_b, 10_000_000, &mut p_ctr), + 0x34 => send_hop_noret(0, 1, chan_a, 2, chan_b, 10_000_000, &mut p_ctr), + 0x35 => send_hop_noret(2, 1, chan_b, 0, chan_a, 10_000_000, &mut p_ctr), + + 0x38 => send_noret(0, 1, chan_a, 1_000_000, &mut p_ctr), + 0x39 => send_noret(1, 0, chan_a, 1_000_000, &mut p_ctr), + 0x3a => send_noret(1, 2, chan_b, 1_000_000, &mut p_ctr), + 0x3b => send_noret(2, 1, chan_b, 1_000_000, &mut p_ctr), + 0x3c => send_hop_noret(0, 1, chan_a, 2, chan_b, 1_000_000, &mut p_ctr), + 0x3d => send_hop_noret(2, 1, chan_b, 0, chan_a, 1_000_000, &mut p_ctr), + + 0x40 => send_noret(0, 1, chan_a, 100_000, &mut p_ctr), + 0x41 => send_noret(1, 0, chan_a, 100_000, &mut p_ctr), + 0x42 => send_noret(1, 2, chan_b, 100_000, &mut p_ctr), + 0x43 => send_noret(2, 1, chan_b, 100_000, &mut p_ctr), + 0x44 => send_hop_noret(0, 1, chan_a, 2, chan_b, 100_000, &mut p_ctr), + 0x45 => send_hop_noret(2, 1, chan_b, 0, chan_a, 100_000, &mut p_ctr), + + 0x48 => send_noret(0, 1, chan_a, 10_000, &mut p_ctr), + 0x49 => send_noret(1, 0, chan_a, 10_000, &mut p_ctr), + 0x4a => send_noret(1, 2, chan_b, 10_000, &mut p_ctr), + 0x4b => send_noret(2, 1, chan_b, 10_000, &mut p_ctr), + 0x4c => send_hop_noret(0, 1, chan_a, 2, chan_b, 10_000, &mut p_ctr), + 0x4d => send_hop_noret(2, 1, chan_b, 0, chan_a, 10_000, &mut p_ctr), + + 0x50 => send_noret(0, 1, chan_a, 1_000, &mut p_ctr), + 0x51 => send_noret(1, 0, chan_a, 1_000, &mut p_ctr), + 0x52 => send_noret(1, 2, chan_b, 1_000, &mut p_ctr), + 0x53 => send_noret(2, 1, chan_b, 1_000, &mut p_ctr), + 0x54 => send_hop_noret(0, 1, chan_a, 2, chan_b, 1_000, &mut p_ctr), + 0x55 => send_hop_noret(2, 1, chan_b, 0, chan_a, 1_000, &mut p_ctr), + + 0x58 => send_noret(0, 1, chan_a, 100, &mut p_ctr), + 0x59 => send_noret(1, 0, chan_a, 100, &mut p_ctr), + 0x5a => send_noret(1, 2, chan_b, 100, &mut p_ctr), + 0x5b => send_noret(2, 1, chan_b, 100, &mut p_ctr), + 0x5c => send_hop_noret(0, 1, chan_a, 2, chan_b, 100, &mut p_ctr), + 0x5d => send_hop_noret(2, 1, chan_b, 0, 
chan_a, 100, &mut p_ctr), + + 0x60 => send_noret(0, 1, chan_a, 10, &mut p_ctr), + 0x61 => send_noret(1, 0, chan_a, 10, &mut p_ctr), + 0x62 => send_noret(1, 2, chan_b, 10, &mut p_ctr), + 0x63 => send_noret(2, 1, chan_b, 10, &mut p_ctr), + 0x64 => send_hop_noret(0, 1, chan_a, 2, chan_b, 10, &mut p_ctr), + 0x65 => send_hop_noret(2, 1, chan_b, 0, chan_a, 10, &mut p_ctr), + + 0x68 => send_noret(0, 1, chan_a, 1, &mut p_ctr), + 0x69 => send_noret(1, 0, chan_a, 1, &mut p_ctr), + 0x6a => send_noret(1, 2, chan_b, 1, &mut p_ctr), + 0x6b => send_noret(2, 1, chan_b, 1, &mut p_ctr), + 0x6c => send_hop_noret(0, 1, chan_a, 2, chan_b, 1, &mut p_ctr), + 0x6d => send_hop_noret(2, 1, chan_b, 0, chan_a, 1, &mut p_ctr), 0x80 => { let mut max_feerate = last_htlc_clear_fee_a; @@ -2256,12 +2239,12 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { // Finally, make sure that at least one end of each channel can make a substantial payment assert!( - send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_ctr) - || send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_ctr) + send(0, 1, chan_a, 10_000_000, &mut p_ctr) + || send(1, 0, chan_a, 10_000_000, &mut p_ctr) ); assert!( - send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_ctr) - || send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_ctr) + send(1, 2, chan_b, 10_000_000, &mut p_ctr) + || send(2, 1, chan_b, 10_000_000, &mut p_ctr) ); last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire); From 246083e036dae7d7fbc492758d0a42af1f3cab69 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 19 Jan 2026 10:58:52 +0100 Subject: [PATCH 112/242] fuzz: track pending and resolved payments Track payment lifecycle by maintaining pending_payments and resolved_payments arrays per node: - When sending a payment, add its PaymentId to pending_payments - On PaymentSent/PaymentFailed events, move the PaymentId from pending to resolved (or assert it was already resolved for duplicate events) This tracking enables verifying payment state consistency after node restarts. Co-Authored-By: Claude Opus 4.5 Co-Authored-By: Matt Corallo --- fuzz/src/chanmon_consistency.rs | 51 ++++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 4 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 1bcb6697a86..b5f70a350b2 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1109,6 +1109,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut node_b_ser = nodes[1].encode(); let mut node_c_ser = nodes[2].encode(); + let pending_payments = RefCell::new([Vec::new(), Vec::new(), Vec::new()]); + let resolved_payments = RefCell::new([Vec::new(), Vec::new(), Vec::new()]); + macro_rules! test_return { () => {{ assert_eq!(nodes[0].list_channels().len(), 1); @@ -1508,6 +1511,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut claim_set = new_hash_map(); let mut events = nodes[$node].get_and_clear_pending_events(); let had_events = !events.is_empty(); + let mut pending_payments = pending_payments.borrow_mut(); + let mut resolved_payments = resolved_payments.borrow_mut(); for event in events.drain(..) { match event { events::Event::PaymentClaimable { payment_hash, .. } => { @@ -1519,11 +1524,32 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } } }, - events::Event::PaymentSent { .. } => {}, + events::Event::PaymentSent { payment_id, .. 
} => { + let sent_id = payment_id.unwrap(); + let idx_opt = + pending_payments[$node].iter().position(|id| *id == sent_id); + if let Some(idx) = idx_opt { + pending_payments[$node].remove(idx); + resolved_payments[$node].push(sent_id); + } else { + assert!(resolved_payments[$node].contains(&sent_id)); + } + }, + events::Event::PaymentFailed { payment_id, .. } => { + let idx_opt = + pending_payments[$node].iter().position(|id| *id == payment_id); + if let Some(idx) = idx_opt { + pending_payments[$node].remove(idx); + resolved_payments[$node].push(payment_id); + } else if !resolved_payments[$node].contains(&payment_id) { + // Payment failed immediately on send, so it was never added to + // pending_payments. Add it to resolved_payments to track it. + resolved_payments[$node].push(payment_id); + } + }, events::Event::PaymentClaimed { .. } => {}, events::Event::PaymentPathSuccessful { .. } => {}, events::Event::PaymentPathFailed { .. } => {}, - events::Event::PaymentFailed { .. } => {}, events::Event::ProbeSuccessful { .. } | events::Event::ProbeFailed { .. } => { // Even though we don't explicitly send probes, because probes are @@ -1619,7 +1645,11 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); let mut id = PaymentId([0; 32]); id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - send_payment(source, dest, dest_chan_id, amt, secret, hash, id) + let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); + if succeeded { + pending_payments.borrow_mut()[source_idx].push(id); + } + succeeded }; let send_noret = |source_idx, dest_idx, dest_chan_id, amt, payment_ctr: &mut u64| { send(source_idx, dest_idx, dest_chan_id, amt, payment_ctr); }; @@ -1638,7 +1668,20 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); let mut id = PaymentId([0; 32]); id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); - send_hop_payment(source, middle, middle_scid, dest, dest_scid, amt, secret, hash, id); + let succeeded = send_hop_payment( + source, + middle, + middle_scid, + dest, + dest_scid, + amt, + secret, + hash, + id, + ); + if succeeded { + pending_payments.borrow_mut()[source_idx].push(id); + } }; let v = get_slice!(1)[0]; From 5da74997b225f040ecb7e5db1d68f22feb907f9b Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 23 Jan 2026 09:32:45 +0100 Subject: [PATCH 113/242] Drop unnecessary Rust install in SemVer CI In the last few days there was an incompatibility of `cargo-semver-checks` with the new stable Rust 1.93.0. While this should be fixed by today's release of `cargo-semver-checks`, we take the opportunity to drop an unnecessary install step from the CI workflow, as the action will bring its own Rust version if not configured otherwise.
--- .github/workflows/semver.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml index 03017e19320..03a8e46e8a7 100644 --- a/.github/workflows/semver.yml +++ b/.github/workflows/semver.yml @@ -13,10 +13,6 @@ jobs: steps: - name: Checkout source code uses: actions/checkout@v4 - - name: Install Rust stable toolchain - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable - rustup override set stable - name: Check SemVer with default features uses: obi1kenobi/cargo-semver-checks-action@v2 with: From 194678d4b738ebfef047a05748cad9190f0cdf84 Mon Sep 17 00:00:00 2001 From: Thrishalmadasu Date: Fri, 23 Jan 2026 14:09:14 +0530 Subject: [PATCH 114/242] Set dont_forward on private channel updates and add tests --- lightning/src/ln/channelmanager.rs | 5 +- lightning/src/ln/priv_short_conf_tests.rs | 109 ++++++++++++++++++++++ 2 files changed, 113 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index dafeffe98bf..56e97324e3b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5132,7 +5132,7 @@ where chain_hash: self.chain_hash, short_channel_id, timestamp: chan.context.get_update_time_counter(), - message_flags: 1, // Only must_be_one + message_flags: 1 | if !chan.context.should_announce() { 1 << 1 } else { 0 }, // must_be_one + dont_forward channel_flags: (!were_node_one) as u8 | ((!enabled as u8) << 1), cltv_expiry_delta: chan.context.get_cltv_expiry_delta(), htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(), @@ -12290,6 +12290,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), None => { // It's not a local channel + if msg.contents.message_flags & (1 << 1) != 0 { + log_warn!(self.logger, "Received channel_update for unknown channel {} with dont_forward set.\n\tYou may wish to check if an incorrect tx_index was passed to chain::Confirm::transactions_confirmed.", msg.contents.short_channel_id); + } return Ok(NotifyOption::SkipPersistNoEvents) } }; diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 83aaca24203..e26776ee25c 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -1520,3 +1520,112 @@ fn test_0conf_ann_sigs_racing_conf() { let as_announcement = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_announcement.len(), 1); } + +#[test] +fn test_channel_update_dont_forward_flag() { + // Test that the `dont_forward` bit (bit 1 of message_flags) is set correctly: + // - For private channels: message_flags should have bit 1 set (value 3 = must_be_one + dont_forward) + // - For public channels: message_flags should NOT have bit 1 set (value 1 = must_be_one only) + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + // Create a public (announced) channel between nodes[0] and nodes[1] + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); + + // Create a private (unannounced) channel between nodes[1] and nodes[2] + 
create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000); + + // Get the channel details for both channels + let public_channel = nodes[0].node.list_channels().into_iter() + .find(|c| c.counterparty.node_id == node_b_id).unwrap(); + let private_channel = nodes[1].node.list_channels().into_iter() + .find(|c| c.counterparty.node_id == node_c_id).unwrap(); + + // Verify is_announced correctly reflects the channel type + assert!(public_channel.is_announced, "Public channel should have is_announced = true"); + assert!(!private_channel.is_announced, "Private channel should have is_announced = false"); + + // Trigger channel_update by changing config on the public channel + let mut new_config = public_channel.config.unwrap(); + new_config.forwarding_fee_base_msat += 10; + nodes[0].node.update_channel_config(&node_b_id, &[public_channel.channel_id], &new_config).unwrap(); + + // Get the channel_update for the public channel and verify dont_forward is NOT set + let events = nodes[0].node.get_and_clear_pending_msg_events(); + let public_channel_update = events.iter().find_map(|e| { + if let MessageSendEvent::BroadcastChannelUpdate { ref msg, .. } = e { + Some(msg.clone()) + } else { + None + } + }).expect("Expected BroadcastChannelUpdate for public channel"); + // message_flags should be 1 (only must_be_one bit set, dont_forward NOT set) + assert_eq!( + public_channel_update.contents.message_flags & (1 << 1), 0, + "Public channel update should NOT have dont_forward bit set" + ); + assert_eq!( + public_channel_update.contents.message_flags & 1, 1, + "Public channel update should have must_be_one bit set" + ); + + // Trigger channel_update by changing config on the private channel + let mut new_config = private_channel.config.unwrap(); + new_config.forwarding_fee_base_msat += 10; + nodes[1].node.update_channel_config(&node_c_id, &[private_channel.channel_id], &new_config).unwrap(); + + // Get the channel_update for the private channel and verify dont_forward IS set + let private_channel_update = + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_c_id); + // message_flags should have dont_forward bit set + assert_ne!( + private_channel_update.contents.message_flags & (1 << 1), 0, + "Private channel update should have dont_forward bit set" + ); + assert_eq!( + private_channel_update.contents.message_flags & 1, 1, + "Private channel update should have must_be_one bit set" + ); +} + +#[test] +fn test_unknown_channel_update_with_dont_forward_logs_warning() { + use bitcoin::constants::ChainHash; + use bitcoin::secp256k1::ecdsa::Signature; + use bitcoin::secp256k1::ffi::Signature as FFISignature; + use bitcoin::Network; + + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let unknown_scid = 42; + let msg = msgs::ChannelUpdate { + signature: Signature::from(unsafe { FFISignature::new() }), + contents: msgs::UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: unknown_scid, + timestamp: 0, + message_flags: 1 | (1 << 1), // must_be_one + dont_forward + channel_flags: 0, + cltv_expiry_delta: 0, + htlc_minimum_msat: 0, + htlc_maximum_msat: msgs::MAX_VALUE_MSAT, + fee_base_msat: 0, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }, + }; + + nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &msg); 
+ nodes[0].logger.assert_log_contains( + "lightning::ln::channelmanager", + "Received channel_update for unknown channel", + 1, + ); +} From b0883ba5254161f64ad7b3f336dd396e616d7a4b Mon Sep 17 00:00:00 2001 From: Thrishalmadasu Date: Fri, 23 Jan 2026 14:38:07 +0530 Subject: [PATCH 115/242] rustfmt: format priv_short_conf_tests --- lightning/src/ln/priv_short_conf_tests.rs | 55 ++++++++++++++++------- 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index e26776ee25c..ed7f7577bb7 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -1540,10 +1540,18 @@ fn test_channel_update_dont_forward_flag() { create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000); // Get the channel details for both channels - let public_channel = nodes[0].node.list_channels().into_iter() - .find(|c| c.counterparty.node_id == node_b_id).unwrap(); - let private_channel = nodes[1].node.list_channels().into_iter() - .find(|c| c.counterparty.node_id == node_c_id).unwrap(); + let public_channel = nodes[0] + .node + .list_channels() + .into_iter() + .find(|c| c.counterparty.node_id == node_b_id) + .unwrap(); + let private_channel = nodes[1] + .node + .list_channels() + .into_iter() + .find(|c| c.counterparty.node_id == node_c_id) + .unwrap(); // Verify is_announced correctly reflects the channel type assert!(public_channel.is_announced, "Public channel should have is_announced = true"); @@ -1552,42 +1560,55 @@ fn test_channel_update_dont_forward_flag() { // Trigger channel_update by changing config on the public channel let mut new_config = public_channel.config.unwrap(); new_config.forwarding_fee_base_msat += 10; - nodes[0].node.update_channel_config(&node_b_id, &[public_channel.channel_id], &new_config).unwrap(); + nodes[0] + .node + .update_channel_config(&node_b_id, &[public_channel.channel_id], &new_config) + .unwrap(); // Get the channel_update for the public channel and verify dont_forward is NOT set let events = nodes[0].node.get_and_clear_pending_msg_events(); - let public_channel_update = events.iter().find_map(|e| { - if let MessageSendEvent::BroadcastChannelUpdate { ref msg, .. } = e { - Some(msg.clone()) - } else { - None - } - }).expect("Expected BroadcastChannelUpdate for public channel"); + let public_channel_update = events + .iter() + .find_map(|e| { + if let MessageSendEvent::BroadcastChannelUpdate { ref msg, .. 
} = e { + Some(msg.clone()) + } else { + None + } + }) + .expect("Expected BroadcastChannelUpdate for public channel"); // message_flags should be 1 (only must_be_one bit set, dont_forward NOT set) assert_eq!( - public_channel_update.contents.message_flags & (1 << 1), 0, + public_channel_update.contents.message_flags & (1 << 1), + 0, "Public channel update should NOT have dont_forward bit set" ); assert_eq!( - public_channel_update.contents.message_flags & 1, 1, + public_channel_update.contents.message_flags & 1, + 1, "Public channel update should have must_be_one bit set" ); // Trigger channel_update by changing config on the private channel let mut new_config = private_channel.config.unwrap(); new_config.forwarding_fee_base_msat += 10; - nodes[1].node.update_channel_config(&node_c_id, &[private_channel.channel_id], &new_config).unwrap(); + nodes[1] + .node + .update_channel_config(&node_c_id, &[private_channel.channel_id], &new_config) + .unwrap(); // Get the channel_update for the private channel and verify dont_forward IS set let private_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_c_id); // message_flags should have dont_forward bit set assert_ne!( - private_channel_update.contents.message_flags & (1 << 1), 0, + private_channel_update.contents.message_flags & (1 << 1), + 0, "Private channel update should have dont_forward bit set" ); assert_eq!( - private_channel_update.contents.message_flags & 1, 1, + private_channel_update.contents.message_flags & 1, + 1, "Private channel update should have must_be_one bit set" ); } From cc1eb1686ce9ec6dd3d2043a286010bb1a759e63 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 23 Jan 2026 14:44:04 +0100 Subject: [PATCH 116/242] `ElectrumSyncClient`: Skip unconfirmed `get_history` entries Electrum's `blockchain.scripthash.get_history` will return the *confirmed* history for any scripthash, but will then also append any matching entries from the mempool, with respective `height` fields set to 0 or -1 (depending on whether all inputs are confirmed or not). Unfortunately we previously only included a filter for confirmed `get_history` entries in the watched output case, and forgot to add such a check when checking for watched transactions as well. This would have us treat the entry as confirmed and then fail on the `get_merkle` step, which of course couldn't prove block inclusion. Here we simply fix this omission and skip entries that are still unconfirmed (e.g., unconfirmed funding transactions from 0conf channels). Signed-off-by: Elias Rohrer --- lightning-transaction-sync/src/electrum.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/lightning-transaction-sync/src/electrum.rs b/lightning-transaction-sync/src/electrum.rs index 47489df69bb..1162b9c00c9 100644 --- a/lightning-transaction-sync/src/electrum.rs +++ b/lightning-transaction-sync/src/electrum.rs @@ -336,10 +336,21 @@ where script_history.iter().filter(|h| h.tx_hash == **txid); if let Some(history) = filtered_history.next() { let prob_conf_height = history.height as u32; + if history.height <= 0 { + // Skip if it's an unconfirmed entry.
+ continue; + } let confirmed_tx = self.get_confirmed_tx(tx, prob_conf_height)?; confirmed_txs.push(confirmed_tx); } - debug_assert!(filtered_history.next().is_none()); + if filtered_history.next().is_some() { + log_error!( + self.logger, + "Failed due to server returning multiple history entries for Tx {}.", + txid + ); + return Err(InternalError::Failed); + } } for (watched_output, script_history) in @@ -347,6 +358,7 @@ where { for possible_output_spend in script_history { if possible_output_spend.height <= 0 { + // Skip if it's an unconfirmed entry. continue; } From cea1c72cea2bc314198743772e887b797d11c093 Mon Sep 17 00:00:00 2001 From: Vincenzo Palazzo Date: Mon, 26 Jan 2026 19:39:54 +0100 Subject: [PATCH 117/242] BOLT 12: Validate bech32 padding per BIP-173 Add validation for bech32 padding in BOLT 12 offer parsing per BIP-173 which states: "Any incomplete group at the end MUST be 4 bits or less, MUST be all zeroes, and is discarded." This adds a test vector from the BOLT specification that ensures offers with invalid padding (exceeding the 4-bit limit) are properly rejected. Previously, LDK would accept offers with invalid bech32 padding. This was identified through differential fuzzing across Lightning implementations (see lightning/bolts#1312). The fix calls `validate_segwit_padding()` from the bech32 crate during offer parsing, and introduces a new `InvalidPadding` variant to `Bolt12ParseError` to surface these errors. Signed-off-by: Vincenzo Palazzo --- lightning/src/offers/offer.rs | 7 +++++++ lightning/src/offers/parse.rs | 30 ++++++++++++++++++++++++++++-- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/lightning/src/offers/offer.rs b/lightning/src/offers/offer.rs index 7ad3c282c77..5592c50a264 100644 --- a/lightning/src/offers/offer.rs +++ b/lightning/src/offers/offer.rs @@ -2528,5 +2528,12 @@ mod bolt12_tests { "lno1pgx9getnwss8vetrw3hhyucsespjgef743p5fzqq9nqxh0ah7y87rzv3ud0eleps9kl2d5348hq2k8qzqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgqpqqqqqqqqqqqqqqqqqqqqqqqqqqqzqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqqzq3zyg3zyg3zygszqqqqyqqqqsqqvpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqsq".parse::<Offer>(), Err(Bolt12ParseError::Decode(DecodeError::InvalidValue)), ); + + // Bech32 padding exceeds 4-bit limit (BOLT 12 test vector) + // See: https://github.com/lightning/bolts/pull/1312 + assert!(matches!( + "lno1zcss9mk8y3wkklfvevcrszlmu23kfrxh49px20665dqwmn4p72pkseseq".parse::<Offer>(), + Err(Bolt12ParseError::InvalidPadding(_)) + )); } } diff --git a/lightning/src/offers/parse.rs b/lightning/src/offers/parse.rs index 99dd1bb938d..df71e860d2d 100644 --- a/lightning/src/offers/parse.rs +++ b/lightning/src/offers/parse.rs @@ -12,7 +12,7 @@ use crate::io; use crate::ln::msgs::DecodeError; use crate::util::ser::CursorReadable; -use bech32::primitives::decode::CheckedHrpstringError; +use bech32::primitives::decode::{CheckedHrpstringError, PaddingError}; use bitcoin::secp256k1; #[allow(unused_imports)] @@ -76,6 +76,10 @@ mod sealed { return Err(Bolt12ParseError::InvalidBech32Hrp); } + // Validate that bech32 padding is valid per BIP-173: + // "Any incomplete group at the end MUST be 4 bits or less, MUST be all zeroes" + parsed.validate_segwit_padding()?; + let data = parsed.byte_iter().collect::<Vec<u8>>(); Self::try_from(data) } @@ -146,6 +150,11 @@ pub enum Bolt12ParseError { /// This is not exported to bindings users as the details don't matter much CheckedHrpstringError, ), + /// The bech32 data has invalid
padding per BIP-173 (more than 4 bits or non-zero padding). + InvalidPadding( + /// This is not exported to bindings users as the details don't matter much + PaddingError, + ), /// The bech32 decoded string could not be decoded as the expected message type. Decode(DecodeError), /// The parsed message has invalid semantics. @@ -232,6 +241,12 @@ impl From<CheckedHrpstringError> for Bolt12ParseError { } } +impl From<PaddingError> for Bolt12ParseError { + fn from(error: PaddingError) -> Self { + Self::InvalidPadding(error) + } +} + impl From<DecodeError> for Bolt12ParseError { fn from(error: DecodeError) -> Self { Self::Decode(error) @@ -326,7 +341,7 @@ mod bolt12_tests { #[cfg(test)] mod tests { - use super::Bolt12ParseError; + use super::{Bolt12ParseError, PaddingError}; use crate::ln::msgs::DecodeError; use crate::offers::offer::Offer; use bech32::primitives::decode::{CharError, CheckedHrpstringError, UncheckedHrpstringError}; @@ -371,4 +386,15 @@ mod tests { Err(e) => assert_eq!(e, Bolt12ParseError::Decode(DecodeError::InvalidValue)), } } + + #[test] + fn fails_parsing_bech32_encoded_offer_with_invalid_padding() { + // BOLT 12 test vector for invalid bech32 padding + // See: https://github.com/lightning/bolts/pull/1312 + let encoded_offer = "lno1zcss9mk8y3wkklfvevcrszlmu23kfrxh49px20665dqwmn4p72pkseseq"; + match encoded_offer.parse::<Offer>() { + Ok(_) => panic!("Valid offer: {}", encoded_offer), + Err(e) => assert_eq!(e, Bolt12ParseError::InvalidPadding(PaddingError::TooMuch)), + } + } } From 653692eafccb362b3488777c90e5d43c134bc33f Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 4 Jan 2026 20:07:20 +0000 Subject: [PATCH 118/242] Drop lockorder comments on `ChannelManager` While it's nice to document things, the lockorder comment at the top of `ChannelManager` is just annoying to always update and doesn't add all that much value. Developers likely shouldn't be checking it while writing code; our automated lockorder issue detection framework more than suffices to catch any bugs in test-reachable code. That makes it basically write-only, which isn't exactly a useful comment. --- lightning/src/ln/channelmanager.rs | 62 ------------------------ 1 file changed, 62 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 10c77505408..95442ea3370 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2629,46 +2629,6 @@ where /// [`update_channel`]: chain::Watch::update_channel /// [`ChannelUpdate`]: msgs::ChannelUpdate /// [`read`]: ReadableArgs::read -// -// Lock order: -// The tree structure below illustrates the lock order requirements for the different locks of the -// `ChannelManager`. Locks can be held at the same time if they are on the same branch in the tree, -// and should then be taken in the order of the lowest to the highest level in the tree. -// Note that locks on different branches shall not be taken at the same time, as doing so will -// create a new lock order for those specific locks in the order they were taken.
-// -// Lock order tree: -// -// `pending_offers_messages` -// -// `pending_async_payments_messages` -// -// `total_consistency_lock` -// | -// |__`forward_htlcs` -// | -// |__`pending_intercepted_htlcs` -// | -// |__`decode_update_add_htlcs` -// | -// |__`per_peer_state` -// | -// |__`claimable_payments` -// | -// |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds -// | -// |__`peer_state` -// | -// |__`short_to_chan_info` -// | -// |__`outbound_scid_aliases` -// | -// |__`best_block` -// | -// |__`pending_events` -// | -// |__`pending_background_events` -// pub struct ChannelManager< M: Deref, T: Deref, @@ -2702,11 +2662,9 @@ pub struct ChannelManager< #[cfg(not(test))] flow: OffersMessageFlow, - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(any(test, feature = "_test_utils"))] pub(super) best_block: RwLock, #[cfg(not(any(test, feature = "_test_utils")))] - /// See `ChannelManager` struct-level documentation for lock order requirements. best_block: RwLock, pub(super) secp_ctx: Secp256k1, @@ -2720,8 +2678,6 @@ pub struct ChannelManager< /// after reloading from disk while replaying blocks against ChannelMonitors. /// /// See `PendingOutboundPayment` documentation for more info. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. pending_outbound_payments: OutboundPayments, /// SCID/SCID Alias -> forward infos. Key of 0 means payments received. @@ -2732,8 +2688,6 @@ pub struct ChannelManager< /// /// Note that no consistency guarantees are made about the existence of a channel with the /// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`! - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(test)] pub(super) forward_htlcs: Mutex>>, #[cfg(not(test))] @@ -2746,8 +2700,6 @@ pub struct ChannelManager< /// (or timeout) /// 2. HTLCs that are being held on behalf of an often-offline sender until receipt of a /// [`ReleaseHeldHtlc`] onion message from an often-offline recipient - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. pending_intercepted_htlcs: Mutex>, /// Outbound SCID Alias -> pending `update_add_htlc`s to decode. @@ -2755,22 +2707,16 @@ pub struct ChannelManager< /// /// Note that no consistency guarantees are made about the existence of a channel with the /// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`! - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. decode_update_add_htlcs: Mutex>>, /// The sets of payments which are claimable or currently being claimed. See /// [`ClaimablePayments`]' individual field docs for more info. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. claimable_payments: Mutex, /// The set of outbound SCID aliases across all our channels, including unconfirmed channels /// and some closed channels which reached a usable state prior to being closed. This is used /// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the /// active channel list on load. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. outbound_scid_aliases: Mutex>, /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s. 
@@ -2782,8 +2728,6 @@ pub struct ChannelManager< /// Note that while this holds `counterparty_node_id`s and `channel_id`s, no consistency /// guarantees are made about the existence of a peer with the `counterparty_node_id` nor a /// channel with the `channel_id` in our other maps. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(test)] pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>, #[cfg(not(test))] @@ -2824,8 +2768,6 @@ pub struct ChannelManager< /// channels. /// /// Note that the same thread must never acquire two inner `PeerState` locks at the same time. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(not(any(test, feature = "_test_utils")))] per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>, #[cfg(any(test, feature = "_test_utils"))] @@ -2846,8 +2788,6 @@ pub struct ChannelManager< /// /// Note that events MUST NOT be removed from pending_events after deserialization, as they /// could be in the middle of being processed without the direct mutex held. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(not(any(test, feature = "_test_utils")))] pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>, #[cfg(any(test, feature = "_test_utils"))] @@ -2868,8 +2808,6 @@ pub struct ChannelManager< /// /// Thus, we place them here to be handled as soon as possible once we are running normally. /// - /// See `ChannelManager` struct-level documentation for lock order requirements. - /// /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor pending_background_events: Mutex<Vec<BackgroundEvent>>, /// Used when we have to take a BIG lock to make sure everything is self-consistent. From b5d1f7e2ef2d5c37cd2944f60ec90e509f9d4372 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 3 Jan 2026 17:37:02 +0000 Subject: [PATCH 119/242] Move HTLC holding into HTLC decode from `forward_htlcs` When we added support for async payments (which requires holding HTLCs until we receive an onion message), we added the hold logic to `ChannelManager::forward_htlcs`. This made sense as we reused the forwarding data structure in the holding logic and so already had the right types in place, but it turns out only a single call of `forward_htlcs` should ever result in an HTLC being held. All of the other calls (un-holding an HTLC, forwarding an intercepted HTLC, forwarding an HTLC decoded by LDK prior to 0.2, or processing a phantom receive) should never result in an HTLC being held. Instead, HTLCs should actually only ever be held when the HTLC is decoded in `process_pending_update_add_htlcs` before forwarding. Because of this, and because we want to move the interception (and thus also the holding logic) out of `forward_htlcs`, here we move the holding logic into `process_pending_update_add_htlcs`.
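To make the new flow concrete, here is a rough sketch of the decode-time decision this commit moves us to. The names and maps below are illustrative stand-ins for the `ChannelManager` internals touched in the diff, not LDK's actual API:

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct InterceptId([u8; 32]); // stand-in for the real InterceptId
    struct PendingAddHTLCInfo; // previous-hop data plus the decoded forward info

    /// Once an update_add_htlc has been decoded, either park it until the
    /// recipient comes online (async payments) or queue it for forwarding.
    fn route_decoded_htlc(
        should_hold: bool, id: InterceptId, htlc: PendingAddHTLCInfo,
        held: &mut HashMap<InterceptId, PendingAddHTLCInfo>,
        forwards: &mut Vec<PendingAddHTLCInfo>,
    ) {
        if should_hold {
            // A (channel_id, htlc_id) pair is unique, so an occupied entry
            // indicates a bug; the real code fails the HTLC in that case.
            debug_assert!(!held.contains_key(&id));
            held.insert(id, htlc);
        } else {
            forwards.push(htlc);
        }
    }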
--- lightning/src/ln/channelmanager.rs | 80 ++++++++++++++++++------------ 1 file changed, 49 insertions(+), 31 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 95442ea3370..04f25bc76ff 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6967,7 +6967,53 @@ where incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey), ) { - Ok(info) => htlc_forwards.push((info, update_add_htlc.htlc_id)), + Ok(info) => { + if info.routing.should_hold_htlc() { + let intercept_id = InterceptId::from_htlc_id_and_chan_id( + update_add_htlc.htlc_id, + &incoming_channel_id, + &incoming_counterparty_node_id, + ); + let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + match held_htlcs.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + log_trace!( + self.logger, + "Intercepted held HTLC with id {}, holding until the recipient is online", + intercept_id + ); + let pending_add = PendingAddHTLCInfo { + prev_outbound_scid_alias: incoming_scid_alias, + prev_counterparty_node_id: incoming_counterparty_node_id, + prev_funding_outpoint: incoming_funding_txo, + prev_channel_id: incoming_channel_id, + prev_htlc_id: update_add_htlc.htlc_id, + prev_user_channel_id: incoming_user_channel_id, + forward_info: info, + }; + entry.insert(pending_add); + }, + hash_map::Entry::Occupied(_) => { + debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); + let reason = LocalHTLCFailureReason::TemporaryNodeFailure; + let htlc_fail = self.htlc_failure_from_update_add_err( + &update_add_htlc, + &incoming_counterparty_node_id, + reason, + is_intro_node_blinded_forward, + &shared_secret, + ); + let failure_type = get_htlc_failure_type( + outgoing_scid_opt, + update_add_htlc.payment_hash, + ); + htlc_fails.push((htlc_fail, failure_type, reason.into())); + }, + } + } else { + htlc_forwards.push((info, update_add_htlc.htlc_id)) + } + }, Err(inbound_err) => { let failure_type = get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); @@ -6991,7 +7037,7 @@ where incoming_funding_txo, incoming_channel_id, incoming_user_channel_id, - htlc_forwards.drain(..).collect(), + htlc_forwards, ); self.forward_htlcs(&mut [pending_forwards]); for (htlc_fail, failure_type, failure_reason) in htlc_fails.drain(..) { @@ -11902,35 +11948,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ )); }; - // In the case that we have an HTLC that we're supposed to hold onto until the - // recipient comes online *and* the outbound scid is encoded as - // `fake_scid::is_valid_intercept`, we should first wait for the recipient to come - // online before generating an `HTLCIntercepted` event, since the event cannot be - // acted on until the recipient is online to cooperatively open the JIT channel. Once - // we receive the `ReleaseHeldHtlc` message from the recipient, we will circle back - // here and resume generating the event below. 
- if pending_add.forward_info.routing.should_hold_htlc() { - let intercept_id = InterceptId::from_htlc_id_and_chan_id( - prev_htlc_id, - &prev_channel_id, - &prev_counterparty_node_id, - ); - let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); - match held_htlcs.entry(intercept_id) { - hash_map::Entry::Vacant(entry) => { - log_trace!( - logger, - "Intercepted held HTLC with id {}, holding until the recipient is online", - intercept_id - ); - entry.insert(pending_add); - }, - hash_map::Entry::Occupied(_) => { - debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); - fail_intercepted_htlc(pending_add); - }, - } - } else if !is_our_scid + if !is_our_scid && pending_add.forward_info.incoming_amt_msat.is_some() && fake_scid::is_valid_intercept( &self.fake_scid_rand_bytes, From d497ec206114215b81b596ae4fdda7d3bc1e90c9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 3 Jan 2026 18:51:21 +0000 Subject: [PATCH 120/242] Fix the HTLC failure reason reported when a peer is offline If a peer is offline, but only recently went offline and thus the channel has not yet been marked disabled in our gossip, we should be returning `LocalHTLCFailureReason::PeerOffline` rather than `LocalHTLCFailureReason::ChannelNotReady`. Here we fix the error returned and tweak documentation to make the cases clearer. --- lightning/src/ln/channelmanager.rs | 2 ++ lightning/src/ln/onion_utils.rs | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 04f25bc76ff..33be6cbba99 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4807,6 +4807,8 @@ where if !chan.context.is_live() { if !chan.context.is_enabled() { return Err(LocalHTLCFailureReason::ChannelDisabled); + } else if !chan.context.is_connected() { + return Err(LocalHTLCFailureReason::PeerOffline); } else { return Err(LocalHTLCFailureReason::ChannelNotReady); } diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index eab5e721665..7cf1062a885 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -1573,7 +1573,8 @@ pub enum LocalHTLCFailureReason { /// /// The forwarding node has tampered with this value, or has a bug in its implementation. FinalIncorrectHTLCAmount, - /// The channel has been marked as disabled because the channel peer is offline. + /// The HTLC couldn't be forwarded because the channel counterparty has been offline for some + /// time. ChannelDisabled, /// The HTLC expires too far in the future, so it is rejected to avoid the worst-case outcome /// of funds being held for extended periods of time. From 4d5b0a6e375b778ab433b671010e72513204de26 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 5 Jan 2026 02:06:52 +0000 Subject: [PATCH 121/242] DRY HTLC failure paths in `process_pending_update_add_htlcs` --- lightning/src/ln/channelmanager.rs | 82 ++++++++++++------------------ 1 file changed, 32 insertions(+), 50 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 33be6cbba99..9248a0c374a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6887,6 +6887,22 @@ where }); let shared_secret = next_hop.shared_secret().secret_bytes(); + macro_rules! 
fail_htlc_continue_to_next { + ($reason: expr) => {{ + let htlc_fail = self.htlc_failure_from_update_add_err( + &update_add_htlc, + &incoming_counterparty_node_id, + $reason, + is_intro_node_blinded_forward, + &shared_secret, + ); + let failure_type = + get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((htlc_fail, failure_type, $reason.into())); + continue; + }}; + } + // Nodes shouldn't expect us to hold HTLCs for them if we don't advertise htlc_hold feature // support. // @@ -6899,18 +6915,7 @@ where if update_add_htlc.hold_htlc.is_some() && !BaseMessageHandler::provided_node_features(self).supports_htlc_hold() { - let reason = LocalHTLCFailureReason::TemporaryNodeFailure; - let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, - &incoming_counterparty_node_id, - reason, - is_intro_node_blinded_forward, - &shared_secret, - ); - let failure_type = - get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, failure_type, reason.into())); - continue; + fail_htlc_continue_to_next!(LocalHTLCFailureReason::TemporaryNodeFailure); } // Process the HTLC on the incoming channel. @@ -6927,17 +6932,7 @@ where ) { Some(Ok(_)) => {}, Some(Err(reason)) => { - let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, - &incoming_counterparty_node_id, - reason, - is_intro_node_blinded_forward, - &shared_secret, - ); - let failure_type = - get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, failure_type, reason.into())); - continue; + fail_htlc_continue_to_next!(reason); }, // The incoming channel no longer exists, HTLCs should be resolved onchain instead. None => continue 'outer_loop, @@ -6948,17 +6943,7 @@ where if let Err(reason) = self.can_forward_htlc(&update_add_htlc, next_packet_details) { - let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, - &incoming_counterparty_node_id, - reason, - is_intro_node_blinded_forward, - &shared_secret, - ); - let failure_type = - get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, failure_type, reason.into())); - continue; + fail_htlc_continue_to_next!(reason); } } @@ -6970,6 +6955,12 @@ where next_packet_details_opt.map(|d| d.next_packet_pubkey), ) { Ok(info) => { + let logger = WithContext::from( + &self.logger, + None, + Some(incoming_channel_id), + Some(update_add_htlc.payment_hash), + ); if info.routing.should_hold_htlc() { let intercept_id = InterceptId::from_htlc_id_and_chan_id( update_add_htlc.htlc_id, @@ -6979,10 +6970,9 @@ where let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); match held_htlcs.entry(intercept_id) { hash_map::Entry::Vacant(entry) => { - log_trace!( - self.logger, - "Intercepted held HTLC with id {}, holding until the recipient is online", - intercept_id + log_debug!( + logger, + "Intercepted held HTLC with id {intercept_id}, holding until the recipient is online" ); let pending_add = PendingAddHTLCInfo { prev_outbound_scid_alias: incoming_scid_alias, @@ -6997,19 +6987,11 @@ where }, hash_map::Entry::Occupied(_) => { debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); - let reason = LocalHTLCFailureReason::TemporaryNodeFailure; - let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, - &incoming_counterparty_node_id, - reason, - is_intro_node_blinded_forward, - &shared_secret, - ); - let failure_type = 
get_htlc_failure_type( - outgoing_scid_opt, - update_add_htlc.payment_hash, + log_error!(logger, "Duplicate intercept id for HTLC"); + debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); + fail_htlc_continue_to_next!( + LocalHTLCFailureReason::TemporaryNodeFailure ); - htlc_fails.push((htlc_fail, failure_type, reason.into())); }, } } else { From 04351fd8ab5be99e17efa46be8a7808a2f4eaf1c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 14 Jan 2026 13:28:01 +0000 Subject: [PATCH 122/242] Move HTLC interception decisions to `forward_htlcs` callsites In the next commit we'll substantially expand the types of HTLCs which can be intercepted. In order to do so, we want to make forwarding decisions with access to the (specified) destination channel. Sadly, this isn't available in `forward_htlcs`, so here we move interception decisions out of `forward_htlcs` and into `process_pending_update_add_htlcs` and `handle_release_held_htlc`. Note that we do not handle HTLC interception when forwarding an HTLC which was decoded in LDK versions prior to 0.2, which is noted in a suggested release note. This is due to a gap where such an HTLC might have had its routing decision made already and be waiting for an interception decision in `forward_htlcs`, but now we will only make an interception decision when decoding the onion. --- lightning/src/ln/channelmanager.rs | 361 +++++++++++-------- lightning/src/ln/functional_test_utils.rs | 2 +- pending_changelog/matt-full-interception.txt | 4 + 3 files changed, 207 insertions(+), 160 deletions(-) create mode 100644 pending_changelog/matt-full-interception.txt diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9248a0c374a..8bef96835eb 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -637,14 +637,6 @@ impl Readable for PaymentId { pub struct InterceptId(pub [u8; 32]); impl InterceptId { - /// This intercept id corresponds to an HTLC that will be forwarded on - /// [`ChannelManager::forward_intercepted_htlc`]. - fn from_incoming_shared_secret(ss: &[u8; 32]) -> Self { - Self(Sha256::hash(ss).to_byte_array()) - } - - /// This intercept id corresponds to an HTLC that will be forwarded on receipt of a - /// [`ReleaseHeldHtlc`] onion message. fn from_htlc_id_and_chan_id( htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey, ) -> Self { @@ -4776,10 +4768,27 @@ where } } + fn forward_needs_intercept( + &self, outbound_chan: Option<&FundedChannel>, outgoing_scid: u64, + ) -> bool { + if outbound_chan.is_none() { + if fake_scid::is_valid_intercept( + &self.fake_scid_rand_bytes, + outgoing_scid, + &self.chain_hash, + ) { + if self.config.read().unwrap().accept_intercept_htlcs { + return true; + } + } + } + false + } + #[rustfmt::skip] fn can_forward_htlc_to_outgoing_channel( &self, chan: &mut FundedChannel, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails - ) -> Result<(), LocalHTLCFailureReason> { + ) -> Result<bool, LocalHTLCFailureReason> { if !chan.context.should_announce() && !self.config.read().unwrap().accept_forwards_to_priv_channels { @@ -4788,6 +4797,7 @@ where // we don't allow forwards outbound over them.
return Err(LocalHTLCFailureReason::PrivateChannelForward); } + let intercepted; if let HopConnector::ShortChannelId(outgoing_scid) = next_packet.outgoing_connector { if chan.funding.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() { // `option_scid_alias` (referred to in LDK as `scid_privacy`) means @@ -4795,6 +4805,7 @@ where // we don't have the channel here. return Err(LocalHTLCFailureReason::RealSCIDForward); } + intercepted = self.forward_needs_intercept(Some(chan), outgoing_scid); } else { return Err(LocalHTLCFailureReason::InvalidTrampolineForward); } @@ -4804,7 +4815,7 @@ where // around to doing the actual forward, but better to fail early if we can and // hopefully an attacker trying to path-trace payments cannot make this occur // on a small/per-node/per-channel scale. - if !chan.context.is_live() { + if !intercepted && !chan.context.is_live() { if !chan.context.is_enabled() { return Err(LocalHTLCFailureReason::ChannelDisabled); } else if !chan.context.is_connected() { @@ -4818,7 +4829,7 @@ where } chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value)?; - Ok(()) + Ok(intercepted) } /// Executes a callback `C` that returns some value `X` on the channel found with the given @@ -4844,42 +4855,63 @@ where } } - #[rustfmt::skip] - fn can_forward_htlc( - &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails - ) -> Result<(), LocalHTLCFailureReason> { + fn can_forward_htlc_intercepted( + &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails, + ) -> Result { let outgoing_scid = match next_packet_details.outgoing_connector { HopConnector::ShortChannelId(scid) => scid, HopConnector::Dummy => { // Dummy hops are only used for path padding and must not reach HTLC processing. debug_assert!(false, "Dummy hop reached HTLC handling."); return Err(LocalHTLCFailureReason::InvalidOnionPayload); - } + }, HopConnector::Trampoline(_) => { return Err(LocalHTLCFailureReason::InvalidTrampolineForward); - } + }, }; - match self.do_funded_channel_callback(outgoing_scid, |chan: &mut FundedChannel| { - self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details) - }) { - Some(Ok(())) => {}, - Some(Err(e)) => return Err(e), - None => { - // If we couldn't find the channel info for the scid, it may be a phantom or - // intercept forward. - if (self.config.read().unwrap().accept_intercept_htlcs && - fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) || - fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash) - {} else { - return Err(LocalHTLCFailureReason::UnknownNextPeer); - } - } - } + // TODO: We do the fake SCID namespace check a bunch of times here (and indirectly via + // `forward_needs_intercept`, including as called in + // `can_forward_htlc_to_outgoing_channel`), we should find a way to reduce the number of + // times we do it. 
+ let intercept = + match self.do_funded_channel_callback(outgoing_scid, |chan: &mut FundedChannel| { + self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details) + }) { + Some(Ok(intercept)) => intercept, + Some(Err(e)) => return Err(e), + None => { + // Perform basic sanity checks on the amounts and CLTV being forwarded + if next_packet_details.outgoing_amt_msat > msg.amount_msat { + return Err(LocalHTLCFailureReason::FeeInsufficient); + } + let cltv_delta = + msg.cltv_expiry.saturating_sub(next_packet_details.outgoing_cltv_value); + if cltv_delta < MIN_CLTV_EXPIRY_DELTA.into() { + return Err(LocalHTLCFailureReason::IncorrectCLTVExpiry); + } + + if fake_scid::is_valid_phantom( + &self.fake_scid_rand_bytes, + outgoing_scid, + &self.chain_hash, + ) { + false + } else if self.forward_needs_intercept(None, outgoing_scid) { + true + } else { + return Err(LocalHTLCFailureReason::UnknownNextPeer); + } + }, + }; let cur_height = self.best_block.read().unwrap().height + 1; - check_incoming_htlc_cltv(cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry)?; + check_incoming_htlc_cltv( + cur_height, + next_packet_details.outgoing_cltv_value, + msg.cltv_expiry, + )?; - Ok(()) + Ok(intercept) } #[rustfmt::skip] @@ -6939,11 +6971,13 @@ where } // Now process the HTLC on the outgoing channel if it's a forward. + let mut intercept_forward = false; if let Some(next_packet_details) = next_packet_details_opt.as_ref() { - if let Err(reason) = - self.can_forward_htlc(&update_add_htlc, next_packet_details) - { - fail_htlc_continue_to_next!(reason); + match self.can_forward_htlc_intercepted(&update_add_htlc, next_packet_details) { + Err(reason) => { + fail_htlc_continue_to_next!(reason); + }, + Ok(intercept) => intercept_forward = intercept, } } @@ -6955,6 +6989,22 @@ where next_packet_details_opt.map(|d| d.next_packet_pubkey), ) { Ok(info) => { + let to_pending_add = |info| PendingAddHTLCInfo { + prev_outbound_scid_alias: incoming_scid_alias, + prev_counterparty_node_id: incoming_counterparty_node_id, + prev_funding_outpoint: incoming_funding_txo, + prev_channel_id: incoming_channel_id, + prev_htlc_id: update_add_htlc.htlc_id, + prev_user_channel_id: incoming_user_channel_id, + forward_info: info, + }; + let intercept_id = || { + InterceptId::from_htlc_id_and_chan_id( + update_add_htlc.htlc_id, + &incoming_channel_id, + &incoming_counterparty_node_id, + ) + }; let logger = WithContext::from( &self.logger, None, @@ -6962,32 +7012,64 @@ where Some(update_add_htlc.payment_hash), ); if info.routing.should_hold_htlc() { - let intercept_id = InterceptId::from_htlc_id_and_chan_id( - update_add_htlc.htlc_id, - &incoming_channel_id, - &incoming_counterparty_node_id, - ); let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + let intercept_id = intercept_id(); match held_htlcs.entry(intercept_id) { hash_map::Entry::Vacant(entry) => { log_debug!( logger, "Intercepted held HTLC with id {intercept_id}, holding until the recipient is online" ); - let pending_add = PendingAddHTLCInfo { - prev_outbound_scid_alias: incoming_scid_alias, - prev_counterparty_node_id: incoming_counterparty_node_id, - prev_funding_outpoint: incoming_funding_txo, - prev_channel_id: incoming_channel_id, - prev_htlc_id: update_add_htlc.htlc_id, - prev_user_channel_id: incoming_user_channel_id, - forward_info: info, - }; + let pending_add = to_pending_add(info); entry.insert(pending_add); }, hash_map::Entry::Occupied(_) => { debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc 
id"); log_error!(logger, "Duplicate intercept id for HTLC"); + fail_htlc_continue_to_next!( + LocalHTLCFailureReason::TemporaryNodeFailure + ); + }, + } + } else if intercept_forward { + let intercept_id = intercept_id(); + let mut pending_intercepts = + self.pending_intercepted_htlcs.lock().unwrap(); + match pending_intercepts.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + let pending_add = to_pending_add(info); + if let Ok(intercept_ev) = + create_htlc_intercepted_event(intercept_id, &pending_add) + { + log_debug!( + logger, + "Intercepted HTLC, generating intercept event with ID {intercept_id}" + ); + let ev_entry = (intercept_ev, None); + // It's possible we processed this intercept forward, + // generated an event, then re-processed it here after + // restart, in which case the intercept event should not be + // pushed redundantly. + let mut events = self.pending_events.lock().unwrap(); + events.retain(|ev| *ev != ev_entry); + events.push_back(ev_entry); + entry.insert(pending_add); + } else { + debug_assert!(false); + log_error!( + logger, + "Failed to generate an intercept event for HTLC" + ); + fail_htlc_continue_to_next!( + LocalHTLCFailureReason::TemporaryNodeFailure + ); + } + }, + hash_map::Entry::Occupied(_) => { + log_error!( + logger, + "Failed to forward incoming HTLC: detected duplicate intercepted payment", + ); debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); fail_htlc_continue_to_next!( LocalHTLCFailureReason::TemporaryNodeFailure @@ -11886,26 +11968,15 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ref mut pending_forwards, ) in per_source_pending_forwards { - let mut new_intercept_events = VecDeque::new(); - let mut failed_intercept_forwards = Vec::new(); if !pending_forwards.is_empty() { for (forward_info, prev_htlc_id) in pending_forwards.drain(..) { let scid = match forward_info.routing { PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, - PendingHTLCRouting::TrampolineForward { .. } => 0, - PendingHTLCRouting::Receive { .. } => 0, - PendingHTLCRouting::ReceiveKeysend { .. } => 0, + PendingHTLCRouting::TrampolineForward { .. } + | PendingHTLCRouting::Receive { .. } + | PendingHTLCRouting::ReceiveKeysend { .. } => 0, }; - // Pull this now to avoid introducing a lock order with `forward_htlcs`. - let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid); - let payment_hash = forward_info.payment_hash; - let logger = WithContext::from( - &self.logger, - None, - Some(prev_channel_id), - Some(payment_hash), - ); let pending_add = PendingAddHTLCInfo { prev_outbound_scid_alias, prev_counterparty_node_id, @@ -11915,88 +11986,17 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ prev_user_channel_id, forward_info, }; - let mut fail_intercepted_htlc = |pending_add: PendingAddHTLCInfo| { - let htlc_source = - HTLCSource::PreviousHopData(pending_add.htlc_previous_hop_data()); - let reason = HTLCFailReason::from_failure_code( - LocalHTLCFailureReason::UnknownNextPeer, - ); - let failure_type = HTLCHandlingFailureType::InvalidForward { - requested_forward_scid: scid, - }; - failed_intercept_forwards.push(( - htlc_source, - payment_hash, - reason, - failure_type, - )); - }; - if !is_our_scid - && pending_add.forward_info.incoming_amt_msat.is_some() - && fake_scid::is_valid_intercept( - &self.fake_scid_rand_bytes, - scid, - &self.chain_hash, - ) { - let intercept_id = InterceptId::from_incoming_shared_secret( - &pending_add.forward_info.incoming_shared_secret, - ); - let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); - match pending_intercepts.entry(intercept_id) { - hash_map::Entry::Vacant(entry) => { - if let Ok(intercept_ev) = - create_htlc_intercepted_event(intercept_id, &pending_add) - { - new_intercept_events.push_back((intercept_ev, None)); - entry.insert(pending_add); - } else { - debug_assert!(false); - fail_intercepted_htlc(pending_add); - } - }, - hash_map::Entry::Occupied(_) => { - log_info!( - logger, - "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", - scid - ); - fail_intercepted_htlc(pending_add); - }, - } - } else { - match self.forward_htlcs.lock().unwrap().entry(scid) { - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); - }, - hash_map::Entry::Vacant(entry) => { - entry.insert(vec![HTLCForwardInfo::AddHTLC(pending_add)]); - }, - } + match self.forward_htlcs.lock().unwrap().entry(scid) { + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); + }, + hash_map::Entry::Vacant(entry) => { + entry.insert(vec![HTLCForwardInfo::AddHTLC(pending_add)]); + }, } } } - - for (htlc_source, payment_hash, failure_reason, destination) in - failed_intercept_forwards.drain(..) - { - self.fail_htlc_backwards_internal( - &htlc_source, - &payment_hash, - &failure_reason, - destination, - None, - ); - } - - if !new_intercept_events.is_empty() { - let mut events = self.pending_events.lock().unwrap(); - // It's possible we processed this intercept forward, generated an event, then re-processed - // it here after restart, in which case the intercept event should not be pushed - // redundantly. - new_intercept_events.retain(|ev| !events.contains(ev)); - events.append(&mut new_intercept_events); - } } } @@ -16267,6 +16267,7 @@ where prev_outbound_scid_alias, htlc_id, } => { + let _serialize_guard = PersistenceNotifierGuard::notify_on_drop(self); // It's possible the release_held_htlc message raced ahead of us transitioning the pending // update_add to `Self::pending_intercept_htlcs`. If that's the case, update the pending // update_add to indicate that the HTLC should be released immediately. @@ -16305,16 +16306,18 @@ where }, } }; - match htlc.forward_info.routing { - PendingHTLCRouting::Forward { ref mut hold_htlc, .. } => { + let next_hop_scid = match htlc.forward_info.routing { + PendingHTLCRouting::Forward { ref mut hold_htlc, short_channel_id, .. 
} => { debug_assert!(hold_htlc.is_some()); *hold_htlc = None; + short_channel_id }, _ => { debug_assert!(false, "HTLC intercepts can only be forwards"); + // Let the HTLC be auto-failed before it expires. return; }, - } + }; let logger = WithContext::from( &self.logger, @@ -16324,16 +16327,56 @@ where ); log_trace!(logger, "Releasing held htlc with intercept_id {}", intercept_id); - let mut per_source_pending_forward = [( - htlc.prev_outbound_scid_alias, - htlc.prev_counterparty_node_id, - htlc.prev_funding_outpoint, - htlc.prev_channel_id, - htlc.prev_user_channel_id, - vec![(htlc.forward_info, htlc.prev_htlc_id)], - )]; - self.forward_htlcs(&mut per_source_pending_forward); - PersistenceNotifierGuard::notify_on_drop(self); + let should_intercept = self + .do_funded_channel_callback(next_hop_scid, |chan| { + self.forward_needs_intercept(Some(chan), next_hop_scid) + }) + .unwrap_or_else(|| self.forward_needs_intercept(None, next_hop_scid)); + + if should_intercept { + let intercept_id = InterceptId::from_htlc_id_and_chan_id( + htlc.prev_htlc_id, + &htlc.prev_channel_id, + &htlc.prev_counterparty_node_id, + ); + let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); + match pending_intercepts.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + if let Ok(intercept_ev) = + create_htlc_intercepted_event(intercept_id, &htlc) + { + self.pending_events.lock().unwrap().push_back((intercept_ev, None)); + entry.insert(htlc); + } else { + debug_assert!(false); + // Let the HTLC be auto-failed before it expires. + return; + } + }, + hash_map::Entry::Occupied(_) => { + log_error!( + logger, + "Failed to forward incoming HTLC: detected duplicate intercepted payment", + ); + debug_assert!( + false, + "Should never have two HTLCs with the same channel id and htlc id", + ); + // Let the HTLC be auto-failed before it expires. + return; + }, + } + } else { + let mut per_source_pending_forward = [( + htlc.prev_outbound_scid_alias, + htlc.prev_counterparty_node_id, + htlc.prev_funding_outpoint, + htlc.prev_channel_id, + htlc.prev_user_channel_id, + vec![(htlc.forward_info, htlc.prev_htlc_id)], + )]; + self.forward_htlcs(&mut per_source_pending_forward); + } }, _ => return, } @@ -17062,6 +17105,7 @@ where } } + let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); // Since some FundingNegotiation variants are not persisted, any splice in such state must // be failed upon reload. However, as the necessary information for the SpliceFailed event @@ -17159,7 +17203,6 @@ where } let mut pending_intercepted_htlcs = None; - let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); if our_pending_intercepts.len() != 0 { pending_intercepted_htlcs = Some(our_pending_intercepts); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 2cf5ea96acb..1eda3bdf9f7 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2906,7 +2906,7 @@ pub fn check_payment_claimable( _ => {}, } }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event {event:?}"), } } diff --git a/pending_changelog/matt-full-interception.txt b/pending_changelog/matt-full-interception.txt new file mode 100644 index 00000000000..2cc51a56305 --- /dev/null +++ b/pending_changelog/matt-full-interception.txt @@ -0,0 +1,4 @@ +# Backwards Compatibility + * HTLCs which were first received on an LDK version prior to LDK 0.2 will no + longer be intercepted. 
Instead, they will be handled as if they were not + intercepted and be forwarded/failed automatically. From a0723ad77f2907234fe6a6ac87100a2ac1b215ce Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 13 Jan 2026 19:57:32 +0000 Subject: [PATCH 123/242] Support generic HTLC interception At various points we've had requests to support more generic HTLC interception in LDK. In most cases, full HTLC interception was not, in fact, the right way to accomplish what the developer wanted, but there have been various times when it might have been. Here, we finally add full HTLC interception support, doing so with a configurable bitfield to allow developers to intercept only certain classes of HTLCs. Specifically, we currently support intercepting HTLCs: * which were to be forwarded to intercept SCIDs (as was already supported), * which were to be forwarded to offline private channels (for LSPs to accept HTLCs for offline clients so that they can attempt to wake them before failing the HTLC), * which were to be forwarded to online private channels (for LSPs to take additional fees or enforce certain policies), * which were to be forwarded over public channels (for general forwarding policy enforcement), * which were to be forwarded to unknown SCIDs (for everything else). --- .../tests/lsps2_integration_tests.rs | 9 +- lightning/src/events/mod.rs | 20 +- lightning/src/ln/async_payments_tests.rs | 4 +- lightning/src/ln/blinded_payment_tests.rs | 5 +- lightning/src/ln/channelmanager.rs | 104 ++++--- lightning/src/ln/interception_tests.rs | 290 ++++++++++++++++++ lightning/src/ln/mod.rs | 3 + lightning/src/ln/payment_tests.rs | 7 +- lightning/src/ln/reload_tests.rs | 8 +- lightning/src/util/config.rs | 127 +++++++- 10 files changed, 504 insertions(+), 73 deletions(-) create mode 100644 lightning/src/ln/interception_tests.rs diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index 2e469d149b0..45c2891227d 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -38,6 +38,7 @@ use lightning::ln::peer_handler::CustomMessageHandler; use lightning::log_error; use lightning::routing::router::{RouteHint, RouteHintHop}; use lightning::sign::NodeSigner; +use lightning::util::config::HTLCInterceptionFlags; use lightning::util::errors::APIError; use lightning::util::logger::Logger; use lightning::util::test_utils::{TestBroadcaster, TestStore}; @@ -1157,7 +1158,7 @@ fn client_trusts_lsp_end_to_end_test() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut service_node_config = test_default_channel_config(); - service_node_config.accept_intercept_htlcs = true; + service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let mut client_node_config = test_default_channel_config(); client_node_config.manually_accept_inbound_channels = true; @@ -1630,7 +1631,7 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut service_node_config = test_default_channel_config(); - service_node_config.accept_intercept_htlcs = true; + service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let mut client_node_config = test_default_channel_config(); client_node_config.manually_accept_inbound_channels = true; @@ -1821,7 +1822,7 @@ fn 
htlc_timeout_before_client_claim_results_in_handling_failed() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut service_node_config = test_default_channel_config(); - service_node_config.accept_intercept_htlcs = true; + service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let mut client_node_config = test_default_channel_config(); client_node_config.manually_accept_inbound_channels = true; @@ -2157,7 +2158,7 @@ fn client_trusts_lsp_partial_fee_does_not_trigger_broadcast() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut service_node_config = test_default_channel_config(); - service_node_config.accept_intercept_htlcs = true; + service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let mut client_node_config = test_default_channel_config(); client_node_config.manually_accept_inbound_channels = true; diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index d97ae6097b6..277ce612494 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1250,28 +1250,29 @@ pub enum Event { short_channel_id: Option, }, /// Used to indicate that we've intercepted an HTLC forward. This event will only be generated if - /// you've encoded an intercept scid in the receiver's invoice route hints using - /// [`ChannelManager::get_intercept_scid`] and have set [`UserConfig::accept_intercept_htlcs`]. + /// you've set some flags on [`UserConfig::htlc_interception_flags`]. /// /// [`ChannelManager::forward_intercepted_htlc`] or - /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to this event. See - /// their docs for more information. + /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to this event in a + /// timely manner (i.e. within some number of seconds, not minutes). See their docs for more + /// information. /// /// # Failure Behavior and Persistence /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. /// - /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid - /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs + /// [`UserConfig::htlc_interception_flags`]: crate::util::config::UserConfig::htlc_interception_flags /// [`ChannelManager::forward_intercepted_htlc`]: crate::ln::channelmanager::ChannelManager::forward_intercepted_htlc /// [`ChannelManager::fail_intercepted_htlc`]: crate::ln::channelmanager::ChannelManager::fail_intercepted_htlc HTLCIntercepted { /// An id to help LDK identify which HTLC is being forwarded or failed. intercept_id: InterceptId, - /// The fake scid that was programmed as the next hop's scid, generated using - /// [`ChannelManager::get_intercept_scid`]. + /// The SCID which was selected by the sender as the next hop. It may point to one of our + /// channels, an intercept SCID generated with [`ChannelManager::get_intercept_scid`], or + /// an unknown SCID if [`HTLCInterceptionFlags::ToUnknownSCIDs`] was selected. /// /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid + /// [`HTLCInterceptionFlags::ToUnknownSCIDs`]: crate::util::config::HTLCInterceptionFlags::ToUnknownSCIDs requested_next_hop_scid: u64, /// The payment hash used for this HTLC. 
payment_hash: PaymentHash, @@ -1282,7 +1283,8 @@ pub enum Event { /// Forwarding less than this amount may break compatibility with LDK versions prior to 0.0.116. /// /// Note that LDK will NOT check that expected fees were factored into this value. You MUST - /// check that whatever fee you want has been included here or subtract it as required. Further, + /// check that whatever fee you want has been included here (by comparing with + /// [`Self::HTLCIntercepted::inbound_amount_msat`]) or subtract it as required. Further, /// LDK will not stop you from forwarding more than you received. expected_outbound_amount_msat: u64, }, diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 0b2652920e1..b8d23217cef 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -60,7 +60,7 @@ use crate::sign::NodeSigner; use crate::sync::Mutex; use crate::types::features::Bolt12InvoiceFeatures; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; -use crate::util::config::UserConfig; +use crate::util::config::{HTLCInterceptionFlags, UserConfig}; use crate::util::ser::Writeable; use bitcoin::constants::ChainHash; use bitcoin::network::Network; @@ -3063,7 +3063,7 @@ fn intercepted_hold_htlc() { recipient_cfg.channel_handshake_limits.force_announced_channel_preference = false; let mut lsp_cfg = test_default_channel_config(); - lsp_cfg.accept_intercept_htlcs = true; + lsp_cfg.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; lsp_cfg.accept_forwards_to_priv_channels = true; lsp_cfg.enable_htlc_hold = true; diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 74981ead7f1..5a7c326ebaa 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -32,7 +32,7 @@ use crate::routing::router::{ use crate::sign::{NodeSigner, PeerStorageKey, ReceiveAuthKey, Recipient}; use crate::types::features::{BlindedHopFeatures, ChannelFeatures, NodeFeatures}; use crate::types::payment::{PaymentHash, PaymentSecret}; -use crate::util::config::UserConfig; +use crate::util::config::{HTLCInterceptionFlags, UserConfig}; use crate::util::ser::{WithoutLength, Writeable}; use crate::util::test_utils::{self, bytes_from_hex, pubkey_from_hex, secret_from_hex}; use bitcoin::hex::DisplayHex; @@ -769,7 +769,8 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8bef96835eb..eeb5a536483 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -122,7 +122,9 @@ use crate::types::features::{ }; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::types::string::UntrustedString; -use crate::util::config::{ChannelConfig, ChannelConfigOverrides, ChannelConfigUpdate, UserConfig}; +use 
crate::util::config::{ + ChannelConfig, ChannelConfigOverrides, ChannelConfigUpdate, HTLCInterceptionFlags, UserConfig, +}; use crate::util::errors::APIError; use crate::util::logger::{Level, Logger, WithContext}; use crate::util::scid_utils::fake_scid; @@ -4768,27 +4770,53 @@ where } } - fn forward_needs_intercept( - &self, outbound_chan: Option<&FundedChannel>, outgoing_scid: u64, - ) -> bool { - if outbound_chan.is_none() { - if fake_scid::is_valid_intercept( - &self.fake_scid_rand_bytes, - outgoing_scid, - &self.chain_hash, - ) { - if self.config.read().unwrap().accept_intercept_htlcs { + fn forward_needs_intercept_to_known_chan(&self, outbound_chan: &FundedChannel) -> bool { + let intercept_flags = self.config.read().unwrap().htlc_interception_flags; + if !outbound_chan.context.should_announce() { + if outbound_chan.context.is_connected() { + if intercept_flags & (HTLCInterceptionFlags::ToOnlinePrivateChannels as u8) != 0 { + return true; + } + } else { + if intercept_flags & (HTLCInterceptionFlags::ToOfflinePrivateChannels as u8) != 0 { return true; } } + } else { + if intercept_flags & (HTLCInterceptionFlags::ToPublicChannels as u8) != 0 { + return true; + } + } + false + } + + fn forward_needs_intercept_to_unknown_chan(&self, outgoing_scid: u64) -> bool { + let intercept_flags = self.config.read().unwrap().htlc_interception_flags; + if fake_scid::is_valid_intercept( + &self.fake_scid_rand_bytes, + outgoing_scid, + &self.chain_hash, + ) { + if intercept_flags & (HTLCInterceptionFlags::ToInterceptSCIDs as u8) != 0 { + return true; + } + } else if fake_scid::is_valid_phantom( + &self.fake_scid_rand_bytes, + outgoing_scid, + &self.chain_hash, + ) { + // Handled as a normal forward + } else if intercept_flags & (HTLCInterceptionFlags::ToUnknownSCIDs as u8) != 0 { + return true; } false } #[rustfmt::skip] fn can_forward_htlc_to_outgoing_channel( - &self, chan: &mut FundedChannel, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails - ) -> Result { + &self, chan: &mut FundedChannel, msg: &msgs::UpdateAddHTLC, + next_packet: &NextPacketDetails, will_intercept: bool, + ) -> Result<(), LocalHTLCFailureReason> { if !chan.context.should_announce() && !self.config.read().unwrap().accept_forwards_to_priv_channels { @@ -4797,7 +4825,6 @@ where // we don't allow forwards outbound over them. return Err(LocalHTLCFailureReason::PrivateChannelForward); } - let intercepted; if let HopConnector::ShortChannelId(outgoing_scid) = next_packet.outgoing_connector { if chan.funding.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() { // `option_scid_alias` (referred to in LDK as `scid_privacy`) means @@ -4805,7 +4832,6 @@ where // we don't have the channel here. return Err(LocalHTLCFailureReason::RealSCIDForward); } - intercepted = self.forward_needs_intercept(Some(chan), outgoing_scid); } else { return Err(LocalHTLCFailureReason::InvalidTrampolineForward); } @@ -4815,7 +4841,7 @@ where // around to doing the actual forward, but better to fail early if we can and // hopefully an attacker trying to path-trace payments cannot make this occur // on a small/per-node/per-channel scale. 
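Since `htlc_interception_flags` is read as a raw `u8` bitmask in the checks above, callers compose it by OR'ing `HTLCInterceptionFlags` values together. A minimal sketch of such a configuration (the field and flag names are those introduced by this patch; the helper itself is illustrative):

    use lightning::util::config::{HTLCInterceptionFlags, UserConfig};

    fn lsp_config() -> UserConfig {
        let mut config = UserConfig::default();
        // Intercept HTLCs bound for intercept SCIDs as well as for private
        // channels whose counterparty is offline, e.g. so an LSP can try to
        // wake a mobile client before failing the HTLC.
        config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8
            | HTLCInterceptionFlags::ToOfflinePrivateChannels as u8;
        config
    }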
- if !intercepted && !chan.context.is_live() { + if !will_intercept && !chan.context.is_live() { if !chan.context.is_enabled() { return Err(LocalHTLCFailureReason::ChannelDisabled); } else if !chan.context.is_connected() { @@ -4827,9 +4853,7 @@ where if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { return Err(LocalHTLCFailureReason::AmountBelowMinimum); } - chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value)?; - - Ok(intercepted) + chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) } /// Executes a callback `C` that returns some value `X` on the channel found with the given @@ -4855,10 +4879,10 @@ where } } - fn can_forward_htlc_intercepted( - &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails, + fn can_forward_htlc_should_intercept( + &self, msg: &msgs::UpdateAddHTLC, next_hop: &NextPacketDetails, ) -> Result { - let outgoing_scid = match next_packet_details.outgoing_connector { + let outgoing_scid = match next_hop.outgoing_connector { HopConnector::ShortChannelId(scid) => scid, HopConnector::Dummy => { // Dummy hops are only used for path padding and must not reach HTLC processing. @@ -4870,22 +4894,23 @@ where }, }; // TODO: We do the fake SCID namespace check a bunch of times here (and indirectly via - // `forward_needs_intercept`, including as called in + // `forward_needs_intercept_*`, including as called in // `can_forward_htlc_to_outgoing_channel`), we should find a way to reduce the number of // times we do it. let intercept = match self.do_funded_channel_callback(outgoing_scid, |chan: &mut FundedChannel| { - self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details) + let intercept = self.forward_needs_intercept_to_known_chan(chan); + self.can_forward_htlc_to_outgoing_channel(chan, msg, next_hop, intercept)?; + Ok(intercept) }) { Some(Ok(intercept)) => intercept, Some(Err(e)) => return Err(e), None => { // Perform basic sanity checks on the amounts and CLTV being forwarded - if next_packet_details.outgoing_amt_msat > msg.amount_msat { + if next_hop.outgoing_amt_msat > msg.amount_msat { return Err(LocalHTLCFailureReason::FeeInsufficient); } - let cltv_delta = - msg.cltv_expiry.saturating_sub(next_packet_details.outgoing_cltv_value); + let cltv_delta = msg.cltv_expiry.saturating_sub(next_hop.outgoing_cltv_value); if cltv_delta < MIN_CLTV_EXPIRY_DELTA.into() { return Err(LocalHTLCFailureReason::IncorrectCLTVExpiry); } @@ -4896,7 +4921,7 @@ where &self.chain_hash, ) { false - } else if self.forward_needs_intercept(None, outgoing_scid) { + } else if self.forward_needs_intercept_to_unknown_chan(outgoing_scid) { true } else { return Err(LocalHTLCFailureReason::UnknownNextPeer); @@ -4905,11 +4930,7 @@ where }; let cur_height = self.best_block.read().unwrap().height + 1; - check_incoming_htlc_cltv( - cur_height, - next_packet_details.outgoing_cltv_value, - msg.cltv_expiry, - )?; + check_incoming_htlc_cltv(cur_height, next_hop.outgoing_cltv_value, msg.cltv_expiry)?; Ok(intercept) } @@ -6641,11 +6662,8 @@ where /// Intercepted HTLCs can be useful for Lightning Service Providers (LSPs) to open a just-in-time /// channel to a receiving node if the node lacks sufficient inbound liquidity. /// - /// To make use of intercepted HTLCs, set [`UserConfig::accept_intercept_htlcs`] and use - /// [`ChannelManager::get_intercept_scid`] to generate short channel id(s) to put in the - /// receiver's invoice route hints. 
These route hints will signal to LDK to generate an - /// [`HTLCIntercepted`] event when it receives the forwarded HTLC, and this method or - /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to the event. + /// To make use of intercepted HTLCs, set [`UserConfig::htlc_interception_flags`] must have a + /// non-0 value. /// /// Note that LDK does not enforce fee requirements in `amt_to_forward_msat`, and will not stop /// you from forwarding more than you received. See @@ -6655,7 +6673,7 @@ where /// Errors if the event was not handled in time, in which case the HTLC was automatically failed /// backwards. /// - /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs + /// [`UserConfig::htlc_interception_flags`]: crate::util::config::UserConfig::htlc_interception_flags /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted /// [`HTLCIntercepted::expected_outbound_amount_msat`]: events::Event::HTLCIntercepted::expected_outbound_amount_msat // TODO: when we move to deciding the best outbound channel at forward time, only take @@ -6973,7 +6991,9 @@ where // Now process the HTLC on the outgoing channel if it's a forward. let mut intercept_forward = false; if let Some(next_packet_details) = next_packet_details_opt.as_ref() { - match self.can_forward_htlc_intercepted(&update_add_htlc, next_packet_details) { + match self + .can_forward_htlc_should_intercept(&update_add_htlc, next_packet_details) + { Err(reason) => { fail_htlc_continue_to_next!(reason); }, @@ -16329,9 +16349,9 @@ where let should_intercept = self .do_funded_channel_callback(next_hop_scid, |chan| { - self.forward_needs_intercept(Some(chan), next_hop_scid) + self.forward_needs_intercept_to_known_chan(chan) }) - .unwrap_or_else(|| self.forward_needs_intercept(None, next_hop_scid)); + .unwrap_or_else(|| self.forward_needs_intercept_to_unknown_chan(next_hop_scid)); if should_intercept { let intercept_id = InterceptId::from_htlc_id_and_chan_id( diff --git a/lightning/src/ln/interception_tests.rs b/lightning/src/ln/interception_tests.rs new file mode 100644 index 00000000000..11b5de166f6 --- /dev/null +++ b/lightning/src/ln/interception_tests.rs @@ -0,0 +1,290 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Tests that test standing up a network of ChannelManagers, creating channels, sending +//! payments/messages between them, and often checking the resulting ChannelMonitors are able to +//! claim outputs on-chain. 
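For reference, the handler-side flow exercised below looks roughly as follows (a sketch only: `lookup_outbound_channel` is an assumed application helper rather than an LDK API, and `channel_manager`/`event` come from the surrounding application):

    match event {
        Event::HTLCIntercepted {
            intercept_id,
            requested_next_hop_scid,
            expected_outbound_amount_msat,
            ..
        } => {
            // Intercepted HTLCs hold up the sender's payment, so forward or
            // fail them promptly.
            match lookup_outbound_channel(requested_next_hop_scid) {
                Some((channel_id, counterparty_node_id)) => channel_manager
                    .forward_intercepted_htlc(
                        intercept_id,
                        &channel_id,
                        counterparty_node_id,
                        expected_outbound_amount_msat,
                    )
                    .expect("handled before the HTLC was failed backwards"),
                None => channel_manager
                    .fail_intercepted_htlc(intercept_id)
                    .expect("handled before the HTLC was failed backwards"),
            }
        },
        _ => {},
    }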
+ +use crate::events::{Event, HTLCHandlingFailureReason, HTLCHandlingFailureType}; +use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler}; +use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::routing::router::PaymentParameters; +use crate::util::config::HTLCInterceptionFlags; + +use crate::prelude::*; + +use crate::ln::functional_test_utils::*; + +#[derive(Clone, Copy, PartialEq, Eq)] +enum ForwardingMod { + FeeTooLow, + CLTVBelowConfig, + CLTVBelowMin, +} + +fn do_test_htlc_interception_flags( + flags_bitmask: u8, flag: HTLCInterceptionFlags, modification: Option, +) { + use HTLCInterceptionFlags as Flag; + + assert_eq!((flag as isize).count_ones(), 1, "We can only test one type of HTLC at once"); + + // Tests that the `htlc_interception_flags` bitmask given by `flags_bitmask` correctly + // intercepts (or doesn't intercept) an HTLC which is of type `flag` + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + + let mut intercept_config = test_default_channel_config(); + intercept_config.htlc_interception_flags = flags_bitmask; + intercept_config.channel_config.forwarding_fee_base_msat = 1000; + intercept_config.channel_config.cltv_expiry_delta = 6 * 24; + intercept_config.accept_forwards_to_priv_channels = true; + + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_config), None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + // First open the right type of channel (and get it in the right state) for the bit we're + // testing. + let (target_scid, target_chan_id) = match flag { + Flag::ToOfflinePrivateChannels | Flag::ToOnlinePrivateChannels => { + create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 0); + let chan_id = nodes[2].node.list_channels()[0].channel_id; + let scid = nodes[2].node.list_channels()[0].short_channel_id.unwrap(); + if flag == Flag::ToOfflinePrivateChannels { + nodes[1].node.peer_disconnected(node_2_id); + nodes[2].node.peer_disconnected(node_1_id); + } else { + assert_eq!(flag, Flag::ToOnlinePrivateChannels); + } + (scid, chan_id) + }, + Flag::ToInterceptSCIDs | Flag::ToPublicChannels | Flag::ToUnknownSCIDs => { + let (chan_upd, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2); + if flag == Flag::ToInterceptSCIDs { + (nodes[1].node.get_intercept_scid(), chan_id) + } else if flag == Flag::ToPublicChannels { + (chan_upd.contents.short_channel_id, chan_id) + } else if flag == Flag::ToUnknownSCIDs { + (42424242, chan_id) + } else { + panic!(); + } + }, + _ => panic!("Combined flags aren't allowed"), + }; + + // Start every node on the same block height to ensure we don't hit spurious CLTV issues + connect_blocks(&nodes[0], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + + // Send the HTLC from nodes[0] to nodes[1] and process it to generate the interception (if + // we're set to intercept it). 
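+    // Build a route to nodes[2], then rewrite the second hop's SCID so the
+    // forward targets the channel/SCID class selected above, applying any
+    // requested modification to make the forward invalid.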
+ let amt_msat = 100_000; + let bolt11 = nodes[2].node.create_bolt11_invoice(Default::default()).unwrap(); + let pay_params = PaymentParameters::from_bolt11_invoice(&bolt11); + let (mut route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], pay_params, amt_msat); + route.paths[0].hops[1].short_channel_id = target_scid; + + let interception_bit_match = (flags_bitmask & (flag as u8)) != 0; + match modification { + Some(ForwardingMod::FeeTooLow) => { + assert!( + interception_bit_match, + "No reason to test failing if we aren't trying to intercept", + ); + route.paths[0].hops[0].fee_msat = 500; + }, + Some(ForwardingMod::CLTVBelowConfig) => { + route.paths[0].hops[0].cltv_expiry_delta = 6 * 12; + assert!( + interception_bit_match, + "No reason to test failing if we aren't trying to intercept", + ); + }, + Some(ForwardingMod::CLTVBelowMin) => { + route.paths[0].hops[0].cltv_expiry_delta = 6; + }, + None => {}, + } + + let onion = RecipientOnionFields::secret_only(payment_secret); + let payment_id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let payment_event = SendEvent::from_node(&nodes[0]); + nodes[1].node.handle_update_add_htlc(node_0_id, &payment_event.msgs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, true); + expect_and_process_pending_htlcs(&nodes[1], false); + + if interception_bit_match && modification.is_none() { + // If we were set to intercept, check that we got an interception event then + // forward the HTLC on to nodes[2] and claim the payment. + let intercept_id; + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1, "{events:?}"); + if let Event::HTLCIntercepted { intercept_id: id, requested_next_hop_scid, .. } = &events[0] + { + assert_eq!(*requested_next_hop_scid, target_scid, + "Bitmask {flags_bitmask:#x}: Expected interception for bit {flag:?} to target SCID {target_scid}"); + intercept_id = *id; + } else { + panic!("{events:?}"); + } + + if flag == Flag::ToOfflinePrivateChannels { + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_args.send_channel_ready = (true, true); + reconnect_nodes(reconnect_args); + } + + nodes[1] + .node + .forward_intercepted_htlc(intercept_id, &target_chan_id, node_2_id, amt_msat) + .unwrap(); + expect_and_process_pending_htlcs(&nodes[1], false); + check_added_monitors(&nodes[1], 1); + + let forward_ev = SendEvent::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_1_id, &forward_ev.msgs[0]); + do_commitment_signed_dance(&nodes[2], &nodes[1], &forward_ev.commitment_msg, false, true); + + nodes[2].node.process_pending_htlc_forwards(); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + } else { + // If we were not set to intercept, check that the HTLC either failed or was + // automatically forwarded as appropriate. 
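+    // Two outcomes are checked below: HTLCs we aren't intercepting that
+    // target a known, online channel are forwarded automatically, while all
+    // other cases are failed back with a reason derived from the requested
+    // modification (if any) or from the state of the target.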
+ match (modification, flag) { + (None, Flag::ToOnlinePrivateChannels | Flag::ToPublicChannels) => { + check_added_monitors(&nodes[1], 1); + + let forward_ev = SendEvent::from_node(&nodes[1]); + assert_eq!(forward_ev.node_id, node_2_id); + nodes[2].node.handle_update_add_htlc(node_1_id, &forward_ev.msgs[0]); + let commitment = &forward_ev.commitment_msg; + do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, true); + + nodes[2].node.process_pending_htlc_forwards(); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + }, + _ => { + let events = nodes[1].node.get_and_clear_pending_events(); + let reason_from_mod = match modification { + Some(ForwardingMod::FeeTooLow) => Some(LocalHTLCFailureReason::FeeInsufficient), + Some(ForwardingMod::CLTVBelowConfig) => { + Some(LocalHTLCFailureReason::IncorrectCLTVExpiry) + }, + Some(ForwardingMod::CLTVBelowMin) => { + Some(LocalHTLCFailureReason::IncorrectCLTVExpiry) + }, + None => None, + }; + let (expected_failure_type, reason); + if flag == Flag::ToOfflinePrivateChannels { + expected_failure_type = HTLCHandlingFailureType::Forward { + node_id: Some(node_2_id), + channel_id: target_chan_id, + }; + reason = reason_from_mod.unwrap_or(LocalHTLCFailureReason::PeerOffline); + } else if flag == Flag::ToInterceptSCIDs { + expected_failure_type = HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: target_scid, + }; + reason = reason_from_mod.unwrap_or(LocalHTLCFailureReason::UnknownNextPeer); + } else if flag == Flag::ToUnknownSCIDs { + expected_failure_type = HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: target_scid, + }; + reason = reason_from_mod.unwrap_or(LocalHTLCFailureReason::UnknownNextPeer); + } else { + expected_failure_type = HTLCHandlingFailureType::Forward { + node_id: Some(node_2_id), + channel_id: target_chan_id, + }; + reason = reason_from_mod + .expect("We should only fail because of a mod or unknown next-hop"); + } + if let Event::HTLCHandlingFailed { failure_reason, failure_type, .. 
} = &events[0] { + assert_eq!(*failure_reason, Some(HTLCHandlingFailureReason::Local { reason })); + assert_eq!(*failure_type, expected_failure_type); + } else { + panic!("{events:?}"); + } + + check_added_monitors(&nodes[1], 1); + let fail_msgs = get_htlc_update_msgs(&nodes[1], &node_0_id); + nodes[0].node.handle_update_fail_htlc(node_1_id, &fail_msgs.update_fail_htlcs[0]); + let commitment = fail_msgs.commitment_signed; + do_commitment_signed_dance(&nodes[0], &nodes[1], &commitment, true, true); + expect_payment_failed!(nodes[0], payment_hash, false); + }, + } + } +} + +const MAX_BITMASK: u8 = HTLCInterceptionFlags::AllValidHTLCs as u8; +const ALL_FLAGS: [HTLCInterceptionFlags; 5] = [ + HTLCInterceptionFlags::ToInterceptSCIDs, + HTLCInterceptionFlags::ToOfflinePrivateChannels, + HTLCInterceptionFlags::ToOnlinePrivateChannels, + HTLCInterceptionFlags::ToPublicChannels, + HTLCInterceptionFlags::ToUnknownSCIDs, +]; + +#[test] +fn test_htlc_interception_flags() { + let mut all_flag_bits = 0; + for flag in ALL_FLAGS { + all_flag_bits |= flag as isize; + } + assert_eq!(all_flag_bits, MAX_BITMASK as isize, "all flags must test all bits"); + + // Test all 2^5 = 32 combinations of the HTLCInterceptionFlags bitmask + // For each combination, test 5 different HTLC forwards and verify correct interception behavior + for flags_bitmask in 0..=MAX_BITMASK { + for flag in ALL_FLAGS { + do_test_htlc_interception_flags(flags_bitmask, flag, None); + } + } +} + +#[test] +fn test_htlc_bad_for_chan_config() { + // Test that interception won't be done if an HTLC fails to meet the target channel's channel + // config. + let have_chan_flags = [ + HTLCInterceptionFlags::ToOfflinePrivateChannels, + HTLCInterceptionFlags::ToOnlinePrivateChannels, + HTLCInterceptionFlags::ToPublicChannels, + ]; + for flag in have_chan_flags { + do_test_htlc_interception_flags(flag as u8, flag, Some(ForwardingMod::FeeTooLow)); + do_test_htlc_interception_flags(flag as u8, flag, Some(ForwardingMod::CLTVBelowConfig)); + } +} + +#[test] +fn test_htlc_bad_no_chan() { + // Test that setting the CLTV below the hard-coded minimum fails whether we're intercepting for + // a channel or not. 
+ for flag in ALL_FLAGS { + do_test_htlc_interception_flags(flag as u8, flag, Some(ForwardingMod::CLTVBelowMin)); + } +} diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index e782fee92f6..b077c98ae73 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -84,6 +84,9 @@ pub mod functional_tests; #[cfg(any(test, feature = "_externalize_tests"))] #[allow(unused_mut)] pub mod htlc_reserve_unit_tests; +#[cfg(any(test, feature = "_externalize_tests"))] +#[allow(unused_mut)] +pub mod interception_tests; #[cfg(test)] #[allow(unused_mut)] mod max_payment_path_len_tests; diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 8f209c88e25..8ac87fbdb10 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -45,6 +45,7 @@ use crate::sign::EntropySource; use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::types::string::UntrustedString; +use crate::util::config::HTLCInterceptionFlags; use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::test_utils; @@ -2210,7 +2211,8 @@ fn do_test_intercepted_payment(test: InterceptTest) { let mut zero_conf_chan_config = test_default_channel_config(); zero_conf_chan_config.manually_accept_inbound_channels = true; let mut intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; let configs = [None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); @@ -2435,7 +2437,8 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let max_in_flight_percent = 10; let mut intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; intercept_forwards_config .channel_handshake_config .max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent; diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index a38262e6952..4fb2753b6be 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -26,7 +26,7 @@ use crate::util::test_channel_signer::TestChannelSigner; use crate::util::test_utils; use crate::util::errors::APIError; use crate::util::ser::{Writeable, ReadableArgs}; -use crate::util::config::UserConfig; +use crate::util::config::{HTLCInterceptionFlags, UserConfig}; use bitcoin::hashes::Hash; use bitcoin::hash_types::BlockHash; @@ -931,7 +931,8 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let new_chain_monitor; let mut intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); let nodes_1_deserialized; @@ -1189,7 +1190,8 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { let persister; let new_chain_monitor; let mut intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + 
intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); let nodes_1_deserialized; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index dd1aaa40424..feb326cfad6 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -855,6 +855,111 @@ impl crate::util::ser::Readable for LegacyChannelConfig { } } +/// Flags which can be set on [`UserConfig::htlc_interception_flags`]. Each flag selects some set +/// of HTLCs which are forwarded across this node to be intercepted instead, generating an +/// [`Event::HTLCIntercepted`] instead of automatically forwarding the HTLC and allowing it to be +/// forwarded or rejected manually. +/// +/// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum HTLCInterceptionFlags { + /// If this flag is set, LDK will intercept HTLCs that are attempting to be forwarded over fake + /// short channel ids generated via [`ChannelManager::get_intercept_scid`]. This allows you to + /// only intercept HTLCs which are specifically marked for interception by the invoice being + /// paid. + /// + /// Note that because LDK is not aware of which channel the HTLC will be forwarded over at the + /// time of interception, only basic checks to ensure the fee the HTLC intends to pay is not + /// negative and a minimum CLTV delta between the incoming and outgoing HTLC edge are performed + /// before the [`Event::HTLCIntercepted`] is generated. You must validate the fee and CLTV + /// delta meets your requirements before forwarding the HTLC. + /// + /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToInterceptSCIDs = 1 << 0, + /// If this flag is set, any attempts to forward a payment to a private channel while the + /// channel counterparty is offline will instead generate an [`Event::HTLCIntercepted`] which + /// must be handled the same as any other intercepted HTLC. + /// + /// This is useful for LSPs that may need to wake the recipient node (e.g. via a mobile push + /// notification). Note that in this case you must ensure that you set a quick timeout to fail + /// the HTLC if the recipient node fails to come online (e.g. within 10 seconds). + /// + /// Before interception, the HTLC is validated against the forwarding config of the outbound + /// channel to ensure it pays sufficient fee and meets the + /// [`ChannelConfig::cltv_expiry_delta`]. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToOfflinePrivateChannels = 1 << 1, + /// If this flag is set, any attempts to forward a payment to a private channel while the + /// channel counterparty is online will instead generate an [`Event::HTLCIntercepted`] which + /// must be handled the same as any other intercepted HTLC. + /// + /// This is the complement to [`Self::ToOfflinePrivateChannels`] and, together, they allow + /// intercepting all HTLCs destined for private channels. This may be useful for LSPs that wish + /// to take an additional fee paid by the recipient on all forwards to clients. 
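+ ///
+ /// For example, an LSP skimming a flat fee from each such forward might cap
+ /// the amount it passes to `ChannelManager::forward_intercepted_htlc` as
+ /// follows (a sketch only; `lsp_fee_msat` is an assumed, application-defined
+ /// policy value, and the amounts are fields of the `HTLCIntercepted` event):
+ ///
+ /// ```ignore
+ /// let max_forward = inbound_amount_msat.checked_sub(lsp_fee_msat);
+ /// let amt_to_forward_msat =
+ ///     max_forward.map(|max| expected_outbound_amount_msat.min(max));
+ /// ```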
+ /// + /// Before interception, the HTLC is validated against the forwarding config of the outbound + /// channel to ensure it pays sufficient fee and meets the + /// [`ChannelConfig::cltv_expiry_delta`]. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToOnlinePrivateChannels = 1 << 2, + /// If this flag is set, any attempts to forward a payment to a publicly announced channel will + /// instead generate an [`Event::HTLCIntercepted`] which must be handled the same as any other + /// intercepted HTLC. + /// + /// Before interception, the HTLC is validated against the forwarding config of the outbound + /// channel to ensure it pays sufficient fee and meets the + /// [`ChannelConfig::cltv_expiry_delta`]. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToPublicChannels = 1 << 3, + /// If these flags are set, any attempts to forward a payment to a channel of ours or a fake + /// short channel id generated via [`ChannelManager::get_intercept_scid`] will instead generate + /// an [`Event::HTLCIntercepted`] which must be handled the same as any other intercepted HTLC. + /// + /// In the case of intercept SCIDs, only basic checks to ensure the fee the HTLC intends to pay + /// is not negative and a minimum CLTV delta between the incoming and outgoing HTLC edge are + /// performed before the [`Event::HTLCIntercepted`] is generated. You must validate the fee and + /// CLTV delta meets your requirements before forwarding the HTLC. + /// + /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToAllKnownSCIDs = Self::ToInterceptSCIDs as isize + | Self::ToOfflinePrivateChannels as isize + | Self::ToOnlinePrivateChannels as isize + | Self::ToPublicChannels as isize, + /// If this flag is set, any attempts to forward a payment to an unknown short channel id will + /// instead generate an [`Event::HTLCIntercepted`] which must be handled the same as any other + /// intercepted HTLC. + /// + /// Note that because LDK is not aware of which channel the HTLC will be forwarded over at the + /// time of interception, only basic checks to ensure the fee the HTLC intends to pay is not + /// negative and a minimum CLTV delta between the incoming and outgoing HTLC edge are performed + /// before the [`Event::HTLCIntercepted`] is generated. You must validate the fee and CLTV + /// delta meets your requirements before forwarding the HTLC. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToUnknownSCIDs = 1 << 4, + /// If these flags are set, all HTLCs being forwarded over this node will instead generate an + /// [`Event::HTLCIntercepted`] which must be handled the same as any other intercepted HTLC. + /// + /// In the case of intercept or unknown SCIDs, only basic checks to ensure the fee the HTLC + /// intends to pay is not negative and a minimum CLTV delta between the incoming and outgoing + /// HTLC edge are performed before the [`Event::HTLCIntercepted`] is generated. You must + /// validate the fee and CLTV delta meets your requirements before forwarding the HTLC. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + AllValidHTLCs = Self::ToAllKnownSCIDs as isize | Self::ToUnknownSCIDs as isize, +} + +impl Into for HTLCInterceptionFlags { + fn into(self) -> u8 { + self as u8 + } +} + /// Top-level config which holds ChannelHandshakeLimits and ChannelConfig. 
/// /// `Default::default()` provides sane defaults for most configurations @@ -907,17 +1012,21 @@ pub struct UserConfig { /// [`msgs::OpenChannel`]: crate::ln::msgs::OpenChannel /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel pub manually_accept_inbound_channels: bool, - /// If this is set to `true`, LDK will intercept HTLCs that are attempting to be forwarded over - /// fake short channel ids generated via [`ChannelManager::get_intercept_scid`]. Upon HTLC - /// intercept, LDK will generate an [`Event::HTLCIntercepted`] which MUST be handled by the user. + /// Flags consisting of OR'd values from [`HTLCInterceptionFlags`] which describe HTLCs + /// forwarded over this node to intercept. Any HTLCs which are intercepted will generate an + /// [`Event::HTLCIntercepted`] event which must be handled to forward or fail the HTLC. /// - /// Setting this to `true` may break backwards compatibility with LDK versions < 0.0.113. + /// Do NOT hold on to intercepted HTLCs for more than a few seconds, they must always be + /// forwarded or failed nearly immediately to avoid performing accidental denial of service + /// attacks against other lightning nodes and being punished appropriately by other nodes. /// - /// Default value: `false` + /// To ensure efficiency and reliable HTLC latency you should ensure you only intercept types + /// of HTLCs which you need to manually forward or reject. + /// + /// Default value: `0` (indicating no HTLCs will be intercepted). /// - /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted - pub accept_intercept_htlcs: bool, + pub htlc_interception_flags: u8, /// If this is set to `true`, the user needs to manually pay [`Bolt12Invoice`]s when received. /// /// When set to `true`, [`Event::InvoiceReceived`] will be generated for each received @@ -984,7 +1093,7 @@ impl Default for UserConfig { accept_forwards_to_priv_channels: false, accept_inbound_channels: true, manually_accept_inbound_channels: false, - accept_intercept_htlcs: false, + htlc_interception_flags: 0, manually_handle_bolt12_invoices: false, enable_dual_funded_channels: false, enable_htlc_hold: false, @@ -1007,7 +1116,7 @@ impl Readable for UserConfig { accept_forwards_to_priv_channels: Readable::read(reader)?, accept_inbound_channels: Readable::read(reader)?, manually_accept_inbound_channels: Readable::read(reader)?, - accept_intercept_htlcs: Readable::read(reader)?, + htlc_interception_flags: Readable::read(reader)?, manually_handle_bolt12_invoices: Readable::read(reader)?, enable_dual_funded_channels: Readable::read(reader)?, hold_outbound_htlcs_at_next_hop: Readable::read(reader)?, From ce91315bb057d6707ea89c55d44b55e0e0ce30a6 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 23 Jan 2026 02:15:04 +0000 Subject: [PATCH 124/242] Expose the outgoing HTLC's CLTV expiry in `Event::HTLCIntercepted` In the previous commit our interception documentation noted that in some cases devs may wish to validate the CLTV expiry of HTLCs before forwarding. 
Here we enable that by exposing the next-hop's CLTV value in `Event::HTLCIntercepted` --- lightning/src/events/mod.rs | 12 ++++++++++++ lightning/src/ln/channelmanager.rs | 1 + lightning/src/ln/payment_tests.rs | 4 ++++ 3 files changed, 17 insertions(+) diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 277ce612494..b029caa30d7 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1287,6 +1287,13 @@ pub enum Event { /// [`Self::HTLCIntercepted::inbound_amount_msat`]) or subtract it as required. Further, /// LDK will not stop you from forwarding more than you received. expected_outbound_amount_msat: u64, + /// The block height at which the forwarded HTLC sent to our peer will time out. In + /// practice, LDK will refuse to forward an HTLC several blocks before this height (as if + /// we attempted to forward an HTLC at this height we'd run some risk that our peer + /// force-closes the channel immediately). + /// + /// This will only be `None` for events generated or serialized by LDK 0.2 or prior. + outgoing_htlc_expiry_block_height: Option, }, /// Used to indicate that an output which you should know how to spend was confirmed on chain /// and is now spendable. @@ -2017,11 +2024,13 @@ impl Writeable for Event { inbound_amount_msat, expected_outbound_amount_msat, intercept_id, + outgoing_htlc_expiry_block_height, } => { 6u8.write(writer)?; let intercept_scid = InterceptNextHop::FakeScid { requested_next_hop_scid }; write_tlv_fields!(writer, { (0, intercept_id, required), + (1, outgoing_htlc_expiry_block_height, option), (2, intercept_scid, required), (4, payment_hash, required), (6, inbound_amount_msat, required), @@ -2526,8 +2535,10 @@ impl MaybeReadable for Event { InterceptNextHop::FakeScid { requested_next_hop_scid: 0 }; let mut inbound_amount_msat = 0; let mut expected_outbound_amount_msat = 0; + let mut outgoing_htlc_expiry_block_height = None; read_tlv_fields!(reader, { (0, intercept_id, required), + (1, outgoing_htlc_expiry_block_height, option), (2, requested_next_hop_scid, required), (4, payment_hash, required), (6, inbound_amount_msat, required), @@ -2542,6 +2553,7 @@ impl MaybeReadable for Event { inbound_amount_msat, expected_outbound_amount_msat, intercept_id, + outgoing_htlc_expiry_block_height, })) }, 7u8 => { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index eeb5a536483..fd5e5d15b9f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3409,6 +3409,7 @@ fn create_htlc_intercepted_event( inbound_amount_msat, expected_outbound_amount_msat: pending_add.forward_info.outgoing_amt_msat, intercept_id, + outgoing_htlc_expiry_block_height: Some(pending_add.forward_info.outgoing_cltv_value), }) } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 8ac87fbdb10..14446239a31 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -2277,6 +2277,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { // Check that we generate the PaymentIntercepted event when an intercept forward is detected. 
let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); + let expected_cltv = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; let (intercept_id, outbound_amt) = match events[0] { crate::events::Event::HTLCIntercepted { intercept_id, @@ -2284,10 +2285,12 @@ fn do_test_intercepted_payment(test: InterceptTest) { payment_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id, + outgoing_htlc_expiry_block_height, } => { assert_eq!(payment_hash, hash); assert_eq!(inbound_amount_msat, route.get_total_amount() + route.get_total_fees()); assert_eq!(short_channel_id, intercept_scid); + assert_eq!(outgoing_htlc_expiry_block_height.unwrap(), expected_cltv); (intercept_id, expected_outbound_amount_msat) }, _ => panic!(), @@ -2356,6 +2359,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; + assert_eq!(payment_event.msgs[0].cltv_expiry, expected_cltv); nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); let commitment = &payment_event.commitment_msg; do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, true); From 8b1386c78a836c17d63c46f15efd0c537f43c841 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 27 Jan 2026 12:47:13 +0100 Subject: [PATCH 125/242] Add basic `CLAUDE.md` file We add a basic `CLAUDE.md` file pointing it to some workspace specifics (in particular running `cargo fmt` on 1.75.0 MSRV, and using `./ci/ci-tests.sh` to run tests). Signed-off-by: Elias Rohrer --- CLAUDE.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 CLAUDE.md diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000000..611322c12fe --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,17 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +See [CONTRIBUTING.md](CONTRIBUTING.md) for build commands, testing, code style, and development workflow. + +## Workspace Structure + +See [README.md](README.md) for the workspace layout and [ARCH.md](ARCH.md) for some additional remark regarding important parts of LDK's architecture. + +## Development Rules + +- Always ensure tests pass before committing. To this end, you should run the test suite via `./ci/ci-tests.sh`. +- Run `cargo +1.75.0 fmt --all` after every code change +- Never add new dependencies unless explicitly requested +- Please always disclose the use of any AI tools in commit messages and PR descriptions using a `Co-Authored-By:` line. +- When adding new `.rs` files, please ensure to always add the licensing header as found, e.g., in `lightning/src/lib.rs` and other files. From beccec2d2ac32e8f4c0055d8039921ff2f7245fd Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 28 Jan 2026 12:48:57 +0100 Subject: [PATCH 126/242] Refactor monitor file listing in tests to filter .tmp files Extract common logic for listing monitor files into a helper function that filters out temporary .tmp files created during persistence operations. This simplifies test code and improves reliability on systems where directory iteration order is non-deterministic. 
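The filtering rule itself, as a standalone sketch (the real helper is in the diff below; the case-insensitive comparison is what covers Windows' ReplaceFileW leaving uppercase `.TMP` backups):

    use std::path::Path;

    // Treat any `.tmp`/`.TMP` file as a transient persistence artifact.
    fn is_transient_tmp(path: &Path) -> bool {
        path.extension()
            .and_then(|ext| ext.to_str())
            .map_or(false, |ext| ext.eq_ignore_ascii_case("tmp"))
    }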
Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 68 +++++++++++------------ 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index c38d6dfe080..e5ecb5b823d 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -1984,6 +1984,26 @@ mod tests { const EVENT_DEADLINE: Duration = Duration::from_millis(5 * (FRESHNESS_TIMER.as_millis() as u64)); + /// Reads a directory and returns only non-`.tmp` files. + /// The file system may return files in any order, and during persistence + /// operations there may be temporary `.tmp` files present. + fn list_monitor_files(dir: &str) -> Vec<std::fs::DirEntry> { + std::fs::read_dir(dir) + .unwrap() + .filter_map(|entry| { + let entry = entry.unwrap(); + let path_str = entry.path().to_str().unwrap().to_lowercase(); + // Skip any .tmp files that may exist during persistence. + // On Windows, ReplaceFileW creates backup files with .TMP (uppercase). + if path_str.ends_with(".tmp") { + None + } else { + Some(entry) + } + }) + .collect() + } + #[derive(Clone, Hash, PartialEq, Eq)] struct TestDescriptor {} impl SocketDescriptor for TestDescriptor { @@ -3787,30 +3807,20 @@ mod tests { ); let dir = format!("{}_persister_1/monitors", &persist_dir); - let mut mons = std::fs::read_dir(&dir).unwrap(); - let mut mon = mons.next().unwrap().unwrap(); - if mon.path().to_str().unwrap().ends_with(".tmp") { - mon = mons.next().unwrap().unwrap(); - assert_eq!(mon.path().extension(), None); - } - assert!(mons.next().is_none()); + let mut mons = list_monitor_files(&dir); + assert_eq!(mons.len(), 1); + let mon = mons.pop().unwrap(); // Because the channel wasn't funded, we'll archive the ChannelMonitor immediately after // it's force-closed (at least on node B, which didn't put their money into it). nodes[1].node.force_close_all_channels_broadcasting_latest_txn("".to_owned()); loop { - let mut mons = std::fs::read_dir(&dir).unwrap(); - if let Some(new_mon) = mons.next() { - let mut new_mon = new_mon.unwrap(); - if new_mon.path().to_str().unwrap().ends_with(".tmp") { - new_mon = mons.next().unwrap().unwrap(); - assert_eq!(new_mon.path().extension(), None); - } - assert_eq!(new_mon.path(), mon.path()); - assert!(mons.next().is_none()); - } else { + let mons = list_monitor_files(&dir); + if mons.is_empty() { break; } + assert_eq!(mons.len(), 1); + assert_eq!(mons[0].path(), mon.path()); } bp.stop().unwrap(); @@ -3855,30 +3865,20 @@ mod tests { )); let dir = format!("{}_persister_1/monitors", &persist_dir); - let mut mons = std::fs::read_dir(&dir).unwrap(); - let mut mon = mons.next().unwrap().unwrap(); - if mon.path().to_str().unwrap().ends_with(".tmp") { - mon = mons.next().unwrap().unwrap(); - assert_eq!(mon.path().extension(), None); - } - assert!(mons.next().is_none()); + let mut mons = list_monitor_files(&dir); + assert_eq!(mons.len(), 1); + let mon = mons.pop().unwrap(); // Because the channel wasn't funded, we'll archive the ChannelMonitor immediately after // it's force-closed (at least on node B, which didn't put their money into it).
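+ // (Archiving happens in the background, so the loop below polls, yielding to the runtime, until the monitor file disappears from the directory.)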
nodes[1].node.force_close_all_channels_broadcasting_latest_txn("".to_owned()); loop { - let mut mons = std::fs::read_dir(&dir).unwrap(); - if let Some(new_mon) = mons.next() { - let mut new_mon = new_mon.unwrap(); - if new_mon.path().to_str().unwrap().ends_with(".tmp") { - new_mon = mons.next().unwrap().unwrap(); - assert_eq!(new_mon.path().extension(), None); - } - assert_eq!(new_mon.path(), mon.path()); - assert!(mons.next().is_none()); - } else { + let mons = list_monitor_files(&dir); + if mons.is_empty() { break; } + assert_eq!(mons.len(), 1); + assert_eq!(mons[0].path(), mon.path()); tokio::task::yield_now().await; } From 0f253c0b9d310e074668bb00a724a2c3b0ba6620 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 25 Jan 2026 14:30:10 +0000 Subject: [PATCH 127/242] Use a single `WithContext` wrapper rather than several log-wrappers In `ChannelMonitor` logging, we often wrap a logger with `WithChannelMonitor` to automatically include metadata in our structured logging. That's great, except that having too many logger-wrapping types flying around makes for less compatibility when methods want to require a wrapped logger. Here we change the `WithChannelMonitor` "constructors" to actually return a `WithContext` instead, making things more consistent. --- lightning/src/chain/channelmonitor.rs | 77 ++++++++++----------------- 1 file changed, 29 insertions(+), 48 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 515a3dc5f1d..fc9ffec7f8f 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -66,7 +66,7 @@ use crate::sign::{ use crate::types::features::ChannelTypeFeatures; use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::byte_utils; -use crate::util::logger::{Logger, Record}; +use crate::util::logger::{Logger, WithContext}; use crate::util::persist::MonitorName; use crate::util::ser::{ MaybeReadable, Readable, ReadableArgs, RequiredWrapper, UpgradableRequired, Writeable, Writer, @@ -1825,45 +1825,27 @@ macro_rules!
_process_events_body { } pub(super) use _process_events_body as process_events_body; -pub(crate) struct WithChannelMonitor<'a, L: Deref> -where - L::Target: Logger, -{ - logger: &'a L, - peer_id: Option, - channel_id: Option, - payment_hash: Option, -} +pub(crate) struct WithChannelMonitor; -impl<'a, L: Deref> Logger for WithChannelMonitor<'a, L> -where - L::Target: Logger, -{ - fn log(&self, mut record: Record) { - record.peer_id = self.peer_id; - record.channel_id = self.channel_id; - record.payment_hash = self.payment_hash; - self.logger.log(record) - } -} - -impl<'a, L: Deref> WithChannelMonitor<'a, L> -where - L::Target: Logger, -{ - pub(crate) fn from( +impl WithChannelMonitor { + pub(crate) fn from<'a, L: Deref, S: EcdsaChannelSigner>( logger: &'a L, monitor: &ChannelMonitor, payment_hash: Option, - ) -> Self { + ) -> WithContext<'a, L> + where + L::Target: Logger, + { Self::from_impl(logger, &*monitor.inner.lock().unwrap(), payment_hash) } - #[rustfmt::skip] - pub(crate) fn from_impl(logger: &'a L, monitor_impl: &ChannelMonitorImpl, payment_hash: Option) -> Self { + pub(crate) fn from_impl<'a, L: Deref, S: EcdsaChannelSigner>( + logger: &'a L, monitor_impl: &ChannelMonitorImpl, payment_hash: Option, + ) -> WithContext<'a, L> + where + L::Target: Logger, + { let peer_id = Some(monitor_impl.counterparty_node_id); let channel_id = Some(monitor_impl.channel_id()); - WithChannelMonitor { - logger, peer_id, channel_id, payment_hash, - } + WithContext::from(logger, peer_id, channel_id, payment_hash) } } @@ -3829,7 +3811,7 @@ impl ChannelMonitorImpl { fn provide_payment_preimage( &mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, payment_info: &Option, broadcaster: &B, - fee_estimator: &LowerBoundedFeeEstimator, logger: &WithChannelMonitor) + fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, @@ -4006,7 +3988,7 @@ impl ChannelMonitorImpl { /// /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]: crate::chain::channelmonitor::ChannelMonitor::broadcast_latest_holder_commitment_txn pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast( - &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithChannelMonitor, + &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, require_funding_seen: bool, ) where @@ -4034,8 +4016,7 @@ impl ChannelMonitorImpl { } fn renegotiated_funding( - &mut self, logger: &WithChannelMonitor, - channel_parameters: &ChannelTransactionParameters, + &mut self, logger: &WithContext, channel_parameters: &ChannelTransactionParameters, alternative_holder_commitment_tx: &HolderCommitmentTransaction, alternative_counterparty_commitment_tx: &CommitmentTransaction, ) -> Result<(), ()> @@ -4210,7 +4191,7 @@ impl ChannelMonitorImpl { #[rustfmt::skip] fn update_monitor( - &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithChannelMonitor + &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithContext ) -> Result<(), ()> where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -5254,7 +5235,7 @@ impl ChannelMonitorImpl { /// Note that this includes possibly-locktimed-in-the-future transactions! 
#[rustfmt::skip] fn unsafe_get_latest_holder_commitment_txn( - &mut self, logger: &WithChannelMonitor + &mut self, logger: &WithContext ) -> Vec where L::Target: Logger { log_debug!(logger, "Getting signed copy of latest holder commitment transaction!"); let commitment_tx = { @@ -5307,7 +5288,7 @@ impl ChannelMonitorImpl { #[rustfmt::skip] fn block_connected( &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, - fee_estimator: F, logger: &WithChannelMonitor, + fee_estimator: F, logger: &WithContext, ) -> Vec where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -5327,7 +5308,7 @@ impl ChannelMonitorImpl { height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, + logger: &WithContext, ) -> Vec where B::Target: BroadcasterInterface, @@ -5360,7 +5341,7 @@ impl ChannelMonitorImpl { height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, + logger: &WithContext, ) -> Vec where B::Target: BroadcasterInterface, @@ -5647,7 +5628,7 @@ impl ChannelMonitorImpl { mut claimable_outpoints: Vec, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, + logger: &WithContext, ) -> Vec where B::Target: BroadcasterInterface, @@ -5867,7 +5848,7 @@ impl ChannelMonitorImpl { #[rustfmt::skip] fn blocks_disconnected( - &mut self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &WithChannelMonitor + &mut self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &WithContext ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, @@ -5920,7 +5901,7 @@ impl ChannelMonitorImpl { txid: &Txid, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, + logger: &WithContext, ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -6031,7 +6012,7 @@ impl ChannelMonitorImpl { #[rustfmt::skip] fn should_broadcast_holder_commitment_txn( - &self, logger: &WithChannelMonitor + &self, logger: &WithContext ) -> Option where L::Target: Logger { // There's no need to broadcast our commitment transaction if we've seen one confirmed (even // with 1 confirmation) as it'll be rejected as duplicate/conflicting. @@ -6098,7 +6079,7 @@ impl ChannelMonitorImpl { /// or counterparty commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC #[rustfmt::skip] fn is_resolving_htlc_output( - &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor, + &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithContext, ) where L::Target: Logger { let funding_spent = get_confirmed_funding_scope!(self); @@ -6355,7 +6336,7 @@ impl ChannelMonitorImpl { /// own. 
#[rustfmt::skip] fn check_tx_and_push_spendable_outputs( - &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor, + &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithContext, ) where L::Target: Logger { let funding_spent = get_confirmed_funding_scope!(self); for spendable_output in self.get_spendable_outputs(funding_spent, tx) { From 5e64c4018492cba44761fbd9427ac0df0cc7cdd2 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 25 Jan 2026 14:39:19 +0000 Subject: [PATCH 128/242] Require `WithContext` log wrappers on `OutboundPayments` calls In much of LDK we pass around `Logger` objects both to avoid having to `Clone` `Logger` `Deref`s (soon to only be `Logger`s) and to allow us to set context with a wrapper such that any log calls on that wrapper get additional useful metadata in them. Sadly, when we added a `Logger` type to `OutboundPayments` we broke the ability to do the second thing: payment information logged directly or indirectly via logic in `OutboundPayments` has no context, making log-searching rather challenging. Here we fix this by returning to passing loggers explicitly to `OutboundPayments` methods that need them, specifically requiring `WithContext` wrappers to ensure the callsite sets appropriate context on the logger. Fixes #4307 --- lightning/src/ln/channelmanager.rs | 39 +++- lightning/src/ln/outbound_payment.rs | 265 +++++++++++++++------------ 2 files changed, 175 insertions(+), 129 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index fd5e5d15b9f..77933fb1fc4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2672,7 +2672,7 @@ pub struct ChannelManager< /// after reloading from disk while replaying blocks against ChannelMonitors. /// /// See `PendingOutboundPayment` documentation for more info. - pending_outbound_payments: OutboundPayments<L>, + pending_outbound_payments: OutboundPayments, /// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
/// @@ -3485,7 +3485,7 @@ where best_block: RwLock::new(params.best_block), outbound_scid_aliases: Mutex::new(new_hash_set()), - pending_outbound_payments: OutboundPayments::new(new_hash_map(), logger.clone()), + pending_outbound_payments: OutboundPayments::new(new_hash_map()), forward_htlcs: Mutex::new(new_hash_map()), decode_update_add_htlcs: Mutex::new(new_hash_map()), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }), @@ -5354,11 +5354,12 @@ where }); if route.route_params.is_none() { route.route_params = Some(route_params.clone()); } let router = FixedRouter::new(route); + let logger = WithContext::from(&self.logger, None, None, Some(payment_hash)); self.pending_outbound_payments .send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0), route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, - &self.pending_events, |args| self.send_payment_along_path(args)) + &self.pending_events, |args| self.send_payment_along_path(args), &logger) } /// Sends a payment to the route found using the provided [`RouteParameters`], retrying failed @@ -5418,6 +5419,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, Some(payment_hash)), ) } @@ -5516,6 +5518,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, Some(invoice.payment_hash())), ) } @@ -5568,6 +5571,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, None), ) } @@ -5748,6 +5752,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, None), ) } @@ -5826,6 +5831,7 @@ where ) -> Result { let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let payment_hash = payment_preimage.map(|preimage| preimage.into()); self.pending_outbound_payments.send_spontaneous_payment( payment_preimage, recipient_onion, @@ -5840,6 +5846,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, payment_hash), ) } @@ -7252,6 +7259,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, None), ); if needs_persist { should_persist = NotifyOption::DoPersist; @@ -8644,6 +8652,7 @@ where // being fully configured. See the docs for `ChannelManagerReadArgs` for more. match source { HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => { + let logger = WithContext::from(&self.logger, None, None, Some(*payment_hash)); self.pending_outbound_payments.fail_htlc( source, payment_hash, @@ -8655,6 +8664,7 @@ where &self.secp_ctx, &self.pending_events, &mut from_monitor_update_completion, + &logger, ); if let Some(update) = from_monitor_update_completion { // If `fail_htlc` didn't `take` the post-event action, we should go ahead and @@ -9345,6 +9355,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ from_onchain, &mut ev_completion_action, &self.pending_events, + &WithContext::from(&self.logger, None, None, Some(payment_preimage.into())), ); // If an event was generated, `claim_htlc` set `ev_completion_action` to None, if // not, we should go ahead and run it now (as the claim was duplicative), at least @@ -18064,8 +18075,7 @@ where } pending_outbound_payments = Some(outbounds); } - let pending_outbounds = - OutboundPayments::new(pending_outbound_payments.unwrap(), args.logger.clone()); + let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap()); for (peer_pubkey, peer_storage) in peer_storage_dir { if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) { @@ -18418,6 +18428,7 @@ where session_priv_bytes, &path, best_block_height, + &logger, ); } } @@ -18452,7 +18463,7 @@ where &mut decode_update_add_htlcs, &prev_hop_data, "HTLC already forwarded to the outbound edge", - &args.logger, + &&logger, ); } @@ -18469,7 +18480,7 @@ where &mut decode_update_add_htlcs_legacy, &prev_hop_data, "HTLC was forwarded to the closed channel", - &args.logger, + &&logger, ); forward_htlcs_legacy.retain(|_, forwards| { forwards.retain(|forward| { @@ -18526,6 +18537,7 @@ where true, &mut compl_action, &pending_events, + &logger, ); // If the completion action was not consumed, then there was no // payment to claim, and we need to tell the `ChannelMonitor` @@ -18579,8 +18591,10 @@ where } } for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { + let logger = + WithChannelMonitor::from(&args.logger, monitor, Some(payment_hash)); log_info!( - args.logger, + logger, "Failing HTLC with payment hash {} as it was resolved on-chain.", payment_hash ); @@ -18648,6 +18662,11 @@ where // inbound edge of the payment's monitor has already claimed // the HTLC) we skip trying to replay the claim. let htlc_payment_hash: PaymentHash = payment_preimage.into(); + let logger = WithChannelMonitor::from( + &args.logger, + monitor, + Some(htlc_payment_hash), + ); let balance_could_incl_htlc = |bal| match bal { &Balance::ClaimableOnChannelClose { .. } => { // The channel is still open, assume we can still @@ -18670,7 +18689,7 @@ where // edge monitor but the channel is closed (and thus we'll // immediately panic if we call claim_funds_from_hop). if short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).is_none() { - log_error!(args.logger, + log_error!(logger, "We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124.\ All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1", htlc_payment_hash, @@ -18685,7 +18704,7 @@ where // of panicking at runtime. The user ideally should have read // the release notes and we wouldn't be here, but we go ahead // and let things run in the hope that it'll all just work out. 
- log_error!(args.logger, + log_error!(logger, "We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably.\ As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\ All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\ diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 65493829635..bab3bf616d0 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -34,7 +34,7 @@ use crate::sign::{EntropySource, NodeSigner, Recipient}; use crate::types::features::Bolt12InvoiceFeatures; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::util::errors::APIError; -use crate::util::logger::Logger; +use crate::util::logger::{Logger, WithContext}; use crate::util::ser::ReadableArgs; #[cfg(feature = "std")] use crate::util::time::Instant; @@ -837,22 +837,15 @@ pub(super) struct SendAlongPathArgs<'a> { pub hold_htlc_at_next_hop: bool, } -pub(super) struct OutboundPayments -where - L::Target: Logger, -{ +pub(super) struct OutboundPayments { pub(super) pending_outbound_payments: Mutex>, awaiting_invoice: AtomicBool, retry_lock: Mutex<()>, - logger: L, } -impl OutboundPayments -where - L::Target: Logger, -{ +impl OutboundPayments { pub(super) fn new( - pending_outbound_payments: HashMap, logger: L, + pending_outbound_payments: HashMap, ) -> Self { let has_invoice_requests = pending_outbound_payments.values().any(|payment| { matches!( @@ -867,17 +860,19 @@ where pending_outbound_payments: Mutex::new(pending_outbound_payments), awaiting_invoice: AtomicBool::new(has_invoice_requests), retry_lock: Mutex::new(()), - logger, } } +} +impl OutboundPayments { #[rustfmt::skip] - pub(super) fn send_payment( + pub(super) fn send_payment( &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, first_hops: Vec, compute_inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, pending_events: &Mutex)>>, send_payment_along_path: SP, + logger: &WithContext, ) -> Result<(), RetryableSendFailure> where R::Target: Router, @@ -885,19 +880,21 @@ where NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, + L::Target: Logger, { self.send_payment_for_non_bolt12_invoice(payment_id, payment_hash, recipient_onion, None, retry_strategy, route_params, router, first_hops, &compute_inflight_htlcs, entropy_source, node_signer, - best_block_height, pending_events, &send_payment_along_path) + best_block_height, pending_events, &send_payment_along_path, logger) } #[rustfmt::skip] - pub(super) fn send_spontaneous_payment( + pub(super) fn send_spontaneous_payment( &self, payment_preimage: Option, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, - pending_events: &Mutex)>>, send_payment_along_path: SP + pending_events: &Mutex)>>, send_payment_along_path: SP, + logger: &WithContext, ) -> Result where R::Target: Router, @@ -905,18 +902,20 @@ where NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, + L::Target: Logger, { let preimage = payment_preimage 
.unwrap_or_else(|| PaymentPreimage(entropy_source.get_secure_random_bytes())); let payment_hash = PaymentHash(Sha256::hash(&preimage.0).to_byte_array()); self.send_payment_for_non_bolt12_invoice(payment_id, payment_hash, recipient_onion, Some(preimage), retry_strategy, route_params, router, first_hops, inflight_htlcs, entropy_source, - node_signer, best_block_height, pending_events, send_payment_along_path) - .map(|()| payment_hash) + node_signer, best_block_height, pending_events, send_payment_along_path, logger, + ) + .map(|()| payment_hash) } #[rustfmt::skip] - pub(super) fn pay_for_bolt11_invoice( + pub(super) fn pay_for_bolt11_invoice( &self, invoice: &Bolt11Invoice, payment_id: PaymentId, amount_msats: Option, route_params_config: RouteParametersConfig, @@ -925,6 +924,7 @@ where first_hops: Vec, compute_inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, pending_events: &Mutex)>>, send_payment_along_path: SP, + logger: &WithContext, ) -> Result<(), Bolt11PaymentError> where R::Target: Router, @@ -932,6 +932,7 @@ where NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, + L::Target: Logger, { let payment_hash = invoice.payment_hash(); @@ -957,20 +958,20 @@ where self.send_payment_for_non_bolt12_invoice(payment_id, payment_hash, recipient_onion, None, retry_strategy, route_params, router, first_hops, compute_inflight_htlcs, entropy_source, node_signer, best_block_height, - pending_events, send_payment_along_path + pending_events, send_payment_along_path, logger, ).map_err(|err| Bolt11PaymentError::SendingFailed(err)) } #[rustfmt::skip] pub(super) fn send_payment_for_bolt12_invoice< - R: Deref, ES: Deref, NS: Deref, NL: Deref, IH, SP + R: Deref, ES: Deref, NS: Deref, NL: Deref, IH, SP, L: Deref, >( &self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R, first_hops: Vec, features: Bolt12InvoiceFeatures, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, best_block_height: u32, pending_events: &Mutex)>>, - send_payment_along_path: SP, + send_payment_along_path: SP, logger: &WithContext, ) -> Result<(), Bolt12PaymentError> where R::Target: Router, @@ -979,6 +980,7 @@ where NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, + L::Target: Logger, { let (payment_hash, retry_strategy, params_config, _) = self @@ -1002,13 +1004,13 @@ where self.send_payment_for_bolt12_invoice_internal( payment_id, payment_hash, None, None, invoice, route_params, retry_strategy, false, router, first_hops, inflight_htlcs, entropy_source, node_signer, node_id_lookup, secp_ctx, - best_block_height, pending_events, send_payment_along_path + best_block_height, pending_events, send_payment_along_path, logger, ) } #[rustfmt::skip] fn send_payment_for_bolt12_invoice_internal< - R: Deref, ES: Deref, NS: Deref, NL: Deref, IH, SP + R: Deref, ES: Deref, NS: Deref, NL: Deref, IH, SP, L: Deref, >( &self, payment_id: PaymentId, payment_hash: PaymentHash, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, @@ -1017,7 +1019,7 @@ where first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, best_block_height: u32, pending_events: &Mutex)>>, - send_payment_along_path: SP, + send_payment_along_path: SP, logger: &WithContext, ) -> Result<(), Bolt12PaymentError> where R::Target: Router, @@ -1026,6 +1028,7 @@ where NL::Target: NodeIdLookUp, IH: Fn() -> 
InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, + L::Target: Logger, { // Advance any blinded path where the introduction node is our node. if let Ok(our_node_id) = node_signer.get_node_id(Recipient::Node) { @@ -1053,6 +1056,7 @@ where let route = match self.find_initial_route( payment_id, payment_hash, &recipient_onion, keysend_preimage, invoice_request, &mut route_params, router, &first_hops, &inflight_htlcs, node_signer, best_block_height, + logger, ) { Ok(route) => route, Err(e) => { @@ -1102,14 +1106,14 @@ where best_block_height, &send_payment_along_path ); log_info!( - self.logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, + logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, result ); if let Err(e) = result { self.handle_pay_route_err( e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, &inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, - &send_payment_along_path + &send_payment_along_path, logger, ); } Ok(()) @@ -1231,12 +1235,13 @@ where NL: Deref, IH, SP, + L: Deref, >( &self, payment_id: PaymentId, hold_htlcs_at_next_hop: bool, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, best_block_height: u32, pending_events: &Mutex)>>, - send_payment_along_path: SP, + send_payment_along_path: SP, logger: &WithContext, ) -> Result<(), Bolt12PaymentError> where R::Target: Router, @@ -1245,6 +1250,7 @@ where NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, + L::Target: Logger, { let ( payment_hash, @@ -1303,15 +1309,16 @@ where best_block_height, pending_events, send_payment_along_path, + logger, ) } // Returns whether the data changed and needs to be repersisted. 
- pub(super) fn check_retry_payments( + pub(super) fn check_retry_payments( &self, router: &R, first_hops: FH, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, pending_events: &Mutex)>>, - send_payment_along_path: SP, + send_payment_along_path: SP, logger: &WithContext, ) -> bool where R::Target: Router, @@ -1320,6 +1327,7 @@ where SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, IH: Fn() -> InFlightHtlcs, FH: Fn() -> Vec, + L::Target: Logger, { let _single_thread = self.retry_lock.lock().unwrap(); let mut should_persist = false; @@ -1369,6 +1377,7 @@ where best_block_height, pending_events, &send_payment_along_path, + logger, ); should_persist = true; } else { @@ -1414,11 +1423,11 @@ where } #[rustfmt::skip] - fn find_initial_route( + fn find_initial_route( &self, payment_id: PaymentId, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, route_params: &mut RouteParameters, router: &R, first_hops: &Vec, - inflight_htlcs: &IH, node_signer: &NS, best_block_height: u32, + inflight_htlcs: &IH, node_signer: &NS, best_block_height: u32, logger: &WithContext, ) -> Result where R::Target: Router, @@ -1428,7 +1437,7 @@ where { #[cfg(feature = "std")] { if has_expired(&route_params) { - log_error!(self.logger, "Payment with id {} and hash {} had expired before we started paying", + log_error!(logger, "Payment with id {} and hash {} had expired before we started paying", payment_id, payment_hash); return Err(RetryableSendFailure::PaymentExpired) } @@ -1438,7 +1447,7 @@ where route_params, recipient_onion, keysend_preimage, invoice_request, best_block_height ) .map_err(|()| { - log_error!(self.logger, "Can't construct an onion packet without exceeding 1300-byte onion \ + log_error!(logger, "Can't construct an onion packet without exceeding 1300-byte onion \ hop_data length for payment with id {} and hash {}", payment_id, payment_hash); RetryableSendFailure::OnionPacketSizeExceeded })?; @@ -1448,7 +1457,7 @@ where Some(&first_hops.iter().collect::>()), inflight_htlcs(), payment_hash, payment_id, ).map_err(|_| { - log_error!(self.logger, "Failed to find route for payment with id {} and hash {}", + log_error!(logger, "Failed to find route for payment with id {} and hash {}", payment_id, payment_hash); RetryableSendFailure::RouteNotFound })?; @@ -1469,12 +1478,13 @@ where /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed #[rustfmt::skip] - fn send_payment_for_non_bolt12_invoice( + fn send_payment_for_non_bolt12_invoice( &self, payment_id: PaymentId, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, retry_strategy: Retry, mut route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, pending_events: &Mutex)>>, send_payment_along_path: SP, + logger: &WithContext, ) -> Result<(), RetryableSendFailure> where R::Target: Router, @@ -1486,14 +1496,14 @@ where { let route = self.find_initial_route( payment_id, payment_hash, &recipient_onion, keysend_preimage, None, &mut route_params, router, - &first_hops, &inflight_htlcs, node_signer, best_block_height, + &first_hops, &inflight_htlcs, node_signer, best_block_height, logger, )?; let onion_session_privs = self.add_new_pending_payment(payment_hash, recipient_onion.clone(), payment_id, keysend_preimage, &route, Some(retry_strategy), 
Some(route_params.payment_params.clone()), entropy_source, best_block_height, None) .map_err(|_| { - log_error!(self.logger, "Payment with id {} is already pending. New payment had payment hash {}", + log_error!(logger, "Payment with id {} is already pending. New payment had payment hash {}", payment_id, payment_hash); RetryableSendFailure::DuplicatePayment })?; @@ -1501,24 +1511,25 @@ where let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, keysend_preimage, None, None, payment_id, None, &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path); - log_info!(self.logger, "Sending payment with id {} and hash {} returned {:?}", + log_info!(logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, res); if let Err(e) = res { self.handle_pay_route_err( e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, &inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, - &send_payment_along_path + &send_payment_along_path, logger, ); } Ok(()) } #[rustfmt::skip] - fn find_route_and_send_payment( + fn find_route_and_send_payment( &self, payment_hash: PaymentHash, payment_id: PaymentId, route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, - pending_events: &Mutex)>>, send_payment_along_path: &SP, + pending_events: &Mutex)>>, + send_payment_along_path: &SP, logger: &WithContext, ) where R::Target: Router, @@ -1530,7 +1541,7 @@ where { #[cfg(feature = "std")] { if has_expired(&route_params) { - log_error!(self.logger, "Payment params expired on retry, abandoning payment {}", &payment_id); + log_error!(logger, "Payment params expired on retry, abandoning payment {}", &payment_id); self.abandon_payment(payment_id, PaymentFailureReason::PaymentExpired, pending_events); return } @@ -1543,7 +1554,7 @@ where ) { Ok(route) => route, Err(e) => { - log_error!(self.logger, "Failed to find a route on retry, abandoning payment {}: {:#?}", &payment_id, e); + log_error!(logger, "Failed to find a route on retry, abandoning payment {}: {:#?}", &payment_id, e); self.abandon_payment(payment_id, PaymentFailureReason::RouteNotFound, pending_events); return } @@ -1557,7 +1568,7 @@ where for path in route.paths.iter() { if path.hops.len() == 0 { - log_error!(self.logger, "Unusable path in route (path.hops.len() must be at least 1"); + log_error!(logger, "Unusable path in route (path.hops.len() must be at least 1"); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); return } @@ -1590,13 +1601,13 @@ where const RETRY_OVERFLOW_PERCENTAGE: u64 = 10; let retry_amt_msat = route.get_total_amount(); if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 { - log_error!(self.logger, "retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat); + log_error!(logger, "retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat); abandon_with_entry!(payment, PaymentFailureReason::UnexpectedError); return } if !payment.get().is_retryable_now() { - log_error!(self.logger, "Retries exhausted for payment id {}", &payment_id); + log_error!(logger, "Retries exhausted for payment id {}", &payment_id); abandon_with_entry!(payment, 
PaymentFailureReason::RetriesExhausted); return } @@ -1625,38 +1636,38 @@ where (total_msat, recipient_onion, keysend_preimage, onion_session_privs, invoice_request, bolt12_invoice.cloned()) }, PendingOutboundPayment::Legacy { .. } => { - log_error!(self.logger, "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102"); + log_error!(logger, "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102"); return }, PendingOutboundPayment::AwaitingInvoice { .. } | PendingOutboundPayment::AwaitingOffer { .. } => { - log_error!(self.logger, "Payment not yet sent"); + log_error!(logger, "Payment not yet sent"); debug_assert!(false); return }, PendingOutboundPayment::InvoiceReceived { .. } => { - log_error!(self.logger, "Payment already initiating"); + log_error!(logger, "Payment already initiating"); debug_assert!(false); return }, PendingOutboundPayment::StaticInvoiceReceived { .. } => { - log_error!(self.logger, "Payment already initiating"); + log_error!(logger, "Payment already initiating"); debug_assert!(false); return }, PendingOutboundPayment::Fulfilled { .. } => { - log_error!(self.logger, "Payment already completed"); + log_error!(logger, "Payment already completed"); return }, PendingOutboundPayment::Abandoned { .. } => { - log_error!(self.logger, "Payment already abandoned (with some HTLCs still pending)"); + log_error!(logger, "Payment already abandoned (with some HTLCs still pending)"); return }, } }, hash_map::Entry::Vacant(_) => { - log_error!(self.logger, "Payment with ID {} not found", &payment_id); + log_error!(logger, "Payment with ID {} not found", &payment_id); return } } @@ -1664,24 +1675,24 @@ where let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, keysend_preimage, invoice_request.as_ref(), bolt12_invoice.as_ref(), payment_id, Some(total_msat), &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path); - log_info!(self.logger, "Result retrying payment id {}: {:?}", &payment_id, res); + log_info!(logger, "Result retrying payment id {}: {:?}", &payment_id, res); if let Err(e) = res { self.handle_pay_route_err( e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, - send_payment_along_path + send_payment_along_path, logger ); } } #[rustfmt::skip] - fn handle_pay_route_err( + fn handle_pay_route_err( &self, err: PaymentSendFailure, payment_id: PaymentId, payment_hash: PaymentHash, route: Route, mut route_params: RouteParameters, onion_session_privs: Vec<[u8; 32]>, router: &R, first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, pending_events: &Mutex)>>, - send_payment_along_path: &SP, + send_payment_along_path: &SP, logger: &WithContext, ) where R::Target: Router, @@ -1689,12 +1700,13 @@ where NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, + L::Target: Logger, { match err { PaymentSendFailure::AllFailedResendSafe(errs) => { self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); - Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, errs.into_iter().map(|e| Err(e)), &self.logger, pending_events); - self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, 
send_payment_along_path); + Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, errs.into_iter().map(|e| Err(e)), pending_events, logger); + self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, send_payment_along_path, logger); }, PaymentSendFailure::PartialFailure { failed_paths_retry: Some(mut retry), results, .. } => { debug_assert_eq!(results.len(), route.paths.len()); @@ -1710,11 +1722,11 @@ where } }); self.remove_session_privs(payment_id, failed_paths); - Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), &self.logger, pending_events); + Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), pending_events, logger); // Some paths were sent, even if we failed to send the full MPP value our recipient may // misbehave and claim the funds, at which point we have to consider the payment sent, so // return `Ok()` here, ignoring any retry errors. - self.find_route_and_send_payment(payment_hash, payment_id, retry, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, send_payment_along_path); + self.find_route_and_send_payment(payment_hash, payment_id, retry, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, send_payment_along_path, logger); }, PaymentSendFailure::PartialFailure { failed_paths_retry: None, .. } => { // This may happen if we send a payment and some paths fail, but only due to a temporary @@ -1722,13 +1734,13 @@ where // initial HTLC-Add messages yet. }, PaymentSendFailure::PathParameterError(results) => { - log_error!(self.logger, "Failed to send to route due to parameter error in a single path. Your router is buggy"); + log_error!(logger, "Failed to send to route due to parameter error in a single path. Your router is buggy"); self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); - Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, results.into_iter(), &self.logger, pending_events); + Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, results.into_iter(), pending_events, logger); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::ParameterError(e) => { - log_error!(self.logger, "Failed to send to route due to parameter error: {:?}. Your router is buggy", e); + log_error!(logger, "Failed to send to route due to parameter error: {:?}. 
Your router is buggy", e); self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, @@ -1738,11 +1750,15 @@ where fn push_path_failed_evs_and_scids< I: ExactSizeIterator + Iterator>, + L: Deref, >( payment_id: PaymentId, payment_hash: PaymentHash, route_params: &mut RouteParameters, - paths: Vec, path_results: I, logger: &L, + paths: Vec, path_results: I, pending_events: &Mutex)>>, - ) { + logger: &WithContext, + ) where + L::Target: Logger, + { let mut events = pending_events.lock().unwrap(); debug_assert_eq!(paths.len(), path_results.len()); for (path, path_res) in paths.into_iter().zip(path_results) { @@ -2216,11 +2232,15 @@ where } #[rustfmt::skip] - pub(super) fn claim_htlc( + pub(super) fn claim_htlc( &self, payment_id: PaymentId, payment_preimage: PaymentPreimage, bolt12_invoice: Option, session_priv: SecretKey, path: Path, from_onchain: bool, ev_completion_action: &mut Option, pending_events: &Mutex)>>, - ) { + logger: &WithContext, + ) + where + L::Target: Logger, + { let mut session_priv_bytes = [0; 32]; session_priv_bytes.copy_from_slice(&session_priv[..]); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); @@ -2228,7 +2248,7 @@ where if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) { if !payment.get().is_fulfilled() { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array()); - log_info!(self.logger, "Payment with id {} and hash {} sent!", payment_id, payment_hash); + log_info!(logger, "Payment with id {} and hash {} sent!", payment_id, payment_hash); let fee_paid_msat = payment.get().get_pending_fee_msat(); let amount_msat = payment.get().total_msat(); pending_events.push_back((events::Event::PaymentSent { @@ -2258,7 +2278,7 @@ where } } } else { - log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", &payment_preimage); + log_trace!(logger, "Received duplicative fulfill for HTLC with payment_preimage {}", &payment_preimage); } } @@ -2378,13 +2398,15 @@ where }); } - pub(super) fn fail_htlc( + pub(super) fn fail_htlc( &self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, path: &Path, session_priv: &SecretKey, payment_id: &PaymentId, probing_cookie_secret: [u8; 32], secp_ctx: &Secp256k1, pending_events: &Mutex)>>, - completion_action: &mut Option, - ) { + completion_action: &mut Option, logger: &WithContext, + ) where + L::Target: Logger, + { #[cfg(any(test, feature = "_test_utils"))] let DecodedOnionFailure { network_update, @@ -2395,7 +2417,7 @@ where failed_within_blinded_path, hold_times, .. - } = onion_error.decode_onion_failure(secp_ctx, &self.logger, &source); + } = onion_error.decode_onion_failure(secp_ctx, &logger, &source); #[cfg(not(any(test, feature = "_test_utils")))] let DecodedOnionFailure { network_update, @@ -2404,7 +2426,7 @@ where failed_within_blinded_path, hold_times, .. 
- } = onion_error.decode_onion_failure(secp_ctx, &self.logger, &source); + } = onion_error.decode_onion_failure(secp_ctx, &logger, &source); let payment_is_probe = payment_is_probe(payment_hash, &payment_id, probing_cookie_secret); let mut session_priv_bytes = [0; 32]; @@ -2429,7 +2451,7 @@ where if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(*payment_id) { if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) { log_trace!( - self.logger, + logger, "Received duplicative fail for HTLC with payment_hash {}", &payment_hash ); @@ -2437,7 +2459,7 @@ where } if payment.get().is_fulfilled() { log_trace!( - self.logger, + logger, "Received failure of HTLC with payment_hash {} after payment completion", &payment_hash ); @@ -2485,18 +2507,13 @@ where is_retryable_now } else { log_trace!( - self.logger, - "Received duplicative fail for HTLC with payment_hash {}", - &payment_hash + logger, + "Received duplicative fail for HTLC with payment_hash {payment_hash}" ); return; }; core::mem::drop(outbounds); - log_trace!( - self.logger, - "Failing outbound payment HTLC with payment_hash {}", - &payment_hash - ); + log_trace!(logger, "Failing outbound payment HTLC with payment_hash {payment_hash}"); let path_failure = { if payment_is_probe { @@ -2618,10 +2635,12 @@ where invoice_requests } - pub(super) fn insert_from_monitor_on_startup( + pub(super) fn insert_from_monitor_on_startup( &self, payment_id: PaymentId, payment_hash: PaymentHash, session_priv_bytes: [u8; 32], - path: &Path, best_block_height: u32, - ) { + path: &Path, best_block_height: u32, logger: &WithContext, + ) where + L::Target: Logger, + { let path_amt = path.final_value_msat(); let path_fee = path.fee_msat(); @@ -2670,12 +2689,12 @@ where entry.get_mut().insert(session_priv_bytes, &path) }, }; - log_info!(self.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}", + log_info!(logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}", if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), payment_hash); }, hash_map::Entry::Vacant(entry) => { entry.insert(new_retryable!()); - log_info!(self.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}", + log_info!(logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}", path_amt, payment_hash, log_bytes!(session_priv_bytes)); }, } @@ -2834,6 +2853,7 @@ mod tests { use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::errors::APIError; use crate::util::hash_tables::new_hash_map; + use crate::util::logger::WithContext; use crate::util::test_utils; use alloc::collections::VecDeque; @@ -2871,7 +2891,9 @@ mod tests { #[rustfmt::skip] fn do_fails_paying_after_expiration(on_retry: bool) { let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -2893,7 +2915,7 @@ mod tests { outbound_payments.find_route_and_send_payment( PaymentHash([0; 32]), PaymentId([0; 32]), 
expired_route_params, &&router, vec![], &|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - &|_| Ok(())); + &|_| Ok(()), &log); let events = pending_events.lock().unwrap(); assert_eq!(events.len(), 1); if let Event::PaymentFailed { ref reason, .. } = events[0].0 { @@ -2903,7 +2925,7 @@ mod tests { let err = outbound_payments.send_payment( PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), Retry::Attempts(0), expired_route_params, &&router, vec![], || InFlightHtlcs::new(), - &&keys_manager, &&keys_manager, 0, &pending_events, |_| Ok(())).unwrap_err(); + &&keys_manager, &&keys_manager, 0, &pending_events, |_| Ok(()), &log).unwrap_err(); if let RetryableSendFailure::PaymentExpired = err { } else { panic!("Unexpected error"); } } } @@ -2916,7 +2938,9 @@ mod tests { #[rustfmt::skip] fn do_find_route_error(on_retry: bool) { let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -2937,7 +2961,7 @@ mod tests { outbound_payments.find_route_and_send_payment( PaymentHash([0; 32]), PaymentId([0; 32]), route_params, &&router, vec![], &|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - &|_| Ok(())); + &|_| Ok(()), &log); let events = pending_events.lock().unwrap(); assert_eq!(events.len(), 1); if let Event::PaymentFailed { .. } = events[0].0 { } else { panic!("Unexpected event"); } @@ -2945,7 +2969,7 @@ mod tests { let err = outbound_payments.send_payment( PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), Retry::Attempts(0), route_params, &&router, vec![], || InFlightHtlcs::new(), - &&keys_manager, &&keys_manager, 0, &pending_events, |_| Ok(())).unwrap_err(); + &&keys_manager, &&keys_manager, 0, &pending_events, |_| Ok(()), &log).unwrap_err(); if let RetryableSendFailure::RouteNotFound = err { } else { panic!("Unexpected error"); } } @@ -2955,7 +2979,9 @@ mod tests { #[rustfmt::skip] fn initial_send_payment_path_failed_evs() { let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -2995,7 +3021,7 @@ mod tests { PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), Retry::Attempts(0), route_params.clone(), &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - |_| Err(APIError::ChannelUnavailable { err: "test".to_owned() })).unwrap(); + |_| Err(APIError::ChannelUnavailable { err: "test".to_owned() }), &log).unwrap(); let mut events = pending_events.lock().unwrap(); assert_eq!(events.len(), 2); if let Event::PaymentPathFailed { @@ -3013,7 +3039,7 @@ mod tests { PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), 
PaymentId([0; 32]), Retry::Attempts(0), route_params.clone(), &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - |_| Err(APIError::MonitorUpdateInProgress)).unwrap(); + |_| Err(APIError::MonitorUpdateInProgress), &log).unwrap(); assert_eq!(pending_events.lock().unwrap().len(), 0); // Ensure that any other error will result in a PaymentPathFailed event but no blamed scid. @@ -3021,7 +3047,7 @@ mod tests { PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([1; 32]), Retry::Attempts(0), route_params.clone(), &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - |_| Err(APIError::APIMisuseError { err: "test".to_owned() })).unwrap(); + |_| Err(APIError::APIMisuseError { err: "test".to_owned() }), &log).unwrap(); let events = pending_events.lock().unwrap(); assert_eq!(events.len(), 2); if let Event::PaymentPathFailed { @@ -3037,8 +3063,7 @@ mod tests { #[rustfmt::skip] fn removes_stale_awaiting_invoice_using_absolute_timeout() { let pending_events = Mutex::new(VecDeque::new()); - let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let absolute_expiry = 100; let tick_interval = 10; @@ -3093,8 +3118,7 @@ mod tests { #[rustfmt::skip] fn removes_stale_awaiting_invoice_using_timer_ticks() { let pending_events = Mutex::new(VecDeque::new()); - let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let timer_ticks = 3; let expiration = StaleExpiration::TimerTicks(timer_ticks); @@ -3148,8 +3172,7 @@ mod tests { #[rustfmt::skip] fn removes_abandoned_awaiting_invoice() { let pending_events = Mutex::new(VecDeque::new()); - let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let expiration = StaleExpiration::AbsoluteTimeout(Duration::from_secs(100)); @@ -3180,6 +3203,8 @@ mod tests { #[rustfmt::skip] fn fails_sending_payment_for_expired_bolt12_invoice() { let logger = test_utils::TestLogger::new(); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -3189,7 +3214,7 @@ mod tests { let nonce = Nonce([0; 16]); let pending_events = Mutex::new(VecDeque::new()); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let expiration = StaleExpiration::AbsoluteTimeout(Duration::from_secs(100)); @@ -3214,7 +3239,7 @@ mod tests { outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| panic!() + &secp_ctx, 0, &pending_events, |_| panic!(), &log ), Err(Bolt12PaymentError::SendingFailed(RetryableSendFailure::PaymentExpired)), ); @@ -3235,6 +3260,8 @@ mod 
tests { #[rustfmt::skip] fn fails_finding_route_for_bolt12_invoice() { let logger = test_utils::TestLogger::new(); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -3242,7 +3269,7 @@ mod tests { let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet); let pending_events = Mutex::new(VecDeque::new()); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let expanded_key = ExpandedKey::new([42; 32]); let nonce = Nonce([0; 16]); let payment_id = PaymentId([0; 32]); @@ -3277,7 +3304,7 @@ mod tests { outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| panic!() + &secp_ctx, 0, &pending_events, |_| panic!(), &log ), Err(Bolt12PaymentError::SendingFailed(RetryableSendFailure::RouteNotFound)), ); @@ -3298,6 +3325,8 @@ mod tests { #[rustfmt::skip] fn sends_payment_for_bolt12_invoice() { let logger = test_utils::TestLogger::new(); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -3305,7 +3334,7 @@ mod tests { let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet); let pending_events = Mutex::new(VecDeque::new()); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let expanded_key = ExpandedKey::new([42; 32]); let nonce = Nonce([0; 16]); let payment_id = PaymentId([0; 32]); @@ -3353,7 +3382,7 @@ mod tests { outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| panic!() + &secp_ctx, 0, &pending_events, |_| panic!(), &log ), Err(Bolt12PaymentError::UnexpectedInvoice), ); @@ -3373,7 +3402,7 @@ mod tests { outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| Ok(()) + &secp_ctx, 0, &pending_events, |_| Ok(()), &log ), Ok(()), ); @@ -3384,7 +3413,7 @@ mod tests { outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| panic!() + &secp_ctx, 0, &pending_events, |_| panic!(), &log ), Err(Bolt12PaymentError::DuplicateInvoice), ); @@ -3413,8 +3442,7 @@ mod tests { #[rustfmt::skip] fn time_out_unreleased_async_payments() { let pending_events = Mutex::new(VecDeque::new()); - let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = 
OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let absolute_expiry = 60; @@ -3464,8 +3492,7 @@ mod tests { #[rustfmt::skip] fn abandon_unreleased_async_payment() { let pending_events = Mutex::new(VecDeque::new()); - let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let absolute_expiry = 60; From 253ceedee635bea1fe2e481cca18da96a03731d6 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 25 Jan 2026 15:14:45 +0000 Subject: [PATCH 129/242] Add `PaymentId` to logging `Record`s While `PaymentHash`es are great for searching logs, in the case of BOLT 12 the hash isn't selected until well into the payment process. Thus, it's important that we allow for filtering by `PaymentId` as well to ensure payment-related logs are always reliably searchable. --- lightning/src/ln/channelmanager.rs | 64 ++++++++++++++++------------ lightning/src/ln/outbound_payment.rs | 5 ++- lightning/src/util/logger.rs | 46 +++++++++++++------- lightning/src/util/macro_logger.rs | 2 +- 4 files changed, 73 insertions(+), 44 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 77933fb1fc4..1ad1f22f15b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5192,6 +5192,13 @@ where let prng_seed = self.entropy_source.get_secure_random_bytes(); let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted"); + let logger = WithContext::for_payment( + &self.logger, + path.hops.first().map(|hop| hop.pubkey), + None, + Some(*payment_hash), + payment_id, + ); let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion( &self.secp_ctx, &path, @@ -5205,8 +5212,6 @@ where prng_seed, ) .map_err(|e| { - let first_hop_key = Some(path.hops.first().unwrap().pubkey); - let logger = WithContext::from(&self.logger, first_hop_key, None, Some(*payment_hash)); log_error!(logger, "Failed to build an onion for path"); e })?; @@ -5217,9 +5222,6 @@ where let (counterparty_node_id, id) = match first_chan { None => { - let first_hop_key = Some(path.hops.first().unwrap().pubkey); - let logger = - WithContext::from(&self.logger, first_hop_key, None, Some(*payment_hash)); log_error!(logger, "Failed to find first-hop for payment hash {payment_hash}"); return Err(APIError::ChannelUnavailable { err: "No channel available with first hop!".to_owned(), @@ -5228,12 +5230,9 @@ where Some((cp_id, chan_id)) => (cp_id, chan_id), }; - let logger = WithContext::from( - &self.logger, - Some(counterparty_node_id), - Some(id), - Some(*payment_hash), - ); + // Add the channel id to the logger that already has the rest filled in.
+ let logger_ref = &logger; + let logger = WithContext::from(&logger_ref, None, Some(id), None); log_trace!( logger, "Attempting to send payment along path with next hop {first_chan_scid}" @@ -5256,11 +5255,6 @@ where }); } let funding_txo = chan.funding.get_funding_txo().unwrap(); - let logger = WithChannelContext::from( - &self.logger, - &chan.context, - Some(*payment_hash), - ); let htlc_source = HTLCSource::OutboundRoute { path: path.clone(), session_priv: session_priv.clone(), @@ -5354,7 +5348,8 @@ where }); if route.route_params.is_none() { route.route_params = Some(route_params.clone()); } let router = FixedRouter::new(route); - let logger = WithContext::from(&self.logger, None, None, Some(payment_hash)); + let logger = + WithContext::for_payment(&self.logger, None, None, Some(payment_hash), payment_id); self.pending_outbound_payments .send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0), route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(), @@ -5419,7 +5414,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), - &WithContext::from(&self.logger, None, None, Some(payment_hash)), + &WithContext::for_payment(&self.logger, None, None, Some(payment_hash), payment_id), ) } @@ -5504,6 +5499,7 @@ where ) -> Result<(), Bolt11PaymentError> { let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let payment_hash = invoice.payment_hash(); self.pending_outbound_payments.pay_for_bolt11_invoice( invoice, payment_id, @@ -5518,7 +5514,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), - &WithContext::from(&self.logger, None, None, Some(invoice.payment_hash())), + &WithContext::for_payment(&self.logger, None, None, Some(payment_hash), payment_id), ) } @@ -5571,7 +5567,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), - &WithContext::from(&self.logger, None, None, None), + &WithContext::for_payment(&self.logger, None, None, None, payment_id), ) } @@ -5628,6 +5624,7 @@ where ) -> Result<(), Bolt12PaymentError> { let mut res = Ok(()); PersistenceNotifierGuard::optionally_notify(self, || { + let logger = WithContext::for_payment(&self.logger, None, None, None, payment_id); let best_block_height = self.best_block.read().unwrap().height; let features = self.bolt12_invoice_features(); let outbound_pmts_res = self.pending_outbound_payments.static_invoice_received( @@ -5660,7 +5657,7 @@ where self.send_payment_for_static_invoice_no_persist(payment_id, channels, true) { log_trace!( - self.logger, + logger, "Failed to send held HTLC with payment id {}: {:?}", payment_id, e @@ -5752,7 +5749,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), - &WithContext::from(&self.logger, None, None, None), + &WithContext::for_payment(&self.logger, None, None, None, payment_id), ) } @@ -5846,7 +5843,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), - &WithContext::from(&self.logger, None, None, payment_hash), + &WithContext::for_payment(&self.logger, None, None, payment_hash, payment_id), ) } @@ -8652,7 +8649,13 @@ where // being fully configured. See the docs for `ChannelManagerReadArgs` for more. match source { HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. 
} => { - let logger = WithContext::from(&self.logger, None, None, Some(*payment_hash)); + let logger = WithContext::for_payment( + &self.logger, + path.hops.first().map(|hop| hop.pubkey), + None, + Some(*payment_hash), + *payment_id, + ); self.pending_outbound_payments.fail_htlc( source, payment_hash, @@ -9346,6 +9349,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ counterparty_node_id: path.hops[0].pubkey, }) }; + let logger = WithContext::for_payment( + &self.logger, + path.hops.first().map(|hop| hop.pubkey), + None, + Some(payment_preimage.into()), + payment_id, + ); self.pending_outbound_payments.claim_htlc( payment_id, payment_preimage, @@ -9355,7 +9365,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ from_onchain, &mut ev_completion_action, &self.pending_events, - &WithContext::from(&self.logger, None, None, Some(payment_preimage.into())), + &logger, ); // If an event was generated, `claim_htlc` set `ev_completion_action` to None, if // not, we should go ahead and run it now (as the claim was duplicative), at least @@ -16118,8 +16128,8 @@ where Err(()) => return None, }; - let logger = WithContext::from( - &self.logger, None, None, Some(invoice.payment_hash()), + let logger = WithContext::for_payment( + &self.logger, None, None, Some(invoice.payment_hash()), payment_id, ); if self.config.read().unwrap().manually_handle_bolt12_invoices { diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index bab3bf616d0..b255dcd16a3 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -1365,6 +1365,9 @@ impl OutboundPayments { } core::mem::drop(outbounds); if let Some((payment_hash, payment_id, route_params)) = retry_id_route_params { + let logger = + WithContext::for_payment(&logger, None, None, Some(payment_hash), payment_id); + let logger = &logger; self.find_route_and_send_payment( payment_hash, payment_id, @@ -1810,7 +1813,7 @@ impl OutboundPayments { #[rustfmt::skip] pub(super) fn send_probe( &self, path: Path, probing_cookie_secret: [u8; 32], entropy_source: &ES, node_signer: &NS, - best_block_height: u32, send_payment_along_path: F + best_block_height: u32, send_payment_along_path: F, ) -> Result<(PaymentHash, PaymentId), ProbeSendFailure> where ES::Target: EntropySource, diff --git a/lightning/src/util/logger.rs b/lightning/src/util/logger.rs index b49cd32c131..2921688f93f 100644 --- a/lightning/src/util/logger.rs +++ b/lightning/src/util/logger.rs @@ -21,6 +21,7 @@ use core::fmt::Display; use core::fmt::Write; use core::ops::Deref; +use crate::ln::channelmanager::PaymentId; use crate::ln::types::ChannelId; #[cfg(c_bindings)] use crate::prelude::*; // Needed for String @@ -124,12 +125,18 @@ pub struct Record<$($args)?> { pub file: &'static str, /// The line containing the message. pub line: u32, - /// The payment hash. Since payment_hash is not repeated in the message body, include it in the log output so - /// entries remain clear. + /// The payment hash. /// - /// Note that this is only filled in for logs pertaining to a specific payment, and will be - /// `None` for logs which are not directly related to a payment. + /// Since payment_hash is generally not repeated in the message body, you should ensure you log + /// it so that entries remain clear. + /// + /// Note that payments don't always have a [`PaymentHash`] immediately - when paying BOLT 12 + /// offers the [`PaymentHash`] is only selected a ways into the payment process. 
Thus, when + /// searching your logs for specific payments you should also search for the relevant + /// [`Self::payment_id`]. pub payment_hash: Option<PaymentHash>, + /// The payment id if the log pertained to a payment with an ID. + pub payment_id: Option<PaymentId>, } impl<$($args)?> Record<$($args)?> { @@ -138,14 +145,13 @@ impl<$($args)?> Record<$($args)?> { /// This is not exported to bindings users as fmt can't be used in C #[inline] pub fn new<$($nonstruct_args)?>( - level: Level, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>, - args: fmt::Arguments<'a>, module_path: &'static str, file: &'static str, line: u32, - payment_hash: Option<PaymentHash> + level: Level, args: fmt::Arguments<'a>, module_path: &'static str, file: &'static str, + line: u32, ) -> Record<$($args)?> { Record { level, - peer_id, - channel_id, + peer_id: None, + channel_id: None, #[cfg(not(c_bindings))] args, #[cfg(c_bindings)] @@ -153,7 +159,8 @@ impl<$($args)?> Record<$($args)?> { module_path, file, line, - payment_hash, + payment_hash: None, + payment_id: None, } } } @@ -295,14 +302,11 @@ pub struct WithContext<'a, L: Deref> where L::Target: Logger, { - /// The logger to delegate to after adding context to the record. logger: &'a L, - /// The node id of the peer pertaining to the logged record. peer_id: Option<PublicKey>, - /// The channel id of the channel pertaining to the logged record. channel_id: Option<ChannelId>, - /// The payment hash of the payment pertaining to the logged record. payment_hash: Option<PaymentHash>, + payment_id: Option<PaymentId>, } impl<'a, L: Deref> Logger for WithContext<'a, L> @@ -319,6 +323,9 @@ where if self.payment_hash.is_some() { record.payment_hash = self.payment_hash; } + if self.payment_id.is_some() { + record.payment_id = self.payment_id; + } self.logger.log(record) } } @@ -332,7 +339,16 @@ where logger: &'a L, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>, payment_hash: Option<PaymentHash>, ) -> Self { - WithContext { logger, peer_id, channel_id, payment_hash } + WithContext { logger, peer_id, channel_id, payment_hash, payment_id: None } + } + + /// Wraps the given logger, providing additional context to any logged records. + pub fn for_payment( + logger: &'a L, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>, + payment_hash: Option<PaymentHash>, payment_id: PaymentId, + ) -> Self { + let payment_id = Some(payment_id); + WithContext { logger, peer_id, channel_id, payment_hash, payment_id } + } } diff --git a/lightning/src/util/macro_logger.rs b/lightning/src/util/macro_logger.rs index ec9eb14ba38..12f4f67962e 100644 --- a/lightning/src/util/macro_logger.rs +++ b/lightning/src/util/macro_logger.rs @@ -175,7 +175,7 @@ macro_rules! log_spendable { #[macro_export] macro_rules! log_given_level { ($logger: expr, $lvl:expr, $($arg:tt)+) => ( - $logger.log($crate::util::logger::Record::new($lvl, None, None, format_args!($($arg)+), module_path!(), file!(), line!(), None)) + $logger.log($crate::util::logger::Record::new($lvl, format_args!($($arg)+), module_path!(), file!(), line!())) ); } From 878e632cdbe7cbae62e21f85d80bf5569e824864 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 28 Jan 2026 22:04:59 +0000 Subject: [PATCH 130/242] Don't override log context set by inner `WithContext`s If a logger is wrapped with `WithContext` which is then wrapped with another `WithContext` with different values, we want to use the context information set closest to the code, i.e., that which will be set first. Thus, here, we avoid overriding context that has already been set.
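Taken together with the previous patch, the intended behavior can be illustrated with a small sketch. This is not part of the patch; it assumes the `lightning` crate APIs shown in the surrounding diffs, and the exact import paths (e.g. for `PaymentHash`) may differ between LDK versions:

use lightning::ln::channelmanager::PaymentId;
use lightning::types::payment::PaymentHash;
use lightning::util::logger::{Logger, Record, WithContext};

struct PrintLogger;

impl Logger for PrintLogger {
	fn log(&self, record: Record) {
		// Both context fields added by this patch series are visible here.
		println!(
			"{} payment_id={:?} payment_hash={:?}: {}",
			record.level, record.payment_id, record.payment_hash, record.args
		);
	}
}

fn nested_context(payment_id: PaymentId) {
	let logger = PrintLogger;
	let logger_ref = &logger;
	// Inner wrapper, set closest to the payment code (previous patch).
	let inner =
		WithContext::for_payment(&logger_ref, None, None, Some(PaymentHash([1; 32])), payment_id);
	let inner_ref = &inner;
	// Outer wrapper with a conflicting payment hash. With this patch it no
	// longer overrides the inner context, so the emitted `Record` still
	// carries `PaymentHash([1; 32])` and `payment_id`.
	let outer = WithContext::from(&inner_ref, None, None, Some(PaymentHash([2; 32])));
	lightning::log_trace!(outer, "inner context wins");
}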
--- lightning/src/util/logger.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lightning/src/util/logger.rs b/lightning/src/util/logger.rs index 2921688f93f..0d2eb47fa62 100644 --- a/lightning/src/util/logger.rs +++ b/lightning/src/util/logger.rs @@ -314,16 +314,16 @@ where L::Target: Logger, { fn log(&self, mut record: Record) { - if self.peer_id.is_some() { + if self.peer_id.is_some() && record.peer_id.is_none() { record.peer_id = self.peer_id }; - if self.channel_id.is_some() { + if self.channel_id.is_some() && record.channel_id.is_none() { record.channel_id = self.channel_id; } - if self.payment_hash.is_some() { + if self.payment_hash.is_some() && record.payment_hash.is_none() { record.payment_hash = self.payment_hash; } - if self.payment_id.is_some() { + if self.payment_id.is_some() && record.payment_id.is_none() { record.payment_id = self.payment_id; } self.logger.log(record) From 821559b97bb1ae04d987c14757a08191f6b23214 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 26 Jan 2026 13:54:37 +0100 Subject: [PATCH 131/242] Remove `rustfmt::skip` from `utxo.rs` Signed-off-by: Elias Rohrer --- lightning/src/routing/utxo.rs | 559 ++++++++++++++++++++++------------ 1 file changed, 358 insertions(+), 201 deletions(-) diff --git a/lightning/src/routing/utxo.rs b/lightning/src/routing/utxo.rs index f46160f1f14..d55902c5984 100644 --- a/lightning/src/routing/utxo.rs +++ b/lightning/src/routing/utxo.rs @@ -189,9 +189,8 @@ impl PendingChecks { /// Checks if there is a pending `channel_update` UTXO validation for the given channel, /// and, if so, stores the channel message for handling later and returns an `Err`. - #[rustfmt::skip] pub(super) fn check_hold_pending_channel_update( - &self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate> + &self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, ) -> Result<(), LightningError> { let mut pending_checks = self.internal.lock().unwrap(); if let hash_map::Entry::Occupied(e) = pending_checks.channels.entry(msg.short_channel_id) { @@ -200,25 +199,32 @@ impl PendingChecks { Some(msgs_ref) => { let mut messages = msgs_ref.lock().unwrap(); let latest_update = if is_from_a { - &mut messages.latest_channel_update_a - } else { - &mut messages.latest_channel_update_b - }; - if latest_update.is_none() || latest_update.as_ref().unwrap().timestamp() < msg.timestamp { + &mut messages.latest_channel_update_a + } else { + &mut messages.latest_channel_update_b + }; + if latest_update.is_none() + || latest_update.as_ref().unwrap().timestamp() < msg.timestamp + { // If the messages we got has a higher timestamp, just blindly assume the // signatures on the new message are correct and drop the old message. This // may cause us to end up dropping valid `channel_update`s if a peer is // malicious, but we should get the correct ones when the node updates them. 
- *latest_update = Some( - if let Some(msg) = full_msg { ChannelUpdate::Full(msg.clone()) } - else { ChannelUpdate::Unsigned(msg.clone()) }); + *latest_update = Some(if let Some(msg) = full_msg { + ChannelUpdate::Full(msg.clone()) + } else { + ChannelUpdate::Unsigned(msg.clone()) + }); } return Err(LightningError { - err: "Awaiting channel_announcement validation to accept channel_update".to_owned(), + err: "Awaiting channel_announcement validation to accept channel_update" + .to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip), }); }, - None => { e.remove(); }, + None => { + e.remove(); + }, } } Ok(()) @@ -226,45 +232,49 @@ impl PendingChecks { /// Checks if there is a pending `node_announcement` UTXO validation for a channel with the /// given node and, if so, stores the channel message for handling later and returns an `Err`. - #[rustfmt::skip] pub(super) fn check_hold_pending_node_announcement( - &self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement> + &self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>, ) -> Result<(), LightningError> { let mut pending_checks = self.internal.lock().unwrap(); if let hash_map::Entry::Occupied(mut e) = pending_checks.nodes.entry(msg.node_id) { let mut found_at_least_one_chan = false; - e.get_mut().retain(|node_msgs| { - match Weak::upgrade(&node_msgs) { - Some(chan_mtx) => { - let mut chan_msgs = chan_mtx.lock().unwrap(); - if let Some(chan_announce) = &chan_msgs.channel_announce { - let latest_announce = - if *chan_announce.node_id_1() == msg.node_id { - &mut chan_msgs.latest_node_announce_a - } else { - &mut chan_msgs.latest_node_announce_b - }; - if latest_announce.is_none() || - latest_announce.as_ref().unwrap().timestamp() < msg.timestamp - { - *latest_announce = Some( - if let Some(msg) = full_msg { NodeAnnouncement::Full(msg.clone()) } - else { NodeAnnouncement::Unsigned(msg.clone()) }); - } - found_at_least_one_chan = true; - true + e.get_mut().retain(|node_msgs| match Weak::upgrade(&node_msgs) { + Some(chan_mtx) => { + let mut chan_msgs = chan_mtx.lock().unwrap(); + if let Some(chan_announce) = &chan_msgs.channel_announce { + let latest_announce = if *chan_announce.node_id_1() == msg.node_id { + &mut chan_msgs.latest_node_announce_a } else { - debug_assert!(false, "channel_announce is set before struct is added to node map"); - false + &mut chan_msgs.latest_node_announce_b + }; + if latest_announce.is_none() + || latest_announce.as_ref().unwrap().timestamp() < msg.timestamp + { + *latest_announce = Some(if let Some(msg) = full_msg { + NodeAnnouncement::Full(msg.clone()) + } else { + NodeAnnouncement::Unsigned(msg.clone()) + }); } - }, - None => false, - } + found_at_least_one_chan = true; + true + } else { + debug_assert!( + false, + "channel_announce is set before struct is added to node map" + ); + false + } + }, + None => false, }); - if e.get().is_empty() { e.remove(); } + if e.get().is_empty() { + e.remove(); + } if found_at_least_one_chan { return Err(LightningError { - err: "Awaiting channel_announcement validation to accept node_announcement".to_owned(), + err: "Awaiting channel_announcement validation to accept node_announcement" + .to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip), }); } @@ -272,10 +282,10 @@ impl PendingChecks { Ok(()) } - #[rustfmt::skip] - fn check_replace_previous_entry(msg: &msgs::UnsignedChannelAnnouncement, - full_msg: Option<&msgs::ChannelAnnouncement>, replacement: Option>>, - pending_channels: &mut HashMap>> + fn 
check_replace_previous_entry( + msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, + replacement: Option>>, + pending_channels: &mut HashMap>>, ) -> Result<(), msgs::LightningError> { match pending_channels.entry(msg.short_channel_id) { hash_map::Entry::Occupied(mut e) => { @@ -287,8 +297,13 @@ impl PendingChecks { // This may be called with the mutex held on a different UtxoMessages // struct, however in that case we have a global lockorder of new messages // -> old messages, which makes this safe. - let pending_matches = match &pending_msgs.unsafe_well_ordered_double_lock_self().channel_announce { - Some(ChannelAnnouncement::Full(pending_msg)) => Some(pending_msg) == full_msg, + let pending_matches = match &pending_msgs + .unsafe_well_ordered_double_lock_self() + .channel_announce + { + Some(ChannelAnnouncement::Full(pending_msg)) => { + Some(pending_msg) == full_msg + }, Some(ChannelAnnouncement::Unsigned(pending_msg)) => pending_msg == msg, None => { // This shouldn't actually be reachable. We set the @@ -320,54 +335,66 @@ impl PendingChecks { // so just remove/replace it and move on. if let Some(item) = replacement { *e.get_mut() = item; - } else { e.remove(); } + } else { + e.remove(); + } }, } }, hash_map::Entry::Vacant(v) => { - if let Some(item) = replacement { v.insert(item); } + if let Some(item) = replacement { + v.insert(item); + } }, } Ok(()) } - #[rustfmt::skip] - pub(super) fn check_channel_announcement(&self, - utxo_lookup: &Option, msg: &msgs::UnsignedChannelAnnouncement, - full_msg: Option<&msgs::ChannelAnnouncement> - ) -> Result, msgs::LightningError> where U::Target: UtxoLookup { - let handle_result = |res| { - match res { - Ok(TxOut { value, script_pubkey }) => { - let expected_script = - make_funding_redeemscript_from_slices(msg.bitcoin_key_1.as_array(), msg.bitcoin_key_2.as_array()).to_p2wsh(); - if script_pubkey != expected_script { - return Err(LightningError{ - err: format!("Channel announcement key ({}) didn't match on-chain script ({})", - expected_script.to_hex_string(), script_pubkey.to_hex_string()), - action: ErrorAction::IgnoreError - }); - } - Ok(Some(value)) - }, - Err(UtxoLookupError::UnknownChain) => { - Err(LightningError { - err: format!("Channel announced on an unknown chain ({})", - msg.chain_hash.to_bytes().as_hex()), - action: ErrorAction::IgnoreError - }) - }, - Err(UtxoLookupError::UnknownTx) => { - Err(LightningError { - err: "Channel announced without corresponding UTXO entry".to_owned(), - action: ErrorAction::IgnoreError - }) - }, - } + pub(super) fn check_channel_announcement( + &self, utxo_lookup: &Option, msg: &msgs::UnsignedChannelAnnouncement, + full_msg: Option<&msgs::ChannelAnnouncement>, + ) -> Result, msgs::LightningError> + where + U::Target: UtxoLookup, + { + let handle_result = |res| match res { + Ok(TxOut { value, script_pubkey }) => { + let expected_script = make_funding_redeemscript_from_slices( + msg.bitcoin_key_1.as_array(), + msg.bitcoin_key_2.as_array(), + ) + .to_p2wsh(); + if script_pubkey != expected_script { + return Err(LightningError { + err: format!( + "Channel announcement key ({}) didn't match on-chain script ({})", + expected_script.to_hex_string(), + script_pubkey.to_hex_string() + ), + action: ErrorAction::IgnoreError, + }); + } + Ok(Some(value)) + }, + Err(UtxoLookupError::UnknownChain) => Err(LightningError { + err: format!( + "Channel announced on an unknown chain ({})", + msg.chain_hash.to_bytes().as_hex() + ), + action: ErrorAction::IgnoreError, + }), + 
Err(UtxoLookupError::UnknownTx) => Err(LightningError { + err: "Channel announced without corresponding UTXO entry".to_owned(), + action: ErrorAction::IgnoreError, + }), }; - Self::check_replace_previous_entry(msg, full_msg, None, - &mut self.internal.lock().unwrap().channels)?; + Self::check_replace_previous_entry( + msg, + full_msg, + None, + &mut self.internal.lock().unwrap().channels, + )?; match utxo_lookup { &None => { @@ -386,15 +413,27 @@ impl PendingChecks { // handle the result in-line. handle_result(res) } else { - Self::check_replace_previous_entry(msg, full_msg, - Some(Arc::downgrade(&future.state)), &mut pending_checks.channels)?; - async_messages.channel_announce = Some( - if let Some(msg) = full_msg { ChannelAnnouncement::Full(msg.clone()) } - else { ChannelAnnouncement::Unsigned(msg.clone()) }); - pending_checks.nodes.entry(msg.node_id_1) - .or_default().push(Arc::downgrade(&future.state)); - pending_checks.nodes.entry(msg.node_id_2) - .or_default().push(Arc::downgrade(&future.state)); + Self::check_replace_previous_entry( + msg, + full_msg, + Some(Arc::downgrade(&future.state)), + &mut pending_checks.channels, + )?; + async_messages.channel_announce = Some(if let Some(msg) = full_msg { + ChannelAnnouncement::Full(msg.clone()) + } else { + ChannelAnnouncement::Unsigned(msg.clone()) + }); + pending_checks + .nodes + .entry(msg.node_id_1) + .or_default() + .push(Arc::downgrade(&future.state)); + pending_checks + .nodes + .entry(msg.node_id_2) + .or_default() + .push(Arc::downgrade(&future.state)); Err(LightningError { err: "Channel being checked async".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip), @@ -402,7 +441,7 @@ impl PendingChecks { } }, } - } + }, } } @@ -419,16 +458,13 @@ impl PendingChecks { /// Returns true if there are a large number of async checks pending and future /// `channel_announcement` messages should be delayed. Note that this is only a hint and /// messages already in-flight may still have to be handled for various reasons. - #[rustfmt::skip] pub(super) fn too_many_checks_pending(&self) -> bool { let mut pending_checks = self.internal.lock().unwrap(); if pending_checks.channels.len() > Self::MAX_PENDING_LOOKUPS { // If we have many channel checks pending, ensure we don't have any dangling checks // (i.e. checks where the user told us they'd call back but drop'd the `UtxoFuture` // instead) before we commit to applying backpressure. 
- pending_checks.channels.retain(|_, chan| { - Weak::upgrade(&chan).is_some() - }); + pending_checks.channels.retain(|_, chan| Weak::upgrade(&chan).is_some()); pending_checks.nodes.retain(|_, channels| { channels.retain(|chan| Weak::upgrade(&chan).is_some()); !channels.is_empty() @@ -595,11 +631,17 @@ mod tests { (chain_source, network_graph) } - #[rustfmt::skip] - fn get_test_objects() -> (msgs::ChannelAnnouncement, TestChainSource, - NetworkGraph>, bitcoin::ScriptBuf, msgs::NodeAnnouncement, - msgs::NodeAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, msgs::ChannelUpdate) - { + fn get_test_objects() -> ( + msgs::ChannelAnnouncement, + TestChainSource, + NetworkGraph>, + bitcoin::ScriptBuf, + msgs::NodeAnnouncement, + msgs::NodeAnnouncement, + msgs::ChannelUpdate, + msgs::ChannelUpdate, + msgs::ChannelUpdate, + ) { let secp_ctx = Secp256k1::new(); let (chain_source, network_graph) = get_network(); @@ -607,27 +649,39 @@ mod tests { let good_script = get_channel_script(&secp_ctx); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); - let valid_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); let node_a_announce = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx); let node_b_announce = get_signed_node_announcement(|_| {}, node_2_privkey, &secp_ctx); - // Note that we have to set the "direction" flag correctly on both messages - let chan_update_a = get_signed_channel_update(|msg| msg.channel_flags = 0, node_1_privkey, &secp_ctx); - let chan_update_b = get_signed_channel_update(|msg| msg.channel_flags = 1, node_2_privkey, &secp_ctx); - let chan_update_c = get_signed_channel_update(|msg| { - msg.channel_flags = 1; msg.timestamp += 1; }, node_2_privkey, &secp_ctx); - - (valid_announcement, chain_source, network_graph, good_script, node_a_announce, - node_b_announce, chan_update_a, chan_update_b, chan_update_c) + ( + valid_announcement, + chain_source, + network_graph, + good_script, + node_a_announce, + node_b_announce, + get_signed_channel_update(|msg| msg.channel_flags = 0, node_1_privkey, &secp_ctx), + get_signed_channel_update(|msg| msg.channel_flags = 1, node_2_privkey, &secp_ctx), + // Note that we have to set the "direction" flag correctly on both messages + get_signed_channel_update( + |msg| { + msg.channel_flags = 1; + msg.timestamp += 1; + }, + node_2_privkey, + &secp_ctx, + ), + ) } #[test] - #[rustfmt::skip] fn test_fast_async_lookup() { // Check that async lookups which resolve quicker than the future is returned to the // `get_utxo` call can read it still resolve properly. let (valid_announcement, chain_source, network_graph, good_script, ..) 
= get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; let notifier = Arc::new(Notifier::new()); let future = UtxoFuture::new(Arc::clone(¬ifier)); @@ -637,113 +691,158 @@ mod tests { network_graph.pending_checks.check_resolved_futures(&network_graph); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap(); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_some()); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap(); + assert!(network_graph.read_only().channels().get(&scid).is_some()); } #[test] - #[rustfmt::skip] fn test_async_lookup() { // Test a simple async lookup - let (valid_announcement, chain_source, network_graph, good_script, - node_a_announce, node_b_announce, ..) = get_test_objects(); + let ( + valid_announcement, + chain_source, + network_graph, + good_script, + node_a_announce, + node_b_announce, + .., + ) = get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; + let node_id_1 = valid_announcement.contents.node_id_1; let notifier = Arc::new(Notifier::new()); let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); + assert!(network_graph.read_only().channels().get(&scid).is_none()); future.resolve(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script })); assert!(notifier.notify_pending()); network_graph.pending_checks.check_resolved_futures(&network_graph); - network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).unwrap(); - network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).unwrap(); + network_graph.read_only().channels().get(&scid).unwrap(); + network_graph.read_only().channels().get(&scid).unwrap(); - assert!(network_graph.read_only().nodes().get(&valid_announcement.contents.node_id_1) - .unwrap().announcement_info.is_none()); + #[rustfmt::skip] + let is_node_a_announced = network_graph.read_only().nodes().get(&node_id_1).unwrap() + .announcement_info.is_some(); + assert!(!is_node_a_announced); network_graph.update_node_from_announcement(&node_a_announce).unwrap(); network_graph.update_node_from_announcement(&node_b_announce).unwrap(); - assert!(network_graph.read_only().nodes().get(&valid_announcement.contents.node_id_1) - .unwrap().announcement_info.is_some()); + #[rustfmt::skip] + let is_node_a_announced = network_graph.read_only().nodes().get(&node_id_1).unwrap() + .announcement_info.is_some(); + assert!(is_node_a_announced); } #[test] - #[rustfmt::skip] fn test_invalid_async_lookup() { // Test an async lookup which returns an incorrect script let (valid_announcement, chain_source, network_graph, ..) 
= get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; let notifier = Arc::new(Notifier::new()); let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); + assert!(network_graph.read_only().channels().get(&scid).is_none()); let value = Amount::from_sat(1_000_000); future.resolve(Ok(TxOut { value, script_pubkey: bitcoin::ScriptBuf::new() })); assert!(notifier.notify_pending()); network_graph.pending_checks.check_resolved_futures(&network_graph); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); + assert!(network_graph.read_only().channels().get(&scid).is_none()); } #[test] - #[rustfmt::skip] fn test_failing_async_lookup() { // Test an async lookup which returns an error let (valid_announcement, chain_source, network_graph, ..) = get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; let notifier = Arc::new(Notifier::new()); let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); + assert!(network_graph.read_only().channels().get(&scid).is_none()); future.resolve(Err(UtxoLookupError::UnknownTx)); assert!(notifier.notify_pending()); network_graph.pending_checks.check_resolved_futures(&network_graph); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); + assert!(network_graph.read_only().channels().get(&scid).is_none()); } #[test] - #[rustfmt::skip] fn test_updates_async_lookup() { // Test async lookups will process pending channel_update/node_announcements once they // complete. - let (valid_announcement, chain_source, network_graph, good_script, node_a_announce, - node_b_announce, chan_update_a, chan_update_b, ..) 
= get_test_objects(); + let ( + valid_announcement, + chain_source, + network_graph, + good_script, + node_a_announce, + node_b_announce, + chan_update_a, + chan_update_b, + .., + ) = get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; let notifier = Arc::new(Notifier::new()); let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); + assert!(network_graph.read_only().channels().get(&scid).is_none()); assert_eq!( network_graph.update_node_from_announcement(&node_a_announce).unwrap_err().err, - "Awaiting channel_announcement validation to accept node_announcement"); + "Awaiting channel_announcement validation to accept node_announcement" + ); assert_eq!( network_graph.update_node_from_announcement(&node_b_announce).unwrap_err().err, - "Awaiting channel_announcement validation to accept node_announcement"); + "Awaiting channel_announcement validation to accept node_announcement" + ); - assert_eq!(network_graph.update_channel(&chan_update_a).unwrap_err().err, - "Awaiting channel_announcement validation to accept channel_update"); - assert_eq!(network_graph.update_channel(&chan_update_b).unwrap_err().err, - "Awaiting channel_announcement validation to accept channel_update"); + assert_eq!( + network_graph.update_channel(&chan_update_a).unwrap_err().err, + "Awaiting channel_announcement validation to accept channel_update" + ); + assert_eq!( + network_graph.update_channel(&chan_update_b).unwrap_err().err, + "Awaiting channel_announcement validation to accept channel_update" + ); assert!(!notifier.notify_pending()); future @@ -751,70 +850,104 @@ mod tests { assert!(notifier.notify_pending()); network_graph.pending_checks.check_resolved_futures(&network_graph); - assert!(network_graph.read_only().channels() - .get(&valid_announcement.contents.short_channel_id).unwrap().one_to_two.is_some()); - assert!(network_graph.read_only().channels() - .get(&valid_announcement.contents.short_channel_id).unwrap().two_to_one.is_some()); - - assert!(network_graph.read_only().nodes().get(&valid_announcement.contents.node_id_1) - .unwrap().announcement_info.is_some()); - assert!(network_graph.read_only().nodes().get(&valid_announcement.contents.node_id_2) - .unwrap().announcement_info.is_some()); + assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_some()); + assert!(network_graph.read_only().channels().get(&scid).unwrap().two_to_one.is_some()); + + assert!(network_graph + .read_only() + .nodes() + .get(&valid_announcement.contents.node_id_1) + .unwrap() + .announcement_info + .is_some()); + assert!(network_graph + .read_only() + .nodes() + .get(&valid_announcement.contents.node_id_2) + .unwrap() + .announcement_info + .is_some()); } #[test] - #[rustfmt::skip] fn test_latest_update_async_lookup() { // Test async lookups will process the latest channel_update if two are received while // awaiting an async UTXO lookup. - let (valid_announcement, chain_source, network_graph, good_script, _, - _, chan_update_a, chan_update_b, chan_update_c, ..) 
= get_test_objects(); + let ( + valid_announcement, + chain_source, + network_graph, + good_script, + _, + _, + chan_update_a, + chan_update_b, + chan_update_c, + .., + ) = get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; let notifier = Arc::new(Notifier::new()); let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); + assert!(network_graph.read_only().channels().get(&scid).is_none()); - assert_eq!(network_graph.update_channel(&chan_update_a).unwrap_err().err, - "Awaiting channel_announcement validation to accept channel_update"); - assert_eq!(network_graph.update_channel(&chan_update_b).unwrap_err().err, - "Awaiting channel_announcement validation to accept channel_update"); - assert_eq!(network_graph.update_channel(&chan_update_c).unwrap_err().err, - "Awaiting channel_announcement validation to accept channel_update"); + assert_eq!( + network_graph.update_channel(&chan_update_a).unwrap_err().err, + "Awaiting channel_announcement validation to accept channel_update" + ); + assert_eq!( + network_graph.update_channel(&chan_update_b).unwrap_err().err, + "Awaiting channel_announcement validation to accept channel_update" + ); + assert_eq!( + network_graph.update_channel(&chan_update_c).unwrap_err().err, + "Awaiting channel_announcement validation to accept channel_update" + ); assert!(!notifier.notify_pending()); - future.resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + future + .resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); assert!(notifier.notify_pending()); network_graph.pending_checks.check_resolved_futures(&network_graph); assert_eq!(chan_update_a.contents.timestamp, chan_update_b.contents.timestamp); let graph_lock = network_graph.read_only(); - assert!(graph_lock.channels() - .get(&valid_announcement.contents.short_channel_id).as_ref().unwrap() - .one_to_two.as_ref().unwrap().last_update != - graph_lock.channels() - .get(&valid_announcement.contents.short_channel_id).as_ref().unwrap() - .two_to_one.as_ref().unwrap().last_update); + #[rustfmt::skip] + let one_to_two_update = + graph_lock.channels().get(&scid).as_ref().unwrap().one_to_two.as_ref().unwrap().last_update; + #[rustfmt::skip] + let two_to_one_update = + graph_lock.channels().get(&scid).as_ref().unwrap().two_to_one.as_ref().unwrap().last_update; + assert!(one_to_two_update != two_to_one_update); } #[test] - #[rustfmt::skip] fn test_no_double_lookups() { // Test that a pending async lookup will prevent a second async lookup from flying, but // only if the channel_announcement message is identical. let (valid_announcement, chain_source, network_graph, good_script, ..) 
= get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; let notifier_a = Arc::new(Notifier::new()); let future = UtxoFuture::new(Arc::clone(¬ifier_a)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 1); // If we make a second request with the same message, the call count doesn't increase... @@ -822,8 +955,12 @@ mod tests { let future_b = UtxoFuture::new(Arc::clone(¬ifier_b)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future_b.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel announcement is already being checked"); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel announcement is already being checked" + ); assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 1); // But if we make a third request with a tweaked message, we should get a second call @@ -831,10 +968,15 @@ mod tests { let secp_ctx = Secp256k1::new(); let replacement_pk_1 = &SecretKey::from_slice(&[99; 32]).unwrap(); let replacement_pk_2 = &SecretKey::from_slice(&[98; 32]).unwrap(); - let invalid_announcement = get_signed_channel_announcement(|_| {}, replacement_pk_1, replacement_pk_2, &secp_ctx); + let invalid_announcement = + get_signed_channel_announcement(|_| {}, replacement_pk_1, replacement_pk_2, &secp_ctx); assert_eq!( - network_graph.update_channel_from_announcement(&invalid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); + network_graph + .update_channel_from_announcement(&invalid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 2); // Still, if we resolve the original future, the original channel will be accepted. @@ -843,14 +985,14 @@ mod tests { assert!(notifier_a.notify_pending()); assert!(!notifier_b.notify_pending()); network_graph.pending_checks.check_resolved_futures(&network_graph); - assert!(!network_graph.read_only().channels() - .get(&valid_announcement.contents.short_channel_id).unwrap() - .announcement_message.as_ref().unwrap() - .contents.features.supports_unknown_test_feature()); + #[rustfmt::skip] + let is_test_feature_set = + network_graph.read_only().channels().get(&scid).unwrap().announcement_message + .as_ref().unwrap().contents.features.supports_unknown_test_feature(); + assert!(!is_test_feature_set); } #[test] - #[rustfmt::skip] fn test_checks_backpressure() { // Test that too_many_checks_pending returns true when there are many checks pending, and // returns false once they complete. 
@@ -867,14 +1009,22 @@ mod tests { for i in 0..PendingChecks::MAX_PENDING_LOOKUPS { let valid_announcement = get_signed_channel_announcement( - |msg| msg.short_channel_id += 1 + i as u64, node_1_privkey, node_2_privkey, &secp_ctx); - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err(); + |msg| msg.short_channel_id += 1 + i as u64, + node_1_privkey, + node_2_privkey, + &secp_ctx, + ); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err(); assert!(!network_graph.pending_checks.too_many_checks_pending()); } - let valid_announcement = get_signed_channel_announcement( - |_| {}, node_1_privkey, node_2_privkey, &secp_ctx); - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err(); + let valid_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err(); assert!(network_graph.pending_checks.too_many_checks_pending()); // Once the future completes the "too many checks" flag should reset. @@ -885,7 +1035,6 @@ mod tests { } #[test] - #[rustfmt::skip] fn test_checks_backpressure_drop() { // Test that too_many_checks_pending returns true when there are many checks pending, and // returns false if we drop some of the futures without completion. @@ -901,14 +1050,22 @@ mod tests { for i in 0..PendingChecks::MAX_PENDING_LOOKUPS { let valid_announcement = get_signed_channel_announcement( - |msg| msg.short_channel_id += 1 + i as u64, node_1_privkey, node_2_privkey, &secp_ctx); - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err(); + |msg| msg.short_channel_id += 1 + i as u64, + node_1_privkey, + node_2_privkey, + &secp_ctx, + ); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err(); assert!(!network_graph.pending_checks.too_many_checks_pending()); } - let valid_announcement = get_signed_channel_announcement( - |_| {}, node_1_privkey, node_2_privkey, &secp_ctx); - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err(); + let valid_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err(); assert!(network_graph.pending_checks.too_many_checks_pending()); // Once the future is drop'd (by resetting the `utxo_ret` value) the "too many checks" flag From 114f6b56864304cfeadc6f54fdc5abc2e8768d46 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 28 Jan 2026 13:42:43 +0100 Subject: [PATCH 132/242] Fix race condition in async `UtxoFuture` resolution Previously, we refactored the `GossipVerifier` to not require holding a circular reference. As part of this, we moved to a model where the `UtxoFuture`s are now polled by the background processor, which checks for completion through `get_and_clear_pending_msg_events`. However, as part of this refactor we introduced a race condition: as we only held `Weak` references in `PendingChecksContext` and the `UtxoFuture` was directly dropped by the `GossipVerifier` after calling `resolve`, the actual data was dropped with the future and gone when the background processor attempted to retrieve and apply it via `check_resolved_futures`.
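In miniature, the race looks like this (a std-only sketch of the pattern, not LDK code):

use std::sync::{Arc, Mutex, Weak};

fn main() {
	// Stands in for the shared `UtxoFuture` state; `None` means unresolved.
	let state: Arc<Mutex<Option<u64>>> = Arc::new(Mutex::new(None));
	// The graph side only tracks a `Weak`, as `PendingChecksContext` did.
	let tracked: Weak<Mutex<Option<u64>>> = Arc::downgrade(&state);

	// The verifier resolves the future, then drops its only strong reference,
	// mirroring the `GossipVerifier` dropping the `UtxoFuture` after `resolve`.
	*state.lock().unwrap() = Some(42);
	drop(state);

	// When the background processor later polls (`check_resolved_futures`),
	// the upgrade fails and the resolved data is already gone.
	assert!(tracked.upgrade().is_none());
}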
Here, we fix this issue by simply holding on to the `state` `Arc`s in a separate `pending_states` `Vec` that is only pruned in `check_resolved_futures`, ensuring any completed results are collected first. Signed-off-by: Elias Rohrer --- lightning/src/routing/utxo.rs | 37 ++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/lightning/src/routing/utxo.rs b/lightning/src/routing/utxo.rs index d55902c5984..ab653b1ea74 100644 --- a/lightning/src/routing/utxo.rs +++ b/lightning/src/routing/utxo.rs @@ -166,6 +166,7 @@ impl UtxoFuture { } struct PendingChecksContext { + pending_states: Vec<Arc<Mutex<UtxoMessages>>>, channels: HashMap<u64, Weak<Mutex<UtxoMessages>>>, nodes: HashMap<NodeId, Vec<Weak<Mutex<UtxoMessages>>>>, } @@ -180,6 +181,7 @@ impl PendingChecks { pub(super) fn new() -> Self { PendingChecks { internal: Mutex::new(PendingChecksContext { + pending_states: Vec::new(), channels: new_hash_map(), nodes: new_hash_map(), }), @@ -413,6 +415,20 @@ impl PendingChecks { // handle the result in-line. handle_result(res) } else { + // To avoid cases where we drop the resolved data before it can be + // collected by `check_resolved_futures`, we here track all pending + // states at least until the next call of `check_resolved_futures`. + let pending_states = &mut pending_checks.pending_states; + if pending_states + .iter() + .find(|s| Arc::ptr_eq(s, &future.state)) + .is_none() + { + // We're not already tracking the future state, keep the `Arc` + // around. + pending_states.push(Arc::clone(&future.state)); + } + Self::check_replace_previous_entry( msg, full_msg, @@ -574,6 +590,21 @@ impl PendingChecks { let mut completed_states = Vec::new(); { let mut lck = self.internal.lock().unwrap(); + lck.pending_states.retain(|state| { + if state.lock().unwrap().complete.is_some() { + // We're done, collect the result and clean up. + completed_states.push(Arc::clone(&state)); + false + } else { + if Arc::strong_count(state) == 1 { + // The future has been dropped. + false + } else { + // It's still inflight. + true + } + } + }); lck.channels.retain(|_, state| { if let Some(state) = state.upgrade() { if state.lock().unwrap().complete.is_some() { @@ -1069,8 +1100,12 @@ mod tests { assert!(network_graph.pending_checks.too_many_checks_pending()); // Once the future is drop'd (by resetting the `utxo_ret` value) the "too many checks" flag - // should reset to false. + // should not yet reset to false. *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)); + assert!(network_graph.pending_checks.too_many_checks_pending()); + + // .. but it should once we called check_resolved_futures clearing the `pending_states`. + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(!network_graph.pending_checks.too_many_checks_pending()); } } From 83b2d3ec70c36938209566ee7086674b531c9a7b Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Thu, 22 Jan 2026 09:49:53 -0800 Subject: [PATCH 133/242] Rework ChannelManager::funding_transaction_signed Previously, we'd emit a FundingTransactionReadyForSigning event once the initial commitment_signed was exchanged for a splicing/dual-funding attempt and require users to call back with their signed inputs using ChannelManager::funding_transaction_signed. While this approach worked in practice, it prevents us from abandoning a splice if we cannot or no longer wish to sign, as the splice has already been committed to by this point. This commit reworks the API such that this is now possible.
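From the user's perspective, the reworked flow described below looks roughly like this. This is a hedged sketch: the event's exact fields and the `funding_transaction_signed` parameters are inferred from the names in this commit and may differ, and `wallet_sign_our_inputs` is a hypothetical wallet helper, not an LDK API:

match event {
	Event::FundingTransactionReadyForSigning {
		channel_id, counterparty_node_id, unsigned_transaction, ..
	} => {
		// Sign only the inputs we contributed to the negotiated transaction.
		let signed_tx = wallet_sign_our_inputs(unsigned_transaction);
		// Only after this call does LDK release our initial
		// commitment_signed and, in turn, our tx_signatures.
		channel_manager.funding_transaction_signed(
			&channel_id, &counterparty_node_id, signed_tx,
		)?;
	},
	_ => {},
}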
After exchanging tx_complete, we will no longer immediately send our initial commitment_signed. We will now emit the FundingTransactionReadyForSigning event and wait for the user to call back before releasing both our initial commitment_signed and our tx_signatures. As a result, the event is now persisted, as there is only one possible path in which it is generated. Note that we continue to only emit the event if a local contribution to the negotiated transaction was made. Future work will expose a cancellation API such that we can abandon splice attempts safely (with dual-funding we can simply force close the channel). --- lightning/src/ln/channel.rs | 207 +++++++++------- lightning/src/ln/channelmanager.rs | 242 ++++++++++--------- lightning/src/ln/functional_test_utils.rs | 2 +- lightning/src/ln/interactivetxs.rs | 5 +- lightning/src/ln/splicing_tests.rs | 278 +++++++++-------------- pending_changelog/4336.txt | 5 + 6 files changed, 365 insertions(+), 374 deletions(-) create mode 100644 pending_changelog/4336.txt diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index fd780da8d91..23edf616dae 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1904,10 +1904,7 @@ where pub fn tx_complete<L: Deref>( &mut self, msg: &msgs::TxComplete, logger: &L, - ) -> Result< - (Option<InteractiveTxMessageSend>, Option<msgs::CommitmentSigned>), - (ChannelError, Option<msgs::TxAbort>), - > + ) -> Result<TxCompleteResult, (ChannelError, Option<msgs::TxAbort>)> where L::Target: Logger, { @@ -1934,13 +1931,38 @@ where let funding_outpoint = if let Some(funding_outpoint) = negotiation_complete { funding_outpoint } else { - return Ok((interactive_tx_msg_send, None)); + return Ok(TxCompleteResult { + interactive_tx_msg_send, + event_unsigned_tx: None, + funding_tx_signed: None, + }); }; - let commitment_signed = self - .funding_tx_constructed(funding_outpoint, logger) + self.funding_tx_constructed(funding_outpoint) .map_err(|abort_reason| self.fail_interactive_tx_negotiation(abort_reason, logger))?; + + let signing_session = self + .context() + .interactive_tx_signing_session + .as_ref() + .expect("The signing session must have been initialized in funding_tx_constructed"); + let has_local_contribution = signing_session.has_local_contribution(); + + let event_unsigned_tx = + has_local_contribution.then(|| signing_session.unsigned_tx().tx().clone()); + + let funding_tx_signed = if !has_local_contribution { + let funding_txid = signing_session.unsigned_tx().tx().compute_txid(); + if let ChannelPhase::Funded(chan) = &mut self.phase { + chan.funding_transaction_signed(funding_txid, vec![], 0, logger).ok() + } else { + None + } + } else { + None + }; + + Ok(TxCompleteResult { interactive_tx_msg_send, event_unsigned_tx, funding_tx_signed }) } pub fn tx_abort( @@ -2046,14 +2068,8 @@ where result.map(|monitor| (self.as_funded_mut().expect("Channel should be funded"), monitor)) } - fn funding_tx_constructed<L: Deref>( - &mut self, funding_outpoint: OutPoint, logger: &L, - ) -> Result<msgs::CommitmentSigned, AbortReason> - where - L::Target: Logger, - { - let logger = WithChannelContext::from(logger, self.context(), None); - let (interactive_tx_constructor, commitment_signed) = match &mut self.phase { + fn funding_tx_constructed(&mut self, funding_outpoint: OutPoint) -> Result<(), AbortReason> { + let interactive_tx_constructor = match &mut self.phase { ChannelPhase::UnfundedV2(chan) => { debug_assert_eq!( chan.context.channel_state, @@ -2072,77 +2088,31 @@ where chan.funding.channel_transaction_parameters.funding_outpoint = Some(funding_outpoint); - let interactive_tx_constructor = chan -
.interactive_tx_constructor + chan.interactive_tx_constructor .take() - .expect("PendingV2Channel::interactive_tx_constructor should be set"); - - let commitment_signed = - chan.context.get_initial_commitment_signed_v2(&chan.funding, &&logger); - let commitment_signed = match commitment_signed { - Some(commitment_signed) => commitment_signed, - // TODO(dual_funding): Support async signing - None => { - return Err(AbortReason::InternalError( - "Failed to compute commitment_signed signatures", - )); - }, - }; - - (interactive_tx_constructor, commitment_signed) + .expect("PendingV2Channel::interactive_tx_constructor should be set") }, ChannelPhase::Funded(chan) => { if let Some(pending_splice) = chan.pending_splice.as_mut() { - pending_splice - .funding_negotiation - .take() - .and_then(|funding_negotiation| { - if let FundingNegotiation::ConstructingTransaction { - funding, - interactive_tx_constructor, - } = funding_negotiation - { - let is_initiator = interactive_tx_constructor.is_initiator(); - Some((is_initiator, funding, interactive_tx_constructor)) - } else { - // Replace the taken state for later error handling - pending_splice.funding_negotiation = Some(funding_negotiation); - None - } - }) - .ok_or_else(|| { - AbortReason::InternalError( - "Got a tx_complete message in an invalid state", - ) - }) - .and_then(|(is_initiator, mut funding, interactive_tx_constructor)| { - funding.channel_transaction_parameters.funding_outpoint = - Some(funding_outpoint); - match chan.context.get_initial_commitment_signed_v2(&funding, &&logger) - { - Some(commitment_signed) => { - // Advance the state - pending_splice.funding_negotiation = - Some(FundingNegotiation::AwaitingSignatures { - is_initiator, - funding, - }); - Ok((interactive_tx_constructor, commitment_signed)) - }, - // TODO(splicing): Support async signing - None => { - // Restore the taken state for later error handling - pending_splice.funding_negotiation = - Some(FundingNegotiation::ConstructingTransaction { - funding, - interactive_tx_constructor, - }); - Err(AbortReason::InternalError( - "Failed to compute commitment_signed signatures", - )) - }, - } - })? + let funding_negotiation = pending_splice.funding_negotiation.take(); + if let Some(FundingNegotiation::ConstructingTransaction { + mut funding, + interactive_tx_constructor, + }) = funding_negotiation + { + let is_initiator = interactive_tx_constructor.is_initiator(); + funding.channel_transaction_parameters.funding_outpoint = + Some(funding_outpoint); + pending_splice.funding_negotiation = + Some(FundingNegotiation::AwaitingSignatures { is_initiator, funding }); + interactive_tx_constructor + } else { + // Replace the taken state for later error handling + pending_splice.funding_negotiation = funding_negotiation; + return Err(AbortReason::InternalError( + "Got a tx_complete message in an invalid state", + )); + } } else { return Err(AbortReason::InternalError( "Got a tx_complete message in an invalid state", @@ -2159,7 +2129,7 @@ where let signing_session = interactive_tx_constructor.into_signing_session(); self.context_mut().interactive_tx_signing_session = Some(signing_session); - Ok(commitment_signed) + Ok(()) } pub fn force_shutdown(&mut self, closure_reason: ClosureReason) -> ShutdownResult { @@ -2911,6 +2881,7 @@ where /// send it first. 
resend_order: RAACommitmentOrder, + monitor_pending_tx_signatures: bool, monitor_pending_channel_ready: bool, monitor_pending_revoke_and_ack: bool, monitor_pending_commitment_signed: bool, @@ -3642,6 +3613,7 @@ where resend_order: RAACommitmentOrder::CommitmentFirst, + monitor_pending_tx_signatures: false, monitor_pending_channel_ready: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, @@ -3881,6 +3853,7 @@ where resend_order: RAACommitmentOrder::CommitmentFirst, + monitor_pending_tx_signatures: false, monitor_pending_channel_ready: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, @@ -6860,8 +6833,25 @@ type BestBlockUpdatedRes = ( Option, ); +/// The result of handling a `tx_complete` message during interactive transaction construction. +pub(crate) struct TxCompleteResult { + /// The message to send to the counterparty, if any. + pub interactive_tx_msg_send: Option, + + /// If the negotiation completed and the holder has local contributions, this contains the + /// unsigned funding transaction for the `FundingTransactionReadyForSigning` event. + pub event_unsigned_tx: Option, + + /// If the negotiation completed and the holder has no local contributions, this contains + /// the result of automatically calling `funding_transaction_signed` with empty witnesses. + pub funding_tx_signed: Option, +} + /// The result of signing a funding transaction negotiated using the interactive-tx protocol. pub struct FundingTxSigned { + /// The initial `commitment_signed` message to send to the counterparty, if necessary. + pub commitment_signed: Option, + /// Signatures that should be sent to the counterparty, if necessary. pub tx_signatures: Option, @@ -8051,6 +8041,7 @@ where Vec::new(), logger, ); + self.context.monitor_pending_tx_signatures = true; Ok(self.push_ret_blockable_mon_update(monitor_update)) } @@ -9104,6 +9095,7 @@ where // Our `tx_signatures` either should've been the first time we processed them, // or we're waiting for our counterparty to send theirs first. return Ok(FundingTxSigned { + commitment_signed: None, tx_signatures: None, funding_tx: None, splice_negotiated: None, @@ -9117,6 +9109,7 @@ where // We may be handling a duplicate call and the funding was already locked so we // no longer have the signing session present. 
return Ok(FundingTxSigned { + commitment_signed: None, tx_signatures: None, funding_tx: None, splice_negotiated: None, @@ -9178,7 +9171,21 @@ where (None, None) }; - Ok(FundingTxSigned { tx_signatures, funding_tx, splice_negotiated, splice_locked }) + let funding = self + .pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.funding_negotiation.as_ref()) + .and_then(|funding_negotiation| funding_negotiation.as_funding()) + .unwrap_or(&self.funding); + let commitment_signed = self.context.get_initial_commitment_signed_v2(funding, &&logger); + + Ok(FundingTxSigned { + commitment_signed, + tx_signatures, + funding_tx, + splice_negotiated, + splice_locked, + }) } pub fn tx_signatures( @@ -9246,6 +9253,7 @@ where }; Ok(FundingTxSigned { + commitment_signed: None, tx_signatures: holder_tx_signatures, funding_tx, splice_negotiated, @@ -9451,6 +9459,25 @@ where self.context.channel_state.clear_monitor_update_in_progress(); assert_eq!(self.blocked_monitor_updates_pending(), 0); + let mut tx_signatures = self + .context + .monitor_pending_tx_signatures + .then(|| ()) + .and_then(|_| self.context.interactive_tx_signing_session.as_ref()) + .and_then(|signing_session| signing_session.holder_tx_signatures().clone()); + if tx_signatures.is_some() { + // We want to clear that the monitor update for our `tx_signatures` has completed, but + // we may still need to hold back the message until it's ready to be sent. + self.context.monitor_pending_tx_signatures = false; + let signing_session = self.context.interactive_tx_signing_session.as_ref() + .expect("We have a tx_signatures message so we must have a valid signing session"); + if !signing_session.holder_sends_tx_signatures_first() + && !signing_session.has_received_tx_signatures() + { + tx_signatures.take(); + } + } + // If we're past (or at) the AwaitingChannelReady stage on an outbound (or V2-established) channel, // try to (re-)broadcast the funding transaction as we may have declined to broadcast it when we // first received the funding_signed. 
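The gate added in the hunk above can be stated as a single predicate. The following is a condensed sketch, written as a hypothetical free function rather than code from this patch, with parameter names mirroring the fields and accessors used in `monitor_updating_restored`: a held-back `tx_signatures` is released only once its monitor update completes, and even then only if we send `tx_signatures` first or have already received the counterparty's.

    // Hypothetical restatement of the release gate above; not part of the patch.
    fn may_release_tx_signatures(
        monitor_pending_tx_signatures: bool,
        have_holder_tx_signatures: bool,
        holder_sends_tx_signatures_first: bool,
        has_received_tx_signatures: bool,
    ) -> bool {
        // The monitor update covering our signatures must have been pending and
        // our own `tx_signatures` must be available...
        monitor_pending_tx_signatures
            && have_holder_tx_signatures
            // ...and it must be our turn to send, per the interactive-tx
            // ordering rules.
            && (holder_sends_tx_signatures_first || has_received_tx_signatures)
    }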
@@ -9539,7 +9566,7 @@ where match commitment_order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { raa, commitment_update, commitment_order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, - pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, + pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures, channel_ready_order, } } @@ -15074,6 +15101,9 @@ where let pending_splice = self.pending_splice.as_ref().filter(|_| !self.should_reset_pending_splice_state(false)); + let monitor_pending_tx_signatures = + self.context.monitor_pending_tx_signatures.then_some(()); + write_tlv_fields!(writer, { (0, self.context.announcement_sigs, option), // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a @@ -15093,6 +15123,7 @@ where (9, self.context.target_closing_feerate_sats_per_kw, option), (10, monitor_pending_update_adds, option), // Added in 0.0.122 (11, self.context.monitor_pending_finalized_fulfills, required_vec), + (12, monitor_pending_tx_signatures, option), // Added in 0.3 (13, self.context.channel_creation_height, required), (15, preimages, required_vec), (17, self.context.announcement_sigs_state, required), @@ -15524,6 +15555,8 @@ where let mut holding_cell_accountable: Option> = None; let mut pending_outbound_accountable: Option> = None; + let mut monitor_pending_tx_signatures: Option<()> = None; + read_tlv_fields!(reader, { (0, announcement_sigs, option), (1, minimum_depth, option), @@ -15537,6 +15570,7 @@ where (9, target_closing_feerate_sats_per_kw, option), (10, monitor_pending_update_adds, option), // Added in 0.0.122 (11, monitor_pending_finalized_fulfills, optional_vec), + (12, monitor_pending_tx_signatures, option), // Added in 0.3 (13, channel_creation_height, required), (15, preimages, required_vec), // The preimages transitioned from optional to required in 0.2 (17, announcement_sigs_state, required), @@ -15949,6 +15983,7 @@ where resend_order, + monitor_pending_tx_signatures: monitor_pending_tx_signatures.is_some(), monitor_pending_channel_ready, monitor_pending_revoke_and_ack, monitor_pending_commitment_signed, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index dafeffe98bf..598e1d3b554 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6455,7 +6455,8 @@ where &self.logger, ) { Ok(FundingTxSigned { - tx_signatures: Some(tx_signatures), + commitment_signed, + tx_signatures, funding_tx, splice_negotiated, splice_locked, @@ -6481,19 +6482,39 @@ where None, )); } - peer_state.pending_msg_events.push( - MessageSendEvent::SendTxSignatures { - node_id: *counterparty_node_id, - msg: tx_signatures, - }, - ); - if let Some(splice_locked) = splice_locked { - peer_state.pending_msg_events.push( - MessageSendEvent::SendSpliceLocked { - node_id: *counterparty_node_id, - msg: splice_locked, - }, - ); + if chan.context.is_connected() { + if let Some(commitment_signed) = commitment_signed { + peer_state.pending_msg_events.push( + MessageSendEvent::UpdateHTLCs { + node_id: *counterparty_node_id, + channel_id: *channel_id, + updates: CommitmentUpdate { + commitment_signed: vec![commitment_signed], + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, + }, + }, + ); + } + if let Some(tx_signatures) = tx_signatures { + 
peer_state.pending_msg_events.push( + MessageSendEvent::SendTxSignatures { + node_id: *counterparty_node_id, + msg: tx_signatures, + }, + ); + } + if let Some(splice_locked) = splice_locked { + peer_state.pending_msg_events.push( + MessageSendEvent::SendSpliceLocked { + node_id: *counterparty_node_id, + msg: splice_locked, + }, + ); + } } return NotifyOption::DoPersist; }, @@ -6501,17 +6522,6 @@ where result = Err(err); return NotifyOption::SkipPersistNoEvents; }, - Ok(FundingTxSigned { - tx_signatures: None, - funding_tx, - splice_negotiated, - splice_locked, - }) => { - debug_assert!(funding_tx.is_none()); - debug_assert!(splice_negotiated.is_none()); - debug_assert!(splice_locked.is_none()); - return NotifyOption::SkipPersistNoEvents; - }, } }, None => { @@ -10151,79 +10161,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } - if let Some(signing_session) = (!channel.is_awaiting_monitor_update()) - .then(|| ()) - .and_then(|_| channel.context.interactive_tx_signing_session.as_mut()) - .filter(|signing_session| signing_session.has_received_commitment_signed()) - .filter(|signing_session| signing_session.holder_tx_signatures().is_none()) - { - if signing_session.has_local_contribution() { - let mut pending_events = self.pending_events.lock().unwrap(); - let unsigned_transaction = signing_session.unsigned_tx().tx().clone(); - let event_action = ( - Event::FundingTransactionReadyForSigning { - unsigned_transaction, - counterparty_node_id, - channel_id: channel.context.channel_id(), - user_channel_id: channel.context.get_user_id(), - }, - None, - ); - - if !pending_events.contains(&event_action) { - pending_events.push_back(event_action); - } - } else { - let txid = signing_session.unsigned_tx().compute_txid(); - let best_block_height = self.best_block.read().unwrap().height; - match channel.funding_transaction_signed(txid, vec![], best_block_height, &self.logger) { - Ok(FundingTxSigned { - tx_signatures: Some(tx_signatures), - funding_tx, - splice_negotiated, - splice_locked, - }) => { - if let Some(funding_tx) = funding_tx { - self.broadcast_interactive_funding(channel, &funding_tx, &self.logger); - } - - if let Some(splice_negotiated) = splice_negotiated { - self.pending_events.lock().unwrap().push_back(( - events::Event::SplicePending { - channel_id: channel.context.channel_id(), - counterparty_node_id, - user_channel_id: channel.context.get_user_id(), - new_funding_txo: splice_negotiated.funding_txo, - channel_type: splice_negotiated.channel_type, - new_funding_redeem_script: splice_negotiated.funding_redeem_script, - }, - None, - )); - } - - if channel.context.is_connected() { - pending_msg_events.push(MessageSendEvent::SendTxSignatures { - node_id: counterparty_node_id, - msg: tx_signatures, - }); - if let Some(splice_locked) = splice_locked { - pending_msg_events.push(MessageSendEvent::SendSpliceLocked { - node_id: counterparty_node_id, - msg: splice_locked, - }); - } - } - }, - Ok(FundingTxSigned { tx_signatures: None, .. }) => { - debug_assert!(false, "If our tx_signatures is empty, then we should send it first!"); - }, - Err(err) => { - log_warn!(logger, "Failed signing interactive funding transaction: {err:?}"); - }, - } - } - } - { let mut pending_events = self.pending_events.lock().unwrap(); emit_channel_pending_event!(pending_events, channel); @@ -11206,30 +11143,69 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ hash_map::Entry::Occupied(mut chan_entry) => { let chan = chan_entry.get_mut(); match chan.tx_complete(msg, &self.logger) { - Ok((interactive_tx_msg_send, commitment_signed)) => { - let persist = if interactive_tx_msg_send.is_some() || commitment_signed.is_some() { - NotifyOption::SkipPersistHandleEvents - } else { - NotifyOption::SkipPersistNoEvents - }; - if let Some(interactive_tx_msg_send) = interactive_tx_msg_send { + Ok(tx_complete_result) => { + let mut persist = NotifyOption::SkipPersistNoEvents; + + if let Some(interactive_tx_msg_send) = tx_complete_result.interactive_tx_msg_send { let msg_send_event = interactive_tx_msg_send.into_msg_send_event(counterparty_node_id); peer_state.pending_msg_events.push(msg_send_event); + persist = NotifyOption::SkipPersistHandleEvents; }; - if let Some(commitment_signed) = commitment_signed { - peer_state.pending_msg_events.push(MessageSendEvent::UpdateHTLCs { - node_id: counterparty_node_id, - channel_id: msg.channel_id, - updates: CommitmentUpdate { - commitment_signed: vec![commitment_signed], - update_add_htlcs: vec![], - update_fulfill_htlcs: vec![], - update_fail_htlcs: vec![], - update_fail_malformed_htlcs: vec![], - update_fee: None, + + if let Some(unsigned_transaction) = tx_complete_result.event_unsigned_tx { + self.pending_events.lock().unwrap().push_back(( + events::Event::FundingTransactionReadyForSigning { + unsigned_transaction, + counterparty_node_id, + channel_id: msg.channel_id, + user_channel_id: chan.context().get_user_id(), }, - }); + None, + )); + // + // We have a successful signing session that we need to persist. + persist = NotifyOption::DoPersist; + } + + if let Some(FundingTxSigned { + commitment_signed, + tx_signatures, + funding_tx, + splice_negotiated, + splice_locked, + }) = tx_complete_result.funding_tx_signed + { + // We shouldn't expect to see the splice negotiated or locked yet as we + // haven't exchanged `tx_signatures` at this point. + debug_assert!(funding_tx.is_none()); + debug_assert!(splice_negotiated.is_none()); + debug_assert!(splice_locked.is_none()); + + if let Some(commitment_signed) = commitment_signed { + peer_state.pending_msg_events.push(MessageSendEvent::UpdateHTLCs { + node_id: counterparty_node_id, + channel_id: msg.channel_id, + updates: CommitmentUpdate { + commitment_signed: vec![commitment_signed], + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, + }, + }); + } + if let Some(tx_signatures) = tx_signatures { + peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures { + node_id: counterparty_node_id, + msg: tx_signatures, + }); + } + + // We have a successful signing session that we need to persist. + persist = NotifyOption::DoPersist; } + Ok(persist) }, Err((error, splice_funding_failed)) => { @@ -11274,6 +11250,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Some(chan) => { let best_block_height = self.best_block.read().unwrap().height; let FundingTxSigned { + commitment_signed, tx_signatures, funding_tx, splice_negotiated, @@ -11284,6 +11261,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ chan.tx_signatures(msg, best_block_height, &self.logger), chan_entry ); + + // We should never be sending a `commitment_signed` in response to their + // `tx_signatures`. 
+ debug_assert!(commitment_signed.is_none()); + if let Some(tx_signatures) = tx_signatures { peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures { node_id: *counterparty_node_id, @@ -19013,6 +18995,32 @@ where } } + // We may need to regenerate [`Event::FundingTransactionReadyForSigning`] for channels that + // still need their holder `tx_signatures`. + for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() { + let peer_state = peer_state_mutex.lock().unwrap(); + for (channel_id, chan) in peer_state.channel_by_id.iter() { + if let Some(signing_session) = + chan.context().interactive_tx_signing_session.as_ref() + { + if signing_session.holder_tx_signatures().is_none() + && signing_session.has_local_contribution() + { + let unsigned_transaction = signing_session.unsigned_tx().tx().clone(); + pending_events_read.push_back(( + Event::FundingTransactionReadyForSigning { + unsigned_transaction, + counterparty_node_id: *counterparty_node_id, + channel_id: *channel_id, + user_channel_id: chan.context().get_user_id(), + }, + None, + )); + } + } + } + } + let best_block = BestBlock::new(best_block_hash, best_block_height); let flow = OffersMessageFlow::new( chain_hash, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 2cf5ea96acb..a70879718e2 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1086,7 +1086,7 @@ macro_rules! get_event { let ev = events.pop().unwrap(); match ev { $event_type { .. } => ev, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event {ev:?}"), } }}; } diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index 7ed829886c6..f402ac5efa6 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -668,10 +668,9 @@ impl InteractiveTxSigningSession { self.holder_tx_signatures = Some(tx_signatures); let funding_tx_opt = self.maybe_finalize_funding_tx(); - let holder_tx_signatures = (self.holder_sends_tx_signatures_first - || self.has_received_tx_signatures()) + let holder_tx_signatures = (self.has_received_commitment_signed + && (self.holder_sends_tx_signatures_first || self.has_received_tx_signatures())) .then(|| { - debug_assert!(self.has_received_commitment_signed); self.holder_tx_signatures.clone().expect("Holder tx_signatures were just provided") }); diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index db6680d963c..c0ba401cfae 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -27,7 +27,6 @@ use crate::ln::types::ChannelId; use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::util::errors::APIError; use crate::util::ser::Writeable; -use crate::util::test_channel_signer::SignerOp; use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; @@ -132,7 +131,7 @@ fn test_v1_splice_in_negative_insufficient_inputs() { pub fn negotiate_splice_tx<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, initiator_contribution: SpliceContribution, -) -> msgs::CommitmentSigned { +) { let new_funding_script = complete_splice_handshake(initiator, acceptor, channel_id, initiator_contribution.clone()); complete_interactive_funding_negotiation( @@ -141,7 +140,7 @@ pub fn negotiate_splice_tx<'a, 'b, 'c, 'd>( channel_id, initiator_contribution, new_funding_script, - ) + ); } pub fn complete_splice_handshake<'a, 'b, 'c, 'd>( @@ 
-184,7 +183,7 @@ pub fn complete_splice_handshake<'a, 'b, 'c, 'd>( pub fn complete_interactive_funding_negotiation<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, initiator_contribution: SpliceContribution, new_funding_script: ScriptBuf, -) -> msgs::CommitmentSigned { +) { let node_id_initiator = initiator.node.get_our_node_id(); let node_id_acceptor = acceptor.node.get_our_node_id(); @@ -240,22 +239,15 @@ pub fn complete_interactive_funding_negotiation<'a, 'b, 'c, 'd>( ); acceptor.node.handle_tx_add_output(node_id_initiator, &tx_add_output); } else { - let mut msg_events = initiator.node.get_and_clear_pending_msg_events(); - assert_eq!( - msg_events.len(), - if acceptor_sent_tx_complete { 2 } else { 1 }, - "{msg_events:?}" - ); - if let MessageSendEvent::SendTxComplete { ref msg, .. } = msg_events.remove(0) { + let msg_events = initiator.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::SendTxComplete { ref msg, .. } = &msg_events[0] { acceptor.node.handle_tx_complete(node_id_initiator, msg); } else { panic!(); } if acceptor_sent_tx_complete { - if let MessageSendEvent::UpdateHTLCs { mut updates, .. } = msg_events.remove(0) { - return updates.commitment_signed.remove(0); - } - panic!(); + break; } } @@ -271,13 +263,38 @@ pub fn complete_interactive_funding_negotiation<'a, 'b, 'c, 'd>( } pub fn sign_interactive_funding_tx<'a, 'b, 'c, 'd>( - initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, - initial_commit_sig_for_acceptor: msgs::CommitmentSigned, is_0conf: bool, + initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, is_0conf: bool, ) -> (Transaction, Option<(msgs::SpliceLocked, PublicKey)>) { let node_id_initiator = initiator.node.get_our_node_id(); let node_id_acceptor = acceptor.node.get_our_node_id(); assert!(initiator.node.get_and_clear_pending_msg_events().is_empty()); + + let event = get_event!(initiator, Event::FundingTransactionReadyForSigning); + if let Event::FundingTransactionReadyForSigning { + channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } = event + { + let partially_signed_tx = initiator.wallet_source.sign_tx(unsigned_transaction).unwrap(); + initiator + .node + .funding_transaction_signed(&channel_id, &counterparty_node_id, partially_signed_tx) + .unwrap(); + } else { + panic!(); + } + + let msg_events = initiator.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + let initial_commit_sig_for_acceptor = + if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = &msg_events[0] { + updates.commitment_signed[0].clone() + } else { + panic!(); + }; acceptor.node.handle_commitment_signed(node_id_initiator, &initial_commit_sig_for_acceptor); let msg_events = acceptor.node.get_and_clear_pending_msg_events(); @@ -294,20 +311,6 @@ pub fn sign_interactive_funding_tx<'a, 'b, 'c, 'd>( panic!(); } - let event = get_event!(initiator, Event::FundingTransactionReadyForSigning); - if let Event::FundingTransactionReadyForSigning { - channel_id, - counterparty_node_id, - unsigned_transaction, - .. 
- } = event - { - let partially_signed_tx = initiator.wallet_source.sign_tx(unsigned_transaction).unwrap(); - initiator - .node - .funding_transaction_signed(&channel_id, &counterparty_node_id, partially_signed_tx) - .unwrap(); - } let mut msg_events = initiator.node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), if is_0conf { 2 } else { 1 }, "{msg_events:?}"); if let MessageSendEvent::SendTxSignatures { ref msg, .. } = &msg_events[0] { @@ -348,15 +351,14 @@ pub fn splice_channel<'a, 'b, 'c, 'd>( let new_funding_script = complete_splice_handshake(initiator, acceptor, channel_id, initiator_contribution.clone()); - let initial_commit_sig_for_acceptor = complete_interactive_funding_negotiation( + complete_interactive_funding_negotiation( initiator, acceptor, channel_id, initiator_contribution, new_funding_script, ); - let (splice_tx, splice_locked) = - sign_interactive_funding_tx(initiator, acceptor, initial_commit_sig_for_acceptor, false); + let (splice_tx, splice_locked) = sign_interactive_funding_tx(initiator, acceptor, false); assert!(splice_locked.is_none()); expect_splice_pending_event(initiator, &node_id_acceptor); @@ -642,15 +644,13 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { nodes[0].node.handle_tx_complete(node_id_1, &tx_complete); let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 2); + assert_eq!(msg_events.len(), 1); if let MessageSendEvent::SendTxComplete { .. } = &msg_events[0] { } else { panic!("Unexpected event"); } - if let MessageSendEvent::UpdateHTLCs { .. } = &msg_events[1] { - } else { - panic!("Unexpected event"); - } + + let _event = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); if reload { let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); @@ -671,6 +671,8 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { chain_monitor_1c, node_1c ); + // We should have another signing event generated upon reload as they're not persisted. + let _ = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); } else { nodes[0].node.peer_disconnected(node_id_1); nodes[1].node.peer_disconnected(node_id_0); @@ -1290,25 +1292,17 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), }, ]); - let initial_commit_sig_for_acceptor = - negotiate_splice_tx(&nodes[0], &nodes[1], channel_id, initiator_contribution); - assert_eq!(initial_commit_sig_for_acceptor.htlc_signatures.len(), 1); - let initial_commit_sig_for_initiator = get_htlc_update_msgs(&nodes[1], &node_id_0); - assert_eq!(initial_commit_sig_for_initiator.commitment_signed.len(), 1); - assert_eq!(initial_commit_sig_for_initiator.commitment_signed[0].htlc_signatures.len(), 1); + negotiate_splice_tx(&nodes[0], &nodes[1], channel_id, initiator_contribution); - macro_rules! reconnect_nodes { - ($f: expr) => { - nodes[0].node.peer_disconnected(node_id_1); - nodes[1].node.peer_disconnected(node_id_0); - let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); - $f(&mut reconnect_args); - reconnect_nodes(reconnect_args); - }; - } + // Node 0 should have a signing event to handle since they had a contribution in the splice. + // Node 1 won't and will immediately try to send their initial `commitment_signed`. 
+ let signing_event = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - // Reestablishing now should force both nodes to retransmit their initial `commitment_signed` - // message as they were never delivered. + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let _ = get_htlc_update_msgs(&nodes[1], &node_id_0); + + // Disconnect them, and handle the signing event on the initiator side. if reload { let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); reload_node!( @@ -1328,6 +1322,8 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { chain_monitor_1a, node_1a ); + // We should have another signing event generated upon reload as they're not persisted. + let _ = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); if async_monitor_update { persister_0a.set_update_ret(ChannelMonitorUpdateStatus::InProgress); persister_1a.set_update_ret(ChannelMonitorUpdateStatus::InProgress); @@ -1341,6 +1337,17 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { } } + if let Event::FundingTransactionReadyForSigning { unsigned_transaction, .. } = signing_event { + let tx = nodes[0].wallet_source.sign_tx(unsigned_transaction).unwrap(); + nodes[0].node.funding_transaction_signed(&channel_id, &node_id_1, tx).unwrap(); + } + + // Since they're not connected, no messages should be sent. + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Reestablishing now should force both nodes to retransmit their initial `commitment_signed` + // message as they were never delivered. let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_interactive_tx_commit_sig = (true, true); reconnect_nodes(reconnect_args); @@ -1350,6 +1357,16 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { check_added_monitors(&nodes[0], 1); check_added_monitors(&nodes[1], 1); + macro_rules! reconnect_nodes { + ($f: expr) => { + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + $f(&mut reconnect_args); + reconnect_nodes(reconnect_args); + }; + } + if async_monitor_update { // Reconnecting again should result in no messages/events being generated as the monitor // update is pending. @@ -1364,11 +1381,9 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); } - // Node 0 should have a signing event to handle since they had a contribution in the splice. - // Node 1 won't and will immediately send `tx_signatures`. - let _ = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); + // Both nodes should have their `tx_signatures` ready after completing the monitor update, but + // node 0 has to wait for node 1 to send theirs first. 
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let _ = get_event_msg!(nodes[1], MessageSendEvent::SendTxSignatures, node_id_0); // Reconnecting now should force node 1 to retransmit their `tx_signatures` since it was never @@ -1377,18 +1392,6 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { reconnect_nodes!(|reconnect_args: &mut ReconnectArgs| { reconnect_args.send_interactive_tx_sigs = (true, false); }); - let _ = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); - - // Reconnect again to make sure node 1 doesn't retransmit `tx_signatures` unnecessarily as it - // was delivered in the previous reestablishment. - reconnect_nodes!(|_| {}); - - // Have node 0 sign, we should see its `tx_signatures` go out. - let event = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); - if let Event::FundingTransactionReadyForSigning { unsigned_transaction, .. } = event { - let tx = nodes[0].wallet_source.sign_tx(unsigned_transaction).unwrap(); - nodes[0].node.funding_transaction_signed(&channel_id, &node_id_1, tx).unwrap(); - } let _ = get_event_msg!(nodes[0], MessageSendEvent::SendTxSignatures, node_id_1); expect_splice_pending_event(&nodes[0], &node_id_1); @@ -1639,25 +1642,22 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { .unwrap(); // Negotiate the first splice to completion. - let initial_commit_sig = { - nodes[1].node.handle_splice_init(node_id_0, &splice_init); - let splice_ack = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceAck, node_id_0); - nodes[0].node.handle_splice_ack(node_id_1, &splice_ack); - let new_funding_script = chan_utils::make_funding_redeemscript( - &splice_init.funding_pubkey, - &splice_ack.funding_pubkey, - ) - .to_p2wsh(); - complete_interactive_funding_negotiation( - &nodes[0], - &nodes[1], - channel_id, - node_0_contribution, - new_funding_script, - ) - }; - let (splice_tx, splice_locked) = - sign_interactive_funding_tx(&nodes[0], &nodes[1], initial_commit_sig, use_0conf); + nodes[1].node.handle_splice_init(node_id_0, &splice_init); + let splice_ack = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceAck, node_id_0); + nodes[0].node.handle_splice_ack(node_id_1, &splice_ack); + let new_funding_script = chan_utils::make_funding_redeemscript( + &splice_init.funding_pubkey, + &splice_ack.funding_pubkey, + ) + .to_p2wsh(); + complete_interactive_funding_negotiation( + &nodes[0], + &nodes[1], + channel_id, + node_0_contribution, + new_funding_script, + ); + let (splice_tx, splice_locked) = sign_interactive_funding_tx(&nodes[0], &nodes[1], use_0conf); expect_splice_pending_event(&nodes[0], &node_id_1); expect_splice_pending_event(&nodes[1], &node_id_0); @@ -1781,25 +1781,22 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { } let splice_init = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceInit, node_id_0); - let initial_commit_sig = { - nodes[0].node.handle_splice_init(node_id_1, &splice_init); - let splice_ack = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceAck, node_id_1); - nodes[1].node.handle_splice_ack(node_id_0, &splice_ack); - let new_funding_script = chan_utils::make_funding_redeemscript( - &splice_init.funding_pubkey, - &splice_ack.funding_pubkey, - ) - .to_p2wsh(); - complete_interactive_funding_negotiation( - &nodes[1], - &nodes[0], - channel_id, - node_1_contribution, - new_funding_script, - ) - }; - let (splice_tx, splice_locked) = - 
sign_interactive_funding_tx(&nodes[1], &nodes[0], initial_commit_sig, use_0conf); + nodes[0].node.handle_splice_init(node_id_1, &splice_init); + let splice_ack = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceAck, node_id_1); + nodes[1].node.handle_splice_ack(node_id_0, &splice_ack); + let new_funding_script = chan_utils::make_funding_redeemscript( + &splice_init.funding_pubkey, + &splice_ack.funding_pubkey, + ) + .to_p2wsh(); + complete_interactive_funding_negotiation( + &nodes[1], + &nodes[0], + channel_id, + node_1_contribution, + new_funding_script, + ); + let (splice_tx, splice_locked) = sign_interactive_funding_tx(&nodes[1], &nodes[0], use_0conf); expect_splice_pending_event(&nodes[0], &node_id_1); expect_splice_pending_event(&nodes[1], &node_id_0); @@ -1829,7 +1826,7 @@ fn disconnect_on_unexpected_interactive_tx_message() { let initiator = &nodes[0]; let acceptor = &nodes[1]; - let _node_id_initiator = initiator.node.get_our_node_id(); + let node_id_initiator = initiator.node.get_our_node_id(); let node_id_acceptor = acceptor.node.get_our_node_id(); let initial_channel_capacity = 100_000; @@ -1846,11 +1843,10 @@ fn disconnect_on_unexpected_interactive_tx_message() { // Complete interactive-tx construction, but fail by having the acceptor send a duplicate // tx_complete instead of commitment_signed. - let _ = negotiate_splice_tx(initiator, acceptor, channel_id, contribution.clone()); + negotiate_splice_tx(initiator, acceptor, channel_id, contribution.clone()); - let mut msg_events = acceptor.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - assert!(matches!(msg_events.remove(0), MessageSendEvent::UpdateHTLCs { .. })); + let _ = get_event!(initiator, Event::FundingTransactionReadyForSigning); + let _ = get_htlc_update_msgs(acceptor, &node_id_initiator); let tx_complete = msgs::TxComplete { channel_id }; initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); @@ -1910,58 +1906,6 @@ fn fail_splice_on_interactive_tx_error() { let tx_abort = get_event_msg!(acceptor, MessageSendEvent::SendTxAbort, node_id_initiator); initiator.node.handle_tx_abort(node_id_acceptor, &tx_abort); - - // Fail signing the commitment transaction, which prevents the initiator from sending - // tx_complete. 
- initiator.disable_channel_signer_op( - &node_id_acceptor, - &channel_id, - SignerOp::SignCounterpartyCommitment, - ); - let _ = complete_splice_handshake(initiator, acceptor, channel_id, contribution.clone()); - - let tx_add_input = - get_event_msg!(initiator, MessageSendEvent::SendTxAddInput, node_id_acceptor); - acceptor.node.handle_tx_add_input(node_id_initiator, &tx_add_input); - - let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); - initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); - - let tx_add_input = - get_event_msg!(initiator, MessageSendEvent::SendTxAddInput, node_id_acceptor); - acceptor.node.handle_tx_add_input(node_id_initiator, &tx_add_input); - - let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); - initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); - - let tx_add_output = - get_event_msg!(initiator, MessageSendEvent::SendTxAddOutput, node_id_acceptor); - acceptor.node.handle_tx_add_output(node_id_initiator, &tx_add_output); - - let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); - initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); - - let tx_add_output = - get_event_msg!(initiator, MessageSendEvent::SendTxAddOutput, node_id_acceptor); - acceptor.node.handle_tx_add_output(node_id_initiator, &tx_add_output); - - let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); - initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); - - let event = get_event!(initiator, Event::SpliceFailed); - match event { - Event::SpliceFailed { contributed_inputs, .. } => { - assert_eq!(contributed_inputs.len(), 1); - assert_eq!(contributed_inputs[0], contribution.inputs()[0].outpoint()); - }, - _ => panic!("Expected Event::SpliceFailed"), - } - - let tx_abort = get_event_msg!(initiator, MessageSendEvent::SendTxAbort, node_id_acceptor); - acceptor.node.handle_tx_abort(node_id_initiator, &tx_abort); - - let tx_abort = get_event_msg!(acceptor, MessageSendEvent::SendTxAbort, node_id_initiator); - initiator.node.handle_tx_abort(node_id_acceptor, &tx_abort); } #[test] diff --git a/pending_changelog/4336.txt b/pending_changelog/4336.txt new file mode 100644 index 00000000000..a41c71ca2b4 --- /dev/null +++ b/pending_changelog/4336.txt @@ -0,0 +1,5 @@ +Forward Compatibility +===================== + +* Downgrading from 0.3 will not be possible while a splice is pending when using async monitor + updates. From 7e226a039c977c9a96050e6fd5fa23cb2296e8bc Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Thu, 22 Jan 2026 09:49:59 -0800 Subject: [PATCH 134/242] Buffer interactive-tx initial commitment signed from counterparty This is crucial to enable the splice cancellation use case. When we process the initial commitment signed from our counterparty, we queue a monitor update that cannot be undone. To give the user a chance to abort the splice negotiation before it's committed to, we buffer the message until a successful call to `Channel::funding_transaction_signed` and process it then. Note that this is currently only done for splice and RBF attempts, as if we want to abort a dual funding negotiation, we can just force close the channel as it hasn't been funded yet. 
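A minimal sketch of the buffer-then-drain pattern this patch introduces, shown before the diff for orientation. The struct and method names below are illustrative stand-ins; in the patch itself the buffered message lives in `FundingNegotiation::AwaitingSignatures`.

    // Stand-in for `msgs::CommitmentSigned`.
    struct CommitmentSignedMsg;

    // Sketch of the buffered state added to `AwaitingSignatures`.
    struct AwaitingSignatures {
        // Parked counterparty message. Deliberately not persisted: the
        // counterparty retransmits its initial `commitment_signed` on
        // reconnection.
        initial_commitment_signed_from_counterparty: Option<CommitmentSignedMsg>,
    }

    impl AwaitingSignatures {
        // On receipt of the counterparty's initial `commitment_signed` before
        // the user has signed: park the message rather than queueing an
        // irreversible `RenegotiatedFunding` monitor update.
        fn buffer(&mut self, msg: CommitmentSignedMsg) {
            self.initial_commitment_signed_from_counterparty = Some(msg);
        }

        // From `funding_transaction_signed`: the user has committed to the
        // splice, so drain the buffered message for processing.
        fn drain(&mut self) -> Option<CommitmentSignedMsg> {
            self.initial_commitment_signed_from_counterparty.take()
        }
    }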
--- lightning/src/ln/channel.rs | 94 ++++++++-- lightning/src/ln/channelmanager.rs | 266 ++++++++++++++++++----------- lightning/src/ln/splicing_tests.rs | 231 +++++++++++++++++++++++++ 3 files changed, 481 insertions(+), 110 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 23edf616dae..de42f413cb3 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1902,10 +1902,11 @@ where } } - pub fn tx_complete( - &mut self, msg: &msgs::TxComplete, logger: &L, + pub fn tx_complete( + &mut self, msg: &msgs::TxComplete, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result)> where + F::Target: FeeEstimator, L::Target: Logger, { let tx_complete_action = match self.interactive_tx_constructor_mut() { @@ -1954,7 +1955,7 @@ where let funding_tx_signed = if !has_local_contribution { let funding_txid = signing_session.unsigned_tx().tx().compute_txid(); if let ChannelPhase::Funded(chan) = &mut self.phase { - chan.funding_transaction_signed(funding_txid, vec![], 0, logger).ok() + chan.funding_transaction_signed(funding_txid, vec![], 0, fee_estimator, logger).ok() } else { None } @@ -2104,7 +2105,11 @@ where funding.channel_transaction_parameters.funding_outpoint = Some(funding_outpoint); pending_splice.funding_negotiation = - Some(FundingNegotiation::AwaitingSignatures { is_initiator, funding }); + Some(FundingNegotiation::AwaitingSignatures { + is_initiator, + funding, + initial_commitment_signed_from_counterparty: None, + }); interactive_tx_constructor } else { // Replace the taken state for later error handling @@ -2193,9 +2198,33 @@ where // which must always come after the initial commitment signed is sent. .unwrap_or(true); let res = if has_negotiated_pending_splice && !session_received_commitment_signed { - funded_channel - .splice_initial_commitment_signed(msg, fee_estimator, logger) - .map(|monitor_update_opt| (None, monitor_update_opt)) + let has_holder_tx_signatures = funded_channel + .context + .interactive_tx_signing_session + .as_ref() + .map(|session| session.holder_tx_signatures().is_some()) + .unwrap_or(false); + + // We delay processing this until the user manually approves the splice via + // [`FundedChannel::funding_transaction_signed`], as otherwise, there would be a + // [`ChannelMonitorUpdateStep::RenegotiatedFunding`] committed that we would + // need to undo if they no longer wish to proceed. + if has_holder_tx_signatures { + funded_channel + .splice_initial_commitment_signed(msg, fee_estimator, logger) + .map(|monitor_update_opt| (None, monitor_update_opt)) + } else { + let pending_splice = funded_channel.pending_splice.as_mut() + .expect("We have a pending splice negotiated"); + let funding_negotiation = pending_splice.funding_negotiation.as_mut() + .expect("We have a pending splice negotiated"); + if let FundingNegotiation::AwaitingSignatures { + ref mut initial_commitment_signed_from_counterparty, .. + } = funding_negotiation { + *initial_commitment_signed_from_counterparty = Some(msg.clone()); + } + Ok((None, None)) + } } else { funded_channel.commitment_signed(msg, fee_estimator, logger) .map(|monitor_update_opt| (None, monitor_update_opt)) @@ -2679,6 +2708,17 @@ enum FundingNegotiation { AwaitingSignatures { funding: FundingScope, is_initiator: bool, + /// The initial [`msgs::CommitmentSigned`] message received for the [`FundingScope`] above. 
+ /// We delay processing this until the user manually approves the splice via + /// [`FundedChannel::funding_transaction_signed`], as otherwise, there would be a + /// [`ChannelMonitorUpdateStep::RenegotiatedFunding`] committed that we would need to undo + /// if they no longer wish to proceed. + /// + /// Note that this doesn't need to be done with dual-funded channels as there is no + /// equivalent monitor update for them, and we can just force close the channel. + /// + /// This field is not persisted as the message should be resent on reconnections. + initial_commitment_signed_from_counterparty: Option, }, } @@ -2686,6 +2726,7 @@ impl_writeable_tlv_based_enum_upgradable!(FundingNegotiation, (0, AwaitingSignatures) => { (1, funding, required), (3, is_initiator, required), + (_unused, initial_commitment_signed_from_counterparty, (static_value, None)), }, unread_variants: AwaitingAck, ConstructingTransaction ); @@ -6834,7 +6875,7 @@ type BestBlockUpdatedRes = ( ); /// The result of handling a `tx_complete` message during interactive transaction construction. -pub(crate) struct TxCompleteResult { +pub(super) struct TxCompleteResult { /// The message to send to the counterparty, if any. pub interactive_tx_msg_send: Option, @@ -6848,10 +6889,15 @@ pub(crate) struct TxCompleteResult { } /// The result of signing a funding transaction negotiated using the interactive-tx protocol. -pub struct FundingTxSigned { +pub(super) struct FundingTxSigned { /// The initial `commitment_signed` message to send to the counterparty, if necessary. pub commitment_signed: Option, + /// The result of processing a buffered initial commitment signed from our counterparty, + /// if any. + pub counterparty_initial_commitment_signed_result: + Option, ChannelError>>, + /// Signatures that should be sent to the counterparty, if necessary. pub tx_signatures: Option, @@ -9071,11 +9117,12 @@ where } } - pub fn funding_transaction_signed( + pub fn funding_transaction_signed( &mut self, funding_txid_signed: Txid, witnesses: Vec, best_block_height: u32, - logger: &L, + fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result where + F::Target: FeeEstimator, L::Target: Logger, { let signing_session = @@ -9096,6 +9143,7 @@ where // or we're waiting for our counterparty to send theirs first. return Ok(FundingTxSigned { commitment_signed: None, + counterparty_initial_commitment_signed_result: None, tx_signatures: None, funding_tx: None, splice_negotiated: None, @@ -9110,6 +9158,7 @@ where // no longer have the signing session present. return Ok(FundingTxSigned { commitment_signed: None, + counterparty_initial_commitment_signed_result: None, tx_signatures: None, funding_tx: None, splice_negotiated: None, @@ -9179,8 +9228,30 @@ where .unwrap_or(&self.funding); let commitment_signed = self.context.get_initial_commitment_signed_v2(funding, &&logger); + // If we have a pending splice with a buffered initial commitment_signed from our + // counterparty, process it now that we have provided our signatures. + let counterparty_initial_commitment_signed_result = self + .pending_splice + .as_mut() + .and_then(|pending_splice| pending_splice.funding_negotiation.as_mut()) + .and_then(|funding_negotiation| { + if let FundingNegotiation::AwaitingSignatures { + ref mut initial_commitment_signed_from_counterparty, + .. 
+ } = funding_negotiation + { + initial_commitment_signed_from_counterparty.take() + } else { + None + } + }) + .map(|commit_sig| { + self.splice_initial_commitment_signed(&commit_sig, fee_estimator, &&logger) + }); + Ok(FundingTxSigned { commitment_signed, + counterparty_initial_commitment_signed_result, tx_signatures, funding_tx, splice_negotiated, @@ -9254,6 +9325,7 @@ where Ok(FundingTxSigned { commitment_signed: None, + counterparty_initial_commitment_signed_result: None, tx_signatures: holder_tx_signatures, funding_tx, splice_negotiated, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 598e1d3b554..63e11d2ac2f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6424,118 +6424,164 @@ where pub fn funding_transaction_signed( &self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, transaction: Transaction, ) -> Result<(), APIError> { - let mut result = Ok(()); + let mut funding_tx_signed_result = Ok(()); + let mut monitor_update_result: Option< + Result, + > = None; + PersistenceNotifierGuard::optionally_notify(self, || { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if peer_state_mutex_opt.is_none() { - result = Err(APIError::ChannelUnavailable { + funding_tx_signed_result = Err(APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") }); return NotifyOption::SkipPersistNoEvents; } - let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.get_mut(channel_id) { - Some(channel) => match channel.as_funded_mut() { - Some(chan) => { - let txid = transaction.compute_txid(); - let witnesses: Vec<_> = transaction - .input - .into_iter() - .map(|input| input.witness) - .filter(|witness| !witness.is_empty()) - .collect(); - let best_block_height = self.best_block.read().unwrap().height; - match chan.funding_transaction_signed( - txid, - witnesses, - best_block_height, - &self.logger, - ) { - Ok(FundingTxSigned { - commitment_signed, - tx_signatures, - funding_tx, - splice_negotiated, - splice_locked, - }) => { - if let Some(funding_tx) = funding_tx { - self.broadcast_interactive_funding( - chan, - &funding_tx, - &self.logger, - ); - } - if let Some(splice_negotiated) = splice_negotiated { - self.pending_events.lock().unwrap().push_back(( - events::Event::SplicePending { - channel_id: *channel_id, - counterparty_node_id: *counterparty_node_id, - user_channel_id: chan.context.get_user_id(), - new_funding_txo: splice_negotiated.funding_txo, - channel_type: splice_negotiated.channel_type, - new_funding_redeem_script: splice_negotiated - .funding_redeem_script, - }, - None, - )); - } - if chan.context.is_connected() { - if let Some(commitment_signed) = commitment_signed { - peer_state.pending_msg_events.push( - MessageSendEvent::UpdateHTLCs { - node_id: *counterparty_node_id, - channel_id: *channel_id, - updates: CommitmentUpdate { - commitment_signed: vec![commitment_signed], - update_add_htlcs: vec![], - update_fulfill_htlcs: vec![], - update_fail_htlcs: vec![], - update_fail_malformed_htlcs: vec![], - update_fee: None, - }, - }, + match peer_state.channel_by_id.entry(*channel_id) { + hash_map::Entry::Occupied(mut chan_entry) => { + match chan_entry.get_mut().as_funded_mut() { + Some(chan) => { + 
let txid = transaction.compute_txid(); + let witnesses: Vec<_> = transaction + .input + .into_iter() + .map(|input| input.witness) + .filter(|witness| !witness.is_empty()) + .collect(); + let best_block_height = self.best_block.read().unwrap().height; + + match chan.funding_transaction_signed( + txid, + witnesses, + best_block_height, + &self.fee_estimator, + &self.logger, + ) { + Ok(FundingTxSigned { + commitment_signed, + counterparty_initial_commitment_signed_result, + tx_signatures, + funding_tx, + splice_negotiated, + splice_locked, + }) => { + if let Some(funding_tx) = funding_tx { + self.broadcast_interactive_funding( + chan, + &funding_tx, + &self.logger, ); } - if let Some(tx_signatures) = tx_signatures { - peer_state.pending_msg_events.push( - MessageSendEvent::SendTxSignatures { - node_id: *counterparty_node_id, - msg: tx_signatures, + if let Some(splice_negotiated) = splice_negotiated { + self.pending_events.lock().unwrap().push_back(( + events::Event::SplicePending { + channel_id: *channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: chan.context.get_user_id(), + new_funding_txo: splice_negotiated.funding_txo, + channel_type: splice_negotiated.channel_type, + new_funding_redeem_script: splice_negotiated + .funding_redeem_script, }, - ); + None, + )); } - if let Some(splice_locked) = splice_locked { - peer_state.pending_msg_events.push( - MessageSendEvent::SendSpliceLocked { - node_id: *counterparty_node_id, - msg: splice_locked, - }, - ); + + if chan.context.is_connected() { + if let Some(commitment_signed) = commitment_signed { + peer_state.pending_msg_events.push( + MessageSendEvent::UpdateHTLCs { + node_id: *counterparty_node_id, + channel_id: *channel_id, + updates: CommitmentUpdate { + commitment_signed: vec![commitment_signed], + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, + }, + }, + ); + } + if let Some(tx_signatures) = tx_signatures { + peer_state.pending_msg_events.push( + MessageSendEvent::SendTxSignatures { + node_id: *counterparty_node_id, + msg: tx_signatures, + }, + ); + } + if let Some(splice_locked) = splice_locked { + peer_state.pending_msg_events.push( + MessageSendEvent::SendSpliceLocked { + node_id: *counterparty_node_id, + msg: splice_locked, + }, + ); + } } - } - return NotifyOption::DoPersist; - }, - Err(err) => { - result = Err(err); - return NotifyOption::SkipPersistNoEvents; - }, - } - }, - None => { - result = Err(APIError::APIMisuseError { - err: format!( - "Channel with id {} not expecting funding signatures", - channel_id - ), - }); - return NotifyOption::SkipPersistNoEvents; - }, + + match counterparty_initial_commitment_signed_result { + Some(Ok(Some(monitor_update))) => { + let funding_txo = chan.funding.get_funding_txo(); + if let Some(post_update_data) = self + .handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo.unwrap(), + monitor_update, + ) { + monitor_update_result = Some(Ok(post_update_data)); + } + }, + Some(Err(err)) => { + let (drop, err) = self + .locked_handle_funded_force_close( + &mut peer_state + .closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + chan, + ); + if drop { + chan_entry.remove_entry(); + } + + monitor_update_result = Some(Err(err)); + }, + Some(Ok(None)) | None => {}, + } + + 
funding_tx_signed_result = Ok(()); + }, + Err(err) => { + funding_tx_signed_result = Err(err); + return NotifyOption::SkipPersistNoEvents; + }, + } + }, + None => { + funding_tx_signed_result = Err(APIError::APIMisuseError { + err: format!( + "Channel with id {} not expecting funding signatures", + channel_id + ), + }); + return NotifyOption::SkipPersistNoEvents; + }, + } }, - None => { - result = Err(APIError::ChannelUnavailable { + hash_map::Entry::Vacant(_) => { + funding_tx_signed_result = Err(APIError::ChannelUnavailable { err: format!( "Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id @@ -6544,9 +6590,25 @@ where return NotifyOption::SkipPersistNoEvents; }, } + + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + + if let Some(monitor_update_result) = monitor_update_result { + match monitor_update_result { + Ok(post_update_data) => { + self.handle_post_monitor_update_chan_resume(post_update_data); + }, + Err(_) => { + let _ = self.handle_error(monitor_update_result, *counterparty_node_id); + }, + } + } + + NotifyOption::DoPersist }); - result + funding_tx_signed_result } fn broadcast_interactive_funding( @@ -11142,7 +11204,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { let chan = chan_entry.get_mut(); - match chan.tx_complete(msg, &self.logger) { + match chan.tx_complete(msg, &self.fee_estimator, &self.logger) { Ok(tx_complete_result) => { let mut persist = NotifyOption::SkipPersistNoEvents; @@ -11169,6 +11231,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(FundingTxSigned { commitment_signed, + counterparty_initial_commitment_signed_result, tx_signatures, funding_tx, splice_negotiated, @@ -11176,10 +11239,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }) = tx_complete_result.funding_tx_signed { // We shouldn't expect to see the splice negotiated or locked yet as we - // haven't exchanged `tx_signatures` at this point. + // haven't exchanged `tx_signatures` at this point. Similarly, we + // shouldn't have a result for the counterparty's initial commitment + // signed as they haven't sent it yet. debug_assert!(funding_tx.is_none()); debug_assert!(splice_negotiated.is_none()); debug_assert!(splice_locked.is_none()); + debug_assert!(counterparty_initial_commitment_signed_result.is_none()); if let Some(commitment_signed) = commitment_signed { peer_state.pending_msg_events.push(MessageSendEvent::UpdateHTLCs { @@ -11251,6 +11317,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let best_block_height = self.best_block.read().unwrap().height; let FundingTxSigned { commitment_signed, + counterparty_initial_commitment_signed_result, tx_signatures, funding_tx, splice_negotiated, @@ -11265,6 +11332,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // We should never be sending a `commitment_signed` in response to their // `tx_signatures`. 
debug_assert!(commitment_signed.is_none()); + debug_assert!(counterparty_initial_commitment_signed_result.is_none()); if let Some(tx_signatures) = tx_signatures { peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures { diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index c0ba401cfae..ef524db6be3 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -29,6 +29,7 @@ use crate::util::errors::APIError; use crate::util::ser::Writeable; use bitcoin::hashes::Hash; +use bitcoin::secp256k1::ecdsa::Signature; use bitcoin::secp256k1::PublicKey; use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut, WPubkeyHash}; @@ -2221,3 +2222,233 @@ fn test_splice_with_inflight_htlc_forward_and_resolution() { do_test_splice_with_inflight_htlc_forward_and_resolution(true); do_test_splice_with_inflight_htlc_forward_and_resolution(false); } + +#[test] +fn test_splice_buffer_commitment_signed_until_funding_tx_signed() { + // Test that when the counterparty sends their initial `commitment_signed` before the user has + // called `funding_transaction_signed`, we buffer the message and process it at the end of + // `funding_transaction_signed`. This allows the user to cancel the splice negotiation if + // desired without having queued an irreversible monitor update. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_id_0 = nodes[0].node.get_our_node_id(); + let node_id_1 = nodes[1].node.get_our_node_id(); + + let initial_channel_value_sat = 100_000; + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_value_sat, 0); + + // Negotiate a splice-out where only the initiator (node 0) has a contribution. + // This means node 1 will send their commitment_signed immediately after tx_complete. + let initiator_contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); + negotiate_splice_tx(&nodes[0], &nodes[1], channel_id, initiator_contribution); + + // Node 0 (initiator with contribution) should have a signing event to handle. + let signing_event = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); + + // Node 1 (acceptor with no contribution) won't have a signing event and will immediately + // send their initial commitment_signed. + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let acceptor_commit_sig = get_htlc_update_msgs(&nodes[1], &node_id_0); + + // Deliver the acceptor's commitment_signed to the initiator BEFORE the initiator has called + // funding_transaction_signed. The message should be buffered, not processed. + nodes[0].node.handle_commitment_signed(node_id_1, &acceptor_commit_sig.commitment_signed[0]); + + // No monitor update should have happened since the message is buffered. + check_added_monitors(&nodes[0], 0); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + // Now handle the signing event and call `funding_transaction_signed`. + if let Event::FundingTransactionReadyForSigning { + channel_id: event_channel_id, + counterparty_node_id, + unsigned_transaction, + .. 
+ } = signing_event + { + assert_eq!(event_channel_id, channel_id); + assert_eq!(counterparty_node_id, node_id_1); + + let partially_signed_tx = nodes[0].wallet_source.sign_tx(unsigned_transaction).unwrap(); + nodes[0] + .node + .funding_transaction_signed(&channel_id, &node_id_1, partially_signed_tx) + .unwrap(); + } else { + panic!("Expected FundingTransactionReadyForSigning event"); + } + + // After funding_transaction_signed: + // 1. The initiator should send their commitment_signed + // 2. The buffered commitment_signed from the acceptor should be processed (monitor update) + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + let initiator_commit_sig = + if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = &msg_events[0] { + updates.commitment_signed[0].clone() + } else { + panic!("Expected UpdateHTLCs message"); + }; + + // The buffered commitment_signed should have been processed, resulting in a monitor update. + check_added_monitors(&nodes[0], 1); + + // Complete the rest of the flow normally. + nodes[1].node.handle_commitment_signed(node_id_0, &initiator_commit_sig); + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::SendTxSignatures { ref msg, .. } = &msg_events[0] { + nodes[0].node.handle_tx_signatures(node_id_1, msg); + } else { + panic!("Expected SendTxSignatures message"); + } + check_added_monitors(&nodes[1], 1); + + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::SendTxSignatures { ref msg, .. } = &msg_events[0] { + nodes[1].node.handle_tx_signatures(node_id_0, msg); + } else { + panic!("Expected SendTxSignatures message"); + } + + expect_splice_pending_event(&nodes[0], &node_id_1); + expect_splice_pending_event(&nodes[1], &node_id_0); + + // Both nodes should broadcast the splice transaction. + let splice_tx = { + let mut txn_0 = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn_0.len(), 1); + let txn_1 = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(txn_0, txn_1); + txn_0.remove(0) + }; + + // Verify the channel is operational by sending a payment. + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); + + // Lock the splice by confirming the transaction. + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); + + // Verify the channel is still operational by sending another payment. + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); +} + +#[test] +fn test_splice_buffer_invalid_commitment_signed_closes_channel() { + // Test that when the counterparty sends an invalid `commitment_signed` (with a bad signature) + // before the user has called `funding_transaction_signed`, the channel is closed with an error + // when `ChannelManager::funding_transaction_signed` processes the buffered message. 
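+	//
+	// The corruption below flips a single bit of `r` in the 64-byte compact encoding: the
+	// result still parses via `Signature::from_compact`, but it no longer verifies, so the
+	// failure only surfaces once the buffered message is actually processed.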
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_id_0 = nodes[0].node.get_our_node_id();
+	let node_id_1 = nodes[1].node.get_our_node_id();
+
+	let initial_channel_value_sat = 100_000;
+	let (_, _, channel_id, _) =
+		create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_value_sat, 0);
+
+	// Negotiate a splice-out where only the initiator (node 0) has a contribution.
+	// This means node 1 will send their commitment_signed immediately after tx_complete.
+	let initiator_contribution = SpliceContribution::splice_out(vec![TxOut {
+		value: Amount::from_sat(1_000),
+		script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(),
+	}]);
+	negotiate_splice_tx(&nodes[0], &nodes[1], channel_id, initiator_contribution);
+
+	// Node 0 (initiator with contribution) should have a signing event to handle.
+	let signing_event = get_event!(nodes[0], Event::FundingTransactionReadyForSigning);
+
+	// Node 1 (acceptor with no contribution) won't have a signing event and will immediately
+	// send their initial commitment_signed.
+	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	let mut acceptor_commit_sig = get_htlc_update_msgs(&nodes[1], &node_id_0);
+
+	// Invalidate the signature by modifying one byte. This will cause signature verification
+	// to fail when the buffered message is processed.
+	let original_sig = acceptor_commit_sig.commitment_signed[0].signature;
+	let mut sig_bytes = original_sig.serialize_compact();
+	sig_bytes[0] ^= 0x01; // Flip a bit to corrupt the signature
+	acceptor_commit_sig.commitment_signed[0].signature =
+		Signature::from_compact(&sig_bytes).unwrap();
+
+	// Deliver the acceptor's invalid commitment_signed to the initiator BEFORE the initiator has
+	// called funding_transaction_signed. The message should be buffered, not processed.
+	nodes[0].node.handle_commitment_signed(node_id_1, &acceptor_commit_sig.commitment_signed[0]);
+
+	// No monitor update should have happened since the message is buffered.
+	check_added_monitors(&nodes[0], 0);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Now handle the signing event and call `funding_transaction_signed`.
+	// This should process the buffered invalid commitment_signed and close the channel.
+	if let Event::FundingTransactionReadyForSigning {
+		channel_id: event_channel_id,
+		counterparty_node_id,
+		unsigned_transaction,
+		..
+	} = signing_event
+	{
+		assert_eq!(event_channel_id, channel_id);
+		assert_eq!(counterparty_node_id, node_id_1);
+
+		let partially_signed_tx = nodes[0].wallet_source.sign_tx(unsigned_transaction).unwrap();
+		nodes[0]
+			.node
+			.funding_transaction_signed(&channel_id, &node_id_1, partially_signed_tx)
+			.unwrap();
+	} else {
+		panic!("Expected FundingTransactionReadyForSigning event");
+	}
+
+	// After funding_transaction_signed:
+	// 1. The initiator sends its commitment_signed (UpdateHTLCs message).
+	// 2. The buffered invalid commitment_signed from the acceptor is processed, causing the
+	//    channel to close due to the invalid signature.
+	// We expect 3 message events: UpdateHTLCs, HandleError, and BroadcastChannelUpdate.
+ let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 3, "{msg_events:?}"); + match &msg_events[0] { + MessageSendEvent::UpdateHTLCs { ref updates, .. } => { + assert!(!updates.commitment_signed.is_empty()); + }, + _ => panic!("Expected UpdateHTLCs message, got {:?}", msg_events[0]), + } + match &msg_events[1] { + MessageSendEvent::HandleError { + action: msgs::ErrorAction::SendErrorMessage { ref msg }, + .. + } => { + assert!(msg.data.contains("Invalid commitment tx signature from peer")); + }, + _ => panic!("Expected HandleError with SendErrorMessage, got {:?}", msg_events[1]), + } + match &msg_events[2] { + MessageSendEvent::BroadcastChannelUpdate { ref msg, .. } => { + assert_eq!(msg.contents.channel_flags & 2, 2); + }, + _ => panic!("Expected BroadcastChannelUpdate, got {:?}", msg_events[2]), + } + + let err = "Invalid commitment tx signature from peer".to_owned(); + let reason = ClosureReason::ProcessingError { err }; + check_closed_events( + &nodes[0], + &[ExpectedCloseEvent::from_id_reason(channel_id, false, reason)], + ); + check_added_monitors(&nodes[0], 1); +} From be67c67f528b6ff175323a752673792377e26ad5 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Thu, 22 Jan 2026 11:52:42 -0800 Subject: [PATCH 135/242] Support funding_transaction_signed for unfunded dual-funded channels Now that we require users to first call `ChannelManager::funding_transaction_signed` before releasing any signatures, it's possible that it is called before we receive the initial commitment signed from our counterparty, which would transition the channel to funded. Because of this, we need to support the API call while the channel is still in the unfunded phase. Note that this commit is mostly a code move of `FundedChannel::funding_transaction_signed` to `Channel::funding_transaction_signed` that doesn't alter the signing logic. --- lightning/src/ln/channel.rs | 335 ++++++++++++++++------------- lightning/src/ln/channelmanager.rs | 241 ++++++++++----------- 2 files changed, 302 insertions(+), 274 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index de42f413cb3..65a627f6282 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1954,11 +1954,18 @@ where let funding_tx_signed = if !has_local_contribution { let funding_txid = signing_session.unsigned_tx().tx().compute_txid(); - if let ChannelPhase::Funded(chan) = &mut self.phase { - chan.funding_transaction_signed(funding_txid, vec![], 0, fee_estimator, logger).ok() - } else { - None - } + self.funding_transaction_signed(funding_txid, vec![], 0, fee_estimator, logger) + .map(Some) + .map_err(|err| { + log_error!( + logger, + "Failed signing funding transaction without local contribution: {err:?}" + ); + self.fail_interactive_tx_negotiation( + AbortReason::InternalError("Signing failed"), + logger, + ) + })? 
 		} else {
 			None
 		};
@@ -2137,6 +2144,178 @@ where
 		Ok(())
 	}
 
+	pub fn funding_transaction_signed<F: Deref, L: Deref>(
+		&mut self, funding_txid_signed: Txid, witnesses: Vec<Witness>, best_block_height: u32,
+		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
+	) -> Result<FundingTxSigned, APIError>
+	where
+		F::Target: FeeEstimator,
+		L::Target: Logger,
+	{
+		let (context, funding, pending_splice) = match &mut self.phase {
+			ChannelPhase::Undefined => unreachable!(),
+			ChannelPhase::UnfundedV2(channel) => (&mut channel.context, &channel.funding, None),
+			ChannelPhase::Funded(channel) => {
+				(&mut channel.context, &channel.funding, channel.pending_splice.as_ref())
+			},
+			_ => {
+				return Err(APIError::APIMisuseError {
+					err: format!(
+						"Channel with id {} not expecting funding signatures",
+						self.context().channel_id
+					),
+				});
+			},
+		};
+
+		let signing_session = if let Some(signing_session) =
+			context.interactive_tx_signing_session.as_mut()
+		{
+			if let Some(pending_splice) = pending_splice.as_ref() {
+				debug_assert!(pending_splice
+					.funding_negotiation
+					.as_ref()
+					.map(|funding_negotiation| matches!(
+						funding_negotiation,
+						FundingNegotiation::AwaitingSignatures { .. }
+					))
+					.unwrap_or(false));
+			}
+
+			if signing_session.holder_tx_signatures().is_some() {
+				// Our `tx_signatures` should've either been sent the first time we processed
+				// them, or we're waiting for our counterparty to send theirs first.
+				return Ok(FundingTxSigned {
+					commitment_signed: None,
+					counterparty_initial_commitment_signed_result: None,
+					tx_signatures: None,
+					funding_tx: None,
+					splice_negotiated: None,
+					splice_locked: None,
+				});
+			}
+
+			signing_session
+		} else {
+			if Some(funding_txid_signed) == funding.get_funding_txid() {
+				// We may be handling a duplicate call and the funding was already locked so we
+				// no longer have the signing session present.
+ return Ok(FundingTxSigned { + commitment_signed: None, + counterparty_initial_commitment_signed_result: None, + tx_signatures: None, + funding_tx: None, + splice_negotiated: None, + splice_locked: None, + }); + } + let err = format!("Channel {} not expecting funding signatures", context.channel_id); + return Err(APIError::APIMisuseError { err }); + }; + + let tx = signing_session.unsigned_tx().tx(); + if funding_txid_signed != tx.compute_txid() { + return Err(APIError::APIMisuseError { + err: "Transaction was malleated prior to signing".to_owned(), + }); + } + + let shared_input_signature = + if let Some(splice_input_index) = signing_session.unsigned_tx().shared_input_index() { + let sig = match &context.holder_signer { + ChannelSignerType::Ecdsa(signer) => signer.sign_splice_shared_input( + &funding.channel_transaction_parameters, + tx, + splice_input_index as usize, + &context.secp_ctx, + ), + #[cfg(taproot)] + ChannelSignerType::Taproot(_) => todo!(), + }; + Some(sig) + } else { + None + }; + debug_assert_eq!(pending_splice.is_some(), shared_input_signature.is_some()); + + let tx_signatures = msgs::TxSignatures { + channel_id: context.channel_id, + tx_hash: funding_txid_signed, + witnesses, + shared_input_signature, + }; + let (tx_signatures, funding_tx) = signing_session + .provide_holder_witnesses(tx_signatures, &context.secp_ctx) + .map_err(|err| APIError::APIMisuseError { err })?; + + let logger = WithChannelContext::from(logger, &context, None); + if tx_signatures.is_some() { + log_info!( + logger, + "Sending tx_signatures for interactive funding transaction {funding_txid_signed}" + ); + } + + let funding = pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.funding_negotiation.as_ref()) + .and_then(|funding_negotiation| funding_negotiation.as_funding()) + .unwrap_or(funding); + let commitment_signed = context.get_initial_commitment_signed_v2(funding, &&logger); + + // For zero conf channels, we don't expect the funding transaction to be ready for broadcast + // yet as, according to the spec, our counterparty shouldn't have sent their `tx_signatures` + // without us having sent our initial commitment signed to them first. However, in the event + // they do, we choose to handle it anyway. Note that because of this behavior not being + // spec-compliant, we're not able to test this without custom logic. + let (splice_negotiated, splice_locked) = if let Some(funding_tx) = funding_tx.clone() { + debug_assert!(tx_signatures.is_some()); + let funded_channel = self.as_funded_mut().expect( + "Funding transactions ready for broadcast can only exist for funded channels", + ); + funded_channel.on_tx_signatures_exchange(funding_tx, best_block_height, &logger) + } else { + (None, None) + }; + + // If we have a pending splice with a buffered initial commitment signed from our + // counterparty, process it now that we have provided our signatures. + let counterparty_initial_commitment_signed_result = + self.as_funded_mut().and_then(|funded_channel| { + funded_channel + .pending_splice + .as_mut() + .and_then(|pending_splice| pending_splice.funding_negotiation.as_mut()) + .and_then(|funding_negotiation| { + if let FundingNegotiation::AwaitingSignatures { + ref mut initial_commitment_signed_from_counterparty, + .. 
+ } = funding_negotiation + { + initial_commitment_signed_from_counterparty.take() + } else { + None + } + }) + .map(|commit_sig| { + funded_channel.splice_initial_commitment_signed( + &commit_sig, + fee_estimator, + &&logger, + ) + }) + }); + + Ok(FundingTxSigned { + commitment_signed, + counterparty_initial_commitment_signed_result, + tx_signatures, + funding_tx, + splice_negotiated, + splice_locked, + }) + } + pub fn force_shutdown(&mut self, closure_reason: ClosureReason) -> ShutdownResult { let (funding, context) = self.funding_and_context_mut(); context.force_shutdown(funding, closure_reason) @@ -2206,7 +2385,7 @@ where .unwrap_or(false); // We delay processing this until the user manually approves the splice via - // [`FundedChannel::funding_transaction_signed`], as otherwise, there would be a + // [`Channel::funding_transaction_signed`], as otherwise, there would be a // [`ChannelMonitorUpdateStep::RenegotiatedFunding`] committed that we would // need to undo if they no longer wish to proceed. if has_holder_tx_signatures { @@ -2710,7 +2889,7 @@ enum FundingNegotiation { is_initiator: bool, /// The initial [`msgs::CommitmentSigned`] message received for the [`FundingScope`] above. /// We delay processing this until the user manually approves the splice via - /// [`FundedChannel::funding_transaction_signed`], as otherwise, there would be a + /// [`Channel::funding_transaction_signed`], as otherwise, there would be a /// [`ChannelMonitorUpdateStep::RenegotiatedFunding`] committed that we would need to undo /// if they no longer wish to proceed. /// @@ -9117,148 +9296,6 @@ where } } - pub fn funding_transaction_signed( - &mut self, funding_txid_signed: Txid, witnesses: Vec, best_block_height: u32, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result - where - F::Target: FeeEstimator, - L::Target: Logger, - { - let signing_session = - if let Some(signing_session) = self.context.interactive_tx_signing_session.as_mut() { - if let Some(pending_splice) = self.pending_splice.as_ref() { - debug_assert!(pending_splice - .funding_negotiation - .as_ref() - .map(|funding_negotiation| matches!( - funding_negotiation, - FundingNegotiation::AwaitingSignatures { .. } - )) - .unwrap_or(false)); - } - - if signing_session.holder_tx_signatures().is_some() { - // Our `tx_signatures` either should've been the first time we processed them, - // or we're waiting for our counterparty to send theirs first. - return Ok(FundingTxSigned { - commitment_signed: None, - counterparty_initial_commitment_signed_result: None, - tx_signatures: None, - funding_tx: None, - splice_negotiated: None, - splice_locked: None, - }); - } - - signing_session - } else { - if Some(funding_txid_signed) == self.funding.get_funding_txid() { - // We may be handling a duplicate call and the funding was already locked so we - // no longer have the signing session present. 
- return Ok(FundingTxSigned { - commitment_signed: None, - counterparty_initial_commitment_signed_result: None, - tx_signatures: None, - funding_tx: None, - splice_negotiated: None, - splice_locked: None, - }); - } - let err = - format!("Channel {} not expecting funding signatures", self.context.channel_id); - return Err(APIError::APIMisuseError { err }); - }; - - let tx = signing_session.unsigned_tx().tx(); - if funding_txid_signed != tx.compute_txid() { - return Err(APIError::APIMisuseError { - err: "Transaction was malleated prior to signing".to_owned(), - }); - } - - let shared_input_signature = - if let Some(splice_input_index) = signing_session.unsigned_tx().shared_input_index() { - let sig = match &self.context.holder_signer { - ChannelSignerType::Ecdsa(signer) => signer.sign_splice_shared_input( - &self.funding.channel_transaction_parameters, - tx, - splice_input_index as usize, - &self.context.secp_ctx, - ), - #[cfg(taproot)] - ChannelSignerType::Taproot(_) => todo!(), - }; - Some(sig) - } else { - None - }; - debug_assert_eq!(self.pending_splice.is_some(), shared_input_signature.is_some()); - - let tx_signatures = msgs::TxSignatures { - channel_id: self.context.channel_id, - tx_hash: funding_txid_signed, - witnesses, - shared_input_signature, - }; - let (tx_signatures, funding_tx) = signing_session - .provide_holder_witnesses(tx_signatures, &self.context.secp_ctx) - .map_err(|err| APIError::APIMisuseError { err })?; - - let logger = WithChannelContext::from(logger, &self.context, None); - if tx_signatures.is_some() { - log_info!( - logger, - "Sending tx_signatures for interactive funding transaction {funding_txid_signed}" - ); - } - - let (splice_negotiated, splice_locked) = if let Some(funding_tx) = funding_tx.clone() { - debug_assert!(tx_signatures.is_some()); - self.on_tx_signatures_exchange(funding_tx, best_block_height, &logger) - } else { - (None, None) - }; - - let funding = self - .pending_splice - .as_ref() - .and_then(|pending_splice| pending_splice.funding_negotiation.as_ref()) - .and_then(|funding_negotiation| funding_negotiation.as_funding()) - .unwrap_or(&self.funding); - let commitment_signed = self.context.get_initial_commitment_signed_v2(funding, &&logger); - - // If we have a pending splice with a buffered initial commitment_signed from our - // counterparty, process it now that we have provided our signatures. - let counterparty_initial_commitment_signed_result = self - .pending_splice - .as_mut() - .and_then(|pending_splice| pending_splice.funding_negotiation.as_mut()) - .and_then(|funding_negotiation| { - if let FundingNegotiation::AwaitingSignatures { - ref mut initial_commitment_signed_from_counterparty, - .. 
- } = funding_negotiation - { - initial_commitment_signed_from_counterparty.take() - } else { - None - } - }) - .map(|commit_sig| { - self.splice_initial_commitment_signed(&commit_sig, fee_estimator, &&logger) - }); - - Ok(FundingTxSigned { - commitment_signed, - counterparty_initial_commitment_signed_result, - tx_signatures, - funding_tx, - splice_negotiated, - splice_locked, - }) - } - pub fn tx_signatures( &mut self, msg: &msgs::TxSignatures, best_block_height: u32, logger: &L, ) -> Result diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 63e11d2ac2f..23e94e76f1c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6444,138 +6444,129 @@ where match peer_state.channel_by_id.entry(*channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { - match chan_entry.get_mut().as_funded_mut() { - Some(chan) => { - let txid = transaction.compute_txid(); - let witnesses: Vec<_> = transaction - .input - .into_iter() - .map(|input| input.witness) - .filter(|witness| !witness.is_empty()) - .collect(); - let best_block_height = self.best_block.read().unwrap().height; - - match chan.funding_transaction_signed( - txid, - witnesses, - best_block_height, - &self.fee_estimator, - &self.logger, - ) { - Ok(FundingTxSigned { - commitment_signed, - counterparty_initial_commitment_signed_result, - tx_signatures, - funding_tx, - splice_negotiated, - splice_locked, - }) => { - if let Some(funding_tx) = funding_tx { - self.broadcast_interactive_funding( - chan, - &funding_tx, - &self.logger, - ); - } - if let Some(splice_negotiated) = splice_negotiated { - self.pending_events.lock().unwrap().push_back(( - events::Event::SplicePending { - channel_id: *channel_id, - counterparty_node_id: *counterparty_node_id, - user_channel_id: chan.context.get_user_id(), - new_funding_txo: splice_negotiated.funding_txo, - channel_type: splice_negotiated.channel_type, - new_funding_redeem_script: splice_negotiated - .funding_redeem_script, + let txid = transaction.compute_txid(); + let witnesses: Vec<_> = transaction + .input + .into_iter() + .map(|input| input.witness) + .filter(|witness| !witness.is_empty()) + .collect(); + let best_block_height = self.best_block.read().unwrap().height; + + let chan = chan_entry.get_mut(); + match chan.funding_transaction_signed( + txid, + witnesses, + best_block_height, + &self.fee_estimator, + &self.logger, + ) { + Ok(FundingTxSigned { + commitment_signed, + counterparty_initial_commitment_signed_result, + tx_signatures, + funding_tx, + splice_negotiated, + splice_locked, + }) => { + if let Some(funding_tx) = funding_tx { + let funded_chan = chan.as_funded_mut().expect( + "Funding transactions ready for broadcast can only exist for funded channels", + ); + self.broadcast_interactive_funding( + funded_chan, + &funding_tx, + &self.logger, + ); + } + if let Some(splice_negotiated) = splice_negotiated { + self.pending_events.lock().unwrap().push_back(( + events::Event::SplicePending { + channel_id: *channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: chan.context().get_user_id(), + new_funding_txo: splice_negotiated.funding_txo, + channel_type: splice_negotiated.channel_type, + new_funding_redeem_script: splice_negotiated + .funding_redeem_script, + }, + None, + )); + } + + if chan.context().is_connected() { + if let Some(commitment_signed) = commitment_signed { + peer_state.pending_msg_events.push( + MessageSendEvent::UpdateHTLCs { + node_id: *counterparty_node_id, + channel_id: *channel_id, 
+ updates: CommitmentUpdate { + commitment_signed: vec![commitment_signed], + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, }, - None, - )); - } + }, + ); + } + if let Some(tx_signatures) = tx_signatures { + peer_state.pending_msg_events.push( + MessageSendEvent::SendTxSignatures { + node_id: *counterparty_node_id, + msg: tx_signatures, + }, + ); + } + if let Some(splice_locked) = splice_locked { + peer_state.pending_msg_events.push( + MessageSendEvent::SendSpliceLocked { + node_id: *counterparty_node_id, + msg: splice_locked, + }, + ); + } + } - if chan.context.is_connected() { - if let Some(commitment_signed) = commitment_signed { - peer_state.pending_msg_events.push( - MessageSendEvent::UpdateHTLCs { - node_id: *counterparty_node_id, - channel_id: *channel_id, - updates: CommitmentUpdate { - commitment_signed: vec![commitment_signed], - update_add_htlcs: vec![], - update_fulfill_htlcs: vec![], - update_fail_htlcs: vec![], - update_fail_malformed_htlcs: vec![], - update_fee: None, - }, - }, - ); - } - if let Some(tx_signatures) = tx_signatures { - peer_state.pending_msg_events.push( - MessageSendEvent::SendTxSignatures { - node_id: *counterparty_node_id, - msg: tx_signatures, - }, - ); + if let Some(funded_chan) = chan.as_funded_mut() { + match counterparty_initial_commitment_signed_result { + Some(Ok(Some(monitor_update))) => { + let funding_txo = funded_chan.funding.get_funding_txo(); + if let Some(post_update_data) = self + .handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + funded_chan, + funding_txo.unwrap(), + monitor_update, + ) { + monitor_update_result = Some(Ok(post_update_data)); } - if let Some(splice_locked) = splice_locked { - peer_state.pending_msg_events.push( - MessageSendEvent::SendSpliceLocked { - node_id: *counterparty_node_id, - msg: splice_locked, - }, - ); + }, + Some(Err(err)) => { + let (drop, err) = self.locked_handle_funded_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + funded_chan, + ); + if drop { + chan_entry.remove_entry(); } - } - match counterparty_initial_commitment_signed_result { - Some(Ok(Some(monitor_update))) => { - let funding_txo = chan.funding.get_funding_txo(); - if let Some(post_update_data) = self - .handle_new_monitor_update( - &mut peer_state.in_flight_monitor_updates, - &mut peer_state.monitor_update_blocked_actions, - &mut peer_state.pending_msg_events, - peer_state.is_connected, - chan, - funding_txo.unwrap(), - monitor_update, - ) { - monitor_update_result = Some(Ok(post_update_data)); - } - }, - Some(Err(err)) => { - let (drop, err) = self - .locked_handle_funded_force_close( - &mut peer_state - .closed_channel_monitor_update_ids, - &mut peer_state.in_flight_monitor_updates, - err, - chan, - ); - if drop { - chan_entry.remove_entry(); - } - - monitor_update_result = Some(Err(err)); - }, - Some(Ok(None)) | None => {}, - } - - funding_tx_signed_result = Ok(()); - }, - Err(err) => { - funding_tx_signed_result = Err(err); - return NotifyOption::SkipPersistNoEvents; - }, + monitor_update_result = Some(Err(err)); + }, + Some(Ok(None)) | None => {}, + } } + + funding_tx_signed_result = Ok(()); }, - None => { - funding_tx_signed_result = Err(APIError::APIMisuseError { - err: format!( - "Channel with id {} not expecting funding signatures", 
- channel_id - ), - }); + Err(err) => { + funding_tx_signed_result = Err(err); return NotifyOption::SkipPersistNoEvents; }, } From b52b0692382958102979b33075bac01f8d1f37e1 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Thu, 29 Jan 2026 14:01:02 -0800 Subject: [PATCH 136/242] Extend full_stack fuzz test to cover splicing This includes a new seed that covers the splice flow to completion (exchanging `splice_locked` and promoting the `FundingScope`). Co-Authored-By: Claude Opus 4.5 --- fuzz/src/full_stack.rs | 365 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 363 insertions(+), 2 deletions(-) diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index 722db37bbd6..e73db74fa5d 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -21,7 +21,7 @@ use bitcoin::network::Network; use bitcoin::opcodes; use bitcoin::script::{Builder, ScriptBuf}; use bitcoin::transaction::Version; -use bitcoin::transaction::{Transaction, TxOut}; +use bitcoin::transaction::{Transaction, TxIn, TxOut}; use bitcoin::hash_types::{BlockHash, Txid}; use bitcoin::hashes::sha256::Hash as Sha256; @@ -30,6 +30,8 @@ use bitcoin::hashes::Hash as _; use bitcoin::hex::FromHex; use bitcoin::WPubkeyHash; +use lightning::ln::funding::{FundingTxInput, SpliceContribution}; + use lightning::blinded_path::message::{BlindedMessagePath, MessageContext, MessageForwardNode}; use lightning::blinded_path::payment::{BlindedPaymentPath, ReceiveTlvs}; use lightning::chain; @@ -37,6 +39,7 @@ use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, use lightning::chain::chainmonitor; use lightning::chain::transaction::OutPoint; use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen}; +use lightning::events::bump_transaction::sync::WalletSourceSync; use lightning::events::Event; use lightning::ln::channel_state::ChannelDetails; use lightning::ln::channelmanager::{ @@ -62,11 +65,11 @@ use lightning::sign::{ }; use lightning::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::util::config::{ChannelConfig, UserConfig}; -use lightning::util::errors::APIError; use lightning::util::hash_tables::*; use lightning::util::logger::Logger; use lightning::util::ser::{Readable, Writeable}; use lightning::util::test_channel_signer::{EnforcementState, TestChannelSigner}; +use lightning::util::test_utils::TestWalletSource; use lightning_invoice::RawBolt11Invoice; @@ -648,6 +651,26 @@ pub fn do_test(mut data: &[u8], logger: &Arc) { let mut pending_funding_generation: Vec<(ChannelId, PublicKey, u64, ScriptBuf)> = Vec::new(); let mut pending_funding_signatures = new_hash_map(); + // Set up a wallet with a coinbase transaction for splice funding + let wallet_secret = SecretKey::from_slice(&[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 2, + ]) + .unwrap(); + let wallet = TestWalletSource::new(wallet_secret); + let coinbase_tx = Transaction { + version: Version::TWO, + lock_time: LockTime::ZERO, + input: vec![TxIn { ..Default::default() }], + output: vec![TxOut { + value: Amount::from_sat(1_000_000), + script_pubkey: wallet.get_change_script().unwrap(), + }], + }; + let coinbase_txid = coinbase_tx.compute_txid(); + wallet + .add_utxo(bitcoin::OutPoint { txid: coinbase_txid, vout: 0 }, Amount::from_sat(1_000_000)); + loop { match get_slice!(1)[0] { 0 => { @@ -985,6 +1008,71 @@ pub fn do_test(mut data: &[u8], logger: &Arc) { rng_output.copy_from_slice(&get_slice!(32)); *keys_manager.rng_output.borrow_mut() = 
rng_output; }, + // Splice-in: add funds to a channel + 50 => { + let mut channels = channelmanager.list_channels(); + let channel_id_idx = get_slice!(1)[0] as usize; + if channel_id_idx >= channels.len() { + return; + } + channels.sort_by(|a, b| a.channel_id.cmp(&b.channel_id)); + let chan = &channels[channel_id_idx]; + // Only splice funded channels + if chan.funding_txo.is_none() { + continue; + } + let splice_in_sats = slice_to_be24(get_slice!(3)) as u64; + if splice_in_sats == 0 { + continue; + } + // Create a funding input from the coinbase transaction + if let Ok(input) = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0) { + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_sats.min(900_000)), // Cap at available funds minus fees + vec![input], + Some(wallet.get_change_script().unwrap()), + ); + let _ = channelmanager.splice_channel( + &chan.channel_id, + &chan.counterparty.node_id, + contribution, + 253, // funding_feerate_per_kw + None, + ); + } + }, + // Splice-out: remove funds from a channel + 51 => { + let mut channels = channelmanager.list_channels(); + let channel_id_idx = get_slice!(1)[0] as usize; + if channel_id_idx >= channels.len() { + return; + } + channels.sort_by(|a, b| a.channel_id.cmp(&b.channel_id)); + let chan = &channels[channel_id_idx]; + // Only splice funded channels with sufficient capacity + if chan.funding_txo.is_none() || chan.channel_value_satoshis < 20_000 { + continue; + } + let splice_out_sats = slice_to_be24(get_slice!(3)) as u64; + if splice_out_sats == 0 { + continue; + } + // Cap splice-out at a reasonable portion of channel capacity + let max_splice_out = chan.channel_value_satoshis / 4; + let splice_out_sats = splice_out_sats.min(max_splice_out).max(546); // At least dust limit + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(splice_out_sats), + script_pubkey: wallet.get_change_script().unwrap(), + }]); + let _ = channelmanager.splice_channel( + &chan.channel_id, + &chan.counterparty.node_id, + contribution, + 253, // funding_feerate_per_kw + None, + ); + }, _ => return, } loss_detector.handler.process_events(); @@ -1013,6 +1101,26 @@ pub fn do_test(mut data: &[u8], logger: &Arc) { intercepted_htlcs.push(intercept_id); } }, + Event::FundingTransactionReadyForSigning { + channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } => { + // Sign the funding transaction and provide it back to the channel manager + let signed_tx = wallet.sign_tx(unsigned_transaction).unwrap(); + let _ = channelmanager.funding_transaction_signed( + &channel_id, + &counterparty_node_id, + signed_tx, + ); + }, + Event::SplicePending { .. } => { + // Splice negotiation completed, waiting for confirmation + }, + Event::SpliceFailed { .. } => { + // Splice failed, inputs can be re-spent + }, _ => {}, } } @@ -1578,6 +1686,221 @@ fn gossip_exchange_seed() -> Vec { test } +fn splice_seed() -> Vec { + // This seed sets up a channel between two peers and attempts a splice-in operation that becomes + // locked. 
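+	//
+	// Byte-stream refresher (matching the `do_test` dispatch above): each chunk consumed via
+	// `get_slice!` starts with a one-byte opcode: 0x00 opens an outbound connection, 0x03 is an
+	// inbound read of the given length, 0x0c connects a block carrying an optional transaction,
+	// and the new opcodes 50/51 (0x32/0x33) drive splice-in/splice-out.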
+ let mut test = Vec::new(); + + // our network key + ext_from_hex("0100000000000000000000000000000000000000000000000000000000000000", &mut test); + // config + ext_from_hex("000000000090000000000000000064000100000000000100ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff000100000000000000", &mut test); + + // new outbound connection with id 0 + ext_from_hex("00", &mut test); + // peer's pubkey + ext_from_hex("030000000000000000000000000000000000000000000000000000000000000002", &mut test); + // inbound read from peer id 0 of len 50 + ext_from_hex("030032", &mut test); + // noise act two (0||pubkey||mac) + ext_from_hex("00 030000000000000000000000000000000000000000000000000000000000000002 03000000000000000000000000000000", &mut test); + + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 28 (init with extended features for splicing) + // init message = type(2) + global_len(2) + global(2) + features_len(2) + features(20) = 28 = 0x1c + ext_from_hex("001c 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 44 (28 message + 16 MAC) + ext_from_hex("03002c", &mut test); + // init message (type 16) with splicing (bit 155) and quiescence (bit 35) enabled + // Features: 20 bytes with bit 155 (splicing) and bit 35 (quiescence) set + // Wire format (big-endian): 0x08 at byte 0 for bit 155, zeros for bytes 1-11, original 8 bytes at 12-19 + // 20 bytes = 08 + 11 zeros + 8 original bytes = 080000000000000000000000 + aaa210aa2a0a9aaa + ext_from_hex("0010 00021aaa 0014 080000000000000000000000aaa210aa2a0a9aaa 03000000000000000000000000000000", &mut test); + + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 327 + ext_from_hex("0147 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 254 + ext_from_hex("0300fe", &mut test); + // beginning of open_channel message + ext_from_hex("0020 6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 0000000000000162 ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004", &mut test); + // inbound read from peer id 0 of len 89 + ext_from_hex("030059", &mut test); + // rest of open_channel and mac + ext_from_hex("030000000000000000000000000000000000000000000000000000000000000005 020900000000000000000000000000000000000000000000000000000000000000 01 0000 01021000 03000000000000000000000000000000", &mut test); + + // client should now respond with accept_channel + + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 132 + ext_from_hex("0084 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 148 + ext_from_hex("030094", &mut test); + // funding_created and mac + ext_from_hex("0022 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 c000000000000000000000000000000000000000000000000000000000000000 0000 
00000000000000000000000000000000000000000000000000000000000000dc0100000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + // client should now respond with funding_signed + + // connect a block with one transaction of len 94 + ext_from_hex("0c005e", &mut test); + // the funding transaction + ext_from_hex("020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020530000000000000000000000000000000000000000000000000000000000000000000000", &mut test); + // connect blocks to confirm the funding transaction (need minimum_depth confirmations) + for _ in 0..12 { + ext_from_hex("0c0000", &mut test); + } + // by now client should have sent a channel_ready + + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 67 + ext_from_hex("0043 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 83 + ext_from_hex("030053", &mut test); + // channel_ready and mac + ext_from_hex("0024 c000000000000000000000000000000000000000000000000000000000000000 020800000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + + // Channel is now established and ready for splicing! + + // Initiate splice-in on channel 0 (opcode 50) + // Format: 50 + // Channel index 0, splice amount 0x010000 (65536 sats) + ext_from_hex("32 00 010000", &mut test); + + // After splice_channel is called, we should receive a SendStfu event. + // The peer needs to respond with stfu to acknowledge quiescence. + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 35 (stfu message: type(2) + channel_id(32) + initiator(1) = 35 = 0x23) + ext_from_hex("0023 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 51 (35 message + 16 MAC) + ext_from_hex("030033", &mut test); + // stfu message (type 2): channel_id (32 bytes) + initiator (1 byte) + mac + // channel_id = c000...0000, initiator = 0 (peer is not initiator, responding to our stfu) + ext_from_hex("0002 c000000000000000000000000000000000000000000000000000000000000000 00 03000000000000000000000000000000", &mut test); + + // After receiving peer's stfu, we send SpliceInit. Peer responds with SpliceAck. + // Message type IDs: SpliceAck = 81 (0x0051) + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 75 (SpliceAck: type(2) + channel_id(32) + funding_contribution(8) + funding_pubkey(33) = 75 = 0x4b) + ext_from_hex("004b 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 91 (75 message + 16 MAC) + ext_from_hex("03005b", &mut test); + // SpliceAck message (type 81 = 0x0051): channel_id + funding_contribution + funding_pubkey + mac + // channel_id = c000...0000, funding_contribution = 0 (i64), funding_pubkey = valid 33-byte compressed pubkey + ext_from_hex("0051 c000000000000000000000000000000000000000000000000000000000000000 0000000000000000 030000000000000000000000000000000000000000000000000000000000000001 03000000000000000000000000000000", &mut test); + + // Now we're in interactive tx negotiation. We send TxAddInput for our new funding input. + // Peer responds with TxComplete (they have no inputs/outputs to add). 
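+	// Note on turn-taking: interactive tx construction strictly alternates, so every
+	// tx_add_input/tx_add_output we send is answered by the peer (always tx_complete here,
+	// since they contribute nothing), and negotiation ends once both sides have sent
+	// tx_complete back-to-back.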
+	// Message type IDs: TxComplete = 70 (0x0046)
+	// inbound read from peer id 0 of len 18
+	ext_from_hex("030012", &mut test);
+	// message header indicating message length 34 (TxComplete: type(2) + channel_id(32) = 34 = 0x22)
+	ext_from_hex("0022 03000000000000000000000000000000", &mut test);
+	// inbound read from peer id 0 of len 50 (34 message + 16 MAC)
+	ext_from_hex("030032", &mut test);
+	// TxComplete message (type 70 = 0x0046): channel_id + mac
+	ext_from_hex("0046 c000000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+	// After peer's first TxComplete, we send another TxAddInput (for the shared input - existing funding).
+	// We also send TxAddOutput for the new funding output.
+	// Peer needs to respond with another TxComplete.
+	// inbound read from peer id 0 of len 18
+	ext_from_hex("030012", &mut test);
+	// message header indicating message length 34 (TxComplete)
+	ext_from_hex("0022 03000000000000000000000000000000", &mut test);
+	// inbound read from peer id 0 of len 50 (34 message + 16 MAC)
+	ext_from_hex("030032", &mut test);
+	// TxComplete message
+	ext_from_hex("0046 c000000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+	// We continue sending our inputs/outputs, peer continues with TxComplete.
+	// inbound read from peer id 0 of len 18
+	ext_from_hex("030012", &mut test);
+	// message header indicating message length 34 (TxComplete)
+	ext_from_hex("0022 03000000000000000000000000000000", &mut test);
+	// inbound read from peer id 0 of len 50 (34 message + 16 MAC)
+	ext_from_hex("030032", &mut test);
+	// TxComplete message
+	ext_from_hex("0046 c000000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+	// More TxComplete responses as we add our outputs
+	// inbound read from peer id 0 of len 18
+	ext_from_hex("030012", &mut test);
+	// message header indicating message length 34 (TxComplete)
+	ext_from_hex("0022 03000000000000000000000000000000", &mut test);
+	// inbound read from peer id 0 of len 50 (34 message + 16 MAC)
+	ext_from_hex("030032", &mut test);
+	// TxComplete message
+	ext_from_hex("0046 c000000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+	// After we send our TxComplete, the interactive tx negotiation completes.
+	// Both sides now need to exchange commitment_signed messages.
+	// Message type IDs: CommitmentSigned = 132 (0x0084)
+	// For splice, we need to include the funding_txid TLV.
+	// Message format: type(2) + channel_id(32) + signature(64) + num_htlcs(2) + TLV(type=1, len=32, txid=32) = 134 bytes
+	// The signature must encode the sighash first byte (f7) in r, following the fuzz pattern.
+	// inbound read from peer id 0 of len 18
+	ext_from_hex("030012", &mut test);
+	// message header indicating message length 134 (0x86)
+	ext_from_hex("0086 03000000000000000000000000000000", &mut test);
+	// inbound read from peer id 0 of len 150 (134 message + 16 MAC)
+	ext_from_hex("030096", &mut test);
+	// CommitmentSigned message with proper signature (r=f7, s=01...) and funding_txid TLV
+	// signature r encodes sighash first byte f7, s follows the pattern from funding_created
+	// TLV type 1 (odd/optional) for funding_txid as per impl_writeable_msg!(CommitmentSigned, ...)
+ // Note: txid is encoded in reverse byte order (Bitcoin standard), so to get display 0000...0033, encode 3300...0000 + ext_from_hex("0084 c000000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000f7 0100000000000000000000000000000000000000000000000000000000000000 0000 01 20 3300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + + // After commitment_signed exchange, we need to exchange tx_signatures. + // Message type IDs: TxSignatures = 71 (0x0047) + // TxSignatures: type(2) + channel_id(32) + txid(32) + num_witnesses(2) + TLV(type=0, len=64, shared_input_sig) + // With shared_input_signature: 2 + 32 + 32 + 2 + 1 + 1 + 64 = 134 = 0x86 + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 134 (0x86) + ext_from_hex("0086 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 150 (134 message + 16 MAC) + ext_from_hex("030096", &mut test); + // TxSignatures message with shared_input_signature TLV (type 0) + // txid must match the splice funding txid (0x33 in reverse byte order) + // shared_input_signature: 64-byte fuzz signature for the shared input + ext_from_hex("0047 c000000000000000000000000000000000000000000000000000000000000000 3300000000000000000000000000000000000000000000000000000000000000 0000 00 40 00000000000000000000000000000000000000000000000000000000000000dc 0100000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + + // Connect a block with the splice funding transaction to confirm it + // The splice funding tx: version(4) + input_count(1) + txid(32) + vout(4) + script_len(1) + sequence(4) + // + output_count(1) + value(8) + script_len(1) + script(34) + locktime(4) = 94 bytes = 0x5e + // Transaction structure from FundingTransactionReadyForSigning: + // - Input: spending c000...00:0 with sequence 0xfffffffd + // - Output: 115536 sats to OP_0 PUSH32 6e00...00 + // - Locktime: 13 + ext_from_hex("0c005e", &mut test); + ext_from_hex("02000000 01 c000000000000000000000000000000000000000000000000000000000000000 00000000 00 fdffffff 01 50c3010000000000 22 00206e00000000000000000000000000000000000000000000000000000000000000 0d000000", &mut test); + + // Connect additional blocks to reach minimum_depth confirmations + for _ in 0..5 { + ext_from_hex("0c0000", &mut test); + } + + // After confirmation, exchange splice_locked messages. 
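+	// Like channel_ready for the original funding, each side sends splice_locked once the
+	// splice transaction is sufficiently confirmed; after both sides have sent it, the new
+	// FundingScope is promoted (the test below asserts the "Promoting splice funding txid" log).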
+ // Message type IDs: SpliceLocked = 77 (0x004d) + // SpliceLocked: type(2) + channel_id(32) + splice_txid(32) = 66 = 0x42 + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 66 + ext_from_hex("0042 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 82 (66 message + 16 MAC) + ext_from_hex("030052", &mut test); + // SpliceLocked message (type 77 = 0x004d): channel_id + splice_txid + mac + // splice_txid must match the splice funding txid (0x33 in reverse byte order) + ext_from_hex("004d c000000000000000000000000000000000000000000000000000000000000000 3300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + + test +} + pub fn write_fst_seeds(path: &str) { use std::fs::File; use std::io::Write; @@ -1589,6 +1912,10 @@ pub fn write_fst_seeds(path: &str) { let mut f = File::create(path.to_owned() + "/gossip_exchange_seed").unwrap(); let gossip_exchange = gossip_exchange_seed(); f.write_all(&gossip_exchange).unwrap(); + + let mut f = File::create(path.to_owned() + "/splice_seed").unwrap(); + let splice = splice_seed(); + f.write_all(&splice).unwrap(); } #[cfg(test)] @@ -1666,4 +1993,38 @@ mod tests { assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Sending message to all peers except Some(PublicKey(0000000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000002)): ChannelUpdate { signature: 3026020200a602200303030303030303030303030303030303030303030303030303030303030303, contents: UnsignedChannelUpdate { chain_hash: 6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000, short_channel_id: 42, timestamp: 44, message_flags: 1, channel_flags: 0, cltv_expiry_delta: 40, htlc_minimum_msat: 0, htlc_maximum_msat: 100000000, fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: [] } }".to_string())), Some(&1)); assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Sending message to all peers except Some(PublicKey(0000000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000002)) or the announced node: NodeAnnouncement { signature: 302502012802200303030303030303030303030303030303030303030303030303030303030303, contents: UnsignedNodeAnnouncement { features: [], timestamp: 43, node_id: NodeId(030303030303030303030303030303030303030303030303030303030303030303), rgb: [0, 0, 0], alias: NodeAlias([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), addresses: [], excess_address_data: [], excess_data: [] } }".to_string())), Some(&1)); } + + #[test] + fn test_splice_seed() { + let test = super::splice_seed(); + + let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) }); + super::do_test(&test, &(Arc::clone(&logger) as Arc)); + + let log_entries = logger.lines.lock().unwrap(); + + // Channel open + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingSigned event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for 
channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendChannelReady event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + + // Quiescence + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendStfu event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + + // Splice handshake + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendSpliceInit event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + + // Interactive transaction negotiation + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendTxAddInput event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&2)); // One for the shared input, one for the wallet input + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendTxAddOutput event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&2)); // One for the shared output, one for the change output + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendTxComplete event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + + // Transaction signing + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 0 fulfills, 0 fails, 1 commits for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendTxSignatures event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + + // Splice locked + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendSpliceLocked event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + assert_eq!(log_entries.get(&("lightning::ln::channel".to_string(), "Promoting splice funding txid 0000000000000000000000000000000000000000000000000000000000000033".to_string())), Some(&1)); + } } From 4f9d6511e405ffd79b233ec8bb9b95ad03351811 Mon Sep 17 00:00:00 2001 From: Thrishalmadasu Date: Fri, 30 Jan 2026 11:47:57 +0530 Subject: [PATCH 137/242] Log unknown channel_update with dont_forward at debug level instead of warn --- 
lightning/src/ln/channelmanager.rs | 2 +- lightning/src/ln/priv_short_conf_tests.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 56e97324e3b..565cfa051b0 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -12291,7 +12291,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ None => { // It's not a local channel if msg.contents.message_flags & (1 << 1) != 0 { - log_warn!(self.logger, "Received channel_update for unknown channel {} with dont_forward set.\n\tYou may wish to check if an incorrect tx_index was passed to chain::Confirm::transactions_confirmed.", msg.contents.short_channel_id); + log_debug!(self.logger, "Received channel_update for unknown channel {} with dont_forward set. You may wish to check if an incorrect tx_index was passed to chain::Confirm::transactions_confirmed.", msg.contents.short_channel_id); } return Ok(NotifyOption::SkipPersistNoEvents) } diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index ed7f7577bb7..14a343814e2 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -1614,7 +1614,7 @@ fn test_channel_update_dont_forward_flag() { } #[test] -fn test_unknown_channel_update_with_dont_forward_logs_warning() { +fn test_unknown_channel_update_with_dont_forward_logs_debug() { use bitcoin::constants::ChainHash; use bitcoin::secp256k1::ecdsa::Signature; use bitcoin::secp256k1::ffi::Signature as FFISignature; From 26f6cbb13eaa3ba3e862da43f39df314ae13223c Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 14 Jan 2026 12:24:29 -0500 Subject: [PATCH 138/242] Bump lightning-liquidity version Useful for upcoming commits where we otherwise break SemVer checks by changing the ALiquidityManager associated types. 
--- lightning-background-processor/Cargo.toml | 4 ++-- lightning-liquidity/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml index ef0a9840613..7fe68bc1933 100644 --- a/lightning-background-processor/Cargo.toml +++ b/lightning-background-processor/Cargo.toml @@ -26,14 +26,14 @@ bitcoin_hashes = { version = "0.14.0", default-features = false } bitcoin-io = { version = "0.1.2", default-features = false } lightning = { version = "0.3.0", path = "../lightning", default-features = false } lightning-rapid-gossip-sync = { version = "0.2.0", path = "../lightning-rapid-gossip-sync", default-features = false } -lightning-liquidity = { version = "0.2.0", path = "../lightning-liquidity", default-features = false } +lightning-liquidity = { version = "0.3.0", path = "../lightning-liquidity", default-features = false } possiblyrandom = { version = "0.2", path = "../possiblyrandom", default-features = false } [dev-dependencies] tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] } lightning = { version = "0.3.0", path = "../lightning", features = ["_test_utils"] } lightning-invoice = { version = "0.34.0", path = "../lightning-invoice" } -lightning-liquidity = { version = "0.2.0", path = "../lightning-liquidity", default-features = false, features = ["_test_utils"] } +lightning-liquidity = { version = "0.3.0", path = "../lightning-liquidity", default-features = false, features = ["_test_utils"] } lightning-persister = { version = "0.2.0", path = "../lightning-persister" } [lints] diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml index 2f83077cabc..67e82a5fbf8 100644 --- a/lightning-liquidity/Cargo.toml +++ b/lightning-liquidity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-liquidity" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["John Cantrell ", "Elias Rohrer "] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" From aa9feace17ed7fd0091c48716a0ed34bffaa4e7d Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 13 Jan 2026 14:13:57 -0500 Subject: [PATCH 139/242] Drop Deref indirection for BroadcasterInterface Reduces generics and verbosity across the codebase, should provide equivalent behavior. 
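To illustrate the shape of the change (a minimal sketch; `broadcast_old` and
`broadcast_new` are hypothetical helpers, not part of this diff), callers move
from a `Deref`-based bound to a direct trait bound. Smart pointers keep working
as long as a blanket `BroadcasterInterface` impl for `Deref` types exists,
which the small chaininterface.rs hunk below is expected to provide:

    use std::ops::Deref;

    use bitcoin::transaction::Transaction;
    use lightning::chain::chaininterface::BroadcasterInterface;

    // Before: generic over a smart pointer whose target implements the trait.
    fn broadcast_old<T: Deref>(broadcaster: &T, tx: &Transaction)
    where
        T::Target: BroadcasterInterface,
    {
        broadcaster.broadcast_transactions(&[tx]);
    }

    // After: bound directly on the trait itself, dropping one layer of generics.
    fn broadcast_new<T: BroadcasterInterface>(broadcaster: &T, tx: &Transaction) {
        broadcaster.broadcast_transactions(&[tx]);
    }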
Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 23 ++--- lightning-liquidity/src/lsps2/service.rs | 16 ++-- lightning-liquidity/src/manager.rs | 86 +++++++++---------- lightning/src/chain/chaininterface.rs | 6 ++ lightning/src/chain/chainmonitor.rs | 52 +++++------ lightning/src/chain/channelmonitor.rs | 82 +++++++----------- lightning/src/chain/onchaintx.rs | 23 ++--- lightning/src/events/bump_transaction/mod.rs | 7 +- lightning/src/events/bump_transaction/sync.rs | 7 +- lightning/src/ln/channelmanager.rs | 66 +++++--------- lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/util/anchor_channel_reserves.rs | 5 +- lightning/src/util/persist.rs | 51 ++++++----- lightning/src/util/sweep.rs | 48 ++++++----- 14 files changed, 208 insertions(+), 268 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index c38d6dfe080..0cefcca7d24 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -492,10 +492,9 @@ pub const NO_LIQUIDITY_MANAGER: Option< K = &DummyKVStore, TimeProvider = dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync, TP = &(dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync), - BroadcasterInterface = dyn lightning::chain::chaininterface::BroadcasterInterface - + Send - + Sync, - T = &(dyn BroadcasterInterface + Send + Sync), + BroadcasterInterface = &(dyn lightning::chain::chaininterface::BroadcasterInterface + + Send + + Sync), > + Send + Sync, >, @@ -519,10 +518,9 @@ pub const NO_LIQUIDITY_MANAGER_SYNC: Option< KS = &(dyn lightning::util::persist::KVStoreSync + Send + Sync), TimeProvider = dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync, TP = &(dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync), - BroadcasterInterface = dyn lightning::chain::chaininterface::BroadcasterInterface - + Send - + Sync, - T = &(dyn BroadcasterInterface + Send + Sync), + BroadcasterInterface = &(dyn lightning::chain::chaininterface::BroadcasterInterface + + Send + + Sync), > + Send + Sync, >, @@ -956,7 +954,7 @@ pub async fn process_events_async< 'a, UL: Deref, CF: Deref, - T: Deref, + T: BroadcasterInterface, F: Deref, G: Deref>, L: Deref, @@ -989,7 +987,6 @@ pub async fn process_events_async< where UL::Target: UtxoLookup, CF::Target: chain::Filter, - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist<::Signer>, @@ -1457,7 +1454,7 @@ fn check_and_reset_sleeper< pub async fn process_events_async_with_kv_store_sync< UL: Deref, CF: Deref, - T: Deref, + T: BroadcasterInterface, F: Deref, G: Deref>, L: Deref, @@ -1490,7 +1487,6 @@ pub async fn process_events_async_with_kv_store_sync< where UL::Target: UtxoLookup, CF::Target: chain::Filter, - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist<::Signer>, @@ -1571,7 +1567,7 @@ impl BackgroundProcessor { 'a, UL: 'static + Deref, CF: 'static + Deref, - T: 'static + Deref, + T: 'static + BroadcasterInterface, F: 'static + Deref + Send, G: 'static + Deref>, L: 'static + Deref + Send, @@ -1604,7 +1600,6 @@ impl BackgroundProcessor { where UL::Target: 'static + UtxoLookup, CF::Target: 'static + chain::Filter, - T::Target: 'static + BroadcasterInterface, F::Target: 'static + FeeEstimator, L::Target: 'static + Logger, P::Target: 'static + Persist<::Signer>, diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index 
1b5bf964996..756e8b32bc8 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -702,11 +702,10 @@ macro_rules! get_or_insert_peer_state_entry { } /// The main object allowing to send and receive bLIP-52 / LSPS2 messages. -pub struct LSPS2ServiceHandler +pub struct LSPS2ServiceHandler where CM::Target: AChannelManager, K::Target: KVStore, - T::Target: BroadcasterInterface, { channel_manager: CM, kv_store: K, @@ -721,11 +720,10 @@ where persistence_in_flight: AtomicUsize, } -impl LSPS2ServiceHandler +impl LSPS2ServiceHandler where CM::Target: AChannelManager, K::Target: KVStore, - T::Target: BroadcasterInterface, { /// Constructs a `LSPS2ServiceHandler`. pub(crate) fn new( @@ -2044,12 +2042,11 @@ where } } -impl LSPSProtocolMessageHandler +impl LSPSProtocolMessageHandler for LSPS2ServiceHandler where CM::Target: AChannelManager, K::Target: KVStore, - T::Target: BroadcasterInterface, { type ProtocolMessage = LSPS2Message; const PROTOCOL_NUMBER: Option = Some(2); @@ -2119,20 +2116,19 @@ fn calculate_amount_to_forward_per_htlc( /// A synchroneous wrapper around [`LSPS2ServiceHandler`] to be used in contexts where async is not /// available. -pub struct LSPS2ServiceHandlerSync<'a, CM: Deref, K: Deref + Clone, T: Deref + Clone> +pub struct LSPS2ServiceHandlerSync<'a, CM: Deref, K: Deref + Clone, T: BroadcasterInterface + Clone> where CM::Target: AChannelManager, K::Target: KVStore, - T::Target: BroadcasterInterface, { inner: &'a LSPS2ServiceHandler, } -impl<'a, CM: Deref, K: Deref + Clone, T: Deref + Clone> LSPS2ServiceHandlerSync<'a, CM, K, T> +impl<'a, CM: Deref, K: Deref + Clone, T: BroadcasterInterface + Clone> + LSPS2ServiceHandlerSync<'a, CM, K, T> where CM::Target: AChannelManager, K::Target: KVStore, - T::Target: BroadcasterInterface, { pub(crate) fn from_inner(inner: &'a LSPS2ServiceHandler) -> Self { Self { inner } diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index 0b4f5efaa3c..84a52e2ab13 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -128,13 +128,19 @@ pub trait ALiquidityManager { /// A type that may be dereferenced to [`Self::TimeProvider`]. type TP: Deref + Clone; /// A type implementing [`BroadcasterInterface`]. - type BroadcasterInterface: BroadcasterInterface + ?Sized; - /// A type that may be dereferenced to [`Self::BroadcasterInterface`]. - type T: Deref + Clone; + type BroadcasterInterface: BroadcasterInterface + Clone; /// Returns a reference to the actual [`LiquidityManager`] object. fn get_lm( &self, - ) -> &LiquidityManager; + ) -> &LiquidityManager< + Self::ES, + Self::NS, + Self::CM, + Self::C, + Self::K, + Self::TP, + Self::BroadcasterInterface, + >; } impl< @@ -144,7 +150,7 @@ impl< C: Deref + Clone, K: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > ALiquidityManager for LiquidityManager where ES::Target: EntropySource, @@ -153,7 +159,6 @@ where C::Target: Filter, K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { type EntropySource = ES::Target; type ES = ES; @@ -167,8 +172,7 @@ where type K = K; type TimeProvider = TP::Target; type TP = TP; - type BroadcasterInterface = T::Target; - type T = T; + type BroadcasterInterface = T; fn get_lm(&self) -> &LiquidityManager { self } @@ -204,9 +208,7 @@ pub trait ALiquidityManagerSync { /// A type that may be dereferenced to [`Self::TimeProvider`]. 
type TP: Deref + Clone; /// A type implementing [`BroadcasterInterface`]. - type BroadcasterInterface: BroadcasterInterface + ?Sized; - /// A type that may be dereferenced to [`Self::BroadcasterInterface`]. - type T: Deref + Clone; + type BroadcasterInterface: BroadcasterInterface + Clone; /// Returns the inner async [`LiquidityManager`] for testing purposes. #[cfg(any(test, feature = "_test_utils"))] fn get_lm_async( @@ -218,12 +220,20 @@ pub trait ALiquidityManagerSync { Self::C, KVStoreSyncWrapper, Self::TP, - Self::T, + Self::BroadcasterInterface, >; /// Returns a reference to the actual [`LiquidityManager`] object. fn get_lm( &self, - ) -> &LiquidityManagerSync; + ) -> &LiquidityManagerSync< + Self::ES, + Self::NS, + Self::CM, + Self::C, + Self::KS, + Self::TP, + Self::BroadcasterInterface, + >; } impl< @@ -233,7 +243,7 @@ impl< C: Deref + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > ALiquidityManagerSync for LiquidityManagerSync where ES::Target: EntropySource, @@ -242,7 +252,6 @@ where C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { type EntropySource = ES::Target; type ES = ES; @@ -256,8 +265,7 @@ where type KS = KS; type TimeProvider = TP::Target; type TP = TP; - type BroadcasterInterface = T::Target; - type T = T; + type BroadcasterInterface = T; /// Returns the inner async [`LiquidityManager`] for testing purposes. #[cfg(any(test, feature = "_test_utils"))] fn get_lm_async( @@ -269,7 +277,7 @@ where Self::C, KVStoreSyncWrapper, Self::TP, - Self::T, + Self::BroadcasterInterface, > { &self.inner } @@ -304,7 +312,7 @@ pub struct LiquidityManager< C: Deref + Clone, K: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > where ES::Target: EntropySource, NS::Target: NodeSigner, @@ -312,7 +320,6 @@ pub struct LiquidityManager< C::Target: Filter, K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { pending_messages: Arc, pending_events: Arc>, @@ -342,7 +349,7 @@ impl< CM: Deref + Clone, C: Deref + Clone, K: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > LiquidityManager where ES::Target: EntropySource, @@ -350,7 +357,6 @@ where CM::Target: AChannelManager, C::Target: Filter, K::Target: KVStore, - T::Target: BroadcasterInterface, { /// Constructor for the [`LiquidityManager`] using the default system clock /// @@ -384,7 +390,7 @@ impl< C: Deref + Clone, K: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > LiquidityManager where ES::Target: EntropySource, @@ -393,7 +399,6 @@ where C::Target: Filter, K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { /// Constructor for the [`LiquidityManager`] with a custom time provider. 
/// @@ -811,7 +816,7 @@ impl< C: Deref + Clone, K: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > CustomMessageReader for LiquidityManager where ES::Target: EntropySource, @@ -820,7 +825,6 @@ where C::Target: Filter, K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { type CustomMessage = RawLSPSMessage; @@ -843,7 +847,7 @@ impl< C: Deref + Clone, K: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > CustomMessageHandler for LiquidityManager where ES::Target: EntropySource, @@ -852,7 +856,6 @@ where C::Target: Filter, K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { fn handle_custom_message( &self, msg: Self::CustomMessage, sender_node_id: PublicKey, @@ -977,7 +980,7 @@ impl< C: Deref + Clone, K: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > Listen for LiquidityManager where ES::Target: EntropySource, @@ -986,7 +989,6 @@ where C::Target: Filter, K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { fn filtered_block_connected( &self, header: &bitcoin::block::Header, txdata: &chain::transaction::TransactionData, @@ -1023,7 +1025,7 @@ impl< C: Deref + Clone, K: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > Confirm for LiquidityManager where ES::Target: EntropySource, @@ -1032,7 +1034,6 @@ where C::Target: Filter, K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { fn transactions_confirmed( &self, _header: &bitcoin::block::Header, _txdata: &chain::transaction::TransactionData, @@ -1069,7 +1070,7 @@ pub struct LiquidityManagerSync< C: Deref + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > where ES::Target: EntropySource, NS::Target: NodeSigner, @@ -1077,7 +1078,6 @@ pub struct LiquidityManagerSync< C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { inner: LiquidityManager, TP, T>, } @@ -1089,7 +1089,7 @@ impl< CM: Deref + Clone, C: Deref + Clone, KS: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > LiquidityManagerSync where ES::Target: EntropySource, @@ -1097,7 +1097,6 @@ where CM::Target: AChannelManager, KS::Target: KVStoreSync, C::Target: Filter, - T::Target: BroadcasterInterface, { /// Constructor for the [`LiquidityManagerSync`] using the default system clock /// @@ -1142,7 +1141,7 @@ impl< C: Deref + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > LiquidityManagerSync where ES::Target: EntropySource, @@ -1151,7 +1150,6 @@ where C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { /// Constructor for the [`LiquidityManagerSync`] with a custom time provider. 
/// @@ -1312,7 +1310,7 @@ impl< C: Deref + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > CustomMessageReader for LiquidityManagerSync where ES::Target: EntropySource, @@ -1321,7 +1319,6 @@ where C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { type CustomMessage = RawLSPSMessage; @@ -1339,7 +1336,7 @@ impl< C: Deref + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > CustomMessageHandler for LiquidityManagerSync where ES::Target: EntropySource, @@ -1348,7 +1345,6 @@ where C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { fn handle_custom_message( &self, msg: Self::CustomMessage, sender_node_id: PublicKey, @@ -1386,7 +1382,7 @@ impl< C: Deref + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > Listen for LiquidityManagerSync where ES::Target: EntropySource, @@ -1395,7 +1391,6 @@ where C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { fn filtered_block_connected( &self, header: &bitcoin::block::Header, txdata: &chain::transaction::TransactionData, @@ -1416,7 +1411,7 @@ impl< C: Deref + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > Confirm for LiquidityManagerSync where ES::Target: EntropySource, @@ -1425,7 +1420,6 @@ where C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { fn transactions_confirmed( &self, header: &bitcoin::block::Header, txdata: &chain::transaction::TransactionData, diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 117e9b3af05..d21017c25bb 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -48,6 +48,12 @@ pub trait BroadcasterInterface { fn broadcast_transactions(&self, txs: &[&Transaction]); } +impl> BroadcasterInterface for B { + fn broadcast_transactions(&self, txs: &[&Transaction]) { + self.deref().broadcast_transactions(txs) + } +} + /// An enum that represents the priority at which we want a transaction to confirm used for feerate /// estimation. 
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 9fd6383cf7e..678c7b6ef5b 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -261,14 +261,13 @@ pub struct AsyncPersister< L: Deref + MaybeSend + MaybeSync + 'static, ES: Deref + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, - BI: Deref + MaybeSend + MaybeSync + 'static, + BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, FE: Deref + MaybeSend + MaybeSync + 'static, > where K::Target: KVStore + MaybeSync, L::Target: Logger, ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, FE::Target: FeeEstimator, { persister: MonitorUpdatingPersisterAsync, @@ -281,7 +280,7 @@ impl< L: Deref + MaybeSend + MaybeSync + 'static, ES: Deref + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, - BI: Deref + MaybeSend + MaybeSync + 'static, + BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, FE: Deref + MaybeSend + MaybeSync + 'static, > Deref for AsyncPersister where @@ -289,7 +288,6 @@ where L::Target: Logger, ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, FE::Target: FeeEstimator, { type Target = Self; @@ -304,7 +302,7 @@ impl< L: Deref + MaybeSend + MaybeSync + 'static, ES: Deref + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, - BI: Deref + MaybeSend + MaybeSync + 'static, + BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, FE: Deref + MaybeSend + MaybeSync + 'static, > Persist<::EcdsaSigner> for AsyncPersister where @@ -312,7 +310,6 @@ where L::Target: Logger, ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, FE::Target: FeeEstimator, ::EcdsaSigner: MaybeSend + 'static, { @@ -362,14 +359,13 @@ where pub struct ChainMonitor< ChannelSigner: EcdsaChannelSigner, C: Deref, - T: Deref, + T: BroadcasterInterface, F: Deref, L: Deref, P: Deref, ES: Deref, > where C::Target: chain::Filter, - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, @@ -404,7 +400,7 @@ impl< S: FutureSpawner, SP: Deref + MaybeSend + MaybeSync + 'static, C: Deref, - T: Deref + MaybeSend + MaybeSync + 'static, + T: BroadcasterInterface + MaybeSend + MaybeSync + 'static, F: Deref + MaybeSend + MaybeSync + 'static, L: Deref + MaybeSend + MaybeSync + 'static, ES: Deref + MaybeSend + MaybeSync + 'static, @@ -421,7 +417,6 @@ impl< K::Target: KVStore + MaybeSync, SP::Target: SignerProvider + Sized, C::Target: chain::Filter, - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, ES::Target: EntropySource + Sized, @@ -462,7 +457,7 @@ impl< impl< ChannelSigner: EcdsaChannelSigner, C: Deref, - T: Deref, + T: BroadcasterInterface, F: Deref, L: Deref, P: Deref, @@ -470,7 +465,6 @@ impl< > ChainMonitor where C::Target: chain::Filter, - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, @@ -895,7 +889,7 @@ where let monitors = self.monitors.read().unwrap(); for (_, monitor_holder) in &*monitors { monitor_holder.monitor.rebroadcast_pending_claims( - &*self.broadcaster, + &self.broadcaster, &*self.fee_estimator, &self.logger, ) @@ -911,7 +905,7 @@ where if let Some(channel_id) = monitor_opt { if let Some(monitor_holder) = monitors.get(&channel_id) { 
monitor_holder.monitor.signer_unblocked( - &*self.broadcaster, + &self.broadcaster, &*self.fee_estimator, &self.logger, ) @@ -919,7 +913,7 @@ where } else { for (_, monitor_holder) in &*monitors { monitor_holder.monitor.signer_unblocked( - &*self.broadcaster, + &self.broadcaster, &*self.fee_estimator, &self.logger, ) @@ -1109,7 +1103,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, C: Deref, - T: Deref, + T: BroadcasterInterface, F: Deref, L: Deref, P: Deref, @@ -1117,7 +1111,6 @@ impl< > BaseMessageHandler for ChainMonitor where C::Target: chain::Filter, - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, @@ -1148,7 +1141,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, C: Deref, - T: Deref, + T: BroadcasterInterface, F: Deref, L: Deref, P: Deref, @@ -1156,7 +1149,6 @@ impl< > SendOnlyMessageHandler for ChainMonitor where C::Target: chain::Filter, - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, @@ -1167,7 +1159,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, C: Deref, - T: Deref, + T: BroadcasterInterface, F: Deref, L: Deref, P: Deref, @@ -1175,7 +1167,6 @@ impl< > chain::Listen for ChainMonitor where C::Target: chain::Filter, - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, @@ -1193,7 +1184,7 @@ where header, txdata, height, - &*self.broadcaster, + &self.broadcaster, &*self.fee_estimator, &self.logger, ) @@ -1220,7 +1211,7 @@ where for monitor_state in monitor_states.values() { monitor_state.monitor.blocks_disconnected( fork_point, - &*self.broadcaster, + &self.broadcaster, &*self.fee_estimator, &self.logger, ); @@ -1231,7 +1222,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, C: Deref, - T: Deref, + T: BroadcasterInterface, F: Deref, L: Deref, P: Deref, @@ -1239,7 +1230,6 @@ impl< > chain::Confirm for ChainMonitor where C::Target: chain::Filter, - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, @@ -1258,7 +1248,7 @@ where header, txdata, height, - &*self.broadcaster, + &self.broadcaster, &*self.fee_estimator, &self.logger, ) @@ -1273,7 +1263,7 @@ where for monitor_state in monitor_states.values() { monitor_state.monitor.transaction_unconfirmed( txid, - &*self.broadcaster, + &self.broadcaster, &*self.fee_estimator, &self.logger, ); @@ -1294,7 +1284,7 @@ where monitor.best_block_updated( header, height, - &*self.broadcaster, + &self.broadcaster, &*self.fee_estimator, &self.logger, ) @@ -1326,7 +1316,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, C: Deref, - T: Deref, + T: BroadcasterInterface, F: Deref, L: Deref, P: Deref, @@ -1334,7 +1324,6 @@ impl< > chain::Watch for ChainMonitor where C::Target: chain::Filter, - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, @@ -1522,7 +1511,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, C: Deref, - T: Deref, + T: BroadcasterInterface, F: Deref, L: Deref, P: Deref, @@ -1530,7 +1519,6 @@ impl< > events::EventsProvider for ChainMonitor where C::Target: chain::Filter, - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index fc9ffec7f8f..5c531cdb0ed 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -2058,7 +2058,7 @@ impl ChannelMonitor { /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager 
#[rustfmt::skip] - pub(crate) fn provide_payment_preimage_unsafe_legacy( + pub(crate) fn provide_payment_preimage_unsafe_legacy( &self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, @@ -2066,7 +2066,6 @@ impl ChannelMonitor { fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -2083,11 +2082,10 @@ impl ChannelMonitor { /// itself. /// /// panics if the given update is not the next update by update_id. - pub fn update_monitor( + pub fn update_monitor( &self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L, ) -> Result<(), ()> where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -2338,10 +2336,9 @@ impl ChannelMonitor { /// transactions that cannot be confirmed until the funding transaction is visible. /// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction - pub fn broadcast_latest_holder_commitment_txn( + pub fn broadcast_latest_holder_commitment_txn( &self, broadcaster: &B, fee_estimator: &F, logger: &L, ) where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -2382,7 +2379,7 @@ impl ChannelMonitor { /// /// [`get_outputs_to_watch`]: #method.get_outputs_to_watch #[rustfmt::skip] - pub fn block_connected( + pub fn block_connected( &self, header: &Header, txdata: &TransactionData, @@ -2392,7 +2389,6 @@ impl ChannelMonitor { logger: &L, ) -> Vec where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -2404,10 +2400,9 @@ impl ChannelMonitor { /// Determines if the disconnected block contained any transactions of interest and updates /// appropriately. - pub fn blocks_disconnected( + pub fn blocks_disconnected( &self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &L, ) where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -2424,7 +2419,7 @@ impl ChannelMonitor { /// /// [`block_connected`]: Self::block_connected #[rustfmt::skip] - pub fn transactions_confirmed( + pub fn transactions_confirmed( &self, header: &Header, txdata: &TransactionData, @@ -2434,7 +2429,6 @@ impl ChannelMonitor { logger: &L, ) -> Vec where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -2452,14 +2446,13 @@ impl ChannelMonitor { /// /// [`blocks_disconnected`]: Self::blocks_disconnected #[rustfmt::skip] - pub fn transaction_unconfirmed( + pub fn transaction_unconfirmed( &self, txid: &Txid, broadcaster: B, fee_estimator: F, logger: &L, ) where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -2479,7 +2472,7 @@ impl ChannelMonitor { /// /// [`block_connected`]: Self::block_connected #[rustfmt::skip] - pub fn best_block_updated( + pub fn best_block_updated( &self, header: &Header, height: u32, @@ -2488,7 +2481,6 @@ impl ChannelMonitor { logger: &L, ) -> Vec where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -2526,11 +2518,10 @@ impl ChannelMonitor { /// invoking this every 30 seconds, or lower if running in an environment with spotty /// connections, like on mobile. 
#[rustfmt::skip] - pub fn rebroadcast_pending_claims( + pub fn rebroadcast_pending_claims( &self, broadcaster: B, fee_estimator: F, logger: &L, ) where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -2554,11 +2545,10 @@ impl ChannelMonitor { /// Triggers rebroadcasts of pending claims from a force-closed channel after a transaction /// signature generation failure. #[rustfmt::skip] - pub fn signer_unblocked( + pub fn signer_unblocked( &self, broadcaster: B, fee_estimator: F, logger: &L, ) where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -3808,12 +3798,11 @@ impl ChannelMonitorImpl { /// /// Note that this is often called multiple times for the same payment and must be idempotent. #[rustfmt::skip] - fn provide_payment_preimage( + fn provide_payment_preimage( &mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, payment_info: &Option, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext) - where B::Target: BroadcasterInterface, - F::Target: FeeEstimator, + where F::Target: FeeEstimator, L::Target: Logger, { self.payment_preimages.entry(payment_hash.clone()) @@ -3987,12 +3976,11 @@ impl ChannelMonitorImpl { /// See also [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]. /// /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]: crate::chain::channelmonitor::ChannelMonitor::broadcast_latest_holder_commitment_txn - pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast( + pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast( &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, require_funding_seen: bool, ) where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -4190,11 +4178,10 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn update_monitor( + fn update_monitor( &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithContext ) -> Result<(), ()> - where B::Target: BroadcasterInterface, - F::Target: FeeEstimator, + where F::Target: FeeEstimator, L::Target: Logger, { if self.latest_update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID && updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID { @@ -5286,12 +5273,11 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn block_connected( + fn block_connected( &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: &WithContext, ) -> Vec - where B::Target: BroadcasterInterface, - F::Target: FeeEstimator, + where F::Target: FeeEstimator, L::Target: Logger, { let block_hash = header.block_hash(); @@ -5302,7 +5288,7 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn best_block_updated( + fn best_block_updated( &mut self, header: &Header, height: u32, @@ -5311,7 +5297,6 @@ impl ChannelMonitorImpl { logger: &WithContext, ) -> Vec where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -5334,7 +5319,7 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn transactions_confirmed( + fn transactions_confirmed( &mut self, header: &Header, txdata: &TransactionData, @@ -5344,7 +5329,6 @@ impl ChannelMonitorImpl { logger: &WithContext, ) -> Vec where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -5619,7 +5603,7 @@ impl ChannelMonitorImpl { /// `conf_height` should be set to the height at which any new transaction(s)/block(s) were /// confirmed at, even if it is not the current best 
height. #[rustfmt::skip] - fn block_confirmed( + fn block_confirmed( &mut self, conf_height: u32, conf_hash: BlockHash, @@ -5631,7 +5615,6 @@ impl ChannelMonitorImpl { logger: &WithContext, ) -> Vec where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -5847,10 +5830,9 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn blocks_disconnected( + fn blocks_disconnected( &mut self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &WithContext - ) where B::Target: BroadcasterInterface, - F::Target: FeeEstimator, + ) where F::Target: FeeEstimator, L::Target: Logger, { let new_height = fork_point.height; @@ -5896,14 +5878,13 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn transaction_unconfirmed( + fn transaction_unconfirmed( &mut self, txid: &Txid, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, ) where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { @@ -6357,39 +6338,38 @@ impl ChannelMonitorImpl { } } -impl chain::Listen +impl chain::Listen for (ChannelMonitor, T, F, L) where - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { - self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &self.3); + self.0.block_connected(header, txdata, height, &self.1, &*self.2, &self.3); } fn blocks_disconnected(&self, fork_point: BestBlock) { - self.0.blocks_disconnected(fork_point, &*self.1, &*self.2, &self.3); + self.0.blocks_disconnected(fork_point, &self.1, &*self.2, &self.3); } } -impl chain::Confirm for (M, T, F, L) +impl chain::Confirm + for (M, T, F, L) where M: Deref>, - T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { - self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &self.3); + self.0.transactions_confirmed(header, txdata, height, &self.1, &*self.2, &self.3); } fn transaction_unconfirmed(&self, txid: &Txid) { - self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &self.3); + self.0.transaction_unconfirmed(txid, &self.1, &*self.2, &self.3); } fn best_block_updated(&self, header: &Header, height: u32) { - self.0.best_block_updated(header, height, &*self.1, &*self.2, &self.3); + self.0.best_block_updated(header, height, &self.1, &*self.2, &self.3); } fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option)> { diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index fb65aa0f157..321b6008683 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -485,13 +485,12 @@ impl OnchainTxHandler { /// invoking this every 30 seconds, or lower if running in an environment with spotty /// connections, like on mobile. 
#[rustfmt::skip] - pub(super) fn rebroadcast_pending_claims( + pub(super) fn rebroadcast_pending_claims( &mut self, current_height: u32, feerate_strategy: FeerateStrategy, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, { let mut bump_requests = Vec::with_capacity(self.pending_claim_requests.len()); @@ -761,14 +760,11 @@ impl OnchainTxHandler { /// does not need to equal the current blockchain tip height, which should be provided via /// `cur_height`, however it must never be higher than `cur_height`. #[rustfmt::skip] - pub(super) fn update_claims_view_from_requests( + pub(super) fn update_claims_view_from_requests( &mut self, mut requests: Vec, conf_height: u32, cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - { + ) where F::Target: FeeEstimator, { if !requests.is_empty() { log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len()); } @@ -912,12 +908,11 @@ impl OnchainTxHandler { /// confirmed. This does not need to equal the current blockchain tip height, which should be /// provided via `cur_height`, however it must never be higher than `cur_height`. #[rustfmt::skip] - pub(super) fn update_claims_view_from_matched_txn( + pub(super) fn update_claims_view_from_matched_txn( &mut self, txn_matched: &[&Transaction], conf_height: u32, conf_hash: BlockHash, cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L ) where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, { let mut have_logged_intro = false; @@ -1110,7 +1105,7 @@ impl OnchainTxHandler { } #[rustfmt::skip] - pub(super) fn transaction_unconfirmed( + pub(super) fn transaction_unconfirmed( &mut self, txid: &Txid, broadcaster: &B, @@ -1119,7 +1114,6 @@ impl OnchainTxHandler { fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) where - B::Target: BroadcasterInterface, F::Target: FeeEstimator, { let mut height = None; @@ -1138,13 +1132,10 @@ impl OnchainTxHandler { } #[rustfmt::skip] - pub(super) fn blocks_disconnected( + pub(super) fn blocks_disconnected( &mut self, new_best_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) - where B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - { + ) where F::Target: FeeEstimator, { let mut bump_candidates = new_hash_map(); let onchain_events_awaiting_threshold_conf = self.onchain_events_awaiting_threshold_conf.drain(..).collect::>(); diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index e141d9b8abc..b45b65940ee 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -694,9 +694,8 @@ where /// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction // Note that updates to documentation on this struct should be copied to the synchronous version. 
-pub struct BumpTransactionEventHandler +pub struct BumpTransactionEventHandler where - B::Target: BroadcasterInterface, C::Target: CoinSelectionSource, SP::Target: SignerProvider, L::Target: Logger, @@ -708,9 +707,9 @@ where secp: Secp256k1, } -impl BumpTransactionEventHandler +impl + BumpTransactionEventHandler where - B::Target: BroadcasterInterface, C::Target: CoinSelectionSource, SP::Target: SignerProvider, L::Target: Logger, diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index 1328c2c1b3a..bf0668ccba3 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -267,9 +267,8 @@ where /// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction // Note that updates to documentation on this struct should be copied to the synchronous version. -pub struct BumpTransactionEventHandlerSync +pub struct BumpTransactionEventHandlerSync where - B::Target: BroadcasterInterface, C::Target: CoinSelectionSourceSync, SP::Target: SignerProvider, L::Target: Logger, @@ -278,9 +277,9 @@ where BumpTransactionEventHandler, SP, L>, } -impl BumpTransactionEventHandlerSync +impl + BumpTransactionEventHandlerSync where - B::Target: BroadcasterInterface, C::Target: CoinSelectionSourceSync, SP::Target: SignerProvider, L::Target: Logger, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 0f9adfcc51a..7191e847acd 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1789,9 +1789,7 @@ pub trait AChannelManager { /// A type that may be dereferenced to [`Self::Watch`]. type M: Deref; /// A type implementing [`BroadcasterInterface`]. - type Broadcaster: BroadcasterInterface + ?Sized; - /// A type that may be dereferenced to [`Self::Broadcaster`]. - type T: Deref; + type Broadcaster: BroadcasterInterface; /// A type implementing [`EntropySource`]. type EntropySource: EntropySource + ?Sized; /// A type that may be dereferenced to [`Self::EntropySource`]. 
@@ -1827,7 +1825,7 @@ pub trait AChannelManager { &self, ) -> &ChannelManager< Self::M, - Self::T, + Self::Broadcaster, Self::ES, Self::NS, Self::SP, @@ -1840,7 +1838,7 @@ pub trait AChannelManager { impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -1851,7 +1849,6 @@ impl< > AChannelManager for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -1862,8 +1859,7 @@ where { type Watch = M::Target; type M = M; - type Broadcaster = T::Target; - type T = T; + type Broadcaster = T; type EntropySource = ES::Target; type ES = ES; type NodeSigner = NS::Target; @@ -2625,7 +2621,7 @@ where /// [`read`]: ReadableArgs::read pub struct ChannelManager< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -2635,7 +2631,6 @@ pub struct ChannelManager< L: Deref, > where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -3415,7 +3410,7 @@ fn create_htlc_intercepted_event( impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -3426,7 +3421,6 @@ impl< > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -13556,7 +13550,7 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -13567,7 +13561,6 @@ impl< > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -14433,7 +14426,7 @@ where impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -14444,7 +14437,6 @@ impl< > BaseMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -14804,7 +14796,7 @@ where impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -14815,7 +14807,6 @@ impl< > EventsProvider for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -14839,7 +14830,7 @@ where impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -14850,7 +14841,6 @@ impl< > chain::Listen for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -14900,7 +14890,7 @@ where impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -14911,7 +14901,6 @@ impl< > chain::Confirm for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -15073,7 +15062,7 @@ pub(super) enum FundingConfirmedMessage { impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -15084,7 +15073,6 @@ impl< > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, 
NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -15435,7 +15423,7 @@ where impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -15446,7 +15434,6 @@ impl< > ChannelMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -16010,7 +15997,7 @@ where impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -16021,7 +16008,6 @@ impl< > OffersMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -16228,7 +16214,7 @@ where impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -16239,7 +16225,6 @@ impl< > AsyncPaymentsMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -16473,7 +16458,7 @@ where #[cfg(feature = "dnssec")] impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -16484,7 +16469,6 @@ impl< > DNSResolverMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -16541,7 +16525,7 @@ where impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -16552,7 +16536,6 @@ impl< > NodeIdLookUp for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -17057,7 +17040,7 @@ impl_writeable_tlv_based!(PendingInboundPayment, { impl< M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -17068,7 +17051,6 @@ impl< > Writeable for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -17424,7 +17406,7 @@ impl Readable for VecDeque<(Event, Option)> { pub struct ChannelManagerReadArgs< 'a, M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -17434,7 +17416,6 @@ pub struct ChannelManagerReadArgs< L: Deref + Clone, > where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -17504,7 +17485,7 @@ pub struct ChannelManagerReadArgs< impl< 'a, M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -17515,7 +17496,6 @@ impl< > ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L> where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -17592,7 +17572,7 @@ fn dedup_decode_update_add_htlcs( impl< 'a, M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -17604,7 +17584,6 @@ impl< for (BlockHash, Arc>) where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, @@ -17625,7 +17604,7 @@ where impl< 'a, M: Deref, - T: Deref, + T: BroadcasterInterface, ES: Deref, NS: Deref, SP: Deref, @@ -17637,7 +17616,6 
@@ impl< for (BlockHash, ChannelManager) where M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 9aaa0d5170d..69c18d96f1b 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -733,7 +733,7 @@ pub trait NodeHolder { &self, ) -> &ChannelManager< ::M, - ::T, + ::Broadcaster, ::ES, ::NS, ::SP, @@ -750,7 +750,7 @@ impl NodeHolder for &H { &self, ) -> &ChannelManager< ::M, - ::T, + ::Broadcaster, ::ES, ::NS, ::SP, diff --git a/lightning/src/util/anchor_channel_reserves.rs b/lightning/src/util/anchor_channel_reserves.rs index e50e103211f..26212ca3966 100644 --- a/lightning/src/util/anchor_channel_reserves.rs +++ b/lightning/src/util/anchor_channel_reserves.rs @@ -273,7 +273,7 @@ pub fn can_support_additional_anchor_channel< AChannelManagerRef: Deref, ChannelSigner: EcdsaChannelSigner, FilterRef: Deref, - BroadcasterRef: Deref, + B: BroadcasterInterface, EstimatorRef: Deref, LoggerRef: Deref, PersistRef: Deref, @@ -282,7 +282,7 @@ pub fn can_support_additional_anchor_channel< Target = ChainMonitor< ChannelSigner, FilterRef, - BroadcasterRef, + B, EstimatorRef, LoggerRef, PersistRef, @@ -296,7 +296,6 @@ pub fn can_support_additional_anchor_channel< where AChannelManagerRef::Target: AChannelManager, FilterRef::Target: Filter, - BroadcasterRef::Target: BroadcasterInterface, EstimatorRef::Target: FeeEstimator, LoggerRef::Target: Logger, PersistRef::Target: Persist, diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 2e1e8805d0a..92a565a28f8 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -588,25 +588,28 @@ fn poll_sync_future(future: F) -> F::Output { /// If you have many stale updates stored (such as after a crash with pending lazy deletes), and /// would like to get rid of them, consider using the /// [`MonitorUpdatingPersister::cleanup_stale_updates`] function. -pub struct MonitorUpdatingPersister( - MonitorUpdatingPersisterAsync, PanicingSpawner, L, ES, SP, BI, FE>, -) +pub struct MonitorUpdatingPersister< + K: Deref, + L: Deref, + ES: Deref, + SP: Deref, + BI: BroadcasterInterface, + FE: Deref, +>(MonitorUpdatingPersisterAsync, PanicingSpawner, L, ES, SP, BI, FE>) where K::Target: KVStoreSync, L::Target: Logger, ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, FE::Target: FeeEstimator; -impl +impl MonitorUpdatingPersister where K::Target: KVStoreSync, L::Target: Logger, ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, FE::Target: FeeEstimator, { /// Constructs a new [`MonitorUpdatingPersister`]. @@ -697,7 +700,7 @@ impl< L: Deref, ES: Deref, SP: Deref, - BI: Deref, + BI: BroadcasterInterface, FE: Deref, > Persist for MonitorUpdatingPersister where @@ -705,7 +708,6 @@ where L::Target: Logger, ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, FE::Target: FeeEstimator, { /// Persists a new channel. 
This means writing the entire monitor to the @@ -783,7 +785,7 @@ pub struct MonitorUpdatingPersisterAsync< L: Deref, ES: Deref, SP: Deref, - BI: Deref, + BI: BroadcasterInterface, FE: Deref, >(Arc>) where @@ -791,7 +793,6 @@ where L::Target: Logger, ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, FE::Target: FeeEstimator; struct MonitorUpdatingPersisterAsyncInner< @@ -800,14 +801,13 @@ struct MonitorUpdatingPersisterAsyncInner< L: Deref, ES: Deref, SP: Deref, - BI: Deref, + BI: BroadcasterInterface, FE: Deref, > where K::Target: KVStore, L::Target: Logger, ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, FE::Target: FeeEstimator, { kv_store: K, @@ -821,14 +821,20 @@ struct MonitorUpdatingPersisterAsyncInner< fee_estimator: FE, } -impl - MonitorUpdatingPersisterAsync +impl< + K: Deref, + S: FutureSpawner, + L: Deref, + ES: Deref, + SP: Deref, + BI: BroadcasterInterface, + FE: Deref, + > MonitorUpdatingPersisterAsync where K::Target: KVStore, L::Target: Logger, ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, FE::Target: FeeEstimator, { /// Constructs a new [`MonitorUpdatingPersisterAsync`]. @@ -971,7 +977,7 @@ impl< L: Deref + MaybeSend + MaybeSync + 'static, ES: Deref + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, - BI: Deref + MaybeSend + MaybeSync + 'static, + BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, FE: Deref + MaybeSend + MaybeSync + 'static, > MonitorUpdatingPersisterAsync where @@ -979,7 +985,6 @@ where L::Target: Logger, ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, FE::Target: FeeEstimator, ::EcdsaSigner: MaybeSend + 'static, { @@ -1057,14 +1062,20 @@ where trait MaybeSendableFuture: Future> + MaybeSend {} impl> + MaybeSend> MaybeSendableFuture for F {} -impl - MonitorUpdatingPersisterAsyncInner +impl< + K: Deref, + S: FutureSpawner, + L: Deref, + ES: Deref, + SP: Deref, + BI: BroadcasterInterface, + FE: Deref, + > MonitorUpdatingPersisterAsyncInner where K::Target: KVStore, L::Target: Logger, ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, FE::Target: FeeEstimator, { pub async fn read_channel_monitor_with_updates( diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index bf048efdae1..6b3ce10edb2 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -337,9 +337,15 @@ impl_writeable_tlv_based_enum!(OutputSpendStatus, /// /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs // Note that updates to documentation on this struct should be copied to the synchronous version. 
-pub struct OutputSweeper -where - B::Target: BroadcasterInterface, +pub struct OutputSweeper< + B: BroadcasterInterface, + D: Deref, + E: Deref, + F: Deref, + K: Deref, + L: Deref, + O: Deref, +> where D::Target: ChangeDestinationSource, E::Target: FeeEstimator, F::Target: Filter, @@ -358,10 +364,9 @@ where logger: L, } -impl +impl OutputSweeper where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSource, E::Target: FeeEstimator, F::Target: Filter, @@ -710,10 +715,9 @@ where } } -impl Listen +impl Listen for OutputSweeper where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSource, E::Target: FeeEstimator, F::Target: Filter + Sync + Send, @@ -751,10 +755,9 @@ where } } -impl Confirm +impl Confirm for OutputSweeper where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSource, E::Target: FeeEstimator, F::Target: Filter + Sync + Send, @@ -848,10 +851,9 @@ pub enum SpendingDelay { }, } -impl +impl ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeper) where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSource, E::Target: FeeEstimator, F::Target: Filter + Sync + Send, @@ -918,9 +920,15 @@ where /// /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs // Note that updates to documentation on this struct should be copied to the asynchronous version. -pub struct OutputSweeperSync -where - B::Target: BroadcasterInterface, +pub struct OutputSweeperSync< + B: BroadcasterInterface, + D: Deref, + E: Deref, + F: Deref, + K: Deref, + L: Deref, + O: Deref, +> where D::Target: ChangeDestinationSourceSync, E::Target: FeeEstimator, F::Target: Filter, @@ -932,10 +940,9 @@ where OutputSweeper, E, F, KVStoreSyncWrapper, L, O>, } -impl +impl OutputSweeperSync where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSourceSync, E::Target: FeeEstimator, F::Target: Filter, @@ -1052,10 +1059,9 @@ where } } -impl Listen +impl Listen for OutputSweeperSync where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSourceSync, E::Target: FeeEstimator, F::Target: Filter + Sync + Send, @@ -1074,10 +1080,9 @@ where } } -impl Confirm +impl Confirm for OutputSweeperSync where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSourceSync, E::Target: FeeEstimator, F::Target: Filter + Sync + Send, @@ -1104,10 +1109,9 @@ where } } -impl +impl ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeperSync) where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSourceSync, E::Target: FeeEstimator, F::Target: Filter + Sync + Send, From 32e6e100d4575f772c9e8716fdf182ccf24f076d Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 14 Jan 2026 13:48:47 -0500 Subject: [PATCH 140/242] Drop Deref indirection for EntropySource Reduces generics and verbosity across the codebase, and should provide equivalent behavior.
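Same transformation, now for `EntropySource`. The diffstat that follows shows six lines added to `lightning/src/sign/mod.rs`, which is consistent with the same forwarding blanket impl as the `BroadcasterInterface` patch; the sketch below assumes that shape (the `get_secure_random_bytes` signature matches LDK's published trait):

```rust
use std::ops::Deref;

pub trait EntropySource {
    /// Returns 32 bytes of cryptographically secure randomness.
    fn get_secure_random_bytes(&self) -> [u8; 32];
}

// Assumed shape of the addition to lightning/src/sign/mod.rs: forward the
// trait through any Deref-able wrapper so `Arc<ES>`, `&ES`, etc. still
// satisfy the new direct `ES: EntropySource` bounds.
impl<ES: Deref<Target: EntropySource>> EntropySource for ES {
    fn get_secure_random_bytes(&self) -> [u8; 32] {
        self.deref().get_secure_random_bytes()
    }
}

fn main() {}
```

With that in place, every `ES: Deref` plus `ES::Target: EntropySource` pair in the hunks below collapses to `ES: EntropySource`, mirroring the `BroadcasterInterface` change.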
Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 18 ++-- lightning-liquidity/src/lsps0/client.rs | 9 +- lightning-liquidity/src/lsps1/client.rs | 9 +- lightning-liquidity/src/lsps1/service.rs | 11 +-- lightning-liquidity/src/lsps2/client.rs | 9 +- lightning-liquidity/src/lsps5/client.rs | 9 +- lightning-liquidity/src/manager.rs | 70 ++++++--------- lightning-liquidity/src/utils/mod.rs | 7 +- lightning-net-tokio/src/lib.rs | 10 +-- lightning/src/blinded_path/message.rs | 24 ++---- lightning/src/blinded_path/payment.rs | 31 +++---- lightning/src/chain/chainmonitor.rs | 36 +++----- lightning/src/crypto/utils.rs | 9 +- lightning/src/ln/chan_utils.rs | 11 +-- lightning/src/ln/channel.rs | 40 ++++----- lightning/src/ln/channelmanager.rs | 86 +++++++------------ lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/ln/inbound_payment.rs | 7 +- lightning/src/ln/interactivetxs.rs | 33 +++---- lightning/src/ln/invoice_utils.rs | 9 +- lightning/src/ln/outbound_payment.rs | 63 ++++++-------- lightning/src/ln/types.rs | 6 +- lightning/src/offers/flow.rs | 64 +++++--------- lightning/src/offers/nonce.rs | 6 +- lightning/src/offers/refund.rs | 15 +--- lightning/src/onion_message/messenger.rs | 53 ++++-------- lightning/src/routing/router.rs | 13 ++- lightning/src/sign/mod.rs | 6 ++ lightning/src/util/anchor_channel_reserves.rs | 13 +-- lightning/src/util/persist.rs | 31 +++---- lightning/src/util/scid_utils.rs | 9 +- 31 files changed, 254 insertions(+), 467 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 0cefcca7d24..c8898b0690d 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -420,8 +420,7 @@ type DynChannelManager = lightning::ln::channelmanager::ChannelManager< pub const NO_ONION_MESSENGER: Option< Arc< dyn AOnionMessenger< - EntropySource = dyn EntropySource + Send + Sync, - ES = &(dyn EntropySource + Send + Sync), + EntropySource = &(dyn EntropySource + Send + Sync), NodeSigner = dyn lightning::sign::NodeSigner + Send + Sync, NS = &(dyn lightning::sign::NodeSigner + Send + Sync), Logger = dyn Logger + Send + Sync, @@ -480,8 +479,7 @@ impl KVStore for DummyKVStore { pub const NO_LIQUIDITY_MANAGER: Option< Arc< dyn ALiquidityManager< - EntropySource = dyn EntropySource + Send + Sync, - ES = &(dyn EntropySource + Send + Sync), + EntropySource = &(dyn EntropySource + Send + Sync), NodeSigner = dyn lightning::sign::NodeSigner + Send + Sync, NS = &(dyn lightning::sign::NodeSigner + Send + Sync), AChannelManager = DynChannelManager, @@ -506,8 +504,7 @@ pub const NO_LIQUIDITY_MANAGER: Option< pub const NO_LIQUIDITY_MANAGER_SYNC: Option< Arc< dyn ALiquidityManagerSync< - EntropySource = dyn EntropySource + Send + Sync, - ES = &(dyn EntropySource + Send + Sync), + EntropySource = &(dyn EntropySource + Send + Sync), NodeSigner = dyn lightning::sign::NodeSigner + Send + Sync, NS = &(dyn lightning::sign::NodeSigner + Send + Sync), AChannelManager = DynChannelManager, @@ -961,7 +958,7 @@ pub async fn process_events_async< P: Deref, EventHandlerFuture: core::future::Future>, EventHandler: Fn(Event) -> EventHandlerFuture, - ES: Deref, + ES: EntropySource, M: Deref::Signer, CF, T, F, L, P, ES>>, CM: Deref, OM: Deref, @@ -990,7 +987,6 @@ where F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist<::Signer>, - ES::Target: EntropySource, CM::Target: AChannelManager, OM::Target: AOnionMessenger, PM::Target: APeerManager, @@ -1461,7 +1457,7 @@ pub 
async fn process_events_async_with_kv_store_sync< P: Deref, EventHandlerFuture: core::future::Future>, EventHandler: Fn(Event) -> EventHandlerFuture, - ES: Deref, + ES: EntropySource, M: Deref::Signer, CF, T, F, L, P, ES>>, CM: Deref, OM: Deref, @@ -1490,7 +1486,6 @@ where F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist<::Signer>, - ES::Target: EntropySource, CM::Target: AChannelManager, OM::Target: AOnionMessenger, PM::Target: APeerManager, @@ -1573,7 +1568,7 @@ impl BackgroundProcessor { L: 'static + Deref + Send, P: 'static + Deref, EH: 'static + EventHandler + Send, - ES: 'static + Deref + Send, + ES: 'static + EntropySource + Send, M: 'static + Deref< Target = ChainMonitor<::Signer, CF, T, F, L, P, ES>, @@ -1603,7 +1598,6 @@ impl BackgroundProcessor { F::Target: 'static + FeeEstimator, L::Target: 'static + Logger, P::Target: 'static + Persist<::Signer>, - ES::Target: 'static + EntropySource, CM::Target: AChannelManager, OM::Target: AOnionMessenger, PM::Target: APeerManager, diff --git a/lightning-liquidity/src/lsps0/client.rs b/lightning-liquidity/src/lsps0/client.rs index d300936e2b2..776e9d3c9a5 100644 --- a/lightning-liquidity/src/lsps0/client.rs +++ b/lightning-liquidity/src/lsps0/client.rs @@ -25,9 +25,8 @@ use bitcoin::secp256k1::PublicKey; use core::ops::Deref; /// A message handler capable of sending and handling bLIP-50 / LSPS0 messages. -pub struct LSPS0ClientHandler +pub struct LSPS0ClientHandler where - ES::Target: EntropySource, K::Target: KVStore, { entropy_source: ES, @@ -35,9 +34,8 @@ where pending_events: Arc>, } -impl LSPS0ClientHandler +impl LSPS0ClientHandler where - ES::Target: EntropySource, K::Target: KVStore, { /// Returns a new instance of [`LSPS0ClientHandler`]. @@ -89,9 +87,8 @@ where } } -impl LSPSProtocolMessageHandler for LSPS0ClientHandler +impl LSPSProtocolMessageHandler for LSPS0ClientHandler where - ES::Target: EntropySource, K::Target: KVStore, { type ProtocolMessage = LSPS0Message; diff --git a/lightning-liquidity/src/lsps1/client.rs b/lightning-liquidity/src/lsps1/client.rs index 4a79fb64887..1e5b2e3bef4 100644 --- a/lightning-liquidity/src/lsps1/client.rs +++ b/lightning-liquidity/src/lsps1/client.rs @@ -47,9 +47,8 @@ struct PeerState { } /// The main object allowing to send and receive bLIP-51 / LSPS1 messages. -pub struct LSPS1ClientHandler +pub struct LSPS1ClientHandler where - ES::Target: EntropySource, K::Target: KVStore, { entropy_source: ES, @@ -59,9 +58,8 @@ where config: LSPS1ClientConfig, } -impl LSPS1ClientHandler +impl LSPS1ClientHandler where - ES::Target: EntropySource, K::Target: KVStore, { /// Constructs an `LSPS1ClientHandler`. @@ -432,9 +430,8 @@ where } } -impl LSPSProtocolMessageHandler for LSPS1ClientHandler +impl LSPSProtocolMessageHandler for LSPS1ClientHandler where - ES::Target: EntropySource, K::Target: KVStore, { type ProtocolMessage = LSPS1Message; diff --git a/lightning-liquidity/src/lsps1/service.rs b/lightning-liquidity/src/lsps1/service.rs index 8afea1b4345..76a9a437b0b 100644 --- a/lightning-liquidity/src/lsps1/service.rs +++ b/lightning-liquidity/src/lsps1/service.rs @@ -132,9 +132,8 @@ impl PeerState { } /// The main object allowing to send and receive bLIP-51 / LSPS1 messages. 
-pub struct LSPS1ServiceHandler +pub struct LSPS1ServiceHandler where - ES::Target: EntropySource, CM::Target: AChannelManager, C::Target: Filter, K::Target: KVStore, @@ -148,12 +147,11 @@ where config: LSPS1ServiceConfig, } -impl LSPS1ServiceHandler +impl + LSPS1ServiceHandler where - ES::Target: EntropySource, CM::Target: AChannelManager, C::Target: Filter, - ES::Target: EntropySource, K::Target: KVStore, { /// Constructs a `LSPS1ServiceHandler`. @@ -421,10 +419,9 @@ where } } -impl LSPSProtocolMessageHandler +impl LSPSProtocolMessageHandler for LSPS1ServiceHandler where - ES::Target: EntropySource, CM::Target: AChannelManager, C::Target: Filter, K::Target: KVStore, diff --git a/lightning-liquidity/src/lsps2/client.rs b/lightning-liquidity/src/lsps2/client.rs index 83aa7e3e99f..2e9fca2d444 100644 --- a/lightning-liquidity/src/lsps2/client.rs +++ b/lightning-liquidity/src/lsps2/client.rs @@ -68,9 +68,8 @@ impl PeerState { /// opened. Please refer to the [`bLIP-52 / LSPS2 specification`] for more information. /// /// [`bLIP-52 / LSPS2 specification`]: https://github.com/lightning/blips/blob/master/blip-0052.md#trust-models -pub struct LSPS2ClientHandler +pub struct LSPS2ClientHandler where - ES::Target: EntropySource, K::Target: KVStore, { entropy_source: ES, @@ -80,9 +79,8 @@ where config: LSPS2ClientConfig, } -impl LSPS2ClientHandler +impl LSPS2ClientHandler where - ES::Target: EntropySource, K::Target: KVStore, { /// Constructs an `LSPS2ClientHandler`. @@ -375,9 +373,8 @@ where } } -impl LSPSProtocolMessageHandler for LSPS2ClientHandler +impl LSPSProtocolMessageHandler for LSPS2ClientHandler where - ES::Target: EntropySource, K::Target: KVStore, { type ProtocolMessage = LSPS2Message; diff --git a/lightning-liquidity/src/lsps5/client.rs b/lightning-liquidity/src/lsps5/client.rs index 1c6f8b8a250..df10522077e 100644 --- a/lightning-liquidity/src/lsps5/client.rs +++ b/lightning-liquidity/src/lsps5/client.rs @@ -125,9 +125,8 @@ impl PeerState { /// [`lsps5.list_webhooks`]: super::msgs::LSPS5Request::ListWebhooks /// [`lsps5.remove_webhook`]: super::msgs::LSPS5Request::RemoveWebhook /// [`LSPS5Validator`]: super::validator::LSPS5Validator -pub struct LSPS5ClientHandler +pub struct LSPS5ClientHandler where - ES::Target: EntropySource, K::Target: KVStore, { pending_messages: Arc, @@ -137,9 +136,8 @@ where _config: LSPS5ClientConfig, } -impl LSPS5ClientHandler +impl LSPS5ClientHandler where - ES::Target: EntropySource, K::Target: KVStore, { /// Constructs an `LSPS5ClientHandler`. @@ -426,9 +424,8 @@ where } } -impl LSPSProtocolMessageHandler for LSPS5ClientHandler +impl LSPSProtocolMessageHandler for LSPS5ClientHandler where - ES::Target: EntropySource, K::Target: KVStore, { type ProtocolMessage = LSPS5Message; diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index 84a52e2ab13..14b0fa52246 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -104,9 +104,7 @@ pub struct LiquidityClientConfig { /// languages. pub trait ALiquidityManager { /// A type implementing [`EntropySource`] - type EntropySource: EntropySource + ?Sized; - /// A type that may be dereferenced to [`Self::EntropySource`]. - type ES: Deref + Clone; + type EntropySource: EntropySource + Clone; /// A type implementing [`NodeSigner`] type NodeSigner: NodeSigner + ?Sized; /// A type that may be dereferenced to [`Self::NodeSigner`]. 
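The hunk above is the heart of this change for `ALiquidityManager`: the paired associated items, an unsized `EntropySource` target plus a `Deref`-to-it handle `ES`, collapse into a single associated type bounded by the trait directly. A minimal sketch of the before/after trait shape, using a toy `EntropySource` trait rather than the real LDK items:

    // Toy stand-in for lightning::sign::EntropySource.
    pub trait EntropySource {
        fn get_secure_random_bytes(&self) -> [u8; 32];
    }

    // Before: two associated items, with the trait bound on the Deref target.
    pub trait ALiquidityManagerOld {
        type EntropySource: EntropySource + ?Sized;
        type ES: core::ops::Deref<Target = Self::EntropySource> + Clone;
    }

    // After: one associated type, bounded (and Clone) directly, matching the
    // hunk above.
    pub trait ALiquidityManagerNew {
        type EntropySource: EntropySource + Clone;
    }

Implementors that previously wrote `type EntropySource = ES::Target; type ES = ES;` now write the single `type EntropySource = ES;`, exactly as the `ALiquidityManager for LiquidityManager` impl hunk below does.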
@@ -133,7 +131,7 @@ pub trait ALiquidityManager { fn get_lm( &self, ) -> &LiquidityManager< - Self::ES, + Self::EntropySource, Self::NS, Self::CM, Self::C, @@ -144,7 +142,7 @@ pub trait ALiquidityManager { } impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -153,15 +151,13 @@ impl< T: BroadcasterInterface + Clone, > ALiquidityManager for LiquidityManager where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, K::Target: KVStore, TP::Target: TimeProvider, { - type EntropySource = ES::Target; - type ES = ES; + type EntropySource = ES; type NodeSigner = NS::Target; type NS = NS; type AChannelManager = CM::Target; @@ -184,9 +180,7 @@ where /// languages. pub trait ALiquidityManagerSync { /// A type implementing [`EntropySource`] - type EntropySource: EntropySource + ?Sized; - /// A type that may be dereferenced to [`Self::EntropySource`]. - type ES: Deref + Clone; + type EntropySource: EntropySource + Clone; /// A type implementing [`NodeSigner`] type NodeSigner: NodeSigner + ?Sized; /// A type that may be dereferenced to [`Self::NodeSigner`]. @@ -214,7 +208,7 @@ pub trait ALiquidityManagerSync { fn get_lm_async( &self, ) -> &LiquidityManager< - Self::ES, + Self::EntropySource, Self::NS, Self::CM, Self::C, @@ -226,7 +220,7 @@ pub trait ALiquidityManagerSync { fn get_lm( &self, ) -> &LiquidityManagerSync< - Self::ES, + Self::EntropySource, Self::NS, Self::CM, Self::C, @@ -237,7 +231,7 @@ pub trait ALiquidityManagerSync { } impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -246,15 +240,13 @@ impl< T: BroadcasterInterface + Clone, > ALiquidityManagerSync for LiquidityManagerSync where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, { - type EntropySource = ES::Target; - type ES = ES; + type EntropySource = ES; type NodeSigner = NS::Target; type NS = NS; type AChannelManager = CM::Target; @@ -271,7 +263,7 @@ where fn get_lm_async( &self, ) -> &LiquidityManager< - Self::ES, + Self::EntropySource, Self::NS, Self::CM, Self::C, @@ -306,7 +298,7 @@ where /// [`Event::HTLCHandlingFailed`]: lightning::events::Event::HTLCHandlingFailed /// [`Event::PaymentForwarded`]: lightning::events::Event::PaymentForwarded pub struct LiquidityManager< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -314,7 +306,6 @@ pub struct LiquidityManager< TP: Deref + Clone, T: BroadcasterInterface + Clone, > where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, @@ -344,7 +335,7 @@ pub struct LiquidityManager< #[cfg(feature = "time")] impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -352,7 +343,6 @@ impl< T: BroadcasterInterface + Clone, > LiquidityManager where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, @@ -384,7 +374,7 @@ where } impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -393,7 +383,6 @@ impl< T: BroadcasterInterface + Clone, > LiquidityManager where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, @@ -810,7 +799,7 @@ where } impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + 
Clone, CM: Deref + Clone, C: Deref + Clone, @@ -819,7 +808,6 @@ impl< T: BroadcasterInterface + Clone, > CustomMessageReader for LiquidityManager where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, @@ -841,7 +829,7 @@ where } impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -850,7 +838,6 @@ impl< T: BroadcasterInterface + Clone, > CustomMessageHandler for LiquidityManager where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, @@ -974,7 +961,7 @@ where } impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -983,7 +970,6 @@ impl< T: BroadcasterInterface + Clone, > Listen for LiquidityManager where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, @@ -1019,7 +1005,7 @@ where } impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -1028,7 +1014,6 @@ impl< T: BroadcasterInterface + Clone, > Confirm for LiquidityManager where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, @@ -1064,7 +1049,7 @@ where /// A synchroneous wrapper around [`LiquidityManager`] to be used in contexts where async is not /// available. pub struct LiquidityManagerSync< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -1072,7 +1057,6 @@ pub struct LiquidityManagerSync< TP: Deref + Clone, T: BroadcasterInterface + Clone, > where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, @@ -1084,7 +1068,7 @@ pub struct LiquidityManagerSync< #[cfg(feature = "time")] impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -1092,7 +1076,6 @@ impl< T: BroadcasterInterface + Clone, > LiquidityManagerSync where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, KS::Target: KVStoreSync, @@ -1135,7 +1118,7 @@ where } impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -1144,7 +1127,6 @@ impl< T: BroadcasterInterface + Clone, > LiquidityManagerSync where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, @@ -1304,7 +1286,7 @@ where } impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -1313,7 +1295,6 @@ impl< T: BroadcasterInterface + Clone, > CustomMessageReader for LiquidityManagerSync where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, @@ -1330,7 +1311,7 @@ where } impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -1339,7 +1320,6 @@ impl< T: BroadcasterInterface + Clone, > CustomMessageHandler for LiquidityManagerSync where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, @@ -1376,7 +1356,7 @@ where } impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -1385,7 +1365,6 @@ impl< T: BroadcasterInterface + Clone, > Listen for LiquidityManagerSync where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: 
Filter, @@ -1405,7 +1384,7 @@ where } impl< - ES: Deref + Clone, + ES: EntropySource + Clone, NS: Deref + Clone, CM: Deref + Clone, C: Deref + Clone, @@ -1414,7 +1393,6 @@ impl< T: BroadcasterInterface + Clone, > Confirm for LiquidityManagerSync where - ES::Target: EntropySource, NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, diff --git a/lightning-liquidity/src/utils/mod.rs b/lightning-liquidity/src/utils/mod.rs index b66d3eb7ead..32b50443350 100644 --- a/lightning-liquidity/src/utils/mod.rs +++ b/lightning-liquidity/src/utils/mod.rs @@ -1,7 +1,7 @@ //! Utilities for LSPS5 service. use alloc::string::String; -use core::{fmt::Write, ops::Deref}; +use core::fmt::Write; use lightning::sign::EntropySource; @@ -23,10 +23,7 @@ pub fn scid_from_human_readable_string(human_readable_scid: &str) -> Result(entropy_source: &ES) -> LSPSRequestId -where - ES::Target: EntropySource, -{ +pub(crate) fn generate_request_id(entropy_source: &ES) -> LSPSRequestId { let bytes = entropy_source.get_secure_random_bytes(); LSPSRequestId(hex_str(&bytes[0..16])) } diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 27d309f2c18..eec0e424eaa 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -480,13 +480,12 @@ where /// /// Returns a future (as the fn is async) that yields another future, see [`connect_outbound`] for /// details on this return value. -pub async fn tor_connect_outbound( +pub async fn tor_connect_outbound( peer_manager: PM, their_node_id: PublicKey, addr: SocketAddress, tor_proxy_addr: SocketAddr, entropy_source: ES, ) -> Option> where PM::Target: APeerManager, - ES::Target: EntropySource, { let connect_fut = async { tor_connect(addr, tor_proxy_addr, entropy_source).await.map(|s| s.into_std().unwrap()) @@ -500,12 +499,9 @@ where } } -async fn tor_connect( +async fn tor_connect( addr: SocketAddress, tor_proxy_addr: SocketAddr, entropy_source: ES, -) -> Result -where - ES::Target: EntropySource, -{ +) -> Result { use std::io::Write; use tokio::io::AsyncReadExt; diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs index 84a42ff1be2..c914458ccbc 100644 --- a/lightning/src/blinded_path/message.rs +++ b/lightning/src/blinded_path/message.rs @@ -58,14 +58,11 @@ impl BlindedMessagePath { /// `compact_padding` selects between space-inefficient padding which better hides contents and /// a space-constrained padding which does very little to hide the contents, especially for the /// last hop. It should only be set when the blinded path needs to be as compact as possible. - pub fn one_hop( + pub fn one_hop( recipient_node_id: PublicKey, local_node_receive_key: ReceiveAuthKey, context: MessageContext, compact_padding: bool, entropy_source: ES, secp_ctx: &Secp256k1, - ) -> Self - where - ES::Target: EntropySource, - { + ) -> Self { Self::new( &[], recipient_node_id, @@ -82,14 +79,11 @@ impl BlindedMessagePath { /// `compact_padding` selects between space-inefficient padding which better hides contents and /// a space-constrained padding which does very little to hide the contents, especially for the /// last hop. It should only be set when the blinded path needs to be as compact as possible. 
- pub fn new( + pub fn new( intermediate_nodes: &[MessageForwardNode], recipient_node_id: PublicKey, local_node_receive_key: ReceiveAuthKey, context: MessageContext, compact_padding: bool, entropy_source: ES, secp_ctx: &Secp256k1, - ) -> Self - where - ES::Target: EntropySource, - { + ) -> Self { BlindedMessagePath::new_with_dummy_hops( intermediate_nodes, recipient_node_id, @@ -109,14 +103,14 @@ impl BlindedMessagePath { /// last hop. It should only be set when the blinded path needs to be as compact as possible. /// /// Note: At most [`MAX_DUMMY_HOPS_COUNT`] dummy hops can be added to the blinded path. - pub fn new_with_dummy_hops( + pub fn new_with_dummy_hops< + ES: EntropySource, + T: secp256k1::Signing + secp256k1::Verification, + >( intermediate_nodes: &[MessageForwardNode], recipient_node_id: PublicKey, dummy_hop_count: usize, local_node_receive_key: ReceiveAuthKey, context: MessageContext, compact_padding: bool, entropy_source: ES, secp_ctx: &Secp256k1, - ) -> Self - where - ES::Target: EntropySource, - { + ) -> Self { let introduction_node = IntroductionNode::NodeId( intermediate_nodes.first().map_or(recipient_node_id, |n| n.node_id), ); diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index b68be811cb4..e195f5a54ab 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -87,13 +87,10 @@ pub struct BlindedPaymentPath { impl BlindedPaymentPath { /// Create a one-hop blinded path for a payment. - pub fn one_hop( + pub fn one_hop( payee_node_id: PublicKey, local_node_receive_key: ReceiveAuthKey, payee_tlvs: ReceiveTlvs, min_final_cltv_expiry_delta: u16, entropy_source: ES, secp_ctx: &Secp256k1, - ) -> Result - where - ES::Target: EntropySource, - { + ) -> Result { // This value is not considered in pathfinding for 1-hop blinded paths, because it's intended to // be in relation to a specific channel. let htlc_maximum_msat = u64::max_value(); @@ -115,14 +112,11 @@ impl BlindedPaymentPath { /// * [`BlindedPayInfo`] calculation results in an integer overflow /// * any unknown features are required in the provided [`ForwardTlvs`] // TODO: make all payloads the same size with padding + add dummy hops - pub fn new( + pub fn new( intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, local_node_receive_key: ReceiveAuthKey, payee_tlvs: ReceiveTlvs, htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, secp_ctx: &Secp256k1, - ) -> Result - where - ES::Target: EntropySource, - { + ) -> Result { BlindedPaymentPath::new_inner( intermediate_nodes, payee_node_id, @@ -147,15 +141,15 @@ impl BlindedPaymentPath { /// /// TODO: Add end-to-end tests validating fee aggregation, CLTV deltas, and /// HTLC bounds when dummy hops are present, before exposing this API publicly. 
- pub(crate) fn new_with_dummy_hops( + pub(crate) fn new_with_dummy_hops< + ES: EntropySource, + T: secp256k1::Signing + secp256k1::Verification, + >( intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, dummy_tlvs: &[DummyTlvs], local_node_receive_key: ReceiveAuthKey, payee_tlvs: ReceiveTlvs, htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, secp_ctx: &Secp256k1, - ) -> Result - where - ES::Target: EntropySource, - { + ) -> Result { BlindedPaymentPath::new_inner( intermediate_nodes, payee_node_id, @@ -169,15 +163,12 @@ impl BlindedPaymentPath { ) } - fn new_inner( + fn new_inner( intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, local_node_receive_key: ReceiveAuthKey, dummy_tlvs: &[DummyTlvs], payee_tlvs: ReceiveTlvs, htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, secp_ctx: &Secp256k1, - ) -> Result - where - ES::Target: EntropySource, - { + ) -> Result { let introduction_node = IntroductionNode::NodeId( intermediate_nodes.first().map_or(payee_node_id, |n| n.node_id), ); diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 678c7b6ef5b..e4a9ca99290 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -259,14 +259,13 @@ pub struct AsyncPersister< K: Deref + MaybeSend + MaybeSync + 'static, S: FutureSpawner, L: Deref + MaybeSend + MaybeSync + 'static, - ES: Deref + MaybeSend + MaybeSync + 'static, + ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, FE: Deref + MaybeSend + MaybeSync + 'static, > where K::Target: KVStore + MaybeSync, L::Target: Logger, - ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, FE::Target: FeeEstimator, { @@ -278,7 +277,7 @@ impl< K: Deref + MaybeSend + MaybeSync + 'static, S: FutureSpawner, L: Deref + MaybeSend + MaybeSync + 'static, - ES: Deref + MaybeSend + MaybeSync + 'static, + ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, FE: Deref + MaybeSend + MaybeSync + 'static, @@ -286,7 +285,6 @@ impl< where K::Target: KVStore + MaybeSync, L::Target: Logger, - ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, FE::Target: FeeEstimator, { @@ -300,7 +298,7 @@ impl< K: Deref + MaybeSend + MaybeSync + 'static, S: FutureSpawner, L: Deref + MaybeSend + MaybeSync + 'static, - ES: Deref + MaybeSend + MaybeSync + 'static, + ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, FE: Deref + MaybeSend + MaybeSync + 'static, @@ -308,7 +306,6 @@ impl< where K::Target: KVStore + MaybeSync, L::Target: Logger, - ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, FE::Target: FeeEstimator, ::EcdsaSigner: MaybeSend + 'static, @@ -363,13 +360,12 @@ pub struct ChainMonitor< F: Deref, L: Deref, P: Deref, - ES: Deref, + ES: EntropySource, > where C::Target: chain::Filter, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { monitors: RwLock>>, chain_source: Option, @@ -403,7 +399,7 @@ impl< T: BroadcasterInterface + MaybeSend + MaybeSync + 'static, F: Deref + MaybeSend + MaybeSync + 'static, L: Deref + MaybeSend + MaybeSync + 'static, - ES: Deref + MaybeSend + MaybeSync + 'static, + ES: EntropySource + 
MaybeSend + MaybeSync + 'static, > ChainMonitor< ::EcdsaSigner, @@ -419,7 +415,6 @@ impl< C::Target: chain::Filter, F::Target: FeeEstimator, L::Target: Logger, - ES::Target: EntropySource + Sized, ::EcdsaSigner: MaybeSend + 'static, { /// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels. @@ -461,14 +456,13 @@ impl< F: Deref, L: Deref, P: Deref, - ES: Deref, + ES: EntropySource, > ChainMonitor where C::Target: chain::Filter, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { /// Dispatches to per-channel monitors, which are responsible for updating their on-chain view /// of a channel and reacting accordingly based on transactions in the given chain data. See @@ -1107,14 +1101,13 @@ impl< F: Deref, L: Deref, P: Deref, - ES: Deref, + ES: EntropySource, > BaseMessageHandler for ChainMonitor where C::Target: chain::Filter, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { fn get_and_clear_pending_msg_events(&self) -> Vec { let mut pending_events = self.pending_send_only_events.lock().unwrap(); @@ -1145,14 +1138,13 @@ impl< F: Deref, L: Deref, P: Deref, - ES: Deref, + ES: EntropySource, > SendOnlyMessageHandler for ChainMonitor where C::Target: chain::Filter, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { } @@ -1163,14 +1155,13 @@ impl< F: Deref, L: Deref, P: Deref, - ES: Deref, + ES: EntropySource, > chain::Listen for ChainMonitor where C::Target: chain::Filter, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { log_debug!( @@ -1226,14 +1217,13 @@ impl< F: Deref, L: Deref, P: Deref, - ES: Deref, + ES: EntropySource, > chain::Confirm for ChainMonitor where C::Target: chain::Filter, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { log_debug!( @@ -1320,14 +1310,13 @@ impl< F: Deref, L: Deref, P: Deref, - ES: Deref, + ES: EntropySource, > chain::Watch for ChainMonitor where C::Target: chain::Filter, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { fn watch_channel( &self, channel_id: ChannelId, monitor: ChannelMonitor, @@ -1515,14 +1504,13 @@ impl< F: Deref, L: Deref, P: Deref, - ES: Deref, + ES: EntropySource, > events::EventsProvider for ChainMonitor where C::Target: chain::Filter, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity. /// diff --git a/lightning/src/crypto/utils.rs b/lightning/src/crypto/utils.rs index b59cc6002d9..1570b3a0b2f 100644 --- a/lightning/src/crypto/utils.rs +++ b/lightning/src/crypto/utils.rs @@ -5,8 +5,6 @@ use bitcoin::secp256k1::{ecdsa::Signature, Message, Secp256k1, SecretKey, Signin use crate::sign::EntropySource; -use core::ops::Deref; - macro_rules! 
hkdf_extract_expand { ($salt: expr, $ikm: expr) => {{ let mut hmac = HmacEngine::::new($salt); @@ -72,12 +70,9 @@ pub fn sign(ctx: &Secp256k1, msg: &Message, sk: &SecretKey) -> Si #[inline] #[allow(unused_variables)] -pub fn sign_with_aux_rand( +pub fn sign_with_aux_rand( ctx: &Secp256k1, msg: &Message, sk: &SecretKey, entropy_source: &ES, -) -> Signature -where - ES::Target: EntropySource, -{ +) -> Signature { #[cfg(feature = "grind_signatures")] let sig = loop { let sig = ctx.sign_ecdsa_with_noncedata(msg, sk, &entropy_source.get_secure_random_bytes()); diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index 431fdd2859c..46afa05b2f2 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -1452,13 +1452,10 @@ impl BuiltCommitmentTransaction { } /// Signs the holder commitment transaction because we are about to broadcast it. - pub fn sign_holder_commitment( + pub fn sign_holder_commitment( &self, funding_key: &SecretKey, funding_redeemscript: &Script, channel_value_satoshis: u64, entropy_source: &ES, secp_ctx: &Secp256k1, - ) -> Signature - where - ES::Target: EntropySource, - { + ) -> Signature { let sighash = self.get_sighash_all(funding_redeemscript, channel_value_satoshis); sign_with_aux_rand(secp_ctx, &sighash, funding_key, entropy_source) } @@ -2139,10 +2136,10 @@ impl<'a> TrustedCommitmentTransaction<'a> { /// /// This function is only valid in the holder commitment context, it always uses EcdsaSighashType::All. #[rustfmt::skip] - pub fn get_htlc_sigs( + pub fn get_htlc_sigs( &self, htlc_base_key: &SecretKey, channel_parameters: &DirectedChannelTransactionParameters, entropy_source: &ES, secp_ctx: &Secp256k1, - ) -> Result, ()> where ES::Target: EntropySource { + ) -> Result, ()> { let inner = self.inner; let keys = &inner.keys; let txid = inner.built.txid; diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 65a627f6282..38502c995a8 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -3567,7 +3567,7 @@ where SP::Target: SignerProvider, { #[rustfmt::skip] - fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>( + fn new_for_inbound_channel<'a, ES: EntropySource, F: Deref, L: Deref>( fee_estimator: &'a LowerBoundedFeeEstimator, entropy_source: &'a ES, signer_provider: &'a SP, @@ -3587,7 +3587,6 @@ where open_channel_fields: msgs::CommonOpenChannelFields, ) -> Result<(FundingScope, ChannelContext), ChannelError> where - ES::Target: EntropySource, F::Target: FeeEstimator, L::Target: Logger, SP::Target: SignerProvider, @@ -3912,7 +3911,7 @@ where } #[rustfmt::skip] - fn new_for_outbound_channel<'a, ES: Deref, F: Deref, L: Deref>( + fn new_for_outbound_channel<'a, ES: EntropySource, F: Deref, L: Deref>( fee_estimator: &'a LowerBoundedFeeEstimator, entropy_source: &'a ES, signer_provider: &'a SP, @@ -3931,7 +3930,6 @@ where _logger: L, ) -> Result<(FundingScope, ChannelContext), APIError> where - ES::Target: EntropySource, F::Target: FeeEstimator, SP::Target: SignerProvider, L::Target: Logger, @@ -6846,13 +6844,12 @@ pub(super) struct FundingNegotiationContext { impl FundingNegotiationContext { /// Prepare and start interactive transaction negotiation. /// If error occurs, it is caused by our side, not the counterparty. 
- fn into_interactive_tx_constructor( + fn into_interactive_tx_constructor( mut self, context: &ChannelContext, funding: &FundingScope, signer_provider: &SP, entropy_source: &ES, holder_node_id: PublicKey, ) -> Result where SP::Target: SignerProvider, - ES::Target: EntropySource, { debug_assert_eq!( self.shared_funding_input.is_some(), @@ -12521,12 +12518,11 @@ where Ok(()) } - pub(crate) fn splice_init( + pub(crate) fn splice_init( &mut self, msg: &msgs::SpliceInit, our_funding_contribution_satoshis: i64, signer_provider: &SP, entropy_source: &ES, holder_node_id: &PublicKey, logger: &L, ) -> Result where - ES::Target: EntropySource, L::Target: Logger, { let our_funding_contribution = SignedAmount::from_sat(our_funding_contribution_satoshis); @@ -12592,12 +12588,11 @@ where }) } - pub(crate) fn splice_ack( + pub(crate) fn splice_ack( &mut self, msg: &msgs::SpliceAck, signer_provider: &SP, entropy_source: &ES, holder_node_id: &PublicKey, logger: &L, ) -> Result, ChannelError> where - ES::Target: EntropySource, L::Target: Logger, { let splice_funding = self.validate_splice_ack(msg)?; @@ -13704,13 +13699,12 @@ where #[allow(dead_code)] // TODO(dual_funding): Remove once opending V2 channels is enabled. #[rustfmt::skip] - pub fn new( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64, temporary_channel_id: Option, logger: L ) -> Result, APIError> - where ES::Target: EntropySource, - F::Target: FeeEstimator, + where F::Target: FeeEstimator, L::Target: Logger, { let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); @@ -14096,14 +14090,13 @@ where /// Creates a new channel from a remote sides' request for one. /// Assumes chain_hash has already been checked and corresponds with what we expect! #[rustfmt::skip] - pub fn new( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig, current_chain_height: u32, logger: &L, is_0conf: bool, ) -> Result, ChannelError> - where ES::Target: EntropySource, - F::Target: FeeEstimator, + where F::Target: FeeEstimator, L::Target: Logger, { let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id), None); @@ -14370,15 +14363,14 @@ where { #[allow(dead_code)] // TODO(dual_funding): Remove once creating V2 channels is enabled. 
#[rustfmt::skip] - pub fn new_outbound( + pub fn new_outbound( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64, funding_inputs: Vec, user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64, funding_confirmation_target: ConfirmationTarget, logger: L, ) -> Result - where ES::Target: EntropySource, - F::Target: FeeEstimator, + where F::Target: FeeEstimator, L::Target: Logger, { let channel_keys_id = signer_provider.generate_channel_keys_id(false, user_id); @@ -14519,14 +14511,13 @@ where /// TODO(dual_funding): Allow contributions, pass intended amount and inputs #[allow(dead_code)] // TODO(dual_funding): Remove once V2 channels is enabled. #[rustfmt::skip] - pub fn new_inbound( + pub fn new_inbound( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, holder_node_id: PublicKey, counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, their_features: &InitFeatures, msg: &msgs::OpenChannelV2, user_id: u128, config: &UserConfig, current_chain_height: u32, logger: &L, ) -> Result - where ES::Target: EntropySource, - F::Target: FeeEstimator, + where F::Target: FeeEstimator, L::Target: Logger, { // TODO(dual_funding): Take these as input once supported @@ -15277,10 +15268,9 @@ where } } -impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, &'c ChannelTypeFeatures)> - for FundedChannel +impl<'a, 'b, 'c, ES: EntropySource, SP: Deref> + ReadableArgs<(&'a ES, &'b SP, &'c ChannelTypeFeatures)> for FundedChannel where - ES::Target: EntropySource, SP::Target: SignerProvider, { fn read( diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 7191e847acd..bcb5b2a1f4b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1791,9 +1791,7 @@ pub trait AChannelManager { /// A type implementing [`BroadcasterInterface`]. type Broadcaster: BroadcasterInterface; /// A type implementing [`EntropySource`]. - type EntropySource: EntropySource + ?Sized; - /// A type that may be dereferenced to [`Self::EntropySource`]. - type ES: Deref; + type EntropySource: EntropySource; /// A type implementing [`NodeSigner`]. type NodeSigner: NodeSigner + ?Sized; /// A type that may be dereferenced to [`Self::NodeSigner`]. 
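`AChannelManager` gets the same consolidation, and the `ChannelManager` hunks that follow show the knock-on effect at call sites: once the stored `entropy_source: ES` is bounded by `ES: EntropySource` itself, the reborrow-through-`Deref` spelling `&*self.entropy_source` simplifies to a plain `&self.entropy_source`. A minimal sketch of why, again with toy types rather than the real LDK ones:

    use core::ops::Deref;

    // Toy trait, as in the earlier sketch.
    pub trait EntropySource {
        fn get_secure_random_bytes(&self) -> [u8; 32];
    }

    fn nonce_from<E: EntropySource + ?Sized>(entropy: &E) -> [u8; 32] {
        entropy.get_secure_random_bytes()
    }

    // Before: the field is a handle; `&*` reborrows it as `&ES::Target`.
    struct ManagerOld<ES: Deref>
    where
        ES::Target: EntropySource,
    {
        entropy_source: ES,
    }
    impl<ES: Deref> ManagerOld<ES>
    where
        ES::Target: EntropySource,
    {
        fn nonce(&self) -> [u8; 32] {
            nonce_from(&*self.entropy_source)
        }
    }

    // After: the field implements the trait directly, so a plain borrow works.
    struct ManagerNew<ES: EntropySource> {
        entropy_source: ES,
    }
    impl<ES: EntropySource> ManagerNew<ES> {
        fn nonce(&self) -> [u8; 32] {
            nonce_from(&self.entropy_source)
        }
    }

This is the mechanical rewrite behind the repeated `- &*self.entropy_source` / `+ &self.entropy_source` (and, in the builder macros, `&*$self.entropy_source` / `&$self.entropy_source`) lines below.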
@@ -1826,7 +1824,7 @@ pub trait AChannelManager { ) -> &ChannelManager< Self::M, Self::Broadcaster, - Self::ES, + Self::EntropySource, Self::NS, Self::SP, Self::F, @@ -1839,7 +1837,7 @@ pub trait AChannelManager { impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -1849,7 +1847,6 @@ impl< > AChannelManager for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -1860,8 +1857,7 @@ where type Watch = M::Target; type M = M; type Broadcaster = T; - type EntropySource = ES::Target; - type ES = ES; + type EntropySource = ES; type NodeSigner = NS::Target; type NS = NS; type Signer = ::EcdsaSigner; @@ -2622,7 +2618,7 @@ where pub struct ChannelManager< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -2631,7 +2627,6 @@ pub struct ChannelManager< L: Deref, > where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -3411,7 +3406,7 @@ fn create_htlc_intercepted_event( impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -3421,7 +3416,6 @@ impl< > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -5627,7 +5621,7 @@ where features, best_block_height, self.duration_since_epoch(), - &*self.entropy_source, + &self.entropy_source, &self.pending_events, ); match outbound_pmts_res { @@ -5761,7 +5755,7 @@ where intercept_id, prev_outbound_scid_alias, htlc_id, - &*self.entropy_source, + &self.entropy_source, ) } @@ -13407,7 +13401,7 @@ macro_rules! create_offer_builder { ($self: ident, $builder: ty) => { /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest pub fn create_offer_builder(&$self) -> Result<$builder, Bolt12SemanticError> { let builder = $self.flow.create_offer_builder( - &*$self.entropy_source, $self.get_peers_for_blinded_path() + &$self.entropy_source, $self.get_peers_for_blinded_path() )?; Ok(builder.into()) @@ -13432,7 +13426,7 @@ macro_rules! create_offer_builder { ($self: ident, $builder: ty) => { ME::Target: MessageRouter, { let builder = $self.flow.create_offer_builder_using_router( - router, &*$self.entropy_source, $self.get_peers_for_blinded_path() + router, &$self.entropy_source, $self.get_peers_for_blinded_path() )?; Ok(builder.into()) @@ -13484,7 +13478,7 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { &$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, retry_strategy: Retry, route_params_config: RouteParametersConfig ) -> Result<$builder, Bolt12SemanticError> { - let entropy = &*$self.entropy_source; + let entropy = &$self.entropy_source; let builder = $self.flow.create_refund_builder( entropy, amount_msats, absolute_expiry, @@ -13528,7 +13522,7 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { where ME::Target: MessageRouter, { - let entropy = &*$self.entropy_source; + let entropy = &$self.entropy_source; let builder = $self.flow.create_refund_builder_using_router( router, entropy, amount_msats, absolute_expiry, @@ -13551,7 +13545,7 @@ macro_rules! 
create_refund_builder { ($self: ident, $builder: ty) => { impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -13561,7 +13555,6 @@ impl< > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -13757,7 +13750,7 @@ where payer_note: Option, payment_id: PaymentId, human_readable_name: Option, create_pending_payment: CPP, ) -> Result<(), Bolt12SemanticError> { - let entropy = &*self.entropy_source; + let entropy = &self.entropy_source; let nonce = Nonce::from_entropy_source(entropy); let builder = self.flow.create_invoice_request_builder( @@ -13825,7 +13818,7 @@ where &self, refund: &Refund, ) -> Result { let secp_ctx = &self.secp_ctx; - let entropy = &*self.entropy_source; + let entropy = &self.entropy_source; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -13890,7 +13883,7 @@ where optional_params: OptionalOfferPaymentParams, dns_resolvers: Vec, ) -> Result<(), ()> { let (onion_message, context) = - self.flow.hrn_resolver.resolve_name(payment_id, name, &*self.entropy_source)?; + self.flow.hrn_resolver.resolve_name(payment_id, name, &self.entropy_source)?; let expiration = StaleExpiration::TimerTicks(1); self.pending_outbound_payments.add_new_awaiting_offer( @@ -14427,7 +14420,7 @@ where impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -14437,7 +14430,6 @@ impl< > BaseMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -14797,7 +14789,7 @@ where impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -14807,7 +14799,6 @@ impl< > EventsProvider for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -14831,7 +14822,7 @@ where impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -14841,7 +14832,6 @@ impl< > chain::Listen for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -14891,7 +14881,7 @@ where impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -14901,7 +14891,6 @@ impl< > chain::Confirm for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -15063,7 +15052,7 @@ pub(super) enum FundingConfirmedMessage { impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -15073,7 +15062,6 @@ impl< > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -15424,7 +15412,7 @@ where impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -15434,7 +15422,6 @@ impl< > ChannelMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -15998,7 
+15985,7 @@ where impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -16008,7 +15995,6 @@ impl< > OffersMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -16215,7 +16201,7 @@ where impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -16225,7 +16211,6 @@ impl< > AsyncPaymentsMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -16259,7 +16244,7 @@ where responder.clone(), self.get_peers_for_blinded_path(), self.list_usable_channels(), - &*self.entropy_source, + &self.entropy_source, &*self.router, ) { Some((msg, ctx)) => (msg, ctx), @@ -16459,7 +16444,7 @@ where impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -16469,7 +16454,6 @@ impl< > DNSResolverMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -16526,7 +16510,7 @@ where impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -16536,7 +16520,6 @@ impl< > NodeIdLookUp for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -17041,7 +17024,7 @@ impl_writeable_tlv_based!(PendingInboundPayment, { impl< M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -17051,7 +17034,6 @@ impl< > Writeable for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -17407,7 +17389,7 @@ pub struct ChannelManagerReadArgs< 'a, M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -17416,7 +17398,6 @@ pub struct ChannelManagerReadArgs< L: Deref + Clone, > where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -17486,7 +17467,7 @@ impl< 'a, M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -17496,7 +17477,6 @@ impl< > ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L> where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -17573,7 +17553,7 @@ impl< 'a, M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -17584,7 +17564,6 @@ impl< for (BlockHash, Arc>) where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, @@ -17605,7 +17584,7 @@ impl< 'a, M: Deref, T: BroadcasterInterface, - ES: Deref, + ES: EntropySource, NS: Deref, SP: Deref, F: Deref, @@ -17616,7 +17595,6 @@ impl< for (BlockHash, ChannelManager) where M::Target: chain::Watch<::EcdsaSigner>, - ES::Target: EntropySource, NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, diff --git 
a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 69c18d96f1b..c68a2c7cf9a 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -734,7 +734,7 @@ pub trait NodeHolder { ) -> &ChannelManager< ::M, ::Broadcaster, - ::ES, + ::EntropySource, ::NS, ::SP, ::F, @@ -751,7 +751,7 @@ impl NodeHolder for &H { ) -> &ChannelManager< ::M, ::Broadcaster, - ::ES, + ::EntropySource, ::NS, ::SP, ::F, diff --git a/lightning/src/ln/inbound_payment.rs b/lightning/src/ln/inbound_payment.rs index 17c2526e78d..03e271d196d 100644 --- a/lightning/src/ln/inbound_payment.rs +++ b/lightning/src/ln/inbound_payment.rs @@ -143,13 +143,10 @@ fn min_final_cltv_expiry_delta_from_metadata(bytes: [u8; METADATA_LEN]) -> u16 { /// /// [phantom node payments]: crate::sign::PhantomKeysManager /// [`NodeSigner::get_expanded_key`]: crate::sign::NodeSigner::get_expanded_key -pub fn create( +pub fn create( keys: &ExpandedKey, min_value_msat: Option, invoice_expiry_delta_secs: u32, entropy_source: &ES, current_time: u64, min_final_cltv_expiry_delta: Option, -) -> Result<(PaymentHash, PaymentSecret), ()> -where - ES::Target: EntropySource, -{ +) -> Result<(PaymentHash, PaymentSecret), ()> { let metadata_bytes = construct_metadata_bytes( min_value_msat, if min_final_cltv_expiry_delta.is_some() { diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index f402ac5efa6..a004f6e9f14 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -39,7 +39,6 @@ use crate::ln::types::ChannelId; use crate::sign::{EntropySource, P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT}; use core::fmt::Display; -use core::ops::Deref; /// The number of received `tx_add_input` messages during a negotiation at which point the /// negotiation MUST be failed. @@ -1989,10 +1988,9 @@ macro_rules! do_state_transition { }}; } -fn generate_holder_serial_id(entropy_source: &ES, is_initiator: bool) -> SerialId -where - ES::Target: EntropySource, -{ +fn generate_holder_serial_id( + entropy_source: &ES, is_initiator: bool, +) -> SerialId { let rand_bytes = entropy_source.get_secure_random_bytes(); let mut serial_id_bytes = [0u8; 8]; serial_id_bytes.copy_from_slice(&rand_bytes[..8]); @@ -2008,10 +2006,7 @@ pub(super) enum HandleTxCompleteValue { NegotiationComplete(Option, OutPoint), } -pub(super) struct InteractiveTxConstructorArgs<'a, ES: Deref> -where - ES::Target: EntropySource, -{ +pub(super) struct InteractiveTxConstructorArgs<'a, ES: EntropySource> { pub entropy_source: &'a ES, pub holder_node_id: PublicKey, pub counterparty_node_id: PublicKey, @@ -2030,10 +2025,9 @@ impl InteractiveTxConstructor { /// /// If the holder is the initiator, they need to send the first message which is a `TxAddInput` /// message. 
- pub fn new(args: InteractiveTxConstructorArgs) -> Result - where - ES::Target: EntropySource, - { + pub fn new( + args: InteractiveTxConstructorArgs, + ) -> Result { let InteractiveTxConstructorArgs { entropy_source, holder_node_id, @@ -2428,7 +2422,6 @@ mod tests { OutPoint, PubkeyHash, ScriptBuf, Sequence, SignedAmount, Transaction, TxIn, TxOut, WPubkeyHash, }; - use core::ops::Deref; use super::{ get_output_weight, ConstructedTransaction, InteractiveTxSigningSession, TxInMetadata, @@ -2498,19 +2491,15 @@ mod tests { do_test_interactive_tx_constructor_internal(session, &&entropy_source); } - fn do_test_interactive_tx_constructor_with_entropy_source( + fn do_test_interactive_tx_constructor_with_entropy_source( session: TestSession, entropy_source: ES, - ) where - ES::Target: EntropySource, - { + ) { do_test_interactive_tx_constructor_internal(session, &entropy_source); } - fn do_test_interactive_tx_constructor_internal( + fn do_test_interactive_tx_constructor_internal( session: TestSession, entropy_source: &ES, - ) where - ES::Target: EntropySource, - { + ) { let channel_id = ChannelId(entropy_source.get_secure_random_bytes()); let funding_tx_locktime = AbsoluteLockTime::from_height(1337).unwrap(); let holder_node_id = PublicKey::from_secret_key( diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index e72ea4518a4..5e4036b9d7e 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -67,14 +67,13 @@ use core::time::Duration; feature = "std", doc = "This can be used in a `no_std` environment, where [`std::time::SystemTime`] is not available and the current time is supplied by the caller." )] -pub fn create_phantom_invoice( +pub fn create_phantom_invoice( amt_msat: Option, payment_hash: Option, description: String, invoice_expiry_delta_secs: u32, phantom_route_hints: Vec, entropy_source: ES, node_signer: NS, logger: L, network: Currency, min_final_cltv_expiry_delta: Option, duration_since_epoch: Duration, ) -> Result> where - ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, { @@ -135,14 +134,13 @@ where feature = "std", doc = "This version can be used in a `no_std` environment, where [`std::time::SystemTime`] is not available and the current time is supplied by the caller." 
)] -pub fn create_phantom_invoice_with_description_hash( +pub fn create_phantom_invoice_with_description_hash( amt_msat: Option, payment_hash: Option, invoice_expiry_delta_secs: u32, description_hash: Sha256, phantom_route_hints: Vec, entropy_source: ES, node_signer: NS, logger: L, network: Currency, min_final_cltv_expiry_delta: Option, duration_since_epoch: Duration, ) -> Result> where - ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, { @@ -163,14 +161,13 @@ where const MAX_CHANNEL_HINTS: usize = 3; -fn _create_phantom_invoice( +fn _create_phantom_invoice( amt_msat: Option, payment_hash: Option, description: Bolt11InvoiceDescription, invoice_expiry_delta_secs: u32, phantom_route_hints: Vec, entropy_source: ES, node_signer: NS, logger: L, network: Currency, min_final_cltv_expiry_delta: Option, duration_since_epoch: Duration, ) -> Result> where - ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, { diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index b255dcd16a3..caf31a70599 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -866,7 +866,7 @@ impl OutboundPayments { impl OutboundPayments { #[rustfmt::skip] - pub(super) fn send_payment( + pub(super) fn send_payment( &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, first_hops: Vec, compute_inflight_htlcs: IH, entropy_source: &ES, @@ -876,7 +876,6 @@ impl OutboundPayments { ) -> Result<(), RetryableSendFailure> where R::Target: Router, - ES::Target: EntropySource, NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -888,7 +887,7 @@ impl OutboundPayments { } #[rustfmt::skip] - pub(super) fn send_spontaneous_payment( + pub(super) fn send_spontaneous_payment( &self, payment_preimage: Option, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, @@ -898,7 +897,6 @@ impl OutboundPayments { ) -> Result where R::Target: Router, - ES::Target: EntropySource, NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -915,7 +913,7 @@ impl OutboundPayments { } #[rustfmt::skip] - pub(super) fn pay_for_bolt11_invoice( + pub(super) fn pay_for_bolt11_invoice( &self, invoice: &Bolt11Invoice, payment_id: PaymentId, amount_msats: Option, route_params_config: RouteParametersConfig, @@ -928,7 +926,6 @@ impl OutboundPayments { ) -> Result<(), Bolt11PaymentError> where R::Target: Router, - ES::Target: EntropySource, NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -964,7 +961,7 @@ impl OutboundPayments { #[rustfmt::skip] pub(super) fn send_payment_for_bolt12_invoice< - R: Deref, ES: Deref, NS: Deref, NL: Deref, IH, SP, L: Deref, + R: Deref, ES: EntropySource, NS: Deref, NL: Deref, IH, SP, L: Deref, >( &self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R, first_hops: Vec, features: Bolt12InvoiceFeatures, inflight_htlcs: IH, @@ -975,7 +972,6 @@ impl OutboundPayments { ) -> Result<(), Bolt12PaymentError> where R::Target: Router, - ES::Target: EntropySource, NS::Target: NodeSigner, NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, @@ -1010,7 +1006,7 @@ impl OutboundPayments { #[rustfmt::skip] fn send_payment_for_bolt12_invoice_internal< 
- R: Deref, ES: Deref, NS: Deref, NL: Deref, IH, SP, L: Deref, + R: Deref, ES: EntropySource, NS: Deref, NL: Deref, IH, SP, L: Deref, >( &self, payment_id: PaymentId, payment_hash: PaymentHash, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, @@ -1023,7 +1019,6 @@ impl OutboundPayments { ) -> Result<(), Bolt12PaymentError> where R::Target: Router, - ES::Target: EntropySource, NS::Target: NodeSigner, NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, @@ -1119,14 +1114,11 @@ impl OutboundPayments { Ok(()) } - pub(super) fn static_invoice_received( + pub(super) fn static_invoice_received( &self, invoice: &StaticInvoice, payment_id: PaymentId, features: Bolt12InvoiceFeatures, best_block_height: u32, duration_since_epoch: Duration, entropy_source: ES, pending_events: &Mutex)>>, - ) -> Result<(), Bolt12PaymentError> - where - ES::Target: EntropySource, - { + ) -> Result<(), Bolt12PaymentError> { macro_rules! abandon_with_entry { ($payment: expr, $reason: expr) => { assert!( @@ -1230,7 +1222,7 @@ impl OutboundPayments { pub(super) fn send_payment_for_static_invoice< R: Deref, - ES: Deref, + ES: EntropySource, NS: Deref, NL: Deref, IH, @@ -1245,7 +1237,6 @@ impl OutboundPayments { ) -> Result<(), Bolt12PaymentError> where R::Target: Router, - ES::Target: EntropySource, NS::Target: NodeSigner, NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, @@ -1314,7 +1305,15 @@ impl OutboundPayments { } // Returns whether the data changed and needs to be repersisted. - pub(super) fn check_retry_payments( + pub(super) fn check_retry_payments< + R: Deref, + ES: EntropySource, + NS: Deref, + SP, + IH, + FH, + L: Deref, + >( &self, router: &R, first_hops: FH, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, pending_events: &Mutex)>>, @@ -1322,7 +1321,6 @@ impl OutboundPayments { ) -> bool where R::Target: Router, - ES::Target: EntropySource, NS::Target: NodeSigner, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, IH: Fn() -> InFlightHtlcs, @@ -1481,7 +1479,7 @@ impl OutboundPayments { /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed #[rustfmt::skip] - fn send_payment_for_non_bolt12_invoice( + fn send_payment_for_non_bolt12_invoice( &self, payment_id: PaymentId, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, retry_strategy: Retry, mut route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, @@ -1491,7 +1489,6 @@ impl OutboundPayments { ) -> Result<(), RetryableSendFailure> where R::Target: Router, - ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, IH: Fn() -> InFlightHtlcs, @@ -1527,7 +1524,7 @@ impl OutboundPayments { } #[rustfmt::skip] - fn find_route_and_send_payment( + fn find_route_and_send_payment( &self, payment_hash: PaymentHash, payment_id: PaymentId, route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, @@ -1536,7 +1533,6 @@ impl OutboundPayments { ) where R::Target: Router, - ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, IH: Fn() -> InFlightHtlcs, @@ -1689,7 +1685,7 @@ impl OutboundPayments { } #[rustfmt::skip] - fn handle_pay_route_err( + fn handle_pay_route_err( &self, err: PaymentSendFailure, payment_id: PaymentId, payment_hash: PaymentHash, route: Route, mut route_params: RouteParameters, onion_session_privs: Vec<[u8; 32]>, router: 
&R, first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, @@ -1699,7 +1695,6 @@ impl OutboundPayments { ) where R::Target: Router, - ES::Target: EntropySource, NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -1811,12 +1806,11 @@ impl OutboundPayments { } #[rustfmt::skip] - pub(super) fn send_probe( + pub(super) fn send_probe( &self, path: Path, probing_cookie_secret: [u8; 32], entropy_source: &ES, node_signer: &NS, best_block_height: u32, send_payment_along_path: F, ) -> Result<(PaymentHash, PaymentId), ProbeSendFailure> where - ES::Target: EntropySource, NS::Target: NodeSigner, F: Fn(SendAlongPathArgs) -> Result<(), APIError>, { @@ -1886,20 +1880,20 @@ impl OutboundPayments { #[cfg(any(test, feature = "_externalize_tests"))] #[rustfmt::skip] - pub(super) fn test_add_new_pending_payment( + pub(super) fn test_add_new_pending_payment( &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route, retry_strategy: Option, entropy_source: &ES, best_block_height: u32 - ) -> Result, PaymentSendFailure> where ES::Target: EntropySource { + ) -> Result, PaymentSendFailure> { self.add_new_pending_payment(payment_hash, recipient_onion, payment_id, None, route, retry_strategy, None, entropy_source, best_block_height, None) } #[rustfmt::skip] - pub(super) fn add_new_pending_payment( + pub(super) fn add_new_pending_payment( &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, keysend_preimage: Option, route: &Route, retry_strategy: Option, payment_params: Option, entropy_source: &ES, best_block_height: u32, bolt12_invoice: Option - ) -> Result, PaymentSendFailure> where ES::Target: EntropySource { + ) -> Result, PaymentSendFailure> { let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); match pending_outbounds.entry(payment_id) { hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment), @@ -1915,15 +1909,12 @@ impl OutboundPayments { } #[rustfmt::skip] - fn create_pending_payment( + fn create_pending_payment( payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, invoice_request: Option, bolt12_invoice: Option, route: &Route, retry_strategy: Option, payment_params: Option, entropy_source: &ES, best_block_height: u32 - ) -> (PendingOutboundPayment, Vec<[u8; 32]>) - where - ES::Target: EntropySource, - { + ) -> (PendingOutboundPayment, Vec<[u8; 32]>) { let mut onion_session_privs = Vec::with_capacity(route.paths.len()); for _ in 0..route.paths.len() { onion_session_privs.push(entropy_source.get_secure_random_bytes()); diff --git a/lightning/src/ln/types.rs b/lightning/src/ln/types.rs index 5d72ba685cb..fd8ccbae382 100644 --- a/lightning/src/ln/types.rs +++ b/lightning/src/ln/types.rs @@ -24,7 +24,6 @@ use bitcoin::hashes::{sha256::Hash as Sha256, Hash as _, HashEngine as _}; use bitcoin::hex::display::impl_fmt_traits; use core::borrow::Borrow; -use core::ops::Deref; /// A unique 32-byte identifier for a channel. /// Depending on how the ID is generated, several varieties are distinguished @@ -53,10 +52,7 @@ impl ChannelId { } /// Create a _temporary_ channel ID randomly, based on an entropy source. 
- pub fn temporary_from_entropy_source(entropy_source: &ES) -> Self - where - ES::Target: EntropySource, - { + pub fn temporary_from_entropy_source(entropy_source: &ES) -> Self { Self(entropy_source.get_secure_random_bytes()) } diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 8b03f0ea081..97e92fdaec5 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -550,11 +550,10 @@ where } } - fn create_offer_builder_intern( + fn create_offer_builder_intern( &self, entropy_source: ES, make_paths: PF, ) -> Result<(OfferBuilder<'_, DerivedMetadata, secp256k1::All>, Nonce), Bolt12SemanticError> where - ES::Target: EntropySource, PF: FnOnce( PublicKey, MessageContext, @@ -607,13 +606,10 @@ where /// This is not exported to bindings users as builder patterns don't map outside of move semantics. /// /// [`DefaultMessageRouter`]: crate::onion_message::messenger::DefaultMessageRouter - pub fn create_offer_builder( + pub fn create_offer_builder( &self, entropy_source: ES, peers: Vec, - ) -> Result, Bolt12SemanticError> - where - ES::Target: EntropySource, - { - self.create_offer_builder_intern(&*entropy_source, |_, context, _| { + ) -> Result, Bolt12SemanticError> { + self.create_offer_builder_intern(&entropy_source, |_, context, _| { self.create_blinded_paths(peers, context) .map(|paths| paths.into_iter().take(1)) .map_err(|_| Bolt12SemanticError::MissingPaths) @@ -630,15 +626,14 @@ where /// This is not exported to bindings users as builder patterns don't map outside of move semantics. /// /// See [`Self::create_offer_builder`] for more details on usage. - pub fn create_offer_builder_using_router( + pub fn create_offer_builder_using_router( &self, router: ME, entropy_source: ES, peers: Vec, ) -> Result, Bolt12SemanticError> where ME::Target: MessageRouter, - ES::Target: EntropySource, { let receive_key = self.get_receive_auth_key(); - self.create_offer_builder_intern(&*entropy_source, |node_id, context, secp_ctx| { + self.create_offer_builder_intern(&entropy_source, |node_id, context, secp_ctx| { router .create_blinded_paths(node_id, receive_key, context, peers, secp_ctx) .map(|paths| paths.into_iter().take(1)) @@ -657,23 +652,19 @@ where /// aforementioned always-online node. /// /// This is not exported to bindings users as builder patterns don't map outside of move semantics. 
- pub fn create_async_receive_offer_builder( + pub fn create_async_receive_offer_builder( &self, entropy_source: ES, message_paths_to_always_online_node: Vec, - ) -> Result<(OfferBuilder<'_, DerivedMetadata, secp256k1::All>, Nonce), Bolt12SemanticError> - where - ES::Target: EntropySource, - { - self.create_offer_builder_intern(&*entropy_source, |_, _, _| { + ) -> Result<(OfferBuilder<'_, DerivedMetadata, secp256k1::All>, Nonce), Bolt12SemanticError> { + self.create_offer_builder_intern(&entropy_source, |_, _, _| { Ok(message_paths_to_always_online_node) }) } - fn create_refund_builder_intern( + fn create_refund_builder_intern( &self, entropy_source: ES, make_paths: PF, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, ) -> Result, Bolt12SemanticError> where - ES::Target: EntropySource, PF: FnOnce( PublicKey, MessageContext, @@ -683,7 +674,7 @@ where { let node_id = self.get_our_node_id(); let expanded_key = &self.inbound_payment_key; - let entropy = &*entropy_source; + let entropy = &entropy_source; let secp_ctx = &self.secp_ctx; let nonce = Nonce::from_entropy_source(entropy); @@ -744,15 +735,12 @@ where /// /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed /// [`RouteParameters::from_payment_params_and_value`]: crate::routing::router::RouteParameters::from_payment_params_and_value - pub fn create_refund_builder( + pub fn create_refund_builder( &self, entropy_source: ES, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, peers: Vec, - ) -> Result, Bolt12SemanticError> - where - ES::Target: EntropySource, - { + ) -> Result, Bolt12SemanticError> { self.create_refund_builder_intern( - &*entropy_source, + &entropy_source, |_, context, _| { self.create_blinded_paths(peers, context) .map(|paths| paths.into_iter().take(1)) @@ -785,17 +773,16 @@ where /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed /// [`RouteParameters::from_payment_params_and_value`]: crate::routing::router::RouteParameters::from_payment_params_and_value - pub fn create_refund_builder_using_router( + pub fn create_refund_builder_using_router( &self, router: ME, entropy_source: ES, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, peers: Vec, ) -> Result, Bolt12SemanticError> where ME::Target: MessageRouter, - ES::Target: EntropySource, { let receive_key = self.get_receive_auth_key(); self.create_refund_builder_intern( - &*entropy_source, + &entropy_source, |node_id, context, secp_ctx| { router .create_blinded_paths(node_id, receive_key, context, peers, secp_ctx) @@ -905,12 +892,11 @@ where /// blinded path can be constructed. /// /// This is not exported to bindings users as builder patterns don't map outside of move semantics. - pub fn create_invoice_builder_from_refund<'a, ES: Deref, R: Deref, F>( + pub fn create_invoice_builder_from_refund<'a, ES: EntropySource, R: Deref, F>( &'a self, router: &R, entropy_source: ES, refund: &'a Refund, usable_channels: Vec, get_payment_info: F, ) -> Result, Bolt12SemanticError> where - ES::Target: EntropySource, R::Target: Router, F: Fn(u64, u32) -> Result<(PaymentHash, PaymentSecret), Bolt12SemanticError>, { @@ -919,7 +905,7 @@ where } let expanded_key = &self.inbound_payment_key; - let entropy = &*entropy_source; + let entropy = &entropy_source; let amount_msats = refund.amount_msats(); let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32; @@ -1282,12 +1268,9 @@ where /// received to our node. 
/// /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc - pub fn path_for_release_held_htlc( + pub fn path_for_release_held_htlc( &self, intercept_id: InterceptId, prev_outbound_scid_alias: u64, htlc_id: u64, entropy: ES, - ) -> BlindedMessagePath - where - ES::Target: EntropySource, - { + ) -> BlindedMessagePath { // In the future, we should support multi-hop paths here. let context = MessageContext::AsyncPayments(AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id, @@ -1302,7 +1285,7 @@ where self.receive_auth_key, context, false, - &*entropy, + &entropy, &self.secp_ctx, ) } @@ -1589,13 +1572,12 @@ where /// /// Returns `None` if we have enough offers cached already, verification of `message` fails, or we /// fail to create blinded paths. - pub fn handle_offer_paths( + pub fn handle_offer_paths( &self, message: OfferPaths, context: AsyncPaymentsContext, responder: Responder, peers: Vec, usable_channels: Vec, entropy: ES, router: R, ) -> Option<(ServeStaticInvoice, MessageContext)> where - ES::Target: EntropySource, R::Target: Router, { let duration_since_epoch = self.duration_since_epoch(); @@ -1624,7 +1606,7 @@ where } let (mut offer_builder, offer_nonce) = - match self.create_async_receive_offer_builder(&*entropy, message.paths) { + match self.create_async_receive_offer_builder(&entropy, message.paths) { Ok((builder, nonce)) => (builder, nonce), Err(_) => return None, // Only reachable if OfferPaths::paths is empty }; diff --git a/lightning/src/offers/nonce.rs b/lightning/src/offers/nonce.rs index 0675414125f..8c99a464abc 100644 --- a/lightning/src/offers/nonce.rs +++ b/lightning/src/offers/nonce.rs @@ -13,7 +13,6 @@ use crate::io::{self, Read}; use crate::ln::msgs::DecodeError; use crate::sign::EntropySource; use crate::util::ser::{Readable, Writeable, Writer}; -use core::ops::Deref; #[allow(unused_imports)] use crate::prelude::*; @@ -34,10 +33,7 @@ impl Nonce { pub const LENGTH: usize = 16; /// Creates a `Nonce` from the given [`EntropySource`]. - pub fn from_entropy_source(entropy_source: ES) -> Self - where - ES::Target: EntropySource, - { + pub fn from_entropy_source(entropy_source: ES) -> Self { let mut bytes = [0u8; Self::LENGTH]; let rand_bytes = entropy_source.get_secure_random_bytes(); bytes.copy_from_slice(&rand_bytes[..Self::LENGTH]); diff --git a/lightning/src/offers/refund.rs b/lightning/src/offers/refund.rs index dd2c3e2a92e..c0fd9dfdd3e 100644 --- a/lightning/src/offers/refund.rs +++ b/lightning/src/offers/refund.rs @@ -110,7 +110,6 @@ use bitcoin::constants::ChainHash; use bitcoin::network::Network; use bitcoin::secp256k1::{self, PublicKey, Secp256k1}; use core::hash::{Hash, Hasher}; -use core::ops::Deref; use core::str::FromStr; use core::time::Duration; @@ -624,13 +623,10 @@ macro_rules! respond_with_derived_signing_pubkey_methods { ($self: ident, $build /// /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice #[cfg(feature = "std")] - pub fn respond_using_derived_keys( + pub fn respond_using_derived_keys( &$self, payment_paths: Vec, payment_hash: PaymentHash, expanded_key: &ExpandedKey, entropy_source: ES - ) -> Result<$builder, Bolt12SemanticError> - where - ES::Target: EntropySource, - { + ) -> Result<$builder, Bolt12SemanticError> { let created_at = std::time::SystemTime::now() .duration_since(std::time::SystemTime::UNIX_EPOCH) .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH"); @@ -648,13 +644,10 @@ macro_rules! 
respond_with_derived_signing_pubkey_methods { ($self: ident, $build /// This is not exported to bindings users as builder patterns don't map outside of move semantics. /// /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice - pub fn respond_using_derived_keys_no_std( + pub fn respond_using_derived_keys_no_std( &$self, payment_paths: Vec, payment_hash: PaymentHash, created_at: core::time::Duration, expanded_key: &ExpandedKey, entropy_source: ES - ) -> Result<$builder, Bolt12SemanticError> - where - ES::Target: EntropySource, - { + ) -> Result<$builder, Bolt12SemanticError> { if $self.features().requires_unknown_bits() { return Err(Bolt12SemanticError::UnknownRequiredFeatures); } diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index dbeab3937d0..d859d35dc09 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -66,9 +66,7 @@ pub(super) const MAX_TIMER_TICKS: usize = 2; /// languages. pub trait AOnionMessenger { /// A type implementing [`EntropySource`] - type EntropySource: EntropySource + ?Sized; - /// A type that may be dereferenced to [`Self::EntropySource`] - type ES: Deref; + type EntropySource: EntropySource; /// A type implementing [`NodeSigner`] type NodeSigner: NodeSigner + ?Sized; /// A type that may be dereferenced to [`Self::NodeSigner`] @@ -105,7 +103,7 @@ pub trait AOnionMessenger { fn get_om( &self, ) -> &OnionMessenger< - Self::ES, + Self::EntropySource, Self::NS, Self::L, Self::NL, @@ -118,7 +116,7 @@ pub trait AOnionMessenger { } impl< - ES: Deref, + ES: EntropySource, NS: Deref, L: Deref, NL: Deref, @@ -129,7 +127,6 @@ impl< CMH: Deref, > AOnionMessenger for OnionMessenger where - ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, @@ -139,8 +136,7 @@ where DRH::Target: DNSResolverMessageHandler, CMH::Target: CustomOnionMessageHandler, { - type EntropySource = ES::Target; - type ES = ES; + type EntropySource = ES; type NodeSigner = NS::Target; type NS = NS; type Logger = L::Target; @@ -284,7 +280,7 @@ where /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice pub struct OnionMessenger< - ES: Deref, + ES: EntropySource, NS: Deref, L: Deref, NL: Deref, @@ -294,7 +290,6 @@ pub struct OnionMessenger< DRH: Deref, CMH: Deref, > where - ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, @@ -549,10 +544,9 @@ pub trait MessageRouter { /// node. Otherwise, there is no way to find a path to the introduction node in order to send a /// message, and thus an `Err` is returned. The impact of this may be somewhat muted when /// additional dummy hops are added to the blinded path, but this protection is not complete. -pub struct DefaultMessageRouter>, L: Deref, ES: Deref> +pub struct DefaultMessageRouter>, L: Deref, ES: EntropySource> where L::Target: Logger, - ES::Target: EntropySource, { network_graph: G, entropy_source: ES, @@ -569,10 +563,9 @@ pub(crate) const DUMMY_HOPS_PATH_LENGTH: usize = 4; // We add dummy hops until the path reaches this length (including the recipient). pub(crate) const QR_CODED_DUMMY_HOPS_PATH_LENGTH: usize = 2; -impl>, L: Deref, ES: Deref> DefaultMessageRouter +impl>, L: Deref, ES: EntropySource> DefaultMessageRouter where L::Target: Logger, - ES::Target: EntropySource, { /// Creates a [`DefaultMessageRouter`] using the given [`NetworkGraph`]. 
pub fn new(network_graph: G, entropy_source: ES) -> Self { @@ -660,7 +653,7 @@ where local_node_receive_key, context.clone(), size_constrained, - &**entropy_source, + &entropy_source, secp_ctx, ) }; @@ -738,11 +731,10 @@ where } } -impl>, L: Deref, ES: Deref> MessageRouter +impl>, L: Deref, ES: EntropySource> MessageRouter for DefaultMessageRouter where L::Target: Logger, - ES::Target: EntropySource, { fn find_path( &self, sender: PublicKey, peers: Vec, destination: Destination, @@ -784,19 +776,17 @@ where /// node. Otherwise, there is no way to find a path to the introduction node in order to send a /// message, and thus an `Err` is returned. The impact of this may be somewhat muted when /// additional dummy hops are added to the blinded path, but this protection is not complete. -pub struct NodeIdMessageRouter>, L: Deref, ES: Deref> +pub struct NodeIdMessageRouter>, L: Deref, ES: EntropySource> where L::Target: Logger, - ES::Target: EntropySource, { network_graph: G, entropy_source: ES, } -impl>, L: Deref, ES: Deref> NodeIdMessageRouter +impl>, L: Deref, ES: EntropySource> NodeIdMessageRouter where L::Target: Logger, - ES::Target: EntropySource, { /// Creates a [`NodeIdMessageRouter`] using the given [`NetworkGraph`]. pub fn new(network_graph: G, entropy_source: ES) -> Self { @@ -804,11 +794,10 @@ where } } -impl>, L: Deref, ES: Deref> MessageRouter +impl>, L: Deref, ES: EntropySource> MessageRouter for NodeIdMessageRouter where L::Target: Logger, - ES::Target: EntropySource, { fn find_path( &self, sender: PublicKey, peers: Vec, destination: Destination, @@ -1052,7 +1041,7 @@ pub enum PeeledOnion { /// Returns the node id of the peer to send the message to, the message itself, and any addresses /// needed to connect to the first node. pub fn create_onion_message_resolving_destination< - ES: Deref, + ES: EntropySource, NS: Deref, NL: Deref, T: OnionMessageContents, @@ -1062,7 +1051,6 @@ pub fn create_onion_message_resolving_destination< mut path: OnionMessagePath, contents: T, reply_path: Option, ) -> Result<(PublicKey, OnionMessage, Vec), SendError> where - ES::Target: EntropySource, NS::Target: NodeSigner, NL::Target: NodeIdLookUp, { @@ -1089,13 +1077,12 @@ where /// - unless it can be resolved by [`NodeIdLookUp::next_node_id`]. /// Use [`create_onion_message_resolving_destination`] instead to resolve the introduction node /// first with a [`ReadOnlyNetworkGraph`]. -pub fn create_onion_message( +pub fn create_onion_message( entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, path: OnionMessagePath, contents: T, reply_path: Option, ) -> Result<(PublicKey, OnionMessage, Vec), SendError> where - ES::Target: EntropySource, NS::Target: NodeSigner, NL::Target: NodeIdLookUp, { @@ -1394,7 +1381,7 @@ macro_rules! 
drop_handled_events_and_abort { } impl< - ES: Deref, + ES: EntropySource, NS: Deref, L: Deref, NL: Deref, @@ -1405,7 +1392,6 @@ impl< CMH: Deref, > OnionMessenger where - ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, @@ -2038,7 +2024,7 @@ fn outbound_buffer_full( } impl< - ES: Deref, + ES: EntropySource, NS: Deref, L: Deref, NL: Deref, @@ -2049,7 +2035,6 @@ impl< CMH: Deref, > EventsProvider for OnionMessenger where - ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, @@ -2159,7 +2144,7 @@ where } impl< - ES: Deref, + ES: EntropySource, NS: Deref, L: Deref, NL: Deref, @@ -2170,7 +2155,6 @@ impl< CMH: Deref, > BaseMessageHandler for OnionMessenger where - ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, @@ -2231,7 +2215,7 @@ where } impl< - ES: Deref, + ES: EntropySource, NS: Deref, L: Deref, NL: Deref, @@ -2242,7 +2226,6 @@ impl< CMH: Deref, > OnionMessageHandler for OnionMessenger where - ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 2dd48ca8058..494860f1976 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -58,14 +58,13 @@ pub use lightning_types::routing::{RouteHint, RouteHintHop}; pub struct DefaultRouter< G: Deref>, L: Deref, - ES: Deref, + ES: EntropySource, S: Deref, SP: Sized, Sc: ScoreLookUp, > where L::Target: Logger, S::Target: for<'a> LockableScore<'a, ScoreLookUp = Sc>, - ES::Target: EntropySource, { network_graph: G, logger: L, @@ -80,7 +79,7 @@ pub const DEFAULT_PAYMENT_DUMMY_HOPS: usize = 3; impl< G: Deref>, L: Deref, - ES: Deref, + ES: EntropySource, S: Deref, SP: Sized, Sc: ScoreLookUp, @@ -88,7 +87,6 @@ impl< where L::Target: Logger, S::Target: for<'a> LockableScore<'a, ScoreLookUp = Sc>, - ES::Target: EntropySource, { /// Creates a new router. 
 	pub fn new(
@@ -101,7 +99,7 @@ where
 impl<
 	G: Deref<Target = NetworkGraph<L>>,
 	L: Deref,
-	ES: Deref,
+	ES: EntropySource,
 	S: Deref,
 	SP: Sized,
 	Sc: ScoreLookUp,
@@ -109,7 +107,6 @@ impl<
 where
 	L::Target: Logger,
 	S::Target: for<'a> LockableScore<'a, ScoreLookUp = Sc>,
-	ES::Target: EntropySource,
 {
 	#[rustfmt::skip]
 	fn find_route(
@@ -203,7 +200,7 @@ where
 				.map(|forward_node| {
 					BlindedPaymentPath::new_with_dummy_hops(
 						&[forward_node], recipient, &[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS],
-						local_node_receive_key, tlvs.clone(), u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &*self.entropy_source, secp_ctx
+						local_node_receive_key, tlvs.clone(), u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &self.entropy_source, secp_ctx
 					)
 				})
 				.take(MAX_PAYMENT_PATHS)
@@ -215,7 +212,7 @@ where
 			if network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)) {
 				BlindedPaymentPath::new_with_dummy_hops(
 					&[], recipient, &[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS],
-					local_node_receive_key, tlvs, u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &*self.entropy_source, secp_ctx
+					local_node_receive_key, tlvs, u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &self.entropy_source, secp_ctx
 				).map(|path| vec![path])
 			} else {
 				Err(())
diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs
index 26252c74dd2..51b00a68037 100644
--- a/lightning/src/sign/mod.rs
+++ b/lightning/src/sign/mod.rs
@@ -878,6 +878,12 @@ pub trait EntropySource {
 	fn get_secure_random_bytes(&self) -> [u8; 32];
 }
 
+impl<E: Deref<Target: EntropySource>> EntropySource for E {
+	fn get_secure_random_bytes(&self) -> [u8; 32] {
+		self.deref().get_secure_random_bytes()
+	}
+}
+
 /// A trait that can handle cryptographic operations at the scope level of a node.
 pub trait NodeSigner {
 	/// Get the [`ExpandedKey`] which provides cryptographic material for various Lightning Network operations.
diff --git a/lightning/src/util/anchor_channel_reserves.rs b/lightning/src/util/anchor_channel_reserves.rs
index 26212ca3966..0e2f53a84b4 100644
--- a/lightning/src/util/anchor_channel_reserves.rs
+++ b/lightning/src/util/anchor_channel_reserves.rs
@@ -277,17 +277,9 @@ pub fn can_support_additional_anchor_channel<
 	EstimatorRef: Deref,
 	LoggerRef: Deref,
 	PersistRef: Deref,
-	EntropySourceRef: Deref,
+	ES: EntropySource,
 	ChainMonitorRef: Deref<
-		Target = ChainMonitor<
-			ChannelSigner,
-			FilterRef,
-			B,
-			EstimatorRef,
-			LoggerRef,
-			PersistRef,
-			EntropySourceRef,
-		>,
+		Target = ChainMonitor<ChannelSigner, FilterRef, B, EstimatorRef, LoggerRef, PersistRef, ES>,
 	>,
 >(
 	context: &AnchorChannelReserveContext, utxos: &[Utxo], a_channel_manager: AChannelManagerRef,
@@ -299,7 +291,6 @@ where
 	EstimatorRef::Target: FeeEstimator,
 	LoggerRef::Target: Logger,
 	PersistRef::Target: Persist<ChannelSigner>,
-	EntropySourceRef::Target: EntropySource,
 {
 	let mut anchor_channels = new_hash_set();
 	// Calculate the number of in-progress anchor channels by inspecting ChannelMonitors with balance.
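The blanket impl added to sign/mod.rs above is what makes the rest of this patch mechanical: once anything that derefs to an `EntropySource` is itself an `EntropySource`, every `ES: Deref` parameter plus `ES::Target: EntropySource` bound can collapse to a single `ES: EntropySource` bound while `Arc`-wrapped and borrowed entropy sources keep compiling. A minimal self-contained sketch of the pattern, with the trait reduced to its single method; `FixedEntropy` and `takes_entropy` are illustrative stand-ins, not LDK items:

use std::ops::Deref;
use std::sync::Arc;

pub trait EntropySource {
    fn get_secure_random_bytes(&self) -> [u8; 32];
}

// The blanket impl from the hunk above: smart pointers and references
// to an EntropySource are themselves EntropySources, forwarding
// through Deref.
impl<E: Deref<Target: EntropySource>> EntropySource for E {
    fn get_secure_random_bytes(&self) -> [u8; 32] {
        self.deref().get_secure_random_bytes()
    }
}

// Hypothetical concrete source, standing in for e.g. a keys manager.
struct FixedEntropy;

impl EntropySource for FixedEntropy {
    fn get_secure_random_bytes(&self) -> [u8; 32] {
        [42; 32]
    }
}

// The post-patch style of bound: no Deref hop, no where-clause.
fn takes_entropy<ES: EntropySource>(es: ES) -> [u8; 32] {
    es.get_secure_random_bytes()
}

fn main() {
    // Borrowed and Arc-wrapped sources both satisfy the direct bound.
    assert_eq!(takes_entropy(&FixedEntropy), [42; 32]);
    assert_eq!(takes_entropy(Arc::new(FixedEntropy)), [42; 32]);
}

Concrete types keep their direct trait impls alongside the blanket one; coherence accepts both because a local type that never implements `Deref` cannot match the blanket impl.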
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 92a565a28f8..ecc2d946acd 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -445,12 +445,11 @@ impl Persist( +pub fn read_channel_monitors( kv_store: K, entropy_source: ES, signer_provider: SP, ) -> Result::EcdsaSigner>)>, io::Error> where K::Target: KVStoreSync, - ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, { let mut res = Vec::new(); @@ -465,7 +464,7 @@ where CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key, )?), - (&*entropy_source, &*signer_provider), + (&entropy_source, &*signer_provider), ) { Ok(Some((block_hash, channel_monitor))) => { let monitor_name = MonitorName::from_str(&stored_key)?; @@ -591,7 +590,7 @@ fn poll_sync_future(future: F) -> F::Output { pub struct MonitorUpdatingPersister< K: Deref, L: Deref, - ES: Deref, + ES: EntropySource, SP: Deref, BI: BroadcasterInterface, FE: Deref, @@ -599,16 +598,14 @@ pub struct MonitorUpdatingPersister< where K::Target: KVStoreSync, L::Target: Logger, - ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, FE::Target: FeeEstimator; -impl +impl MonitorUpdatingPersister where K::Target: KVStoreSync, L::Target: Logger, - ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, FE::Target: FeeEstimator, { @@ -698,7 +695,7 @@ impl< ChannelSigner: EcdsaChannelSigner, K: Deref, L: Deref, - ES: Deref, + ES: EntropySource, SP: Deref, BI: BroadcasterInterface, FE: Deref, @@ -706,7 +703,6 @@ impl< where K::Target: KVStoreSync, L::Target: Logger, - ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, FE::Target: FeeEstimator, { @@ -783,7 +779,7 @@ pub struct MonitorUpdatingPersisterAsync< K: Deref, S: FutureSpawner, L: Deref, - ES: Deref, + ES: EntropySource, SP: Deref, BI: BroadcasterInterface, FE: Deref, @@ -791,7 +787,6 @@ pub struct MonitorUpdatingPersisterAsync< where K::Target: KVStore, L::Target: Logger, - ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, FE::Target: FeeEstimator; @@ -799,14 +794,13 @@ struct MonitorUpdatingPersisterAsyncInner< K: Deref, S: FutureSpawner, L: Deref, - ES: Deref, + ES: EntropySource, SP: Deref, BI: BroadcasterInterface, FE: Deref, > where K::Target: KVStore, L::Target: Logger, - ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, FE::Target: FeeEstimator, { @@ -825,7 +819,7 @@ impl< K: Deref, S: FutureSpawner, L: Deref, - ES: Deref, + ES: EntropySource, SP: Deref, BI: BroadcasterInterface, FE: Deref, @@ -833,7 +827,6 @@ impl< where K::Target: KVStore, L::Target: Logger, - ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, FE::Target: FeeEstimator, { @@ -975,7 +968,7 @@ impl< K: Deref + MaybeSend + MaybeSync + 'static, S: FutureSpawner, L: Deref + MaybeSend + MaybeSync + 'static, - ES: Deref + MaybeSend + MaybeSync + 'static, + ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, FE: Deref + MaybeSend + MaybeSync + 'static, @@ -983,7 +976,6 @@ impl< where K::Target: KVStore + MaybeSync, L::Target: Logger, - ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, FE::Target: FeeEstimator, ::EcdsaSigner: MaybeSend + 'static, @@ -1066,7 +1058,7 @@ impl< K: Deref, S: FutureSpawner, L: Deref, - ES: Deref, + ES: EntropySource, SP: Deref, BI: BroadcasterInterface, FE: Deref, @@ -1074,7 +1066,6 @@ impl< where K::Target: KVStore, L::Target: 
Logger,
-	ES::Target: EntropySource + Sized,
 	SP::Target: SignerProvider + Sized,
 	FE::Target: FeeEstimator,
 {
@@ -1159,7 +1150,7 @@ where
 		}
 		match <Option<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>>::read(
 			&mut monitor_cursor,
-			(&*self.entropy_source, &*self.signer_provider),
+			(&self.entropy_source, &*self.signer_provider),
 		) {
 			Ok(None) => Ok(None),
 			Ok(Some((blockhash, channel_monitor))) => {
diff --git a/lightning/src/util/scid_utils.rs b/lightning/src/util/scid_utils.rs
index b9dcc4688e8..d57c529a41a 100644
--- a/lightning/src/util/scid_utils.rs
+++ b/lightning/src/util/scid_utils.rs
@@ -80,8 +80,6 @@ pub(crate) mod fake_scid {
 	use bitcoin::constants::ChainHash;
 	use bitcoin::Network;
 
-	use core::ops::Deref;
-
 	const TEST_SEGWIT_ACTIVATION_HEIGHT: u32 = 1;
 	const MAINNET_SEGWIT_ACTIVATION_HEIGHT: u32 = 481_824;
 	const MAX_TX_INDEX: u32 = 2_500;
@@ -110,13 +108,10 @@ pub(crate) mod fake_scid {
 	/// between segwit activation and the current best known height, and the tx index and output
 	/// index are also selected from a "reasonable" range. We add this logic because it makes it
 	/// non-obvious at a glance that the scid is fake, e.g. if it appears in invoice route hints.
-	pub(crate) fn get_fake_scid<ES: Deref>(
+	pub(crate) fn get_fake_scid<ES: EntropySource>(
 		&self, highest_seen_blockheight: u32, chain_hash: &ChainHash,
 		fake_scid_rand_bytes: &[u8; 32], entropy_source: &ES,
-	) -> u64
-	where
-		ES::Target: EntropySource,
-	{
+	) -> u64 {
 		// Ensure we haven't created a namespace that doesn't fit into the 3 bits we've allocated for
 		// namespaces.
 		assert!((*self as u8) < MAX_NAMESPACES);

From fb2759e8f8500de04b52106030cd471fd5d5f2ef Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Wed, 14 Jan 2026 14:19:40 -0500
Subject: [PATCH 141/242] Drop Deref indirection for NodeSigner

Reduces generics and verbosity across the codebase, should provide
equivalent behavior.
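Concretely, this applies the same blanket-impl recipe used for `EntropySource` above: give `NodeSigner` a `Deref` forwarding impl, then replace each `NS: Deref` parameter and `NS::Target: NodeSigner` bound with a direct `NS: NodeSigner` bound. A before/after sketch with the trait cut down to one method; `StaticSigner` and the `node_id_*` functions are invented for illustration rather than taken from LDK:

use std::ops::Deref;

pub trait NodeSigner {
    fn get_node_id(&self) -> [u8; 33];
}

// Blanket impl mirroring the EntropySource one: anything that derefs
// to a NodeSigner is itself a NodeSigner.
impl<N: Deref<Target: NodeSigner>> NodeSigner for N {
    fn get_node_id(&self) -> [u8; 33] {
        self.deref().get_node_id()
    }
}

// Hypothetical concrete signer.
struct StaticSigner;

impl NodeSigner for StaticSigner {
    fn get_node_id(&self) -> [u8; 33] {
        [2; 33]
    }
}

// Before: an extra generic hop and a where-clause on every signature.
fn node_id_before<NS: Deref>(node_signer: &NS) -> [u8; 33]
where
    NS::Target: NodeSigner,
{
    node_signer.get_node_id()
}

// After: the bound sits directly on the type parameter.
fn node_id_after<NS: NodeSigner>(node_signer: &NS) -> [u8; 33] {
    node_signer.get_node_id()
}

fn main() {
    let signer = StaticSigner;
    // The old shape needed a Deref wrapper (here, a plain reference).
    assert_eq!(node_id_before(&&signer), node_id_after(&signer));
}

Call sites are identical in both shapes, which is why the commit can claim equivalent behavior while shedding a generic parameter and a where-clause at each use.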
Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 9 +-- lightning-liquidity/src/lsps5/service.rs | 9 +-- lightning-liquidity/src/manager.rs | 70 +++++++------------- lightning/src/blinded_path/message.rs | 3 +- lightning/src/blinded_path/payment.rs | 10 +-- lightning/src/ln/channel.rs | 43 +++++------- lightning/src/ln/channelmanager.rs | 76 ++++++++-------------- lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/ln/invoice_utils.rs | 9 +-- lightning/src/ln/msgs.rs | 11 +--- lightning/src/ln/onion_payment.rs | 10 ++- lightning/src/ln/onion_utils.rs | 18 ++--- lightning/src/ln/outbound_payment.rs | 42 ++++-------- lightning/src/ln/peer_channel_encryptor.rs | 33 +++------- lightning/src/ln/peer_handler.rs | 23 +++---- lightning/src/onion_message/messenger.rs | 38 ++++------- lightning/src/sign/mod.rs | 43 ++++++++++++ 17 files changed, 181 insertions(+), 270 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index c8898b0690d..6731dae4b2e 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -421,8 +421,7 @@ pub const NO_ONION_MESSENGER: Option< Arc< dyn AOnionMessenger< EntropySource = &(dyn EntropySource + Send + Sync), - NodeSigner = dyn lightning::sign::NodeSigner + Send + Sync, - NS = &(dyn lightning::sign::NodeSigner + Send + Sync), + NodeSigner = &(dyn lightning::sign::NodeSigner + Send + Sync), Logger = dyn Logger + Send + Sync, L = &'static (dyn Logger + Send + Sync), NodeIdLookUp = DynChannelManager, @@ -480,8 +479,7 @@ pub const NO_LIQUIDITY_MANAGER: Option< Arc< dyn ALiquidityManager< EntropySource = &(dyn EntropySource + Send + Sync), - NodeSigner = dyn lightning::sign::NodeSigner + Send + Sync, - NS = &(dyn lightning::sign::NodeSigner + Send + Sync), + NodeSigner = &(dyn lightning::sign::NodeSigner + Send + Sync), AChannelManager = DynChannelManager, CM = &DynChannelManager, Filter = dyn chain::Filter + Send + Sync, @@ -505,8 +503,7 @@ pub const NO_LIQUIDITY_MANAGER_SYNC: Option< Arc< dyn ALiquidityManagerSync< EntropySource = &(dyn EntropySource + Send + Sync), - NodeSigner = dyn lightning::sign::NodeSigner + Send + Sync, - NS = &(dyn lightning::sign::NodeSigner + Send + Sync), + NodeSigner = &(dyn lightning::sign::NodeSigner + Send + Sync), AChannelManager = DynChannelManager, CM = &DynChannelManager, Filter = dyn chain::Filter + Send + Sync, diff --git a/lightning-liquidity/src/lsps5/service.rs b/lightning-liquidity/src/lsps5/service.rs index 53fa96ee565..489d543ca90 100644 --- a/lightning-liquidity/src/lsps5/service.rs +++ b/lightning-liquidity/src/lsps5/service.rs @@ -125,10 +125,9 @@ impl Default for LSPS5ServiceConfig { /// [`LSPS5ServiceEvent::SendWebhookNotification`]: super::event::LSPS5ServiceEvent::SendWebhookNotification /// [`app_name`]: super::msgs::LSPS5AppName /// [`lsps5.webhook_registered`]: super::msgs::WebhookNotificationMethod::LSPS5WebhookRegistered -pub struct LSPS5ServiceHandler +pub struct LSPS5ServiceHandler where CM::Target: AChannelManager, - NS::Target: NodeSigner, K::Target: KVStore, TP::Target: TimeProvider, { @@ -144,10 +143,9 @@ where persistence_in_flight: AtomicUsize, } -impl LSPS5ServiceHandler +impl LSPS5ServiceHandler where CM::Target: AChannelManager, - NS::Target: NodeSigner, K::Target: KVStore, TP::Target: TimeProvider, { @@ -694,11 +692,10 @@ where } } -impl LSPSProtocolMessageHandler +impl LSPSProtocolMessageHandler for LSPS5ServiceHandler where CM::Target: AChannelManager, - 
NS::Target: NodeSigner, K::Target: KVStore, TP::Target: TimeProvider, { diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index 14b0fa52246..0e897dd7abe 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -106,9 +106,7 @@ pub trait ALiquidityManager { /// A type implementing [`EntropySource`] type EntropySource: EntropySource + Clone; /// A type implementing [`NodeSigner`] - type NodeSigner: NodeSigner + ?Sized; - /// A type that may be dereferenced to [`Self::NodeSigner`]. - type NS: Deref + Clone; + type NodeSigner: NodeSigner + Clone; /// A type implementing [`AChannelManager`] type AChannelManager: AChannelManager + ?Sized; /// A type that may be dereferenced to [`Self::AChannelManager`]. @@ -132,7 +130,7 @@ pub trait ALiquidityManager { &self, ) -> &LiquidityManager< Self::EntropySource, - Self::NS, + Self::NodeSigner, Self::CM, Self::C, Self::K, @@ -143,7 +141,7 @@ pub trait ALiquidityManager { impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, K: Deref + Clone, @@ -151,15 +149,13 @@ impl< T: BroadcasterInterface + Clone, > ALiquidityManager for LiquidityManager where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, K::Target: KVStore, TP::Target: TimeProvider, { type EntropySource = ES; - type NodeSigner = NS::Target; - type NS = NS; + type NodeSigner = NS; type AChannelManager = CM::Target; type CM = CM; type Filter = C::Target; @@ -182,9 +178,7 @@ pub trait ALiquidityManagerSync { /// A type implementing [`EntropySource`] type EntropySource: EntropySource + Clone; /// A type implementing [`NodeSigner`] - type NodeSigner: NodeSigner + ?Sized; - /// A type that may be dereferenced to [`Self::NodeSigner`]. - type NS: Deref + Clone; + type NodeSigner: NodeSigner + Clone; /// A type implementing [`AChannelManager`] type AChannelManager: AChannelManager + ?Sized; /// A type that may be dereferenced to [`Self::AChannelManager`]. 
@@ -209,7 +203,7 @@ pub trait ALiquidityManagerSync { &self, ) -> &LiquidityManager< Self::EntropySource, - Self::NS, + Self::NodeSigner, Self::CM, Self::C, KVStoreSyncWrapper, @@ -221,7 +215,7 @@ pub trait ALiquidityManagerSync { &self, ) -> &LiquidityManagerSync< Self::EntropySource, - Self::NS, + Self::NodeSigner, Self::CM, Self::C, Self::KS, @@ -232,7 +226,7 @@ pub trait ALiquidityManagerSync { impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, KS: Deref + Clone, @@ -240,15 +234,13 @@ impl< T: BroadcasterInterface + Clone, > ALiquidityManagerSync for LiquidityManagerSync where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, { type EntropySource = ES; - type NodeSigner = NS::Target; - type NS = NS; + type NodeSigner = NS; type AChannelManager = CM::Target; type CM = CM; type Filter = C::Target; @@ -264,7 +256,7 @@ where &self, ) -> &LiquidityManager< Self::EntropySource, - Self::NS, + Self::NodeSigner, Self::CM, Self::C, KVStoreSyncWrapper, @@ -299,14 +291,13 @@ where /// [`Event::PaymentForwarded`]: lightning::events::Event::PaymentForwarded pub struct LiquidityManager< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, K: Deref + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, K::Target: KVStore, @@ -336,14 +327,13 @@ pub struct LiquidityManager< #[cfg(feature = "time")] impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, K: Deref + Clone, T: BroadcasterInterface + Clone, > LiquidityManager where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, K::Target: KVStore, @@ -375,7 +365,7 @@ where impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, K: Deref + Clone, @@ -383,7 +373,6 @@ impl< T: BroadcasterInterface + Clone, > LiquidityManager where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, K::Target: KVStore, @@ -800,7 +789,7 @@ where impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, K: Deref + Clone, @@ -808,7 +797,6 @@ impl< T: BroadcasterInterface + Clone, > CustomMessageReader for LiquidityManager where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, K::Target: KVStore, @@ -830,7 +818,7 @@ where impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, K: Deref + Clone, @@ -838,7 +826,6 @@ impl< T: BroadcasterInterface + Clone, > CustomMessageHandler for LiquidityManager where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, K::Target: KVStore, @@ -962,7 +949,7 @@ where impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, K: Deref + Clone, @@ -970,7 +957,6 @@ impl< T: BroadcasterInterface + Clone, > Listen for LiquidityManager where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, K::Target: KVStore, @@ -1006,7 +992,7 @@ where impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, K: Deref + Clone, @@ -1014,7 +1000,6 @@ impl< T: BroadcasterInterface + Clone, > Confirm for LiquidityManager 
where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, K::Target: KVStore, @@ -1050,14 +1035,13 @@ where /// available. pub struct LiquidityManagerSync< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, KS: Deref + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, KS::Target: KVStoreSync, @@ -1069,14 +1053,13 @@ pub struct LiquidityManagerSync< #[cfg(feature = "time")] impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, KS: Deref + Clone, T: BroadcasterInterface + Clone, > LiquidityManagerSync where - NS::Target: NodeSigner, CM::Target: AChannelManager, KS::Target: KVStoreSync, C::Target: Filter, @@ -1119,7 +1102,7 @@ where impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, KS: Deref + Clone, @@ -1127,7 +1110,6 @@ impl< T: BroadcasterInterface + Clone, > LiquidityManagerSync where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, KS::Target: KVStoreSync, @@ -1287,7 +1269,7 @@ where impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, KS: Deref + Clone, @@ -1295,7 +1277,6 @@ impl< T: BroadcasterInterface + Clone, > CustomMessageReader for LiquidityManagerSync where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, KS::Target: KVStoreSync, @@ -1312,7 +1293,7 @@ where impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, KS: Deref + Clone, @@ -1320,7 +1301,6 @@ impl< T: BroadcasterInterface + Clone, > CustomMessageHandler for LiquidityManagerSync where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, KS::Target: KVStoreSync, @@ -1357,7 +1337,7 @@ where impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, KS: Deref + Clone, @@ -1365,7 +1345,6 @@ impl< T: BroadcasterInterface + Clone, > Listen for LiquidityManagerSync where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, KS::Target: KVStoreSync, @@ -1385,7 +1364,7 @@ where impl< ES: EntropySource + Clone, - NS: Deref + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, KS: Deref + Clone, @@ -1393,7 +1372,6 @@ impl< T: BroadcasterInterface + Clone, > Confirm for LiquidityManagerSync where - NS::Target: NodeSigner, CM::Target: AChannelManager, C::Target: Filter, KS::Target: KVStoreSync, diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs index c914458ccbc..68c4a60738b 100644 --- a/lightning/src/blinded_path/message.rs +++ b/lightning/src/blinded_path/message.rs @@ -192,11 +192,10 @@ impl BlindedMessagePath { /// introduction node. /// /// Will only modify `self` when returning `Ok`. 
- pub fn advance_path_by_one( + pub fn advance_path_by_one( &mut self, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, ) -> Result<(), ()> where - NS::Target: NodeSigner, NL::Target: NodeIdLookUp, T: secp256k1::Signing + secp256k1::Verification, { diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index e195f5a54ab..df0626e0673 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -230,11 +230,10 @@ impl BlindedPaymentPath { /// introduction node. /// /// Will only modify `self` when returning `Ok`. - pub fn advance_path_by_one( + pub fn advance_path_by_one( &mut self, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, ) -> Result<(), ()> where - NS::Target: NodeSigner, NL::Target: NodeIdLookUp, T: secp256k1::Signing + secp256k1::Verification, { @@ -265,12 +264,9 @@ impl BlindedPaymentPath { Ok(()) } - pub(crate) fn decrypt_intro_payload( + pub(crate) fn decrypt_intro_payload( &self, node_signer: &NS, - ) -> Result<(BlindedPaymentTlvs, SharedSecret), ()> - where - NS::Target: NodeSigner, - { + ) -> Result<(BlindedPaymentTlvs, SharedSecret), ()> { let control_tlvs_ss = node_signer.ecdh(Recipient::Node, &self.inner_path.blinding_point, None)?; let rho = onion_utils::gen_rho_from_shared_secret(&control_tlvs_ss.secret_bytes()); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 38502c995a8..31c39968b33 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7861,12 +7861,11 @@ where /// and the channel is now usable (and public), this may generate an announcement_signatures to /// reply with. #[rustfmt::skip] - pub fn channel_ready( + pub fn channel_ready( &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock, logger: &L ) -> Result, ChannelError> where - NS::Target: NodeSigner, L::Target: Logger { if self.context.channel_state.is_peer_disconnected() { @@ -9552,13 +9551,12 @@ where /// successfully and we should restore normal operation. Returns messages which should be sent /// to the remote side. #[rustfmt::skip] - pub fn monitor_updating_restored( + pub fn monitor_updating_restored( &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block_height: u32, path_for_release_htlc: CBP ) -> MonitorRestoreUpdates where L::Target: Logger, - NS::Target: NodeSigner, CBP: Fn(u64) -> BlindedMessagePath { assert!(self.context.channel_state.is_monitor_update_in_progress()); @@ -10044,14 +10042,13 @@ where /// May panic if some calls other than message-handling calls (which will all Err immediately) /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call. #[rustfmt::skip] - pub fn channel_reestablish( + pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock, path_for_release_htlc: CBP, ) -> Result where L::Target: Logger, - NS::Target: NodeSigner, CBP: Fn(u64) -> BlindedMessagePath { if !self.context.channel_state.is_peer_disconnected() { @@ -11397,12 +11394,11 @@ where } /// Returns `Some` if a splice [`FundingScope`] was promoted. 
- fn maybe_promote_splice_funding( + fn maybe_promote_splice_funding( &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, block_height: u32, logger: &L, ) -> Option where - NS::Target: NodeSigner, L::Target: Logger, { debug_assert!(self.pending_splice.is_some()); @@ -11518,12 +11514,11 @@ where /// In the first case, we store the confirmation height and calculating the short channel id. /// In the second, we simply return an Err indicating we need to be force-closed now. #[rustfmt::skip] - pub fn transactions_confirmed( + pub fn transactions_confirmed( &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData, chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L ) -> Result<(Option, Option), ClosureReason> where - NS::Target: NodeSigner, L::Target: Logger { for &(index_in_block, tx) in txdata.iter() { @@ -11616,12 +11611,11 @@ where /// /// May return some HTLCs (and their payment_hash) which have timed out and should be failed /// back. - pub fn best_block_updated( + pub fn best_block_updated( &mut self, height: u32, highest_header_time: Option, chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L, ) -> Result where - NS::Target: NodeSigner, L::Target: Logger, { self.do_best_block_updated( @@ -11633,12 +11627,11 @@ where } #[rustfmt::skip] - fn do_best_block_updated( + fn do_best_block_updated( &mut self, height: u32, highest_header_time: Option, chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L ) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason> where - NS::Target: NodeSigner, L::Target: Logger { let mut timed_out_htlcs = Vec::new(); @@ -11866,9 +11859,9 @@ where /// /// [`ChannelReady`]: crate::ln::msgs::ChannelReady #[rustfmt::skip] - fn get_channel_announcement( + fn get_channel_announcement( &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, - ) -> Result where NS::Target: NodeSigner { + ) -> Result { if !self.context.config.announce_for_forwarding { return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned())); } @@ -11898,12 +11891,11 @@ where } #[rustfmt::skip] - fn get_announcement_sigs( + fn get_announcement_sigs( &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block_height: u32, logger: &L ) -> Option where - NS::Target: NodeSigner, L::Target: Logger { if self.funding.funding_tx_confirmation_height == 0 || self.funding.funding_tx_confirmation_height + 5 > best_block_height { @@ -11972,9 +11964,9 @@ where /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are /// available. #[rustfmt::skip] - fn sign_channel_announcement( + fn sign_channel_announcement( &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement - ) -> Result where NS::Target: NodeSigner { + ) -> Result { if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs { let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node) .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?); @@ -12009,10 +12001,10 @@ where /// channel_announcement message which we can broadcast and storing our counterparty's /// signatures for later reconstruction/rebroadcast of the channel_announcement. 
#[rustfmt::skip] - pub fn announcement_signatures( + pub fn announcement_signatures( &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, msg: &msgs::AnnouncementSignatures, user_config: &UserConfig - ) -> Result where NS::Target: NodeSigner { + ) -> Result { let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?; let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]); @@ -12040,9 +12032,9 @@ where /// Gets a signed channel_announcement for this channel, if we previously received an /// announcement_signatures from our counterparty. #[rustfmt::skip] - pub fn get_signed_channel_announcement( + pub fn get_signed_channel_announcement( &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig - ) -> Option where NS::Target: NodeSigner { + ) -> Option { if self.funding.funding_tx_confirmation_height == 0 || self.funding.funding_tx_confirmation_height + 5 > best_block_height { return None; } @@ -12741,12 +12733,11 @@ where Ok((holder_balance_floor, counterparty_balance_floor)) } - pub fn splice_locked( + pub fn splice_locked( &mut self, msg: &msgs::SpliceLocked, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, block_height: u32, logger: &L, ) -> Result, ChannelError> where - NS::Target: NodeSigner, L::Target: Logger, { log_info!(logger, "Received splice_locked txid {} from our peer", msg.splice_txid,); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bcb5b2a1f4b..99adfb6d7c5 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1163,11 +1163,11 @@ impl ClaimablePayments { /// /// If no payment is found, `Err(Vec::new())` is returned. #[rustfmt::skip] - fn begin_claiming_payment( + fn begin_claiming_payment( &mut self, payment_hash: PaymentHash, node_signer: &S, logger: &L, inbound_payment_id_secret: &[u8; 32], custom_tlvs_known: bool, ) -> Result<(Vec, ClaimingPayment), Vec> - where L::Target: Logger, S::Target: NodeSigner, + where L::Target: Logger, { match self.claimable_payments.remove(&payment_hash) { Some(payment) => { @@ -1793,9 +1793,7 @@ pub trait AChannelManager { /// A type implementing [`EntropySource`]. type EntropySource: EntropySource; /// A type implementing [`NodeSigner`]. - type NodeSigner: NodeSigner + ?Sized; - /// A type that may be dereferenced to [`Self::NodeSigner`]. - type NS: Deref; + type NodeSigner: NodeSigner; /// A type implementing [`EcdsaChannelSigner`]. type Signer: EcdsaChannelSigner + Sized; /// A type implementing [`SignerProvider`] for [`Self::Signer`]. 
@@ -1825,7 +1823,7 @@ pub trait AChannelManager { Self::M, Self::Broadcaster, Self::EntropySource, - Self::NS, + Self::NodeSigner, Self::SP, Self::F, Self::R, @@ -1838,7 +1836,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -1847,7 +1845,6 @@ impl< > AChannelManager for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -1858,8 +1855,7 @@ where type M = M; type Broadcaster = T; type EntropySource = ES; - type NodeSigner = NS::Target; - type NS = NS; + type NodeSigner = NS; type Signer = ::EcdsaSigner; type SignerProvider = SP::Target; type SP = SP; @@ -2619,7 +2615,7 @@ pub struct ChannelManager< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -2627,7 +2623,6 @@ pub struct ChannelManager< L: Deref, > where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -3407,7 +3402,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -3416,7 +3411,6 @@ impl< > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -6936,7 +6930,7 @@ where let (next_hop, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion( &update_add_htlc, - &*self.node_signer, + &self.node_signer, &*self.logger, &self.secp_ctx, ) { @@ -6957,7 +6951,7 @@ where next_hop_hmac, new_packet_bytes, next_packet_details, - &*self.node_signer, + &self.node_signer, &self.secp_ctx, ); @@ -7412,7 +7406,7 @@ where onion_packet.hmac, payment_hash, None, - &*self.node_signer, + &self.node_signer, ); let next_hop = match decode_res { Ok(res) => res, @@ -13546,7 +13540,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -13555,7 +13549,6 @@ impl< > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -14421,7 +14414,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -14430,7 +14423,6 @@ impl< > BaseMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -14790,7 +14782,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -14799,7 +14791,6 @@ impl< > EventsProvider for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -14823,7 +14814,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -14832,7 +14823,6 @@ impl< > chain::Listen for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -14882,7 +14872,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -14891,7 +14881,6 @@ impl< > 
chain::Confirm for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -15053,7 +15042,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -15062,7 +15051,6 @@ impl< > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -15413,7 +15401,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -15422,7 +15410,6 @@ impl< > ChannelMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -15986,7 +15973,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -15995,7 +15982,6 @@ impl< > OffersMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -16202,7 +16188,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -16211,7 +16197,6 @@ impl< > AsyncPaymentsMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -16445,7 +16430,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -16454,7 +16439,6 @@ impl< > DNSResolverMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -16511,7 +16495,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -16520,7 +16504,6 @@ impl< > NodeIdLookUp for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -17025,7 +17008,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -17034,7 +17017,6 @@ impl< > Writeable for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -17390,7 +17372,7 @@ pub struct ChannelManagerReadArgs< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -17398,7 +17380,6 @@ pub struct ChannelManagerReadArgs< L: Deref + Clone, > where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -17468,7 +17449,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -17477,7 +17458,6 @@ impl< > ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L> where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -17554,7 +17534,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: 
Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -17564,7 +17544,6 @@ impl< for (BlockHash, Arc>) where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, @@ -17585,7 +17564,7 @@ impl< M: Deref, T: BroadcasterInterface, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP: Deref, F: Deref, R: Deref, @@ -17595,7 +17574,6 @@ impl< for (BlockHash, ChannelManager) where M::Target: chain::Watch<::EcdsaSigner>, - NS::Target: NodeSigner, SP::Target: SignerProvider, F::Target: FeeEstimator, R::Target: Router, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index c68a2c7cf9a..e560e70c8a6 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -735,7 +735,7 @@ pub trait NodeHolder { ::M, ::Broadcaster, ::EntropySource, - ::NS, + ::NodeSigner, ::SP, ::F, ::R, @@ -752,7 +752,7 @@ impl NodeHolder for &H { ::M, ::Broadcaster, ::EntropySource, - ::NS, + ::NodeSigner, ::SP, ::F, ::R, diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index 5e4036b9d7e..e99f53a8b88 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -67,14 +67,13 @@ use core::time::Duration; feature = "std", doc = "This can be used in a `no_std` environment, where [`std::time::SystemTime`] is not available and the current time is supplied by the caller." )] -pub fn create_phantom_invoice( +pub fn create_phantom_invoice( amt_msat: Option, payment_hash: Option, description: String, invoice_expiry_delta_secs: u32, phantom_route_hints: Vec, entropy_source: ES, node_signer: NS, logger: L, network: Currency, min_final_cltv_expiry_delta: Option, duration_since_epoch: Duration, ) -> Result> where - NS::Target: NodeSigner, L::Target: Logger, { let description = Description::new(description).map_err(SignOrCreationError::CreationError)?; @@ -134,14 +133,13 @@ where feature = "std", doc = "This version can be used in a `no_std` environment, where [`std::time::SystemTime`] is not available and the current time is supplied by the caller." 
)] -pub fn create_phantom_invoice_with_description_hash( +pub fn create_phantom_invoice_with_description_hash( amt_msat: Option, payment_hash: Option, invoice_expiry_delta_secs: u32, description_hash: Sha256, phantom_route_hints: Vec, entropy_source: ES, node_signer: NS, logger: L, network: Currency, min_final_cltv_expiry_delta: Option, duration_since_epoch: Duration, ) -> Result> where - NS::Target: NodeSigner, L::Target: Logger, { _create_phantom_invoice::( @@ -161,14 +159,13 @@ where const MAX_CHANNEL_HINTS: usize = 3; -fn _create_phantom_invoice( +fn _create_phantom_invoice( amt_msat: Option, payment_hash: Option, description: Bolt11InvoiceDescription, invoice_expiry_delta_secs: u32, phantom_route_hints: Vec, entropy_source: ES, node_signer: NS, logger: L, network: Currency, min_final_cltv_expiry_delta: Option, duration_since_epoch: Duration, ) -> Result> where - NS::Target: NodeSigner, L::Target: Logger, { if phantom_route_hints.is_empty() { diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 2bb2b244ccb..2f7d1c46880 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -50,7 +50,6 @@ use crate::io_extras::read_to_end; use core::fmt; use core::fmt::Debug; use core::fmt::Display; -use core::ops::Deref; #[cfg(feature = "std")] use core::str::FromStr; #[cfg(feature = "std")] @@ -3637,10 +3636,7 @@ impl<'a> Writeable for OutboundTrampolinePayload<'a> { } } -impl ReadableArgs<(Option, NS)> for InboundOnionPayload -where - NS::Target: NodeSigner, -{ +impl ReadableArgs<(Option, NS)> for InboundOnionPayload { fn read(r: &mut R, args: (Option, NS)) -> Result { let (update_add_blinding_point, node_signer) = args; @@ -3824,10 +3820,7 @@ where } } -impl ReadableArgs<(Option, NS)> for InboundTrampolinePayload -where - NS::Target: NodeSigner, -{ +impl ReadableArgs<(Option, NS)> for InboundTrampolinePayload { fn read(r: &mut R, args: (Option, NS)) -> Result { let (update_add_blinding_point, node_signer) = args; let receive_auth_key = node_signer.get_receive_auth_key(); diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index e7b5f557ffb..ed0de3902e3 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -487,16 +487,15 @@ pub(super) fn create_recv_pending_htlc_info( /// /// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable #[rustfmt::skip] -pub fn peel_payment_onion( +pub fn peel_payment_onion( msg: &msgs::UpdateAddHTLC, node_signer: NS, logger: L, secp_ctx: &Secp256k1, cur_height: u32, allow_skimmed_fees: bool, ) -> Result where - NS::Target: NodeSigner, L::Target: Logger, { let (hop, next_packet_details_opt) = - decode_incoming_update_add_htlc_onion(msg, &*node_signer, &*logger, secp_ctx + decode_incoming_update_add_htlc_onion(msg, &node_signer, &*logger, secp_ctx ).map_err(|(msg, failure_reason)| { let (reason, err_data) = match msg { HTLCFailureMsg::Malformed(_) => (failure_reason, Vec::new()), @@ -551,7 +550,7 @@ where next_hop_hmac, new_packet_bytes, next_packet_details, - &*node_signer, + &node_signer, secp_ctx ); @@ -586,11 +585,10 @@ pub(super) struct NextPacketDetails { } #[rustfmt::skip] -pub(super) fn decode_incoming_update_add_htlc_onion( +pub(super) fn decode_incoming_update_add_htlc_onion( msg: &msgs::UpdateAddHTLC, node_signer: NS, logger: L, secp_ctx: &Secp256k1, ) -> Result<(onion_utils::Hop, Option), (HTLCFailureMsg, LocalHTLCFailureReason)> where - NS::Target: NodeSigner, L::Target: Logger, { let encode_malformed_error = |message: &str, failure_reason: 
LocalHTLCFailureReason| { diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 7cf1062a885..63d92fd4424 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -2322,13 +2322,10 @@ pub(crate) enum OnionDecodeErr { }, } -pub(crate) fn decode_next_payment_hop( +pub(crate) fn decode_next_payment_hop( recipient: Recipient, hop_pubkey: &PublicKey, hop_data: &[u8], hmac_bytes: [u8; 32], payment_hash: PaymentHash, blinding_point: Option, node_signer: NS, -) -> Result -where - NS::Target: NodeSigner, -{ +) -> Result { let blinded_node_id_tweak = blinding_point.map(|bp| { let blinded_tlvs_ss = node_signer.ecdh(recipient, &bp, None).unwrap().secret_bytes(); let mut hmac = HmacEngine::::new(b"blinded_node_id"); @@ -2343,7 +2340,7 @@ where hop_data, hmac_bytes, Some(payment_hash), - (blinding_point, &(*node_signer)), + (blinding_point, &node_signer), ); match decoded_hop { Ok((next_hop_data, Some((next_hop_hmac, FixedSizeOnionPacket(new_packet_bytes))))) => { @@ -2417,7 +2414,7 @@ where &hop_data.trampoline_packet.hop_data, hop_data.trampoline_packet.hmac, Some(payment_hash), - (blinding_point, node_signer), + (blinding_point, &node_signer), ); match decoded_trampoline_hop { Ok(( @@ -2555,14 +2552,11 @@ where /// /// This function performs no validation and does not enqueue or forward the HTLC. /// It only reconstructs the next `UpdateAddHTLC` for further local processing. -pub(super) fn peel_dummy_hop_update_add_htlc( +pub(super) fn peel_dummy_hop_update_add_htlc( msg: &UpdateAddHTLC, dummy_hop_data: InboundOnionDummyPayload, next_hop_hmac: [u8; 32], new_packet_bytes: [u8; ONION_DATA_LEN], next_packet_details: NextPacketDetails, node_signer: NS, secp_ctx: &Secp256k1, -) -> UpdateAddHTLC -where - NS::Target: NodeSigner, -{ +) -> UpdateAddHTLC { let NextPacketDetails { next_packet_pubkey, outgoing_amt_msat, diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index caf31a70599..e2fc21c7442 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -866,7 +866,7 @@ impl OutboundPayments { impl OutboundPayments { #[rustfmt::skip] - pub(super) fn send_payment( + pub(super) fn send_payment( &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, first_hops: Vec, compute_inflight_htlcs: IH, entropy_source: &ES, @@ -876,7 +876,6 @@ impl OutboundPayments { ) -> Result<(), RetryableSendFailure> where R::Target: Router, - NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, L::Target: Logger, @@ -887,7 +886,7 @@ impl OutboundPayments { } #[rustfmt::skip] - pub(super) fn send_spontaneous_payment( + pub(super) fn send_spontaneous_payment( &self, payment_preimage: Option, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, @@ -897,7 +896,6 @@ impl OutboundPayments { ) -> Result where R::Target: Router, - NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, L::Target: Logger, @@ -913,7 +911,7 @@ impl OutboundPayments { } #[rustfmt::skip] - pub(super) fn pay_for_bolt11_invoice( + pub(super) fn pay_for_bolt11_invoice( &self, invoice: &Bolt11Invoice, payment_id: PaymentId, amount_msats: Option, route_params_config: RouteParametersConfig, @@ -926,7 +924,6 
@@ impl OutboundPayments { ) -> Result<(), Bolt11PaymentError> where R::Target: Router, - NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, L::Target: Logger, @@ -961,7 +958,7 @@ impl OutboundPayments { #[rustfmt::skip] pub(super) fn send_payment_for_bolt12_invoice< - R: Deref, ES: EntropySource, NS: Deref, NL: Deref, IH, SP, L: Deref, + R: Deref, ES: EntropySource, NS: NodeSigner, NL: Deref, IH, SP, L: Deref, >( &self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R, first_hops: Vec, features: Bolt12InvoiceFeatures, inflight_htlcs: IH, @@ -972,7 +969,6 @@ impl OutboundPayments { ) -> Result<(), Bolt12PaymentError> where R::Target: Router, - NS::Target: NodeSigner, NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -1006,7 +1002,7 @@ impl OutboundPayments { #[rustfmt::skip] fn send_payment_for_bolt12_invoice_internal< - R: Deref, ES: EntropySource, NS: Deref, NL: Deref, IH, SP, L: Deref, + R: Deref, ES: EntropySource, NS: NodeSigner, NL: Deref, IH, SP, L: Deref, >( &self, payment_id: PaymentId, payment_hash: PaymentHash, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, @@ -1019,7 +1015,6 @@ impl OutboundPayments { ) -> Result<(), Bolt12PaymentError> where R::Target: Router, - NS::Target: NodeSigner, NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -1223,7 +1218,7 @@ impl OutboundPayments { pub(super) fn send_payment_for_static_invoice< R: Deref, ES: EntropySource, - NS: Deref, + NS: NodeSigner, NL: Deref, IH, SP, @@ -1237,7 +1232,6 @@ impl OutboundPayments { ) -> Result<(), Bolt12PaymentError> where R::Target: Router, - NS::Target: NodeSigner, NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -1308,7 +1302,7 @@ impl OutboundPayments { pub(super) fn check_retry_payments< R: Deref, ES: EntropySource, - NS: Deref, + NS: NodeSigner, SP, IH, FH, @@ -1321,7 +1315,6 @@ impl OutboundPayments { ) -> bool where R::Target: Router, - NS::Target: NodeSigner, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, IH: Fn() -> InFlightHtlcs, FH: Fn() -> Vec, @@ -1424,7 +1417,7 @@ impl OutboundPayments { } #[rustfmt::skip] - fn find_initial_route( + fn find_initial_route( &self, payment_id: PaymentId, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, route_params: &mut RouteParameters, router: &R, first_hops: &Vec, @@ -1432,7 +1425,6 @@ impl OutboundPayments { ) -> Result where R::Target: Router, - NS::Target: NodeSigner, L::Target: Logger, IH: Fn() -> InFlightHtlcs, { @@ -1479,7 +1471,7 @@ impl OutboundPayments { /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed #[rustfmt::skip] - fn send_payment_for_non_bolt12_invoice( + fn send_payment_for_non_bolt12_invoice( &self, payment_id: PaymentId, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, retry_strategy: Retry, mut route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, @@ -1489,7 +1481,6 @@ impl OutboundPayments { ) -> Result<(), RetryableSendFailure> where R::Target: Router, - NS::Target: NodeSigner, L::Target: Logger, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -1524,7 +1515,7 @@ impl OutboundPayments { } 
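The hunks above repeat one mechanical transformation: a `NS: Deref` type parameter plus a `NS::Target: NodeSigner` where-bound collapses into the single bound `NS: NodeSigner`. The two formulations accept the same callers because of the blanket impl added to `lightning/src/sign/mod.rs` later in this patch. A self-contained toy sketch of the trick, using a stand-in `Sign` trait rather than LDK's actual `NodeSigner`, and one plausible shape for the blanket impl:

    use std::ops::Deref;
    use std::sync::Arc;

    trait Sign {
        fn node_id(&self) -> [u8; 33];
    }

    struct Signer;
    impl Sign for Signer {
        fn node_id(&self) -> [u8; 33] {
            [2; 33]
        }
    }

    // The blanket impl: anything that derefs to a `Sign` is itself a `Sign`.
    impl<S: Deref> Sign for S
    where
        S::Target: Sign,
    {
        fn node_id(&self) -> [u8; 33] {
            self.deref().node_id()
        }
    }

    // New-style bound: one generic parameter, no `where S::Target: Sign`.
    fn new_way<S: Sign>(signer: S) -> [u8; 33] {
        signer.node_id()
    }

    fn main() {
        let signer = Arc::new(Signer);
        // The concrete type, a reference to it, and an `Arc` of it all satisfy
        // `S: Sign`, the latter two via the blanket impl.
        let by_value = new_way(Signer);
        let by_ref = new_way(&*signer);
        let by_arc = new_way(Arc::clone(&signer));
        assert!(by_value == by_ref && by_ref == by_arc);
    }

Callers that previously threaded `&KeysManager` or `Arc<KeysManager>` through the `Deref` bound keep compiling unchanged; only the spelling of the bound shrinks.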
#[rustfmt::skip] - fn find_route_and_send_payment( + fn find_route_and_send_payment( &self, payment_hash: PaymentHash, payment_id: PaymentId, route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, @@ -1533,7 +1524,6 @@ impl OutboundPayments { ) where R::Target: Router, - NS::Target: NodeSigner, L::Target: Logger, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -1685,7 +1675,7 @@ impl OutboundPayments { } #[rustfmt::skip] - fn handle_pay_route_err( + fn handle_pay_route_err( &self, err: PaymentSendFailure, payment_id: PaymentId, payment_hash: PaymentHash, route: Route, mut route_params: RouteParameters, onion_session_privs: Vec<[u8; 32]>, router: &R, first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, @@ -1695,7 +1685,6 @@ impl OutboundPayments { ) where R::Target: Router, - NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, L::Target: Logger, @@ -1806,12 +1795,11 @@ impl OutboundPayments { } #[rustfmt::skip] - pub(super) fn send_probe( + pub(super) fn send_probe( &self, path: Path, probing_cookie_secret: [u8; 32], entropy_source: &ES, node_signer: &NS, best_block_height: u32, send_payment_along_path: F, ) -> Result<(PaymentHash, PaymentId), ProbeSendFailure> where - NS::Target: NodeSigner, F: Fn(SendAlongPathArgs) -> Result<(), APIError>, { let payment_id = PaymentId(entropy_source.get_secure_random_bytes()); @@ -2078,14 +2066,13 @@ impl OutboundPayments { } #[rustfmt::skip] - fn pay_route_internal( + fn pay_route_internal( &self, route: &Route, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, bolt12_invoice: Option<&PaidBolt12Invoice>, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: &Vec<[u8; 32]>, hold_htlcs_at_next_hop: bool, node_signer: &NS, best_block_height: u32, send_payment_along_path: &F ) -> Result<(), PaymentSendFailure> where - NS::Target: NodeSigner, F: Fn(SendAlongPathArgs) -> Result<(), APIError>, { if route.paths.len() < 1 { @@ -2193,14 +2180,13 @@ impl OutboundPayments { #[cfg(any(test, feature = "_externalize_tests"))] #[rustfmt::skip] - pub(super) fn test_send_payment_internal( + pub(super) fn test_send_payment_internal( &self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>, node_signer: &NS, best_block_height: u32, send_payment_along_path: F ) -> Result<(), PaymentSendFailure> where - NS::Target: NodeSigner, F: Fn(SendAlongPathArgs) -> Result<(), APIError>, { self.pay_route_internal(route, payment_hash, &recipient_onion, diff --git a/lightning/src/ln/peer_channel_encryptor.rs b/lightning/src/ln/peer_channel_encryptor.rs index 894de045b14..5554c5a8c19 100644 --- a/lightning/src/ln/peer_channel_encryptor.rs +++ b/lightning/src/ln/peer_channel_encryptor.rs @@ -30,8 +30,6 @@ use crate::crypto::chacha20poly1305rfc::ChaCha20Poly1305RFC; use crate::crypto::utils::hkdf_extract_expand_twice; use crate::util::ser::VecWriter; -use core::ops::Deref; - /// Maximum Lightning message data length according to /// [BOLT-8](https://github.com/lightning/bolts/blob/v1.0/08-transport.md#lightning-message-specification) /// and [BOLT-1](https://github.com/lightning/bolts/blob/master/01-messaging.md#lightning-message-format): @@ -52,10 +50,7 @@ const 
NOISE_H: [u8; 32] = [ 0x4b, 0xb4, 0x20, 0xd8, 0x9d, 0x2a, 0x04, 0x8a, 0x3c, 0x4f, 0x4c, 0x09, 0x2e, 0x37, 0xb6, 0x76, ]; -enum NoiseSecretKey<'a, 'b, NS: Deref> -where - NS::Target: NodeSigner, -{ +enum NoiseSecretKey<'a, 'b, NS: NodeSigner> { InMemory(&'a SecretKey), NodeSigner(&'b NS), } @@ -130,10 +125,7 @@ impl PeerChannelEncryptor { } } - pub fn new_inbound(node_signer: &NS) -> PeerChannelEncryptor - where - NS::Target: NodeSigner, - { + pub fn new_inbound(node_signer: &NS) -> PeerChannelEncryptor { let mut sha = Sha256::engine(); sha.input(&NOISE_H); let our_node_id = node_signer.get_node_id(Recipient::Node).unwrap(); @@ -248,12 +240,9 @@ impl PeerChannelEncryptor { } #[inline] - fn inbound_noise_act<'a, 'b, NS: Deref>( + fn inbound_noise_act<'a, 'b, NS: NodeSigner>( state: &mut BidirectionalNoiseState, act: &[u8], secret_key: NoiseSecretKey<'a, 'b, NS>, - ) -> Result<(PublicKey, [u8; 32]), LightningError> - where - NS::Target: NodeSigner, - { + ) -> Result<(PublicKey, [u8; 32]), LightningError> { assert_eq!(act.len(), 50); if act[0] != 0 { @@ -327,13 +316,10 @@ impl PeerChannelEncryptor { } } - pub fn process_act_one_with_keys( + pub fn process_act_one_with_keys( &mut self, act_one: &[u8], node_signer: &NS, our_ephemeral: SecretKey, secp_ctx: &Secp256k1, - ) -> Result<[u8; 50], LightningError> - where - NS::Target: NodeSigner, - { + ) -> Result<[u8; 50], LightningError> { assert_eq!(act_one.len(), 50); match self.noise_state { @@ -372,12 +358,9 @@ impl PeerChannelEncryptor { } } - pub fn process_act_two( + pub fn process_act_two( &mut self, act_two: &[u8], node_signer: &NS, - ) -> Result<([u8; 66], PublicKey), LightningError> - where - NS::Target: NodeSigner, - { + ) -> Result<([u8; 66], PublicKey), LightningError> { assert_eq!(act_two.len(), 50); let final_hkdf; diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 4d1dff9cd52..c2bb0af3103 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -981,8 +981,7 @@ pub trait APeerManager { type L: Deref; type CMHT: CustomMessageHandler + ?Sized; type CMH: Deref; - type NST: NodeSigner + ?Sized; - type NS: Deref; + type NodeSigner: NodeSigner; type SMT: SendOnlyMessageHandler + ?Sized; type SM: Deref; /// Gets a reference to the underlying [`PeerManager`]. 
@@ -995,7 +994,7 @@ pub trait APeerManager { Self::OM, Self::L, Self::CMH, - Self::NS, + Self::NodeSigner, Self::SM, >; } @@ -1007,7 +1006,7 @@ impl< OM: Deref, L: Deref, CMH: Deref, - NS: Deref, + NS: NodeSigner, SM: Deref, > APeerManager for PeerManager where @@ -1016,7 +1015,6 @@ where OM::Target: OnionMessageHandler, L::Target: Logger, CMH::Target: CustomMessageHandler, - NS::Target: NodeSigner, SM::Target: SendOnlyMessageHandler, { type Descriptor = Descriptor; @@ -1030,8 +1028,7 @@ where type L = L; type CMHT = ::Target; type CMH = CMH; - type NST = ::Target; - type NS = NS; + type NodeSigner = NS; type SMT = ::Target; type SM = SM; fn as_ref(&self) -> &PeerManager { @@ -1065,7 +1062,7 @@ pub struct PeerManager< OM: Deref, L: Deref, CMH: Deref, - NS: Deref, + NS: NodeSigner, SM: Deref, > where CM::Target: ChannelMessageHandler, @@ -1073,7 +1070,6 @@ pub struct PeerManager< OM::Target: OnionMessageHandler, L::Target: Logger, CMH::Target: CustomMessageHandler, - NS::Target: NodeSigner, SM::Target: SendOnlyMessageHandler, { message_handler: MessageHandler, @@ -1151,13 +1147,12 @@ fn encode_message(message: wire::Message) -> Vec { buffer.0 } -impl +impl PeerManager where CM::Target: ChannelMessageHandler, OM::Target: OnionMessageHandler, L::Target: Logger, - NS::Target: NodeSigner, SM::Target: SendOnlyMessageHandler, { /// Constructs a new `PeerManager` with the given `ChannelMessageHandler` and @@ -1194,7 +1189,7 @@ where } } -impl +impl PeerManager< Descriptor, ErroringMessageHandler, @@ -1207,7 +1202,6 @@ impl > where RM::Target: RoutingMessageHandler, L::Target: Logger, - NS::Target: NodeSigner, { /// Constructs a new `PeerManager` with the given `RoutingMessageHandler`. No channel message /// handler or onion message handler is used and onion and channel messages will be ignored (or @@ -1298,7 +1292,7 @@ impl< OM: Deref, L: Deref, CMH: Deref, - NS: Deref, + NS: NodeSigner, SM: Deref, > PeerManager where @@ -1307,7 +1301,6 @@ where OM::Target: OnionMessageHandler, L::Target: Logger, CMH::Target: CustomMessageHandler, - NS::Target: NodeSigner, SM::Target: SendOnlyMessageHandler, { /// Constructs a new `PeerManager` with the given message handlers. 
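The `PeerManager` changes above rely on every component observing a consistent signer, the requirement spelled out in the `NodeSigner` docs added to `lightning/src/sign/mod.rs` below. The usual way to satisfy both the new `NS: NodeSigner` bound and that requirement is to share one `KeysManager` behind an `Arc`. A minimal sketch, assuming the `lightning` crate's `KeysManager` constructor and the blanket `NodeSigner`-for-`Deref` impl added later in this patch:

    use std::sync::Arc;
    use std::time::SystemTime;

    use lightning::sign::{KeysManager, NodeSigner, Recipient};

    fn main() {
        let seed = [42u8; 32]; // toy seed; derive from real randomness in practice
        let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap();
        let keys = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));

        // Clones of the `Arc` satisfy `NS: NodeSigner` directly via the blanket
        // impl, and trivially all report the same node id.
        let for_peer_manager = Arc::clone(&keys);
        let for_channel_manager = Arc::clone(&keys);
        assert_eq!(
            for_peer_manager.get_node_id(Recipient::Node).unwrap(),
            for_channel_manager.get_node_id(Recipient::Node).unwrap(),
        );
    }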
diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index d859d35dc09..5f4b703618b 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -68,9 +68,7 @@ pub trait AOnionMessenger { /// A type implementing [`EntropySource`] type EntropySource: EntropySource; /// A type implementing [`NodeSigner`] - type NodeSigner: NodeSigner + ?Sized; - /// A type that may be dereferenced to [`Self::NodeSigner`] - type NS: Deref; + type NodeSigner: NodeSigner; /// A type implementing [`Logger`] type Logger: Logger + ?Sized; /// A type that may be dereferenced to [`Self::Logger`] @@ -104,7 +102,7 @@ pub trait AOnionMessenger { &self, ) -> &OnionMessenger< Self::EntropySource, - Self::NS, + Self::NodeSigner, Self::L, Self::NL, Self::MR, @@ -117,7 +115,7 @@ pub trait AOnionMessenger { impl< ES: EntropySource, - NS: Deref, + NS: NodeSigner, L: Deref, NL: Deref, MR: Deref, @@ -127,7 +125,6 @@ impl< CMH: Deref, > AOnionMessenger for OnionMessenger where - NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, MR::Target: MessageRouter, @@ -137,8 +134,7 @@ where CMH::Target: CustomOnionMessageHandler, { type EntropySource = ES; - type NodeSigner = NS::Target; - type NS = NS; + type NodeSigner = NS; type Logger = L::Target; type L = L; type NodeIdLookUp = NL::Target; @@ -281,7 +277,7 @@ where /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice pub struct OnionMessenger< ES: EntropySource, - NS: Deref, + NS: NodeSigner, L: Deref, NL: Deref, MR: Deref, @@ -290,7 +286,6 @@ pub struct OnionMessenger< DRH: Deref, CMH: Deref, > where - NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, MR::Target: MessageRouter, @@ -1042,7 +1037,7 @@ pub enum PeeledOnion { /// needed to connect to the first node. pub fn create_onion_message_resolving_destination< ES: EntropySource, - NS: Deref, + NS: NodeSigner, NL: Deref, T: OnionMessageContents, >( @@ -1051,7 +1046,6 @@ pub fn create_onion_message_resolving_destination< mut path: OnionMessagePath, contents: T, reply_path: Option, ) -> Result<(PublicKey, OnionMessage, Vec), SendError> where - NS::Target: NodeSigner, NL::Target: NodeIdLookUp, { path.destination.resolve(network_graph); @@ -1077,13 +1071,12 @@ where /// - unless it can be resolved by [`NodeIdLookUp::next_node_id`]. /// Use [`create_onion_message_resolving_destination`] instead to resolve the introduction node /// first with a [`ReadOnlyNetworkGraph`]. -pub fn create_onion_message( +pub fn create_onion_message( entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, path: OnionMessagePath, contents: T, reply_path: Option, ) -> Result<(PublicKey, OnionMessage, Vec), SendError> where - NS::Target: NodeSigner, NL::Target: NodeIdLookUp, { let OnionMessagePath { intermediate_nodes, mut destination, first_node_addresses } = path; @@ -1158,12 +1151,11 @@ where /// /// Returns either the next layer of the onion for forwarding or the decrypted content for the /// receiver. -pub fn peel_onion_message( +pub fn peel_onion_message( msg: &OnionMessage, secp_ctx: &Secp256k1, node_signer: NS, logger: L, custom_handler: CMH, ) -> Result::Target as CustomOnionMessageHandler>::CustomMessage>, ()> where - NS::Target: NodeSigner, L::Target: Logger, CMH::Target: CustomOnionMessageHandler, { @@ -1382,7 +1374,7 @@ macro_rules! 
drop_handled_events_and_abort { impl< ES: EntropySource, - NS: Deref, + NS: NodeSigner, L: Deref, NL: Deref, MR: Deref, @@ -1392,7 +1384,6 @@ impl< CMH: Deref, > OnionMessenger where - NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, MR::Target: MessageRouter, @@ -1794,7 +1785,7 @@ where peel_onion_message( msg, &self.secp_ctx, - &*self.node_signer, + &self.node_signer, &*self.logger, &*self.custom_handler, ) @@ -2025,7 +2016,7 @@ fn outbound_buffer_full( impl< ES: EntropySource, - NS: Deref, + NS: NodeSigner, L: Deref, NL: Deref, MR: Deref, @@ -2035,7 +2026,6 @@ impl< CMH: Deref, > EventsProvider for OnionMessenger where - NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, MR::Target: MessageRouter, @@ -2145,7 +2135,7 @@ where impl< ES: EntropySource, - NS: Deref, + NS: NodeSigner, L: Deref, NL: Deref, MR: Deref, @@ -2155,7 +2145,6 @@ impl< CMH: Deref, > BaseMessageHandler for OnionMessenger where - NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, MR::Target: MessageRouter, @@ -2216,7 +2205,7 @@ where impl< ES: EntropySource, - NS: Deref, + NS: NodeSigner, L: Deref, NL: Deref, MR: Deref, @@ -2226,7 +2215,6 @@ impl< CMH: Deref, > OnionMessageHandler for OnionMessenger where - NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, MR::Target: MessageRouter, diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 51b00a68037..fea20625f0b 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -885,6 +885,13 @@ impl> EntropySource for E { } /// A trait that can handle cryptographic operations at the scope level of a node. +/// +/// Instantiations of this trait should generally be shared by reference across the lightning +/// node's components; the [`NodeSigner`]s provided to [`PeerManager`], [`ChannelManager`], +/// etc. MUST all return the same value for a given input. +/// +/// [`PeerManager`]: crate::ln::peer_handler::PeerManager +/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager pub trait NodeSigner { /// Get the [`ExpandedKey`] which provides cryptographic material for various Lightning Network operations. /// @@ -998,6 +1005,42 @@ pub trait NodeSigner { fn sign_message(&self, msg: &[u8]) -> Result; } +impl> NodeSigner for N { + fn get_expanded_key(&self) -> ExpandedKey { + self.deref().get_expanded_key() + } + fn get_peer_storage_key(&self) -> PeerStorageKey { + self.deref().get_peer_storage_key() + } + fn get_receive_auth_key(&self) -> ReceiveAuthKey { + self.deref().get_receive_auth_key() + } + fn get_node_id(&self, recipient: Recipient) -> Result { + self.deref().get_node_id(recipient) + } + fn ecdh( + &self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>, + ) -> Result { + self.deref().ecdh(recipient, other_key, tweak) + } + fn sign_invoice( + &self, invoice: &RawBolt11Invoice, recipient: Recipient, + ) -> Result { + self.deref().sign_invoice(invoice, recipient) + } + fn sign_bolt12_invoice( + &self, invoice: &UnsignedBolt12Invoice, + ) -> Result { + self.deref().sign_bolt12_invoice(invoice) + } + fn sign_gossip_message(&self, msg: UnsignedGossipMessage) -> Result { + self.deref().sign_gossip_message(msg) + } + fn sign_message(&self, msg: &[u8]) -> Result { + self.deref().sign_message(msg) + } +} + /// A trait that describes a wallet capable of creating a spending [`Transaction`] from a set of /// [`SpendableOutputDescriptor`]s.
pub trait OutputSpender { From b6f36703866354183aabcdaa940984eec4b3abac Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 14 Jan 2026 18:22:30 -0500 Subject: [PATCH 142/242] Drop Deref indirection for FeeEstimator Reduces generics and verbosity across the codebase and should provide equivalent behavior. Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 9 +- lightning/src/chain/chaininterface.rs | 19 +- lightning/src/chain/chainmonitor.rs | 52 ++--- lightning/src/chain/channelmonitor.rs | 96 ++++----- lightning/src/chain/onchaintx.rs | 38 ++-- lightning/src/chain/package.rs | 24 +-- lightning/src/ln/chan_utils.rs | 7 +- lightning/src/ln/channel.rs | 186 ++++++------------ lightning/src/ln/channel_state.rs | 3 +- lightning/src/ln/channelmanager.rs | 66 +++---- lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/util/anchor_channel_reserves.rs | 7 +- lightning/src/util/persist.rs | 36 ++-- lightning/src/util/sweep.rs | 103 +++++++--- 14 files changed, 272 insertions(+), 378 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 6731dae4b2e..941de6b3cee 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -949,7 +949,7 @@ pub async fn process_events_async< UL: Deref, CF: Deref, T: BroadcasterInterface, - F: Deref, + F: FeeEstimator, G: Deref>, L: Deref, P: Deref, @@ -981,7 +981,6 @@ pub async fn process_events_async< where UL::Target: UtxoLookup, CF::Target: chain::Filter, - F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist<::Signer>, CM::Target: AChannelManager, @@ -1448,7 +1447,7 @@ pub async fn process_events_async_with_kv_store_sync< UL: Deref, CF: Deref, T: BroadcasterInterface, - F: Deref, + F: FeeEstimator, G: Deref>, L: Deref, P: Deref, @@ -1480,7 +1479,6 @@ pub async fn process_events_async_with_kv_store_sync< where UL::Target: UtxoLookup, CF::Target: chain::Filter, - F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist<::Signer>, CM::Target: AChannelManager, @@ -1560,7 +1558,7 @@ impl BackgroundProcessor { UL: 'static + Deref, CF: 'static + Deref, T: 'static + BroadcasterInterface, - F: 'static + Deref + Send, + F: 'static + FeeEstimator + Send, G: 'static + Deref>, L: 'static + Deref + Send, P: 'static + Deref, @@ -1592,7 +1590,6 @@ impl BackgroundProcessor { where UL::Target: 'static + UtxoLookup, CF::Target: 'static + chain::Filter, - F::Target: 'static + FeeEstimator, L::Target: 'static + Logger, P::Target: 'static + Persist<::Signer>, CM::Target: AChannelManager, diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index d21017c25bb..7e71d960e67 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -187,6 +187,12 @@ pub trait FeeEstimator { fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32; } +impl> FeeEstimator for F { + fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 { + self.deref().get_est_sat_per_1000_weight(confirmation_target) + } +} + /// Minimum relay fee as required by bitcoin network mempool policy. pub const INCREMENTAL_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 253; /// Minimum feerate that takes a sane approach to bitcoind weight-to-vbytes rounding.
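As with `NodeSigner`, the blanket impl just above means that anything which derefs to a `FeeEstimator` still satisfies a plain `F: FeeEstimator` bound, so existing callers passing `&E` or `Arc<E>` keep working. A minimal sketch: the `StaticEstimator` type and `bounded_estimate` helper are illustrative only (the helper mirrors the clamping the crate-private `LowerBoundedFeeEstimator` performs, it is not LDK's implementation), and the specific `ConfirmationTarget` variant chosen is incidental:

    use std::sync::Arc;

    use lightning::chain::chaininterface::{
        ConfirmationTarget, FeeEstimator, FEERATE_FLOOR_SATS_PER_KW,
    };

    /// A toy estimator returning one feerate for every confirmation target.
    struct StaticEstimator(u32);

    impl FeeEstimator for StaticEstimator {
        fn get_est_sat_per_1000_weight(&self, _target: ConfirmationTarget) -> u32 {
            self.0
        }
    }

    /// Clamps an estimate to the 253 sat/KW floor, as `LowerBoundedFeeEstimator` does.
    fn bounded_estimate<F: FeeEstimator>(fee_estimator: F, target: ConfirmationTarget) -> u32 {
        fee_estimator.get_est_sat_per_1000_weight(target).max(FEERATE_FLOOR_SATS_PER_KW)
    }

    fn main() {
        let est = Arc::new(StaticEstimator(100));
        // `Arc<StaticEstimator>` satisfies `F: FeeEstimator` via the blanket impl;
        // the 100 sat/KW estimate is clamped up to the 253 sat/KW floor.
        assert_eq!(bounded_estimate(Arc::clone(&est), ConfirmationTarget::AnchorChannelFee), 253);
    }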
@@ -194,19 +200,14 @@ pub const INCREMENTAL_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 253; /// pub const FEERATE_FLOOR_SATS_PER_KW: u32 = 253; -/// Wraps a `Deref` to a `FeeEstimator` so that any fee estimations provided by it -/// are bounded below by `FEERATE_FLOOR_SATS_PER_KW` (253 sats/KW). +/// Wraps a [`FeeEstimator`] so that any fee estimations provided by it are bounded below by +/// `FEERATE_FLOOR_SATS_PER_KW` (253 sats/KW). /// /// Note that this does *not* implement [`FeeEstimator`] to make it harder to accidentally mix the /// two. -pub(crate) struct LowerBoundedFeeEstimator(pub F) -where - F::Target: FeeEstimator; +pub(crate) struct LowerBoundedFeeEstimator(pub F); -impl LowerBoundedFeeEstimator -where - F::Target: FeeEstimator, -{ +impl LowerBoundedFeeEstimator { /// Creates a new `LowerBoundedFeeEstimator` which wraps the provided fee_estimator pub fn new(fee_estimator: F) -> Self { LowerBoundedFeeEstimator(fee_estimator) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index e4a9ca99290..30f1d56ab71 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -262,12 +262,11 @@ pub struct AsyncPersister< ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, - FE: Deref + MaybeSend + MaybeSync + 'static, + FE: FeeEstimator + MaybeSend + MaybeSync + 'static, > where K::Target: KVStore + MaybeSync, L::Target: Logger, SP::Target: SignerProvider + Sized, - FE::Target: FeeEstimator, { persister: MonitorUpdatingPersisterAsync, event_notifier: Arc, @@ -280,13 +279,12 @@ impl< ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, - FE: Deref + MaybeSend + MaybeSync + 'static, + FE: FeeEstimator + MaybeSend + MaybeSync + 'static, > Deref for AsyncPersister where K::Target: KVStore + MaybeSync, L::Target: Logger, SP::Target: SignerProvider + Sized, - FE::Target: FeeEstimator, { type Target = Self; fn deref(&self) -> &Self { @@ -301,13 +299,12 @@ impl< ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, - FE: Deref + MaybeSend + MaybeSync + 'static, + FE: FeeEstimator + MaybeSend + MaybeSync + 'static, > Persist<::EcdsaSigner> for AsyncPersister where K::Target: KVStore + MaybeSync, L::Target: Logger, SP::Target: SignerProvider + Sized, - FE::Target: FeeEstimator, ::EcdsaSigner: MaybeSend + 'static, { fn persist_new_channel( @@ -357,13 +354,12 @@ pub struct ChainMonitor< ChannelSigner: EcdsaChannelSigner, C: Deref, T: BroadcasterInterface, - F: Deref, + F: FeeEstimator, L: Deref, P: Deref, ES: EntropySource, > where C::Target: chain::Filter, - F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, { @@ -397,7 +393,7 @@ impl< SP: Deref + MaybeSend + MaybeSync + 'static, C: Deref, T: BroadcasterInterface + MaybeSend + MaybeSync + 'static, - F: Deref + MaybeSend + MaybeSync + 'static, + F: FeeEstimator + MaybeSend + MaybeSync + 'static, L: Deref + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, > @@ -413,7 +409,6 @@ impl< K::Target: KVStore + MaybeSync, SP::Target: SignerProvider + Sized, C::Target: chain::Filter, - F::Target: FeeEstimator, L::Target: Logger, ::EcdsaSigner: MaybeSend + 'static, { @@ -453,14 +448,13 @@ impl< ChannelSigner: EcdsaChannelSigner, C: Deref, 
T: BroadcasterInterface, - F: Deref, + F: FeeEstimator, L: Deref, P: Deref, ES: EntropySource, > ChainMonitor where C::Target: chain::Filter, - F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, { @@ -884,7 +878,7 @@ where for (_, monitor_holder) in &*monitors { monitor_holder.monitor.rebroadcast_pending_claims( &self.broadcaster, - &*self.fee_estimator, + &self.fee_estimator, &self.logger, ) } @@ -900,7 +894,7 @@ where if let Some(monitor_holder) = monitors.get(&channel_id) { monitor_holder.monitor.signer_unblocked( &self.broadcaster, - &*self.fee_estimator, + &self.fee_estimator, &self.logger, ) } @@ -908,7 +902,7 @@ where for (_, monitor_holder) in &*monitors { monitor_holder.monitor.signer_unblocked( &self.broadcaster, - &*self.fee_estimator, + &self.fee_estimator, &self.logger, ) } @@ -1098,14 +1092,13 @@ impl< ChannelSigner: EcdsaChannelSigner, C: Deref, T: BroadcasterInterface, - F: Deref, + F: FeeEstimator, L: Deref, P: Deref, ES: EntropySource, > BaseMessageHandler for ChainMonitor where C::Target: chain::Filter, - F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, { @@ -1135,14 +1128,13 @@ impl< ChannelSigner: EcdsaChannelSigner, C: Deref, T: BroadcasterInterface, - F: Deref, + F: FeeEstimator, L: Deref, P: Deref, ES: EntropySource, > SendOnlyMessageHandler for ChainMonitor where C::Target: chain::Filter, - F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, { @@ -1152,14 +1144,13 @@ impl< ChannelSigner: EcdsaChannelSigner, C: Deref, T: BroadcasterInterface, - F: Deref, + F: FeeEstimator, L: Deref, P: Deref, ES: EntropySource, > chain::Listen for ChainMonitor where C::Target: chain::Filter, - F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, { @@ -1176,7 +1167,7 @@ where txdata, height, &self.broadcaster, - &*self.fee_estimator, + &self.fee_estimator, &self.logger, ) }); @@ -1203,7 +1194,7 @@ where monitor_state.monitor.blocks_disconnected( fork_point, &self.broadcaster, - &*self.fee_estimator, + &self.fee_estimator, &self.logger, ); } @@ -1214,14 +1205,13 @@ impl< ChannelSigner: EcdsaChannelSigner, C: Deref, T: BroadcasterInterface, - F: Deref, + F: FeeEstimator, L: Deref, P: Deref, ES: EntropySource, > chain::Confirm for ChainMonitor where C::Target: chain::Filter, - F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, { @@ -1239,7 +1229,7 @@ where txdata, height, &self.broadcaster, - &*self.fee_estimator, + &self.fee_estimator, &self.logger, ) }); @@ -1254,7 +1244,7 @@ where monitor_state.monitor.transaction_unconfirmed( txid, &self.broadcaster, - &*self.fee_estimator, + &self.fee_estimator, &self.logger, ); } @@ -1275,7 +1265,7 @@ where header, height, &self.broadcaster, - &*self.fee_estimator, + &self.fee_estimator, &self.logger, ) }); @@ -1307,14 +1297,13 @@ impl< ChannelSigner: EcdsaChannelSigner, C: Deref, T: BroadcasterInterface, - F: Deref, + F: FeeEstimator, L: Deref, P: Deref, ES: EntropySource, > chain::Watch for ChainMonitor where C::Target: chain::Filter, - F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, { @@ -1501,14 +1490,13 @@ impl< ChannelSigner: EcdsaChannelSigner, C: Deref, T: BroadcasterInterface, - F: Deref, + F: FeeEstimator, L: Deref, P: Deref, ES: EntropySource, > events::EventsProvider for ChainMonitor where C::Target: chain::Filter, - F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, { diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 5c531cdb0ed..aa862ca3e5b 100644 --- a/lightning/src/chain/channelmonitor.rs +++ 
b/lightning/src/chain/channelmonitor.rs @@ -2058,7 +2058,7 @@ impl ChannelMonitor { /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager #[rustfmt::skip] - pub(crate) fn provide_payment_preimage_unsafe_legacy( + pub(crate) fn provide_payment_preimage_unsafe_legacy( &self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, @@ -2066,7 +2066,6 @@ impl ChannelMonitor { fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) where - F::Target: FeeEstimator, L::Target: Logger, { let mut inner = self.inner.lock().unwrap(); @@ -2082,11 +2081,10 @@ impl ChannelMonitor { /// itself. /// /// panics if the given update is not the next update by update_id. - pub fn update_monitor( + pub fn update_monitor( &self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L, ) -> Result<(), ()> where - F::Target: FeeEstimator, L::Target: Logger, { let mut inner = self.inner.lock().unwrap(); @@ -2336,14 +2334,17 @@ impl ChannelMonitor { /// transactions that cannot be confirmed until the funding transaction is visible. /// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction - pub fn broadcast_latest_holder_commitment_txn( + pub fn broadcast_latest_holder_commitment_txn< + B: BroadcasterInterface, + F: FeeEstimator, + L: Deref, + >( &self, broadcaster: &B, fee_estimator: &F, logger: &L, ) where - F::Target: FeeEstimator, L::Target: Logger, { let mut inner = self.inner.lock().unwrap(); - let fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator); + let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); inner.queue_latest_holder_commitment_txn_for_broadcast( @@ -2379,7 +2380,7 @@ impl ChannelMonitor { /// /// [`get_outputs_to_watch`]: #method.get_outputs_to_watch #[rustfmt::skip] - pub fn block_connected( + pub fn block_connected( &self, header: &Header, txdata: &TransactionData, @@ -2389,7 +2390,6 @@ impl ChannelMonitor { logger: &L, ) -> Vec where - F::Target: FeeEstimator, L::Target: Logger, { let mut inner = self.inner.lock().unwrap(); @@ -2400,10 +2400,9 @@ impl ChannelMonitor { /// Determines if the disconnected block contained any transactions of interest and updates /// appropriately. 
- pub fn blocks_disconnected( + pub fn blocks_disconnected( &self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &L, ) where - F::Target: FeeEstimator, L::Target: Logger, { let mut inner = self.inner.lock().unwrap(); @@ -2419,7 +2418,7 @@ impl ChannelMonitor { /// /// [`block_connected`]: Self::block_connected #[rustfmt::skip] - pub fn transactions_confirmed( + pub fn transactions_confirmed( &self, header: &Header, txdata: &TransactionData, @@ -2429,7 +2428,6 @@ impl ChannelMonitor { logger: &L, ) -> Vec where - F::Target: FeeEstimator, L::Target: Logger, { let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); @@ -2446,14 +2444,13 @@ impl ChannelMonitor { /// /// [`blocks_disconnected`]: Self::blocks_disconnected #[rustfmt::skip] - pub fn transaction_unconfirmed( + pub fn transaction_unconfirmed( &self, txid: &Txid, broadcaster: B, fee_estimator: F, logger: &L, ) where - F::Target: FeeEstimator, L::Target: Logger, { let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); @@ -2472,7 +2469,7 @@ impl ChannelMonitor { /// /// [`block_connected`]: Self::block_connected #[rustfmt::skip] - pub fn best_block_updated( + pub fn best_block_updated( &self, header: &Header, height: u32, @@ -2481,7 +2478,6 @@ impl ChannelMonitor { logger: &L, ) -> Vec where - F::Target: FeeEstimator, L::Target: Logger, { let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); @@ -2518,11 +2514,10 @@ impl ChannelMonitor { /// invoking this every 30 seconds, or lower if running in an environment with spotty /// connections, like on mobile. #[rustfmt::skip] - pub fn rebroadcast_pending_claims( + pub fn rebroadcast_pending_claims( &self, broadcaster: B, fee_estimator: F, logger: &L, ) where - F::Target: FeeEstimator, L::Target: Logger, { let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); @@ -2545,11 +2540,10 @@ impl ChannelMonitor { /// Triggers rebroadcasts of pending claims from a force-closed channel after a transaction /// signature generation failure. #[rustfmt::skip] - pub fn signer_unblocked( + pub fn signer_unblocked( &self, broadcaster: B, fee_estimator: F, logger: &L, ) where - F::Target: FeeEstimator, L::Target: Logger, { let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); @@ -3798,13 +3792,11 @@ impl ChannelMonitorImpl { /// /// Note that this is often called multiple times for the same payment and must be idempotent. #[rustfmt::skip] - fn provide_payment_preimage( + fn provide_payment_preimage( &mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, payment_info: &Option, broadcaster: &B, - fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext) - where F::Target: FeeEstimator, - L::Target: Logger, - { + fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext + ) where L::Target: Logger { self.payment_preimages.entry(payment_hash.clone()) .and_modify(|(_, payment_infos)| { if let Some(payment_info) = payment_info { @@ -3976,12 +3968,11 @@ impl ChannelMonitorImpl { /// See also [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]. 
/// /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]: crate::chain::channelmonitor::ChannelMonitor::broadcast_latest_holder_commitment_txn - pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast( + pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast( &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, require_funding_seen: bool, ) where - F::Target: FeeEstimator, L::Target: Logger, { let reason = ClosureReason::HolderForceClosed { @@ -4178,11 +4169,10 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn update_monitor( + fn update_monitor( &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithContext ) -> Result<(), ()> - where F::Target: FeeEstimator, - L::Target: Logger, + where L::Target: Logger, { if self.latest_update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID && updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID { log_info!(logger, "Applying pre-0.1 post-force-closed update to monitor {} with {} change(s).", @@ -4224,7 +4214,7 @@ impl ChannelMonitorImpl { } } let mut ret = Ok(()); - let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator); + let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); for update in updates.updates.iter() { match update { ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, claimed_htlcs, nondust_htlc_sources } => { @@ -5273,13 +5263,10 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn block_connected( + fn block_connected( &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: &WithContext, - ) -> Vec - where F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Vec where L::Target: Logger, { let block_hash = header.block_hash(); self.best_block = BestBlock::new(block_hash, height); @@ -5288,7 +5275,7 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn best_block_updated( + fn best_block_updated( &mut self, header: &Header, height: u32, @@ -5297,7 +5284,6 @@ impl ChannelMonitorImpl { logger: &WithContext, ) -> Vec where - F::Target: FeeEstimator, L::Target: Logger, { let block_hash = header.block_hash(); @@ -5319,7 +5305,7 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn transactions_confirmed( + fn transactions_confirmed( &mut self, header: &Header, txdata: &TransactionData, @@ -5329,7 +5315,6 @@ impl ChannelMonitorImpl { logger: &WithContext, ) -> Vec where - F::Target: FeeEstimator, L::Target: Logger, { let funding_seen_before = self.funding_seen_onchain; @@ -5603,7 +5588,7 @@ impl ChannelMonitorImpl { /// `conf_height` should be set to the height at which any new transaction(s)/block(s) were /// confirmed at, even if it is not the current best height. 
#[rustfmt::skip] - fn block_confirmed( + fn block_confirmed( &mut self, conf_height: u32, conf_hash: BlockHash, @@ -5615,7 +5600,6 @@ impl ChannelMonitorImpl { logger: &WithContext, ) -> Vec where - F::Target: FeeEstimator, L::Target: Logger, { log_trace!(logger, "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height); @@ -5830,10 +5814,9 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn blocks_disconnected( + fn blocks_disconnected( &mut self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &WithContext - ) where F::Target: FeeEstimator, - L::Target: Logger, + ) where L::Target: Logger, { let new_height = fork_point.height; log_trace!(logger, "Block(s) disconnected to height {}", new_height); @@ -5878,14 +5861,13 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn transaction_unconfirmed( + fn transaction_unconfirmed( &mut self, txid: &Txid, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, ) where - F::Target: FeeEstimator, L::Target: Logger, { let mut removed_height = None; @@ -6338,38 +6320,36 @@ impl ChannelMonitorImpl { } } -impl chain::Listen +impl chain::Listen for (ChannelMonitor, T, F, L) where - F::Target: FeeEstimator, L::Target: Logger, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { - self.0.block_connected(header, txdata, height, &self.1, &*self.2, &self.3); + self.0.block_connected(header, txdata, height, &self.1, &self.2, &self.3); } fn blocks_disconnected(&self, fork_point: BestBlock) { - self.0.blocks_disconnected(fork_point, &self.1, &*self.2, &self.3); + self.0.blocks_disconnected(fork_point, &self.1, &self.2, &self.3); } } -impl chain::Confirm - for (M, T, F, L) +impl + chain::Confirm for (M, T, F, L) where M: Deref>, - F::Target: FeeEstimator, L::Target: Logger, { fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { - self.0.transactions_confirmed(header, txdata, height, &self.1, &*self.2, &self.3); + self.0.transactions_confirmed(header, txdata, height, &self.1, &self.2, &self.3); } fn transaction_unconfirmed(&self, txid: &Txid) { - self.0.transaction_unconfirmed(txid, &self.1, &*self.2, &self.3); + self.0.transaction_unconfirmed(txid, &self.1, &self.2, &self.3); } fn best_block_updated(&self, header: &Header, height: u32) { - self.0.best_block_updated(header, height, &self.1, &*self.2, &self.3); + self.0.best_block_updated(header, height, &self.1, &self.2, &self.3); } fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option)> { diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 321b6008683..cfee63beefd 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -45,7 +45,6 @@ use alloc::collections::BTreeMap; use core::cmp; use core::mem::replace; use core::mem::swap; -use core::ops::Deref; const MAX_ALLOC_SIZE: usize = 64 * 1024; @@ -485,14 +484,11 @@ impl OnchainTxHandler { /// invoking this every 30 seconds, or lower if running in an environment with spotty /// connections, like on mobile. 
#[rustfmt::skip] - pub(super) fn rebroadcast_pending_claims( + pub(super) fn rebroadcast_pending_claims( &mut self, current_height: u32, feerate_strategy: FeerateStrategy, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) - where - F::Target: FeeEstimator, - { + ) { let mut bump_requests = Vec::with_capacity(self.pending_claim_requests.len()); for (claim_id, request) in self.pending_claim_requests.iter() { let inputs = request.outpoints(); @@ -553,13 +549,11 @@ impl OnchainTxHandler { /// Panics if there are signing errors, because signing operations in reaction to on-chain /// events are not expected to fail, and if they do, we may lose funds. #[rustfmt::skip] - fn generate_claim( + fn generate_claim( &mut self, cur_height: u32, cached_request: &PackageTemplate, feerate_strategy: &FeerateStrategy, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Option<(u32, u64, OnchainClaim)> - where F::Target: FeeEstimator, - { + ) -> Option<(u32, u64, OnchainClaim)> { let request_outpoints = cached_request.outpoints(); if request_outpoints.is_empty() { // Don't prune pending claiming request yet, we may have to resurrect HTLCs. Untractable @@ -760,11 +754,11 @@ impl OnchainTxHandler { /// does not need to equal the current blockchain tip height, which should be provided via /// `cur_height`, however it must never be higher than `cur_height`. #[rustfmt::skip] - pub(super) fn update_claims_view_from_requests( + pub(super) fn update_claims_view_from_requests( &mut self, mut requests: Vec, conf_height: u32, cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) where F::Target: FeeEstimator, { + fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + ) { if !requests.is_empty() { log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len()); } @@ -908,13 +902,11 @@ impl OnchainTxHandler { /// confirmed. This does not need to equal the current blockchain tip height, which should be /// provided via `cur_height`, however it must never be higher than `cur_height`. 
#[rustfmt::skip] - pub(super) fn update_claims_view_from_matched_txn( + pub(super) fn update_claims_view_from_matched_txn( &mut self, txn_matched: &[&Transaction], conf_height: u32, conf_hash: BlockHash, cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, - destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) where - F::Target: FeeEstimator, - { + destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + ) { let mut have_logged_intro = false; let mut maybe_log_intro = || { if !have_logged_intro { @@ -1105,7 +1097,7 @@ impl OnchainTxHandler { } #[rustfmt::skip] - pub(super) fn transaction_unconfirmed( + pub(super) fn transaction_unconfirmed( &mut self, txid: &Txid, broadcaster: &B, @@ -1113,9 +1105,7 @@ impl OnchainTxHandler { destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) where - F::Target: FeeEstimator, - { + ) { let mut height = None; for entry in self.onchain_events_awaiting_threshold_conf.iter() { if entry.txid == *txid { @@ -1132,10 +1122,10 @@ impl OnchainTxHandler { } #[rustfmt::skip] - pub(super) fn blocks_disconnected( + pub(super) fn blocks_disconnected( &mut self, new_best_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) where F::Target: FeeEstimator, { + ) { let mut bump_candidates = new_hash_map(); let onchain_events_awaiting_threshold_conf = self.onchain_events_awaiting_threshold_conf.drain(..).collect::>(); diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index db46f3be60d..0abe3534341 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -46,7 +46,6 @@ use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, Writeable, Write use crate::io; use core::cmp; -use core::ops::Deref; #[allow(unused_imports)] use crate::prelude::*; @@ -1512,12 +1511,10 @@ impl PackageTemplate { /// which was used to generate the value. Will not return less than `dust_limit_sats` for the /// value. #[rustfmt::skip] - pub(crate) fn compute_package_output( + pub(crate) fn compute_package_output( &self, predicted_weight: u64, dust_limit_sats: u64, feerate_strategy: &FeerateStrategy, conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Option<(u64, u64)> - where F::Target: FeeEstimator, - { + ) -> Option<(u64, u64)> { debug_assert!(matches!(self.malleability, PackageMalleability::Malleable(..)), "The package output is fixed for non-malleable packages"); let input_amounts = self.package_amount(); @@ -1540,10 +1537,10 @@ impl PackageTemplate { /// Computes a feerate based on the given confirmation target and feerate strategy. #[rustfmt::skip] - pub(crate) fn compute_package_feerate( + pub(crate) fn compute_package_feerate( &self, fee_estimator: &LowerBoundedFeeEstimator, conf_target: ConfirmationTarget, feerate_strategy: &FeerateStrategy, - ) -> u32 where F::Target: FeeEstimator { + ) -> u32 { let feerate_estimate = fee_estimator.bounded_sat_per_1000_weight(conf_target); if self.feerate_previous != 0 { let previous_feerate = self.feerate_previous.try_into().unwrap_or(u32::max_value()); @@ -1675,11 +1672,9 @@ impl Readable for PackageTemplate { /// fee and the corresponding updated feerate. If fee is under [`FEERATE_FLOOR_SATS_PER_KW`], /// we return nothing. 
#[rustfmt::skip] -fn compute_fee_from_spent_amounts( +fn compute_fee_from_spent_amounts( input_amounts: u64, predicted_weight: u64, conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L -) -> Option<(u64, u64)> - where F::Target: FeeEstimator, -{ +) -> Option<(u64, u64)> { let sweep_feerate = fee_estimator.bounded_sat_per_1000_weight(conf_target); let fee_rate = cmp::min(sweep_feerate, compute_feerate_sat_per_1000_weight(input_amounts / 2, predicted_weight)); let fee = fee_rate as u64 * (predicted_weight) / 1000; @@ -1701,14 +1696,11 @@ fn compute_fee_from_spent_amounts( /// respect BIP125 rules 3) and 4) and if required adjust the new fee to meet the RBF policy /// requirement. #[rustfmt::skip] -fn feerate_bump( +fn feerate_bump( predicted_weight: u64, input_amounts: u64, dust_limit_sats: u64, previous_feerate: u64, feerate_strategy: &FeerateStrategy, conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, -) -> Option<(u64, u64)> -where - F::Target: FeeEstimator, -{ +) -> Option<(u64, u64)> { let previous_fee = previous_feerate * predicted_weight / 1000; // If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee... diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index 46afa05b2f2..4bb8ffac9ef 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -320,12 +320,9 @@ pub(crate) fn htlc_tx_fees_sat(feerate_per_kw: u32, num_accepted_htlcs: usize, n /// Returns a fee estimate for the commitment transaction that we would ideally like to set, /// depending on channel type. -pub(super) fn selected_commitment_sat_per_1000_weight( +pub(super) fn selected_commitment_sat_per_1000_weight( fee_estimator: &LowerBoundedFeeEstimator, channel_type: &ChannelTypeFeatures, -) -> u32 -where - F::Target: FeeEstimator, -{ +) -> u32 { if channel_type.supports_anchor_zero_fee_commitments() { 0 } else if channel_type.supports_anchors_zero_fee_htlc_tx() { diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 31c39968b33..56317e774c0 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1757,12 +1757,11 @@ where } #[rustfmt::skip] - pub fn maybe_handle_error_without_close( + pub fn maybe_handle_error_without_close( &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, user_config: &UserConfig, their_features: &InitFeatures, ) -> Result, ()> where - F::Target: FeeEstimator, L::Target: Logger, { match &mut self.phase { @@ -1902,11 +1901,10 @@ where } } - pub fn tx_complete( + pub fn tx_complete( &mut self, msg: &msgs::TxComplete, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result)> where - F::Target: FeeEstimator, L::Target: Logger, { let tx_complete_action = match self.interactive_tx_constructor_mut() { @@ -2144,12 +2142,11 @@ where Ok(()) } - pub fn funding_transaction_signed( + pub fn funding_transaction_signed( &mut self, funding_txid_signed: Txid, witnesses: Vec, best_block_height: u32, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result where - F::Target: FeeEstimator, L::Target: Logger, { let (context, funding, pending_splice) = match &mut self.phase { @@ -2322,11 +2319,10 @@ where } #[rustfmt::skip] - pub fn commitment_signed( + pub fn commitment_signed( &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, fee_estimator: &LowerBoundedFeeEstimator, logger: &L ) -> Result<(Option::EcdsaSigner>>, Option), 
ChannelError> where - F::Target: FeeEstimator, L::Target: Logger { let phase = core::mem::replace(&mut self.phase, ChannelPhase::Undefined); @@ -2424,12 +2420,9 @@ where /// Doesn't bother handling the /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC /// corner case properly. - pub fn get_available_balances( + pub fn get_available_balances( &self, fee_estimator: &LowerBoundedFeeEstimator, - ) -> AvailableBalances - where - F::Target: FeeEstimator, - { + ) -> AvailableBalances { match &self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(chan) => chan.get_available_balances(fee_estimator), @@ -3567,7 +3560,7 @@ where SP::Target: SignerProvider, { #[rustfmt::skip] - fn new_for_inbound_channel<'a, ES: EntropySource, F: Deref, L: Deref>( + fn new_for_inbound_channel<'a, ES: EntropySource, F: FeeEstimator, L: Deref>( fee_estimator: &'a LowerBoundedFeeEstimator, entropy_source: &'a ES, signer_provider: &'a SP, @@ -3587,7 +3580,6 @@ where open_channel_fields: msgs::CommonOpenChannelFields, ) -> Result<(FundingScope, ChannelContext), ChannelError> where - F::Target: FeeEstimator, L::Target: Logger, SP::Target: SignerProvider, { @@ -3911,7 +3903,7 @@ where } #[rustfmt::skip] - fn new_for_outbound_channel<'a, ES: EntropySource, F: Deref, L: Deref>( + fn new_for_outbound_channel<'a, ES: EntropySource, F: FeeEstimator, L: Deref>( fee_estimator: &'a LowerBoundedFeeEstimator, entropy_source: &'a ES, signer_provider: &'a SP, @@ -3930,7 +3922,6 @@ where _logger: L, ) -> Result<(FundingScope, ChannelContext), APIError> where - F::Target: FeeEstimator, SP::Target: SignerProvider, L::Target: Logger, { @@ -4541,12 +4532,9 @@ where /// Returns a maximum "sane" fee rate used to reason about our dust exposure. /// Will be Some if the `channel_type`'s dust exposure depends on its commitment fee rate, and /// None otherwise. - fn get_dust_exposure_limiting_feerate( + fn get_dust_exposure_limiting_feerate( &self, fee_estimator: &LowerBoundedFeeEstimator, channel_type: &ChannelTypeFeatures, - ) -> Option - where - F::Target: FeeEstimator, - { + ) -> Option { if channel_type.supports_anchor_zero_fee_commitments() { None } else { @@ -4943,13 +4931,10 @@ where Ok(ret) } - fn validate_update_add_htlc( + fn validate_update_add_htlc( &self, funding: &FundingScope, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator, - ) -> Result<(), ChannelError> - where - F::Target: FeeEstimator, - { + ) -> Result<(), ChannelError> { if msg.amount_msat > funding.get_value_satoshis() * 1000 { return Err(ChannelError::close( "Remote side tried to send more than the total value of the channel".to_owned(), @@ -5061,13 +5046,10 @@ where Ok(()) } - fn validate_update_fee( + fn validate_update_fee( &self, funding: &FundingScope, fee_estimator: &LowerBoundedFeeEstimator, new_feerate_per_kw: u32, - ) -> Result<(), ChannelError> - where - F::Target: FeeEstimator, - { + ) -> Result<(), ChannelError> { // Check that we won't be pushed over our dust exposure limit by the feerate increase. 
let dust_exposure_limiting_feerate = self.get_dust_exposure_limiting_feerate(&fee_estimator, funding.get_channel_type()); @@ -5139,7 +5121,7 @@ where Ok(()) } - fn validate_commitment_signed( + fn validate_commitment_signed( &self, funding: &FundingScope, transaction_number: u64, commitment_point: PublicKey, msg: &msgs::CommitmentSigned, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result< @@ -5147,7 +5129,6 @@ where ChannelError, > where - F::Target: FeeEstimator, L::Target: Logger, { let funding_script = funding.get_funding_redeemscript(); @@ -5271,12 +5252,11 @@ where Ok((holder_commitment_tx, commitment_data.htlcs_included)) } - fn can_send_update_fee( + fn can_send_update_fee( &self, funding: &FundingScope, feerate_per_kw: u32, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> bool where - F::Target: FeeEstimator, L::Target: Logger, { // Before proposing a feerate update, check that we can actually afford the new fee. @@ -5858,12 +5838,9 @@ where } #[rustfmt::skip] - fn get_available_balances_for_scope( + fn get_available_balances_for_scope( &self, funding: &FundingScope, fee_estimator: &LowerBoundedFeeEstimator, - ) -> AvailableBalances - where - F::Target: FeeEstimator, - { + ) -> AvailableBalances { let context = &self; // Note that we have to handle overflow due to the case mentioned in the docs in general // here. @@ -6382,13 +6359,10 @@ where /// of the channel type we tried, not of our ability to open any channel at all. We can see if a /// downgrade of channel features would be possible so that we can still open the channel. #[rustfmt::skip] - pub(crate) fn maybe_downgrade_channel_features( + pub(crate) fn maybe_downgrade_channel_features( &mut self, funding: &mut FundingScope, fee_estimator: &LowerBoundedFeeEstimator, user_config: &UserConfig, their_features: &InitFeatures, - ) -> Result<(), ()> - where - F::Target: FeeEstimator - { + ) -> Result<(), ()> { if !funding.is_outbound() || !matches!( self.channel_state, ChannelState::NegotiatingFunding(flags) @@ -7332,11 +7306,10 @@ where } #[rustfmt::skip] - fn check_remote_fee( + fn check_remote_fee( channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator, feerate_per_kw: u32, cur_feerate_per_kw: Option, logger: &L - ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger, - { + ) -> Result<(), ChannelError> where L::Target: Logger { if channel_type.supports_anchor_zero_fee_commitments() { if feerate_per_kw != 0 { let err = "Zero Fee Channels must never attempt to use a fee".to_owned(); @@ -7942,9 +7915,9 @@ where } #[rustfmt::skip] - pub fn update_add_htlc( + pub fn update_add_htlc( &mut self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator, - ) -> Result<(), ChannelError> where F::Target: FeeEstimator { + ) -> Result<(), ChannelError> { if self.context.channel_state.is_remote_stfu_sent() || self.context.channel_state.is_quiescent() { return Err(ChannelError::WarnAndDisconnect("Got add HTLC message while quiescent".to_owned())); } @@ -8166,12 +8139,11 @@ where /// Note that our `commitment_signed` send did not include a monitor update. This is due to: /// 1. Updates cannot be made since the state machine is paused until `tx_signatures`. /// 2. We're still able to abort negotiation until `tx_signatures`. 
- fn splice_initial_commitment_signed( + fn splice_initial_commitment_signed( &mut self, msg: &msgs::CommitmentSigned, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result, ChannelError> where - F::Target: FeeEstimator, L::Target: Logger, { debug_assert!(self @@ -8284,12 +8256,11 @@ where (nondust_htlc_sources, dust_htlcs) } - pub fn commitment_signed( + pub fn commitment_signed( &mut self, msg: &msgs::CommitmentSigned, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result, ChannelError> where - F::Target: FeeEstimator, L::Target: Logger, { self.commitment_signed_check_state()?; @@ -8328,12 +8299,11 @@ where self.commitment_signed_update_monitor(update, logger) } - pub fn commitment_signed_batch( + pub fn commitment_signed_batch( &mut self, batch: Vec, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result, ChannelError> where - F::Target: FeeEstimator, L::Target: Logger, { self.commitment_signed_check_state()?; @@ -8582,11 +8552,10 @@ where /// Public version of the below, checking relevant preconditions first. /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and /// returns `(None, Vec::new())`. - pub fn maybe_free_holding_cell_htlcs( + pub fn maybe_free_holding_cell_htlcs( &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> (Option, Vec<(HTLCSource, PaymentHash)>) where - F::Target: FeeEstimator, L::Target: Logger, { if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) @@ -8600,11 +8569,10 @@ where /// Frees any pending commitment updates in the holding cell, generating the relevant messages /// for our counterparty. - fn free_holding_cell_htlcs( + fn free_holding_cell_htlcs( &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> (Option, Vec<(HTLCSource, PaymentHash)>) where - F::Target: FeeEstimator, L::Target: Logger, { assert!(matches!(self.context.channel_state, ChannelState::ChannelReady(_))); @@ -8809,7 +8777,7 @@ where /// /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc - pub fn revoke_and_ack( + pub fn revoke_and_ack( &mut self, msg: &msgs::RevokeAndACK, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, hold_mon_update: bool, ) -> Result< @@ -8821,7 +8789,6 @@ where ChannelError, > where - F::Target: FeeEstimator, L::Target: Logger, { if self.context.channel_state.is_quiescent() { @@ -9369,10 +9336,9 @@ where /// Queues up an outbound update fee by placing it in the holding cell. You should call /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the /// commitment update. - pub fn queue_update_fee( + pub fn queue_update_fee( &mut self, feerate_per_kw: u32, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) where - F::Target: FeeEstimator, L::Target: Logger, { let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger); @@ -9387,12 +9353,10 @@ where /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this /// [`FundedChannel`] if `force_holding_cell` is false. 
#[rustfmt::skip] - fn send_update_fee( + fn send_update_fee( &mut self, feerate_per_kw: u32, mut force_holding_cell: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) -> Option - where F::Target: FeeEstimator, L::Target: Logger - { + ) -> Option where L::Target: Logger { if !self.funding.is_outbound() { panic!("Cannot send fee from inbound channel"); } @@ -9704,8 +9668,8 @@ where } #[rustfmt::skip] - pub fn update_fee(&mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError> - where F::Target: FeeEstimator, L::Target: Logger + pub fn update_fee(&mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError> + where L::Target: Logger { if self.funding.is_outbound() { return Err(ChannelError::close("Non-funding remote tried to update channel fee".to_owned())); @@ -10427,12 +10391,9 @@ where /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart, /// at which point they will be recalculated. - fn calculate_closing_fee_limits( + fn calculate_closing_fee_limits( &mut self, fee_estimator: &LowerBoundedFeeEstimator, - ) -> (u64, u64) - where - F::Target: FeeEstimator, - { + ) -> (u64, u64) { if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); } @@ -10519,11 +10480,10 @@ where Ok(()) } - pub fn maybe_propose_closing_signed( + pub fn maybe_propose_closing_signed( &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result<(Option, Option<(Transaction, ShutdownResult)>), ChannelError> where - F::Target: FeeEstimator, L::Target: Logger, { // If we're waiting on a monitor persistence, that implies we're also waiting to send some @@ -10846,12 +10806,11 @@ where } } - pub fn closing_signed( + pub fn closing_signed( &mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::ClosingSigned, logger: &L, ) -> Result<(Option, Option<(Transaction, ShutdownResult)>), ChannelError> where - F::Target: FeeEstimator, L::Target: Logger, { if self.is_shutdown_pending_signature() { @@ -11096,13 +11055,9 @@ where /// When this function is called, the HTLC is already irrevocably committed to the channel; /// this function determines whether to fail the HTLC, or forward / claim it. #[rustfmt::skip] - pub fn can_accept_incoming_htlc( + pub fn can_accept_incoming_htlc( &self, fee_estimator: &LowerBoundedFeeEstimator, logger: L - ) -> Result<(), LocalHTLCFailureReason> - where - F::Target: FeeEstimator, - L::Target: Logger - { + ) -> Result<(), LocalHTLCFailureReason> where L::Target: Logger { if self.context.channel_state.is_local_shutdown_sent() { return Err(LocalHTLCFailureReason::ChannelClosed) } @@ -12780,14 +12735,13 @@ where /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the /// commitment update. 
- pub fn queue_add_htlc( + pub fn queue_add_htlc( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option, blinding_point: Option, accountable: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result<(), (LocalHTLCFailureReason, String)> where - F::Target: FeeEstimator, L::Target: Logger, { self.send_htlc( @@ -12829,14 +12783,13 @@ where /// on this [`FundedChannel`] if `force_holding_cell` is false. /// /// `Err`'s will always be temporary channel failures. - fn send_htlc( + fn send_htlc( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, skimmed_fee_msat: Option, blinding_point: Option, hold_htlc: bool, accountable: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result where - F::Target: FeeEstimator, L::Target: Logger, { if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) @@ -12946,12 +12899,9 @@ where } #[rustfmt::skip] - pub(super) fn get_available_balances( + pub(super) fn get_available_balances( &self, fee_estimator: &LowerBoundedFeeEstimator, - ) -> AvailableBalances - where - F::Target: FeeEstimator, - { + ) -> AvailableBalances { core::iter::once(&self.funding) .chain(self.pending_funding().iter()) .map(|funding| self.context.get_available_balances_for_scope(funding, fee_estimator)) @@ -13185,14 +13135,13 @@ where /// /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info. - pub fn send_htlc_and_commit( + pub fn send_htlc_and_commit( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option, hold_htlc: bool, accountable: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result, ChannelError> where - F::Target: FeeEstimator, L::Target: Logger, { let send_res = self.send_htlc( @@ -13690,14 +13639,11 @@ where #[allow(dead_code)] // TODO(dual_funding): Remove once opending V2 channels is enabled. #[rustfmt::skip] - pub fn new( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64, temporary_channel_id: Option, logger: L - ) -> Result, APIError> - where F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result, APIError> where L::Target: Logger { let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { // Protocol level safety check in place, although it should never happen because @@ -13829,14 +13775,10 @@ where /// not of our ability to open any channel at all. Thus, on error, we should first call this /// and see if we get a new `OpenChannel` message, otherwise the channel is failed. 
#[rustfmt::skip] - pub(crate) fn maybe_handle_error_without_close( + pub(crate) fn maybe_handle_error_without_close( &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, user_config: &UserConfig, their_features: &InitFeatures, - ) -> Result - where - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result where L::Target: Logger, { self.context.maybe_downgrade_channel_features( &mut self.funding, fee_estimator, user_config, their_features, )?; @@ -14081,15 +14023,12 @@ where /// Creates a new channel from a remote sides' request for one. /// Assumes chain_hash has already been checked and corresponds with what we expect! #[rustfmt::skip] - pub fn new( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig, current_chain_height: u32, logger: &L, is_0conf: bool, - ) -> Result, ChannelError> - where F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result, ChannelError> where L::Target: Logger { let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id), None); // First check the channel type is known, failing before we do anything else if we don't @@ -14354,16 +14293,13 @@ where { #[allow(dead_code)] // TODO(dual_funding): Remove once creating V2 channels is enabled. #[rustfmt::skip] - pub fn new_outbound( + pub fn new_outbound( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64, funding_inputs: Vec, user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64, funding_confirmation_target: ConfirmationTarget, logger: L, - ) -> Result - where F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result where L::Target: Logger { let channel_keys_id = signer_provider.generate_channel_keys_id(false, user_id); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); @@ -14426,13 +14362,10 @@ where /// If we receive an error message, it may only be a rejection of the channel type we tried, /// not of our ability to open any channel at all. Thus, on error, we should first call this /// and see if we get a new `OpenChannelV2` message, otherwise the channel is failed. - pub(crate) fn maybe_handle_error_without_close( + pub(crate) fn maybe_handle_error_without_close( &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator, user_config: &UserConfig, their_features: &InitFeatures, - ) -> Result - where - F::Target: FeeEstimator, - { + ) -> Result { self.context.maybe_downgrade_channel_features( &mut self.funding, fee_estimator, @@ -14502,15 +14435,12 @@ where /// TODO(dual_funding): Allow contributions, pass intended amount and inputs #[allow(dead_code)] // TODO(dual_funding): Remove once V2 channels is enabled. 
#[rustfmt::skip] - pub fn new_inbound( + pub fn new_inbound( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, holder_node_id: PublicKey, counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, their_features: &InitFeatures, msg: &msgs::OpenChannelV2, user_id: u128, config: &UserConfig, current_chain_height: u32, logger: &L, - ) -> Result - where F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result where L::Target: Logger, { // TODO(dual_funding): Take these as input once supported let (our_funding_contribution, our_funding_contribution_sats) = (SignedAmount::ZERO, 0u64); let our_funding_inputs = Vec::new(); diff --git a/lightning/src/ln/channel_state.rs b/lightning/src/ln/channel_state.rs index d10327b259a..7c591ff2c3b 100644 --- a/lightning/src/ln/channel_state.rs +++ b/lightning/src/ln/channel_state.rs @@ -524,13 +524,12 @@ impl ChannelDetails { } } - pub(super) fn from_channel( + pub(super) fn from_channel( channel: &Channel, best_block_height: u32, latest_features: InitFeatures, fee_estimator: &LowerBoundedFeeEstimator, ) -> Self where SP::Target: SignerProvider, - F::Target: FeeEstimator, { let context = channel.context(); let funding = channel.funding(); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 99adfb6d7c5..d7c6d865feb 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1801,9 +1801,7 @@ pub trait AChannelManager { /// A type that may be dereferenced to [`Self::SignerProvider`]. type SP: Deref; /// A type implementing [`FeeEstimator`]. - type FeeEstimator: FeeEstimator + ?Sized; - /// A type that may be dereferenced to [`Self::FeeEstimator`]. - type F: Deref; + type FeeEstimator: FeeEstimator; /// A type implementing [`Router`]. type Router: Router + ?Sized; /// A type that may be dereferenced to [`Self::Router`]. 
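The `AChannelManager` hunk just above collapses the paired `type FeeEstimator`/`type F: Deref` associated types into a single `type FeeEstimator: FeeEstimator`. Reference-like handles keep qualifying because of a `Deref` blanket impl; here is a self-contained sketch of that pattern (the trait is a simplified stand-in and `StaticEstimator` is hypothetical, but the blanket impl has the same shape as the one this series adds for `Router`):

use std::ops::Deref;

trait FeeEstimator {
    fn get_est_sat_per_1000_weight(&self) -> u32;
}

// Anything that derefs to a `FeeEstimator` is one itself, so `&E` and
// `Arc<E>` still satisfy a direct `F: FeeEstimator` bound.
impl<F: Deref> FeeEstimator for F
where
    F::Target: FeeEstimator,
{
    fn get_est_sat_per_1000_weight(&self) -> u32 {
        self.deref().get_est_sat_per_1000_weight()
    }
}

struct StaticEstimator(u32);

impl FeeEstimator for StaticEstimator {
    fn get_est_sat_per_1000_weight(&self) -> u32 {
        self.0
    }
}

// The new-style bound names the trait directly...
fn commitment_feerate<F: FeeEstimator>(fee_est: &F) -> u32 {
    fee_est.get_est_sat_per_1000_weight()
}

fn main() {
    let est = StaticEstimator(253);
    assert_eq!(commitment_feerate(&est), 253); // ...and accepts the value itself
    assert_eq!(commitment_feerate(&&est), 253); // or any `Deref` handle to it
}

The upshot is that `ChannelManager<..., F, ...>` can still be instantiated with `F = &E` or `F = Arc<E>` exactly as before; only the `where F::Target: FeeEstimator` boilerplate disappears.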
@@ -1825,7 +1823,7 @@ pub trait AChannelManager { Self::EntropySource, Self::NodeSigner, Self::SP, - Self::F, + Self::FeeEstimator, Self::R, Self::MR, Self::L, @@ -1838,7 +1836,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -1846,7 +1844,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -1859,8 +1856,7 @@ where type Signer = ::EcdsaSigner; type SignerProvider = SP::Target; type SP = SP; - type FeeEstimator = F::Target; - type F = F; + type FeeEstimator = F; type Router = R::Target; type R = R; type MessageRouter = MR::Target; @@ -2617,14 +2613,13 @@ pub struct ChannelManager< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, > where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -3404,7 +3399,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -3412,7 +3407,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -13542,7 +13536,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -13550,7 +13544,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -14416,7 +14409,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -14424,7 +14417,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -14784,7 +14776,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -14792,7 +14784,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -14816,7 +14807,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -14824,7 +14815,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -14874,7 +14864,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -14882,7 +14872,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -15044,7 +15033,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -15052,7 +15041,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -15403,7 +15391,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -15411,7 
+15399,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -15975,7 +15962,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -15983,7 +15970,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -16190,7 +16176,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -16198,7 +16184,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -16432,7 +16417,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -16440,7 +16425,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -16497,7 +16481,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -16505,7 +16489,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -17010,7 +16993,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref, @@ -17018,7 +17001,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -17374,14 +17356,13 @@ pub struct ChannelManagerReadArgs< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref + Clone, > where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -17451,7 +17432,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref + Clone, @@ -17459,7 +17440,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -17536,7 +17516,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref + Clone, @@ -17545,7 +17525,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, @@ -17566,7 +17545,7 @@ impl< ES: EntropySource, NS: NodeSigner, SP: Deref, - F: Deref, + F: FeeEstimator, R: Deref, MR: Deref, L: Deref + Clone, @@ -17575,7 +17554,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - F::Target: FeeEstimator, R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e560e70c8a6..46af2b1d552 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -737,7 +737,7 @@ pub trait NodeHolder { ::EntropySource, ::NodeSigner, ::SP, - ::F, + 
::FeeEstimator, ::R, ::MR, ::L, @@ -754,7 +754,7 @@ impl NodeHolder for &H { ::EntropySource, ::NodeSigner, ::SP, - ::F, + ::FeeEstimator, ::R, ::MR, ::L, diff --git a/lightning/src/util/anchor_channel_reserves.rs b/lightning/src/util/anchor_channel_reserves.rs index 0e2f53a84b4..92c51975e5c 100644 --- a/lightning/src/util/anchor_channel_reserves.rs +++ b/lightning/src/util/anchor_channel_reserves.rs @@ -274,13 +274,11 @@ pub fn can_support_additional_anchor_channel< ChannelSigner: EcdsaChannelSigner, FilterRef: Deref, B: BroadcasterInterface, - EstimatorRef: Deref, + FE: FeeEstimator, LoggerRef: Deref, PersistRef: Deref, ES: EntropySource, - ChainMonitorRef: Deref< - Target = ChainMonitor, - >, + ChainMonitorRef: Deref>, >( context: &AnchorChannelReserveContext, utxos: &[Utxo], a_channel_manager: AChannelManagerRef, chain_monitor: ChainMonitorRef, @@ -288,7 +286,6 @@ pub fn can_support_additional_anchor_channel< where AChannelManagerRef::Target: AChannelManager, FilterRef::Target: Filter, - EstimatorRef::Target: FeeEstimator, LoggerRef::Target: Logger, PersistRef::Target: Persist, { diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index ecc2d946acd..3a94732fa46 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -593,21 +593,25 @@ pub struct MonitorUpdatingPersister< ES: EntropySource, SP: Deref, BI: BroadcasterInterface, - FE: Deref, + FE: FeeEstimator, >(MonitorUpdatingPersisterAsync, PanicingSpawner, L, ES, SP, BI, FE>) where K::Target: KVStoreSync, L::Target: Logger, - SP::Target: SignerProvider + Sized, - FE::Target: FeeEstimator; + SP::Target: SignerProvider + Sized; -impl - MonitorUpdatingPersister +impl< + K: Deref, + L: Deref, + ES: EntropySource, + SP: Deref, + BI: BroadcasterInterface, + FE: FeeEstimator, + > MonitorUpdatingPersister where K::Target: KVStoreSync, L::Target: Logger, SP::Target: SignerProvider + Sized, - FE::Target: FeeEstimator, { /// Constructs a new [`MonitorUpdatingPersister`]. /// @@ -698,13 +702,12 @@ impl< ES: EntropySource, SP: Deref, BI: BroadcasterInterface, - FE: Deref, + FE: FeeEstimator, > Persist for MonitorUpdatingPersister where K::Target: KVStoreSync, L::Target: Logger, SP::Target: SignerProvider + Sized, - FE::Target: FeeEstimator, { /// Persists a new channel. This means writing the entire monitor to the /// parametrized [`KVStoreSync`]. @@ -782,13 +785,12 @@ pub struct MonitorUpdatingPersisterAsync< ES: EntropySource, SP: Deref, BI: BroadcasterInterface, - FE: Deref, + FE: FeeEstimator, >(Arc>) where K::Target: KVStore, L::Target: Logger, - SP::Target: SignerProvider + Sized, - FE::Target: FeeEstimator; + SP::Target: SignerProvider + Sized; struct MonitorUpdatingPersisterAsyncInner< K: Deref, @@ -797,12 +799,11 @@ struct MonitorUpdatingPersisterAsyncInner< ES: EntropySource, SP: Deref, BI: BroadcasterInterface, - FE: Deref, + FE: FeeEstimator, > where K::Target: KVStore, L::Target: Logger, SP::Target: SignerProvider + Sized, - FE::Target: FeeEstimator, { kv_store: K, async_completed_updates: Mutex>, @@ -822,13 +823,12 @@ impl< ES: EntropySource, SP: Deref, BI: BroadcasterInterface, - FE: Deref, + FE: FeeEstimator, > MonitorUpdatingPersisterAsync where K::Target: KVStore, L::Target: Logger, SP::Target: SignerProvider + Sized, - FE::Target: FeeEstimator, { /// Constructs a new [`MonitorUpdatingPersisterAsync`]. 
/// @@ -971,13 +971,12 @@ impl< ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, - FE: Deref + MaybeSend + MaybeSync + 'static, + FE: FeeEstimator + MaybeSend + MaybeSync + 'static, > MonitorUpdatingPersisterAsync where K::Target: KVStore + MaybeSync, L::Target: Logger, SP::Target: SignerProvider + Sized, - FE::Target: FeeEstimator, ::EcdsaSigner: MaybeSend + 'static, { pub(crate) fn spawn_async_persist_new_channel( @@ -1061,13 +1060,12 @@ impl< ES: EntropySource, SP: Deref, BI: BroadcasterInterface, - FE: Deref, + FE: FeeEstimator, > MonitorUpdatingPersisterAsyncInner where K::Target: KVStore, L::Target: Logger, SP::Target: SignerProvider + Sized, - FE::Target: FeeEstimator, { pub async fn read_channel_monitor_with_updates( &self, monitor_key: &str, diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 6b3ce10edb2..a4088331f68 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -340,14 +340,13 @@ impl_writeable_tlv_based_enum!(OutputSpendStatus, pub struct OutputSweeper< B: BroadcasterInterface, D: Deref, - E: Deref, + E: FeeEstimator, F: Deref, K: Deref, L: Deref, O: Deref, > where D::Target: ChangeDestinationSource, - E::Target: FeeEstimator, F::Target: Filter, K::Target: KVStore, L::Target: Logger, @@ -364,11 +363,17 @@ pub struct OutputSweeper< logger: L, } -impl - OutputSweeper +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Deref, + K: Deref, + L: Deref, + O: Deref, + > OutputSweeper where D::Target: ChangeDestinationSource, - E::Target: FeeEstimator, F::Target: Filter, K::Target: KVStore, L::Target: Logger, @@ -715,11 +720,17 @@ where } } -impl Listen - for OutputSweeper +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Deref, + K: Deref, + L: Deref, + O: Deref, + > Listen for OutputSweeper where D::Target: ChangeDestinationSource, - E::Target: FeeEstimator, F::Target: Filter + Sync + Send, K::Target: KVStore, L::Target: Logger, @@ -755,11 +766,17 @@ where } } -impl Confirm - for OutputSweeper +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Deref, + K: Deref, + L: Deref, + O: Deref, + > Confirm for OutputSweeper where D::Target: ChangeDestinationSource, - E::Target: FeeEstimator, F::Target: Filter + Sync + Send, K::Target: KVStore, L::Target: Logger, @@ -851,11 +868,17 @@ pub enum SpendingDelay { }, } -impl - ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeper) +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Deref, + K: Deref, + L: Deref, + O: Deref, + > ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeper) where D::Target: ChangeDestinationSource, - E::Target: FeeEstimator, F::Target: Filter + Sync + Send, K::Target: KVStore, L::Target: Logger, @@ -923,14 +946,13 @@ where pub struct OutputSweeperSync< B: BroadcasterInterface, D: Deref, - E: Deref, + E: FeeEstimator, F: Deref, K: Deref, L: Deref, O: Deref, > where D::Target: ChangeDestinationSourceSync, - E::Target: FeeEstimator, F::Target: Filter, K::Target: KVStoreSync, L::Target: Logger, @@ -940,11 +962,17 @@ pub struct OutputSweeperSync< OutputSweeper, E, F, KVStoreSyncWrapper, L, O>, } -impl - OutputSweeperSync +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Deref, + K: Deref, + L: Deref, + O: Deref, + > OutputSweeperSync where D::Target: ChangeDestinationSourceSync, - E::Target: FeeEstimator, F::Target: Filter, K::Target: 
KVStoreSync, L::Target: Logger, @@ -1059,11 +1087,17 @@ where } } -impl Listen - for OutputSweeperSync +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Deref, + K: Deref, + L: Deref, + O: Deref, + > Listen for OutputSweeperSync where D::Target: ChangeDestinationSourceSync, - E::Target: FeeEstimator, F::Target: Filter + Sync + Send, K::Target: KVStoreSync, L::Target: Logger, @@ -1080,11 +1114,17 @@ where } } -impl Confirm - for OutputSweeperSync +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Deref, + K: Deref, + L: Deref, + O: Deref, + > Confirm for OutputSweeperSync where D::Target: ChangeDestinationSourceSync, - E::Target: FeeEstimator, F::Target: Filter + Sync + Send, K::Target: KVStoreSync, L::Target: Logger, @@ -1109,11 +1149,18 @@ where } } -impl - ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeperSync) +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Deref, + K: Deref, + L: Deref, + O: Deref, + > ReadableArgs<(B, E, Option, O, D, K, L)> + for (BestBlock, OutputSweeperSync) where D::Target: ChangeDestinationSourceSync, - E::Target: FeeEstimator, F::Target: Filter + Sync + Send, K::Target: KVStoreSync, L::Target: Logger, From 3f8f1ba961e9b9f33c7f78a496c71cc0ef18c374 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 14 Jan 2026 18:46:38 -0500 Subject: [PATCH 143/242] Drop Deref indirection for Router Reduces generics and verbosity across the codebase, should provide equivalent behavior. Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 70 ++++++++--------------- lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/ln/outbound_payment.rs | 33 ++++------- lightning/src/offers/flow.rs | 59 ++++++------------- lightning/src/routing/router.rs | 39 +++++++++++++ 5 files changed, 94 insertions(+), 111 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d7c6d865feb..096942d4a43 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1803,9 +1803,7 @@ pub trait AChannelManager { /// A type implementing [`FeeEstimator`]. type FeeEstimator: FeeEstimator; /// A type implementing [`Router`]. - type Router: Router + ?Sized; - /// A type that may be dereferenced to [`Self::Router`]. - type R: Deref; + type Router: Router; /// A type implementing [`MessageRouter`]. type MessageRouter: MessageRouter + ?Sized; /// A type that may be dereferenced to [`Self::MessageRouter`]. 
@@ -1824,7 +1822,7 @@ pub trait AChannelManager { Self::NodeSigner, Self::SP, Self::FeeEstimator, - Self::R, + Self::Router, Self::MR, Self::L, >; @@ -1837,14 +1835,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > AChannelManager for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -1857,8 +1854,7 @@ where type SignerProvider = SP::Target; type SP = SP; type FeeEstimator = F; - type Router = R::Target; - type R = R; + type Router = R; type MessageRouter = MR::Target; type MR = MR; type Logger = L::Target; @@ -2614,13 +2610,12 @@ pub struct ChannelManager< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -3400,14 +3395,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -5550,7 +5544,7 @@ where fn check_refresh_async_receive_offer_cache(&self, timer_tick_occurred: bool) { let peers = self.get_peers_for_blinded_path(); let channels = self.list_usable_channels(); - let router = &*self.router; + let router = &self.router; let refresh_res = self.flow.check_refresh_async_receive_offer_cache( peers, channels, @@ -13537,14 +13531,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -14410,14 +14403,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > BaseMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -14777,14 +14769,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > EventsProvider for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -14808,14 +14799,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > chain::Listen for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -14865,14 +14855,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > chain::Confirm for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -15034,14 +15023,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -15392,14 +15380,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > ChannelMessageHandler for ChannelManager where M::Target: 
chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -15963,14 +15950,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > OffersMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -16177,14 +16163,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > AsyncPaymentsMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -16215,7 +16200,7 @@ where self.get_peers_for_blinded_path(), self.list_usable_channels(), &self.entropy_source, - &*self.router, + &self.router, ) { Some((msg, ctx)) => (msg, ctx), None => return None, @@ -16418,14 +16403,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > DNSResolverMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -16482,14 +16466,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > NodeIdLookUp for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -16994,14 +16977,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref, > Writeable for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -17357,13 +17339,12 @@ pub struct ChannelManagerReadArgs< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref + Clone, > where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -17433,14 +17414,13 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref + Clone, > ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L> where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -17517,7 +17497,7 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref + Clone, > ReadableArgs> @@ -17525,7 +17505,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { @@ -17546,7 +17525,7 @@ impl< NS: NodeSigner, SP: Deref, F: FeeEstimator, - R: Deref, + R: Router, MR: Deref, L: Deref + Clone, > ReadableArgs> @@ -17554,7 +17533,6 @@ impl< where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - R::Target: Router, MR::Target: MessageRouter, L::Target: Logger, { diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 46af2b1d552..8b3932a0195 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -738,7 +738,7 @@ pub trait NodeHolder { ::NodeSigner, ::SP, ::FeeEstimator, - ::R, + ::Router, ::MR, ::L, >; @@ -755,7 +755,7 @@ impl NodeHolder for &H { ::NodeSigner, ::SP, 
::FeeEstimator, - ::R, + ::Router, ::MR, ::L, > { diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index e2fc21c7442..0bc61031a77 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -866,7 +866,7 @@ impl OutboundPayments { impl OutboundPayments { #[rustfmt::skip] - pub(super) fn send_payment( + pub(super) fn send_payment( &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, first_hops: Vec, compute_inflight_htlcs: IH, entropy_source: &ES, @@ -875,7 +875,6 @@ impl OutboundPayments { logger: &WithContext, ) -> Result<(), RetryableSendFailure> where - R::Target: Router, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, L::Target: Logger, @@ -886,7 +885,7 @@ impl OutboundPayments { } #[rustfmt::skip] - pub(super) fn send_spontaneous_payment( + pub(super) fn send_spontaneous_payment( &self, payment_preimage: Option, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, @@ -895,7 +894,6 @@ impl OutboundPayments { logger: &WithContext, ) -> Result where - R::Target: Router, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, L::Target: Logger, @@ -911,7 +909,7 @@ impl OutboundPayments { } #[rustfmt::skip] - pub(super) fn pay_for_bolt11_invoice( + pub(super) fn pay_for_bolt11_invoice( &self, invoice: &Bolt11Invoice, payment_id: PaymentId, amount_msats: Option, route_params_config: RouteParametersConfig, @@ -923,7 +921,6 @@ impl OutboundPayments { logger: &WithContext, ) -> Result<(), Bolt11PaymentError> where - R::Target: Router, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, L::Target: Logger, @@ -958,7 +955,7 @@ impl OutboundPayments { #[rustfmt::skip] pub(super) fn send_payment_for_bolt12_invoice< - R: Deref, ES: EntropySource, NS: NodeSigner, NL: Deref, IH, SP, L: Deref, + R: Router, ES: EntropySource, NS: NodeSigner, NL: Deref, IH, SP, L: Deref, >( &self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R, first_hops: Vec, features: Bolt12InvoiceFeatures, inflight_htlcs: IH, @@ -968,7 +965,6 @@ impl OutboundPayments { send_payment_along_path: SP, logger: &WithContext, ) -> Result<(), Bolt12PaymentError> where - R::Target: Router, NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -1002,7 +998,7 @@ impl OutboundPayments { #[rustfmt::skip] fn send_payment_for_bolt12_invoice_internal< - R: Deref, ES: EntropySource, NS: NodeSigner, NL: Deref, IH, SP, L: Deref, + R: Router, ES: EntropySource, NS: NodeSigner, NL: Deref, IH, SP, L: Deref, >( &self, payment_id: PaymentId, payment_hash: PaymentHash, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, @@ -1014,7 +1010,6 @@ impl OutboundPayments { send_payment_along_path: SP, logger: &WithContext, ) -> Result<(), Bolt12PaymentError> where - R::Target: Router, NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -1216,7 +1211,7 @@ impl OutboundPayments { } pub(super) fn send_payment_for_static_invoice< - R: Deref, + R: Router, ES: EntropySource, NS: NodeSigner, NL: Deref, @@ -1231,7 +1226,6 @@ impl OutboundPayments { send_payment_along_path: SP, logger: &WithContext, ) -> Result<(), Bolt12PaymentError> where - R::Target: 
Router, NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -1300,7 +1294,7 @@ impl OutboundPayments { // Returns whether the data changed and needs to be repersisted. pub(super) fn check_retry_payments< - R: Deref, + R: Router, ES: EntropySource, NS: NodeSigner, SP, @@ -1314,7 +1308,6 @@ impl OutboundPayments { send_payment_along_path: SP, logger: &WithContext, ) -> bool where - R::Target: Router, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, IH: Fn() -> InFlightHtlcs, FH: Fn() -> Vec, @@ -1417,14 +1410,13 @@ impl OutboundPayments { } #[rustfmt::skip] - fn find_initial_route( + fn find_initial_route( &self, payment_id: PaymentId, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, route_params: &mut RouteParameters, router: &R, first_hops: &Vec, inflight_htlcs: &IH, node_signer: &NS, best_block_height: u32, logger: &WithContext, ) -> Result where - R::Target: Router, L::Target: Logger, IH: Fn() -> InFlightHtlcs, { @@ -1471,7 +1463,7 @@ impl OutboundPayments { /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed #[rustfmt::skip] - fn send_payment_for_non_bolt12_invoice( + fn send_payment_for_non_bolt12_invoice( &self, payment_id: PaymentId, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, retry_strategy: Retry, mut route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, @@ -1480,7 +1472,6 @@ impl OutboundPayments { logger: &WithContext, ) -> Result<(), RetryableSendFailure> where - R::Target: Router, L::Target: Logger, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -1515,7 +1506,7 @@ impl OutboundPayments { } #[rustfmt::skip] - fn find_route_and_send_payment( + fn find_route_and_send_payment( &self, payment_hash: PaymentHash, payment_id: PaymentId, route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, @@ -1523,7 +1514,6 @@ impl OutboundPayments { send_payment_along_path: &SP, logger: &WithContext, ) where - R::Target: Router, L::Target: Logger, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -1675,7 +1665,7 @@ impl OutboundPayments { } #[rustfmt::skip] - fn handle_pay_route_err( + fn handle_pay_route_err( &self, err: PaymentSendFailure, payment_id: PaymentId, payment_hash: PaymentHash, route: Route, mut route_params: RouteParameters, onion_session_privs: Vec<[u8; 32]>, router: &R, first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, @@ -1684,7 +1674,6 @@ impl OutboundPayments { send_payment_along_path: &SP, logger: &WithContext, ) where - R::Target: Router, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, L::Target: Logger, diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 97e92fdaec5..e22f97cf2d3 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -317,14 +317,11 @@ where /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to /// [`Router::create_blinded_payment_paths`]. 
- fn create_blinded_payment_paths( + fn create_blinded_payment_paths( &self, router: &R, usable_channels: Vec, amount_msats: Option, payment_secret: PaymentSecret, payment_context: PaymentContext, relative_expiry_seconds: u32, - ) -> Result, ()> - where - R::Target: Router, - { + ) -> Result, ()> { let secp_ctx = &self.secp_ctx; let receive_auth_key = self.receive_auth_key; @@ -356,14 +353,11 @@ where #[cfg(test)] /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to /// [`Router::create_blinded_payment_paths`]. - pub(crate) fn test_create_blinded_payment_paths( + pub(crate) fn test_create_blinded_payment_paths( &self, router: &R, usable_channels: Vec, amount_msats: Option, payment_secret: PaymentSecret, payment_context: PaymentContext, relative_expiry_seconds: u32, - ) -> Result, ()> - where - R::Target: Router, - { + ) -> Result, ()> { self.create_blinded_payment_paths( router, usable_channels, @@ -821,14 +815,11 @@ where /// created via [`Self::create_async_receive_offer_builder`]. /// /// This is not exported to bindings users as builder patterns don't map outside of move semantics. - pub fn create_static_invoice_builder<'a, R: Deref>( + pub fn create_static_invoice_builder<'a, R: Router>( &self, router: &R, offer: &'a Offer, offer_nonce: Nonce, payment_secret: PaymentSecret, relative_expiry_secs: u32, usable_channels: Vec, peers: Vec, - ) -> Result, Bolt12SemanticError> - where - R::Target: Router, - { + ) -> Result, Bolt12SemanticError> { let expanded_key = &self.inbound_payment_key; let secp_ctx = &self.secp_ctx; @@ -892,12 +883,11 @@ where /// blinded path can be constructed. /// /// This is not exported to bindings users as builder patterns don't map outside of move semantics. - pub fn create_invoice_builder_from_refund<'a, ES: EntropySource, R: Deref, F>( + pub fn create_invoice_builder_from_refund<'a, ES: EntropySource, R: Router, F>( &'a self, router: &R, entropy_source: ES, refund: &'a Refund, usable_channels: Vec, get_payment_info: F, ) -> Result, Bolt12SemanticError> where - R::Target: Router, F: Fn(u64, u32) -> Result<(PaymentHash, PaymentSecret), Bolt12SemanticError>, { if refund.chain() != self.chain_hash { @@ -960,12 +950,11 @@ where /// Returns a [`Bolt12SemanticError`] if: /// - Valid blinded payment paths could not be generated for the [`Bolt12Invoice`]. /// - The [`InvoiceBuilder`] could not be created from the [`InvoiceRequest`]. - pub fn create_invoice_builder_from_invoice_request_with_keys<'a, R: Deref, F>( + pub fn create_invoice_builder_from_invoice_request_with_keys<'a, R: Router, F>( &self, router: &R, invoice_request: &'a VerifiedInvoiceRequest, usable_channels: Vec, get_payment_info: F, ) -> Result<(InvoiceBuilder<'a, DerivedSigningPubkey>, MessageContext), Bolt12SemanticError> where - R::Target: Router, F: Fn(u64, u32) -> Result<(PaymentHash, PaymentSecret), Bolt12SemanticError>, { let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32; @@ -1020,12 +1009,11 @@ where /// Returns a [`Bolt12SemanticError`] if: /// - Valid blinded payment paths could not be generated for the [`Bolt12Invoice`]. /// - The [`InvoiceBuilder`] could not be created from the [`InvoiceRequest`]. 
- pub fn create_invoice_builder_from_invoice_request_without_keys<'a, R: Deref, F>( + pub fn create_invoice_builder_from_invoice_request_without_keys<'a, R: Router, F>( &self, router: &R, invoice_request: &'a VerifiedInvoiceRequest, usable_channels: Vec, get_payment_info: F, ) -> Result<(InvoiceBuilder<'a, ExplicitSigningPubkey>, MessageContext), Bolt12SemanticError> where - R::Target: Router, F: Fn(u64, u32) -> Result<(PaymentHash, PaymentSecret), Bolt12SemanticError>, { let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32; @@ -1372,13 +1360,10 @@ where /// the cache can self-regulate the number of messages sent out. /// /// Errors if we failed to create blinded reply paths when sending an [`OfferPathsRequest`] message. - pub fn check_refresh_async_receive_offer_cache( + pub fn check_refresh_async_receive_offer_cache( &self, peers: Vec, usable_channels: Vec, router: R, timer_tick_occurred: bool, - ) -> Result<(), ()> - where - R::Target: Router, - { + ) -> Result<(), ()> { // Terminate early if this node does not intend to receive async payments. { let cache = self.async_receive_offer_cache.lock().unwrap(); @@ -1447,11 +1432,9 @@ where /// Enqueue onion messages that will used to request invoice refresh from the static invoice /// server, based on the offers provided by the cache. - fn check_refresh_static_invoices( + fn check_refresh_static_invoices( &self, peers: Vec, usable_channels: Vec, router: R, - ) where - R::Target: Router, - { + ) { let mut serve_static_invoice_msgs = Vec::new(); { let duration_since_epoch = self.duration_since_epoch(); @@ -1464,7 +1447,7 @@ where offer_nonce, peers.clone(), usable_channels.clone(), - &*router, + &router, ) { Ok((invoice, path)) => (invoice, path), Err(()) => continue, @@ -1572,14 +1555,11 @@ where /// /// Returns `None` if we have enough offers cached already, verification of `message` fails, or we /// fail to create blinded paths. - pub fn handle_offer_paths( + pub fn handle_offer_paths( &self, message: OfferPaths, context: AsyncPaymentsContext, responder: Responder, peers: Vec, usable_channels: Vec, entropy: ES, router: R, - ) -> Option<(ServeStaticInvoice, MessageContext)> - where - R::Target: Router, - { + ) -> Option<(ServeStaticInvoice, MessageContext)> { let duration_since_epoch = self.duration_since_epoch(); let invoice_slot = match context { AsyncPaymentsContext::OfferPaths { invoice_slot, path_absolute_expiry } => { @@ -1662,13 +1642,10 @@ where /// Creates a [`StaticInvoice`] and a blinded path for the server to forward invoice requests from /// payers to our node. 
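The `routing/router.rs` hunk at the end of this patch supplies the piece that keeps the direct `R: Router` bounds above working for smart-pointer types. Its shape, sketched here with heavily abridged method signatures and stand-in types (the real impl also forwards `find_route_with_id` and `create_blinded_payment_paths`, as the hunk shows):

use std::ops::Deref;

pub struct Route;
pub struct RouteParameters;
pub struct LightningError;

pub trait Router {
    fn find_route(&self, route_params: &RouteParameters) -> Result<Route, LightningError>;
}

// Blanket impl: any `Deref` handle to a `Router` is itself a `Router`, so
// existing users holding `&R` or `Arc<R>` compile unchanged.
impl<R: Deref> Router for R
where
    R::Target: Router,
{
    fn find_route(&self, route_params: &RouteParameters) -> Result<Route, LightningError> {
        self.deref().find_route(route_params)
    }
}

This is also why call sites change from `&*self.router` to `&self.router`: the reborrow through to the `Deref` target is no longer needed, since the reference itself satisfies the bound.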
- fn create_static_invoice_for_server( + fn create_static_invoice_for_server( &self, offer: &Offer, offer_nonce: Nonce, peers: Vec, usable_channels: Vec, router: R, - ) -> Result<(StaticInvoice, BlindedMessagePath), ()> - where - R::Target: Router, - { + ) -> Result<(StaticInvoice, BlindedMessagePath), ()> { let expanded_key = &self.inbound_payment_key; let duration_since_epoch = self.duration_since_epoch(); let secp_ctx = &self.secp_ctx; diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 494860f1976..0a235880858 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -292,6 +292,45 @@ pub trait Router { ) -> Result, ()>; } +impl> Router for R { + fn find_route( + &self, payer: &PublicKey, route_params: &RouteParameters, + first_hops: Option<&[&ChannelDetails]>, inflight_htlcs: InFlightHtlcs, + ) -> Result { + self.deref().find_route(payer, route_params, first_hops, inflight_htlcs) + } + + fn find_route_with_id( + &self, payer: &PublicKey, route_params: &RouteParameters, + first_hops: Option<&[&ChannelDetails]>, inflight_htlcs: InFlightHtlcs, + payment_hash: PaymentHash, payment_id: PaymentId, + ) -> Result { + self.deref().find_route_with_id( + payer, + route_params, + first_hops, + inflight_htlcs, + payment_hash, + payment_id, + ) + } + + fn create_blinded_payment_paths( + &self, recipient: PublicKey, local_node_receive_key: ReceiveAuthKey, + first_hops: Vec, tlvs: ReceiveTlvs, amount_msats: Option, + secp_ctx: &Secp256k1, + ) -> Result, ()> { + self.deref().create_blinded_payment_paths( + recipient, + local_node_receive_key, + first_hops, + tlvs, + amount_msats, + secp_ctx, + ) + } +} + /// [`ScoreLookUp`] implementation that factors in in-flight HTLC liquidity. /// /// Useful for custom [`Router`] implementations to wrap their [`ScoreLookUp`] on-the-fly when calling From dcb81f9fb22774478875b477b0c7d0b5dc4003f7 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 14 Jan 2026 19:17:23 -0500 Subject: [PATCH 144/242] Drop Deref indirection for MessageRouter Reduces generics and verbosity across the codebase, should provide equivalent behavior. 
Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 3 +- lightning-dns-resolver/src/lib.rs | 6 -- lightning/src/ln/channelmanager.rs | 80 ++++++++--------------- lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/offers/flow.rs | 26 +++----- lightning/src/onion_message/messenger.rs | 48 +++++++++----- 6 files changed, 67 insertions(+), 100 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 941de6b3cee..3255d26328f 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -426,8 +426,7 @@ pub const NO_ONION_MESSENGER: Option< L = &'static (dyn Logger + Send + Sync), NodeIdLookUp = DynChannelManager, NL = &'static DynChannelManager, - MessageRouter = DynMessageRouter, - MR = &'static DynMessageRouter, + MessageRouter = &'static DynMessageRouter, OffersMessageHandler = lightning::ln::peer_handler::IgnoringMessageHandler, OMH = &'static lightning::ln::peer_handler::IgnoringMessageHandler, AsyncPaymentsMessageHandler = lightning::ln::peer_handler::IgnoringMessageHandler, diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index d9af330328e..62b30bf0864 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -243,12 +243,6 @@ mod test { )]) } } - impl Deref for DirectlyConnectedRouter { - type Target = DirectlyConnectedRouter; - fn deref(&self) -> &DirectlyConnectedRouter { - self - } - } struct URIResolver { resolved_uri: Mutex>, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 096942d4a43..1d3f8ecca6d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1805,9 +1805,7 @@ pub trait AChannelManager { /// A type implementing [`Router`]. type Router: Router; /// A type implementing [`MessageRouter`]. - type MessageRouter: MessageRouter + ?Sized; - /// A type that may be dereferenced to [`Self::MessageRouter`]. - type MR: Deref; + type MessageRouter: MessageRouter; /// A type implementing [`Logger`]. type Logger: Logger + ?Sized; /// A type that may be dereferenced to [`Self::Logger`]. @@ -1823,7 +1821,7 @@ pub trait AChannelManager { Self::SP, Self::FeeEstimator, Self::Router, - Self::MR, + Self::MessageRouter, Self::L, >; } @@ -1836,13 +1834,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > AChannelManager for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { type Watch = M::Target; @@ -1855,8 +1852,7 @@ where type SP = SP; type FeeEstimator = F; type Router = R; - type MessageRouter = MR::Target; - type MR = MR; + type MessageRouter = MR; type Logger = L::Target; type L = L; fn get_cm(&self) -> &ChannelManager { @@ -2611,12 +2607,11 @@ pub struct ChannelManager< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { config: RwLock, @@ -3396,13 +3391,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { /// Constructs a new `ChannelManager` to hold several channels and route between them. @@ -13400,13 +13394,10 @@ macro_rules! 
create_offer_builder { ($self: ident, $builder: ty) => { /// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath /// [`Offer`]: crate::offers::offer::Offer /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest - pub fn create_offer_builder_using_router( + pub fn create_offer_builder_using_router( &$self, router: ME, - ) -> Result<$builder, Bolt12SemanticError> - where - ME::Target: MessageRouter, - { + ) -> Result<$builder, Bolt12SemanticError> { let builder = $self.flow.create_offer_builder_using_router( router, &$self.entropy_source, $self.get_peers_for_blinded_path() )?; @@ -13497,13 +13488,10 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { /// [`Refund`]: crate::offers::refund::Refund /// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice - pub fn create_refund_builder_using_router( + pub fn create_refund_builder_using_router( &$self, router: ME, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, retry_strategy: Retry, route_params_config: RouteParametersConfig - ) -> Result<$builder, Bolt12SemanticError> - where - ME::Target: MessageRouter, - { + ) -> Result<$builder, Bolt12SemanticError> { let entropy = &$self.entropy_source; let builder = $self.flow.create_refund_builder_using_router( @@ -13532,13 +13520,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { #[cfg(not(c_bindings))] @@ -14404,13 +14391,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > BaseMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { fn provided_node_features(&self) -> NodeFeatures { @@ -14770,13 +14756,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > EventsProvider for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { /// Processes events that must be periodically handled. 
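This patch repeats the `Router` treatment for `MessageRouter`: an `MR: Deref` parameter plus a `where MR::Target: MessageRouter` clause becomes a direct `MR: MessageRouter` bound throughout `ChannelManager`, `OffersMessageFlow`, and the onion messenger. The `onion_message/messenger.rs` changes in the diffstat presumably carry the matching blanket impl; a sketch under that assumption, with a simplified stand-in trait and method:

use std::ops::Deref;

pub struct BlindedMessagePath;

pub trait MessageRouter {
    fn create_blinded_paths(&self) -> Result<Vec<BlindedMessagePath>, ()>;
}

// Assumed forwarding impl, mirroring the `Router` one added in the previous
// patch: a `Deref` handle to a `MessageRouter` is itself a `MessageRouter`.
impl<MR: Deref> MessageRouter for MR
where
    MR::Target: MessageRouter,
{
    fn create_blinded_paths(&self) -> Result<Vec<BlindedMessagePath>, ()> {
        self.deref().create_blinded_paths()
    }
}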
@@ -14800,13 +14785,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > chain::Listen for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { @@ -14856,13 +14840,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > chain::Confirm for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { #[rustfmt::skip] @@ -15024,13 +15007,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { /// Calls a function which handles an on-chain event (blocks dis/connected, transactions @@ -15381,13 +15363,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > ChannelMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { fn handle_open_channel(&self, counterparty_node_id: PublicKey, message: &msgs::OpenChannel) { @@ -15951,13 +15932,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > OffersMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { #[rustfmt::skip] @@ -16164,13 +16144,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > AsyncPaymentsMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { fn handle_offer_paths_request( @@ -16404,13 +16383,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > DNSResolverMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { fn handle_dnssec_query( @@ -16467,13 +16445,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > NodeIdLookUp for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { fn next_node_id(&self, short_channel_id: u64) -> Option { @@ -16978,13 +16955,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref, > Writeable for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { #[rustfmt::skip] @@ -17340,12 +17316,11 @@ pub struct ChannelManagerReadArgs< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref + Clone, > where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { /// A cryptographically secure source of entropy. 
@@ -17415,13 +17390,12 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref + Clone, > ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L> where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { /// Simple utility function to create a ChannelManagerReadArgs which creates the monitor @@ -17498,14 +17472,13 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref + Clone, > ReadableArgs> for (BlockHash, Arc>) where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { fn read( @@ -17526,14 +17499,13 @@ impl< SP: Deref, F: FeeEstimator, R: Router, - MR: Deref, + MR: MessageRouter, L: Deref + Clone, > ReadableArgs> for (BlockHash, ChannelManager) where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - MR::Target: MessageRouter, L::Target: Logger, { fn read( diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 8b3932a0195..c425bda2bf1 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -739,7 +739,7 @@ pub trait NodeHolder { ::SP, ::FeeEstimator, ::Router, - ::MR, + ::MessageRouter, ::L, >; fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor<'_>>; @@ -756,7 +756,7 @@ impl NodeHolder for &H { ::SP, ::FeeEstimator, ::Router, - ::MR, + ::MessageRouter, ::L, > { (*self).node() diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index e22f97cf2d3..3ee57c56c8f 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -74,9 +74,8 @@ use { /// /// [`OffersMessageFlow`] is parameterized by a [`MessageRouter`], which is responsible /// for finding message paths when initiating and retrying onion messages. -pub struct OffersMessageFlow +pub struct OffersMessageFlow where - MR::Target: MessageRouter, L::Target: Logger, { chain_hash: ChainHash, @@ -107,9 +106,8 @@ where logger: L, } -impl OffersMessageFlow +impl OffersMessageFlow where - MR::Target: MessageRouter, L::Target: Logger, { /// Creates a new [`OffersMessageFlow`] @@ -266,9 +264,8 @@ const DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration = Duration::from_secs(365 * 2 pub(crate) const TEST_DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration = DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY; -impl OffersMessageFlow +impl OffersMessageFlow where - MR::Target: MessageRouter, L::Target: Logger, { /// [`BlindedMessagePath`]s for an async recipient to communicate with this node and interactively @@ -430,9 +427,8 @@ pub enum HeldHtlcReplyPath { }, } -impl OffersMessageFlow +impl OffersMessageFlow where - MR::Target: MessageRouter, L::Target: Logger, { /// Verifies an [`InvoiceRequest`] using the provided [`OffersContext`] or the [`InvoiceRequest::metadata`]. @@ -620,12 +616,9 @@ where /// This is not exported to bindings users as builder patterns don't map outside of move semantics. /// /// See [`Self::create_offer_builder`] for more details on usage. 
- pub fn create_offer_builder_using_router( + pub fn create_offer_builder_using_router( &self, router: ME, entropy_source: ES, peers: Vec, - ) -> Result, Bolt12SemanticError> - where - ME::Target: MessageRouter, - { + ) -> Result, Bolt12SemanticError> { let receive_key = self.get_receive_auth_key(); self.create_offer_builder_intern(&entropy_source, |node_id, context, secp_ctx| { router @@ -767,13 +760,10 @@ where /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed /// [`RouteParameters::from_payment_params_and_value`]: crate::routing::router::RouteParameters::from_payment_params_and_value - pub fn create_refund_builder_using_router( + pub fn create_refund_builder_using_router( &self, router: ME, entropy_source: ES, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, peers: Vec, - ) -> Result, Bolt12SemanticError> - where - ME::Target: MessageRouter, - { + ) -> Result, Bolt12SemanticError> { let receive_key = self.get_receive_auth_key(); self.create_refund_builder_intern( &entropy_source, diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index 5f4b703618b..525d3a72fee 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -78,9 +78,7 @@ pub trait AOnionMessenger { /// A type that may be dereferenced to [`Self::NodeIdLookUp`] type NL: Deref; /// A type implementing [`MessageRouter`] - type MessageRouter: MessageRouter + ?Sized; - /// A type that may be dereferenced to [`Self::MessageRouter`] - type MR: Deref; + type MessageRouter: MessageRouter; /// A type implementing [`OffersMessageHandler`] type OffersMessageHandler: OffersMessageHandler + ?Sized; /// A type that may be dereferenced to [`Self::OffersMessageHandler`] @@ -105,7 +103,7 @@ pub trait AOnionMessenger { Self::NodeSigner, Self::L, Self::NL, - Self::MR, + Self::MessageRouter, Self::OMH, Self::APH, Self::DRH, @@ -118,7 +116,7 @@ impl< NS: NodeSigner, L: Deref, NL: Deref, - MR: Deref, + MR: MessageRouter, OMH: Deref, APH: Deref, DRH: Deref, @@ -127,7 +125,6 @@ impl< where L::Target: Logger, NL::Target: NodeIdLookUp, - MR::Target: MessageRouter, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, DRH::Target: DNSResolverMessageHandler, @@ -139,8 +136,7 @@ where type L = L; type NodeIdLookUp = NL::Target; type NL = NL; - type MessageRouter = MR::Target; - type MR = MR; + type MessageRouter = MR; type OffersMessageHandler = OMH::Target; type OMH = OMH; type AsyncPaymentsMessageHandler = APH::Target; @@ -280,7 +276,7 @@ pub struct OnionMessenger< NS: NodeSigner, L: Deref, NL: Deref, - MR: Deref, + MR: MessageRouter, OMH: Deref, APH: Deref, DRH: Deref, @@ -288,7 +284,6 @@ pub struct OnionMessenger< > where L::Target: Logger, NL::Target: NodeIdLookUp, - MR::Target: MessageRouter, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, DRH::Target: DNSResolverMessageHandler, @@ -512,6 +507,27 @@ pub trait MessageRouter { ) -> Result, ()>; } +impl> MessageRouter for R { + fn find_path( + &self, sender: PublicKey, peers: Vec, destination: Destination, + ) -> Result { + self.deref().find_path(sender, peers, destination) + } + + fn create_blinded_paths( + &self, recipient: PublicKey, local_node_receive_key: ReceiveAuthKey, + context: MessageContext, peers: Vec, secp_ctx: &Secp256k1, + ) -> Result, ()> { + self.deref().create_blinded_paths( + recipient, + local_node_receive_key, + context, + peers, + secp_ctx, + ) 
+	}
+}
+
 /// A [`MessageRouter`] that can only route to a directly connected [`Destination`].
 ///
 /// [`DefaultMessageRouter`] tries to construct compact or private [`BlindedMessagePath`]s based on
@@ -1377,7 +1393,7 @@ impl<
 	NS: NodeSigner,
 	L: Deref,
 	NL: Deref,
-	MR: Deref,
+	MR: MessageRouter,
 	OMH: Deref,
 	APH: Deref,
 	DRH: Deref,
@@ -1386,7 +1402,6 @@ impl<
 where
 	L::Target: Logger,
 	NL::Target: NodeIdLookUp,
-	MR::Target: MessageRouter,
 	OMH::Target: OffersMessageHandler,
 	APH::Target: AsyncPaymentsMessageHandler,
 	DRH::Target: DNSResolverMessageHandler,
@@ -2019,7 +2034,7 @@ impl<
 	NS: NodeSigner,
 	L: Deref,
 	NL: Deref,
-	MR: Deref,
+	MR: MessageRouter,
 	OMH: Deref,
 	APH: Deref,
 	DRH: Deref,
@@ -2028,7 +2043,6 @@ impl<
 where
 	L::Target: Logger,
 	NL::Target: NodeIdLookUp,
-	MR::Target: MessageRouter,
 	OMH::Target: OffersMessageHandler,
 	APH::Target: AsyncPaymentsMessageHandler,
 	DRH::Target: DNSResolverMessageHandler,
@@ -2138,7 +2152,7 @@ impl<
 	NS: NodeSigner,
 	L: Deref,
 	NL: Deref,
-	MR: Deref,
+	MR: MessageRouter,
 	OMH: Deref,
 	APH: Deref,
 	DRH: Deref,
@@ -2147,7 +2161,6 @@ impl<
 where
 	L::Target: Logger,
 	NL::Target: NodeIdLookUp,
-	MR::Target: MessageRouter,
 	OMH::Target: OffersMessageHandler,
 	APH::Target: AsyncPaymentsMessageHandler,
 	DRH::Target: DNSResolverMessageHandler,

From ac8074e58b8647f6c86c83f9503ef1ef954d70ae Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Thu, 15 Jan 2026 12:13:22 -0500
Subject: [PATCH 145/242] Drop Deref indirection for Logger

Reduces generics and verbosity across the codebase, and should provide
equivalent behavior.
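
The mechanics mirror the `MessageRouter` patch above: `L: Deref` bounds with
`L::Target: Logger` where-clauses become direct `L: Logger` bounds, and call
sites that pass `&logger` keep compiling because the trait is forwarded
through reference-like handles. The previous diff adds a Deref-based blanket
impl for `MessageRouter`; a dependency-free sketch of the same idea for a toy
`Logger` trait, forwarding through `&T` and `Arc<T>` (an assumed shape, not
the exact impl this patch adds to util/logger.rs):

    use std::sync::Arc;

    trait Logger {
        fn log(&self, msg: &str);
    }

    // Forward the trait through common handle types so a direct
    // `L: Logger` bound still accepts `&MyLogger` or `Arc<MyLogger>`.
    impl<T: Logger + ?Sized> Logger for &T {
        fn log(&self, msg: &str) { (**self).log(msg) }
    }
    impl<T: Logger + ?Sized> Logger for Arc<T> {
        fn log(&self, msg: &str) { (**self).log(msg) }
    }

    struct PrintLogger;
    impl Logger for PrintLogger {
        fn log(&self, msg: &str) { println!("{msg}"); }
    }

    // A consumer that previously needed `L: Deref, L::Target: Logger`.
    struct Node<L: Logger> { logger: L }

    fn main() {
        Node { logger: &PrintLogger }.logger.log("via &L");
        Node { logger: Arc::new(PrintLogger) }.logger.log("via Arc<L>");
    }

This is also why the self-referential `impl Deref for TestLogger` below (and
`DirectlyConnectedRouter` in the previous patch) get deleted: once the trait
is forwarded through handle types, the `Target = Self` trick is no longer
needed, and for `MessageRouter` it would overlap with the new blanket impl.
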
Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 33 +- lightning-dns-resolver/src/lib.rs | 6 - lightning-rapid-gossip-sync/src/lib.rs | 10 +- lightning-rapid-gossip-sync/src/processing.rs | 5 +- lightning-transaction-sync/src/electrum.rs | 15 +- lightning-transaction-sync/src/esplora.rs | 15 +- lightning/src/chain/chainmonitor.rs | 36 +- lightning/src/chain/channelmonitor.rs | 205 +++--- lightning/src/events/bump_transaction/mod.rs | 15 +- lightning/src/events/bump_transaction/sync.rs | 15 +- lightning/src/ln/channel.rs | 587 ++++++------------ lightning/src/ln/channelmanager.rs | 82 +-- lightning/src/ln/functional_test_utils.rs | 4 +- lightning/src/ln/inbound_payment.rs | 9 +- lightning/src/ln/invoice_utils.rs | 55 +- lightning/src/ln/onion_payment.rs | 18 +- lightning/src/ln/onion_utils.rs | 29 +- lightning/src/ln/outbound_payment.rs | 54 +- lightning/src/ln/peer_handler.rs | 23 +- lightning/src/offers/flow.rs | 21 +- lightning/src/onion_message/messenger.rs | 62 +- lightning/src/routing/gossip.rs | 49 +- lightning/src/routing/router.rs | 35 +- lightning/src/routing/scoring.rs | 71 +-- lightning/src/routing/utxo.rs | 13 +- lightning/src/sign/tx_builder.rs | 14 +- lightning/src/util/anchor_channel_reserves.rs | 5 +- lightning/src/util/logger.rs | 21 +- lightning/src/util/persist.rs | 24 +- lightning/src/util/sweep.rs | 30 +- 30 files changed, 490 insertions(+), 1071 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 3255d26328f..79a3b95463e 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -201,10 +201,9 @@ pub enum GossipSync< R: Deref>, G: Deref>, U: Deref, - L: Deref, + L: Logger, > where U::Target: UtxoLookup, - L::Target: Logger, { /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7. P2P(P), @@ -219,11 +218,10 @@ impl< R: Deref>, G: Deref>, U: Deref, - L: Deref, + L: Logger, > GossipSync where U::Target: UtxoLookup, - L::Target: Logger, { fn network_graph(&self) -> Option<&G> { match self { @@ -261,11 +259,10 @@ impl< P: Deref>, G: Deref>, U: Deref, - L: Deref, + L: Logger, > GossipSync, G, U, L> where U::Target: UtxoLookup, - L::Target: Logger, { /// Initializes a new [`GossipSync::P2P`] variant. pub fn p2p(gossip_sync: P) -> Self { @@ -278,7 +275,7 @@ impl< 'a, R: Deref>, G: Deref>, - L: Deref, + L: Logger, > GossipSync< &P2PGossipSync, @@ -286,8 +283,7 @@ impl< G, &'a (dyn UtxoLookup + Send + Sync), L, - > where - L::Target: Logger, + > { /// Initializes a new [`GossipSync::Rapid`] variant. pub fn rapid(gossip_sync: R) -> Self { @@ -296,15 +292,14 @@ impl< } /// This is not exported to bindings users as the bindings concretize everything and have constructors for us -impl<'a, L: Deref> +impl<'a, L: Logger> GossipSync< &P2PGossipSync<&'a NetworkGraph, &'a (dyn UtxoLookup + Send + Sync), L>, &RapidGossipSync<&'a NetworkGraph, L>, &'a NetworkGraph, &'a (dyn UtxoLookup + Send + Sync), L, - > where - L::Target: Logger, + > { /// Initializes a new [`GossipSync::None`] variant. pub fn none() -> Self { @@ -312,10 +307,7 @@ impl<'a, L: Deref> } } -fn handle_network_graph_update(network_graph: &NetworkGraph, event: &Event) -where - L::Target: Logger, -{ +fn handle_network_graph_update(network_graph: &NetworkGraph, event: &Event) { if let Event::PaymentPathFailed { failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. 
@@ -422,8 +414,7 @@ pub const NO_ONION_MESSENGER: Option< dyn AOnionMessenger< EntropySource = &(dyn EntropySource + Send + Sync), NodeSigner = &(dyn lightning::sign::NodeSigner + Send + Sync), - Logger = dyn Logger + Send + Sync, - L = &'static (dyn Logger + Send + Sync), + Logger = &'static (dyn Logger + Send + Sync), NodeIdLookUp = DynChannelManager, NL = &'static DynChannelManager, MessageRouter = &'static DynMessageRouter, @@ -950,7 +941,7 @@ pub async fn process_events_async< T: BroadcasterInterface, F: FeeEstimator, G: Deref>, - L: Deref, + L: Logger, P: Deref, EventHandlerFuture: core::future::Future>, EventHandler: Fn(Event) -> EventHandlerFuture, @@ -980,7 +971,6 @@ pub async fn process_events_async< where UL::Target: UtxoLookup, CF::Target: chain::Filter, - L::Target: Logger, P::Target: Persist<::Signer>, CM::Target: AChannelManager, OM::Target: AOnionMessenger, @@ -1448,7 +1438,7 @@ pub async fn process_events_async_with_kv_store_sync< T: BroadcasterInterface, F: FeeEstimator, G: Deref>, - L: Deref, + L: Logger, P: Deref, EventHandlerFuture: core::future::Future>, EventHandler: Fn(Event) -> EventHandlerFuture, @@ -1478,7 +1468,6 @@ pub async fn process_events_async_with_kv_store_sync< where UL::Target: UtxoLookup, CF::Target: chain::Filter, - L::Target: Logger, P::Target: Persist<::Signer>, CM::Target: AChannelManager, OM::Target: AOnionMessenger, diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index 62b30bf0864..925e658ebe7 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -196,12 +196,6 @@ mod test { eprintln!("{:<8} {}", self.node, record); } } - impl Deref for TestLogger { - type Target = TestLogger; - fn deref(&self) -> &TestLogger { - self - } - } struct DummyNodeLookup {} impl NodeIdLookUp for DummyNodeLookup { diff --git a/lightning-rapid-gossip-sync/src/lib.rs b/lightning-rapid-gossip-sync/src/lib.rs index 429a3560be0..a9653754655 100644 --- a/lightning-rapid-gossip-sync/src/lib.rs +++ b/lightning-rapid-gossip-sync/src/lib.rs @@ -132,19 +132,13 @@ impl From for GraphSyncError { /// See [crate-level documentation] for usage. /// /// [crate-level documentation]: crate -pub struct RapidGossipSync>, L: Deref> -where - L::Target: Logger, -{ +pub struct RapidGossipSync>, L: Logger> { network_graph: NG, logger: L, is_initial_sync_complete: AtomicBool, } -impl>, L: Deref> RapidGossipSync -where - L::Target: Logger, -{ +impl>, L: Logger> RapidGossipSync { /// Instantiate a new [`RapidGossipSync`] instance. pub fn new(network_graph: NG, logger: L) -> Self { Self { network_graph, logger, is_initial_sync_complete: AtomicBool::new(false) } diff --git a/lightning-rapid-gossip-sync/src/processing.rs b/lightning-rapid-gossip-sync/src/processing.rs index 8319506b574..9d3287969f2 100644 --- a/lightning-rapid-gossip-sync/src/processing.rs +++ b/lightning-rapid-gossip-sync/src/processing.rs @@ -37,10 +37,7 @@ const MAX_INITIAL_NODE_ID_VECTOR_CAPACITY: u32 = 50_000; /// suggestion. 
const STALE_RGS_UPDATE_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14; -impl>, L: Deref> RapidGossipSync -where - L::Target: Logger, -{ +impl>, L: Logger> RapidGossipSync { #[cfg(feature = "std")] pub(crate) fn update_network_graph_from_byte_stream( &self, read_cursor: &mut R, diff --git a/lightning-transaction-sync/src/electrum.rs b/lightning-transaction-sync/src/electrum.rs index 1162b9c00c9..1905456d281 100644 --- a/lightning-transaction-sync/src/electrum.rs +++ b/lightning-transaction-sync/src/electrum.rs @@ -37,20 +37,14 @@ use std::time::Instant; /// [`ChainMonitor`]: lightning::chain::chainmonitor::ChainMonitor /// [`Watch::watch_channel`]: lightning::chain::Watch::watch_channel /// [`Filter`]: lightning::chain::Filter -pub struct ElectrumSyncClient -where - L::Target: Logger, -{ +pub struct ElectrumSyncClient { sync_state: Mutex, queue: Mutex, client: Arc, logger: L, } -impl ElectrumSyncClient -where - L::Target: Logger, -{ +impl ElectrumSyncClient { /// Returns a new [`ElectrumSyncClient`] object. pub fn new(server_url: String, logger: L) -> Result { let client = Arc::new(ElectrumClient::new(&server_url).map_err(|e| { @@ -506,10 +500,7 @@ where } } -impl Filter for ElectrumSyncClient -where - L::Target: Logger, -{ +impl Filter for ElectrumSyncClient { fn register_tx(&self, txid: &Txid, _script_pubkey: &Script) { let mut locked_queue = self.queue.lock().unwrap(); locked_queue.transactions.insert(*txid); diff --git a/lightning-transaction-sync/src/esplora.rs b/lightning-transaction-sync/src/esplora.rs index a191260bc01..6caf7a6a7ee 100644 --- a/lightning-transaction-sync/src/esplora.rs +++ b/lightning-transaction-sync/src/esplora.rs @@ -42,20 +42,14 @@ use std::collections::HashSet; /// [`ChainMonitor`]: lightning::chain::chainmonitor::ChainMonitor /// [`Watch::watch_channel`]: lightning::chain::Watch::watch_channel /// [`Filter`]: lightning::chain::Filter -pub struct EsploraSyncClient -where - L::Target: Logger, -{ +pub struct EsploraSyncClient { sync_state: MutexType, queue: std::sync::Mutex, client: EsploraClientType, logger: L, } -impl EsploraSyncClient -where - L::Target: Logger, -{ +impl EsploraSyncClient { /// Returns a new [`EsploraSyncClient`] object. 
pub fn new(server_url: String, logger: L) -> Self { let builder = Builder::new(&server_url); @@ -472,10 +466,7 @@ type EsploraClientType = AsyncClient; #[cfg(not(feature = "async-interface"))] type EsploraClientType = BlockingClient; -impl Filter for EsploraSyncClient -where - L::Target: Logger, -{ +impl Filter for EsploraSyncClient { fn register_tx(&self, txid: &Txid, _script_pubkey: &Script) { let mut locked_queue = self.queue.lock().unwrap(); locked_queue.transactions.insert(*txid); diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 30f1d56ab71..87943bdf910 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -258,14 +258,13 @@ impl Deref for LockedChannelMonitor<'_, Chann pub struct AsyncPersister< K: Deref + MaybeSend + MaybeSync + 'static, S: FutureSpawner, - L: Deref + MaybeSend + MaybeSync + 'static, + L: Logger + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, FE: FeeEstimator + MaybeSend + MaybeSync + 'static, > where K::Target: KVStore + MaybeSync, - L::Target: Logger, SP::Target: SignerProvider + Sized, { persister: MonitorUpdatingPersisterAsync, @@ -275,7 +274,7 @@ pub struct AsyncPersister< impl< K: Deref + MaybeSend + MaybeSync + 'static, S: FutureSpawner, - L: Deref + MaybeSend + MaybeSync + 'static, + L: Logger + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, @@ -283,7 +282,6 @@ impl< > Deref for AsyncPersister where K::Target: KVStore + MaybeSync, - L::Target: Logger, SP::Target: SignerProvider + Sized, { type Target = Self; @@ -295,7 +293,7 @@ where impl< K: Deref + MaybeSend + MaybeSync + 'static, S: FutureSpawner, - L: Deref + MaybeSend + MaybeSync + 'static, + L: Logger + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, @@ -303,7 +301,6 @@ impl< > Persist<::EcdsaSigner> for AsyncPersister where K::Target: KVStore + MaybeSync, - L::Target: Logger, SP::Target: SignerProvider + Sized, ::EcdsaSigner: MaybeSend + 'static, { @@ -355,12 +352,11 @@ pub struct ChainMonitor< C: Deref, T: BroadcasterInterface, F: FeeEstimator, - L: Deref, + L: Logger, P: Deref, ES: EntropySource, > where C::Target: chain::Filter, - L::Target: Logger, P::Target: Persist, { monitors: RwLock>>, @@ -394,7 +390,7 @@ impl< C: Deref, T: BroadcasterInterface + MaybeSend + MaybeSync + 'static, F: FeeEstimator + MaybeSend + MaybeSync + 'static, - L: Deref + MaybeSend + MaybeSync + 'static, + L: Logger + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, > ChainMonitor< @@ -409,7 +405,6 @@ impl< K::Target: KVStore + MaybeSync, SP::Target: SignerProvider + Sized, C::Target: chain::Filter, - L::Target: Logger, ::EcdsaSigner: MaybeSend + 'static, { /// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels. 
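
One `Target = Self` deref does survive the cleanup in the chainmonitor.rs
hunks above: `AsyncPersister` still derefs to itself, because it is consumed
by code that keeps taking the persister as `P: Deref` with
`P::Target: Persist`. A toy illustration of that trick, with hypothetical
names and a stub `Persist` trait:

    use core::ops::Deref;

    trait Persist { fn persist(&self); }

    struct AsyncPersister;
    impl Persist for AsyncPersister {
        fn persist(&self) {}
    }

    // Lets `AsyncPersister` be passed wherever a Deref handle to a
    // `Persist` implementation is expected, without extra wrapping.
    impl Deref for AsyncPersister {
        type Target = Self;
        fn deref(&self) -> &Self { self }
    }

    fn store<P: Deref>(p: P) where P::Target: Persist {
        p.persist();
    }

    fn main() {
        store(AsyncPersister);  // by value, derefs to itself
        store(&AsyncPersister); // or behind a plain reference
    }
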
@@ -449,13 +444,12 @@ impl< C: Deref, T: BroadcasterInterface, F: FeeEstimator, - L: Deref, + L: Logger, P: Deref, ES: EntropySource, > ChainMonitor where C::Target: chain::Filter, - L::Target: Logger, P::Target: Persist, { /// Dispatches to per-channel monitors, which are responsible for updating their on-chain view @@ -1093,13 +1087,12 @@ impl< C: Deref, T: BroadcasterInterface, F: FeeEstimator, - L: Deref, + L: Logger, P: Deref, ES: EntropySource, > BaseMessageHandler for ChainMonitor where C::Target: chain::Filter, - L::Target: Logger, P::Target: Persist, { fn get_and_clear_pending_msg_events(&self) -> Vec { @@ -1129,13 +1122,12 @@ impl< C: Deref, T: BroadcasterInterface, F: FeeEstimator, - L: Deref, + L: Logger, P: Deref, ES: EntropySource, > SendOnlyMessageHandler for ChainMonitor where C::Target: chain::Filter, - L::Target: Logger, P::Target: Persist, { } @@ -1145,13 +1137,12 @@ impl< C: Deref, T: BroadcasterInterface, F: FeeEstimator, - L: Deref, + L: Logger, P: Deref, ES: EntropySource, > chain::Listen for ChainMonitor where C::Target: chain::Filter, - L::Target: Logger, P::Target: Persist, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { @@ -1206,13 +1197,12 @@ impl< C: Deref, T: BroadcasterInterface, F: FeeEstimator, - L: Deref, + L: Logger, P: Deref, ES: EntropySource, > chain::Confirm for ChainMonitor where C::Target: chain::Filter, - L::Target: Logger, P::Target: Persist, { fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { @@ -1298,13 +1288,12 @@ impl< C: Deref, T: BroadcasterInterface, F: FeeEstimator, - L: Deref, + L: Logger, P: Deref, ES: EntropySource, > chain::Watch for ChainMonitor where C::Target: chain::Filter, - L::Target: Logger, P::Target: Persist, { fn watch_channel( @@ -1491,13 +1480,12 @@ impl< C: Deref, T: BroadcasterInterface, F: FeeEstimator, - L: Deref, + L: Logger, P: Deref, ES: EntropySource, > events::EventsProvider for ChainMonitor where C::Target: chain::Filter, - L::Target: Logger, P::Target: Persist, { /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity. 
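
The channelmonitor.rs diff that follows threads the new `L: Logger` bound
through dozens of helpers, and nearly all of them first wrap the caller's
logger via `WithChannelMonitor::from_impl`, which builds a `WithContext`
decorator. A minimal sketch of that decorator pattern, assuming the toy
`Logger` trait from the earlier sketch (the real `WithContext` stamps peer
id, channel id, and payment hash onto each `Record` rather than prefixing
strings):

    trait Logger { fn log(&self, msg: &str); }

    // Decorator: attaches per-channel context, then forwards to the
    // wrapped logger. It implements `Logger` itself, so it satisfies
    // the same `L: Logger` bounds as the logger it wraps.
    struct WithContext<'a, L: Logger> {
        logger: &'a L,
        channel_id: Option<u64>, // stand-in for the real id/hash fields
    }

    impl<'a, L: Logger> Logger for WithContext<'a, L> {
        fn log(&self, msg: &str) {
            match self.channel_id {
                Some(id) => self.logger.log(&format!("[chan {id}] {msg}")),
                None => self.logger.log(msg),
            }
        }
    }
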
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index aa862ca3e5b..015cae73282 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1828,21 +1828,15 @@ pub(super) use _process_events_body as process_events_body; pub(crate) struct WithChannelMonitor; impl WithChannelMonitor { - pub(crate) fn from<'a, L: Deref, S: EcdsaChannelSigner>( + pub(crate) fn from<'a, L: Logger, S: EcdsaChannelSigner>( logger: &'a L, monitor: &ChannelMonitor, payment_hash: Option, - ) -> WithContext<'a, L> - where - L::Target: Logger, - { + ) -> WithContext<'a, L> { Self::from_impl(logger, &*monitor.inner.lock().unwrap(), payment_hash) } - pub(crate) fn from_impl<'a, L: Deref, S: EcdsaChannelSigner>( + pub(crate) fn from_impl<'a, L: Logger, S: EcdsaChannelSigner>( logger: &'a L, monitor_impl: &ChannelMonitorImpl, payment_hash: Option, - ) -> WithContext<'a, L> - where - L::Target: Logger, - { + ) -> WithContext<'a, L> { let peer_id = Some(monitor_impl.counterparty_node_id); let channel_id = Some(monitor_impl.channel_id()); WithContext::from(logger, peer_id, channel_id, payment_hash) @@ -2058,16 +2052,14 @@ impl ChannelMonitor { /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager #[rustfmt::skip] - pub(crate) fn provide_payment_preimage_unsafe_legacy( + pub(crate) fn provide_payment_preimage_unsafe_legacy( &self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) where - L::Target: Logger, - { + ) { let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, Some(*payment_hash)); // Note that we don't pass any MPP claim parts here. This is generally not okay but in this @@ -2081,12 +2073,9 @@ impl ChannelMonitor { /// itself. /// /// panics if the given update is not the next update by update_id. - pub fn update_monitor( + pub fn update_monitor( &self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L, - ) -> Result<(), ()> - where - L::Target: Logger, - { + ) -> Result<(), ()> { let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); inner.update_monitor(updates, broadcaster, fee_estimator, &logger) @@ -2136,10 +2125,8 @@ impl ChannelMonitor { /// calling `chain::Filter::register_output` and `chain::Filter::register_tx` until all outputs /// have been registered. 
#[rustfmt::skip] - pub fn load_outputs_to_watch(&self, filter: &F, logger: &L) - where - F::Target: chain::Filter, L::Target: Logger, - { + pub fn load_outputs_to_watch(&self, filter: &F, logger: &L) + where F::Target: chain::Filter { let lock = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*lock, None); for funding in core::iter::once(&lock.funding).chain(&lock.pending_funding) { @@ -2183,12 +2170,11 @@ impl ChannelMonitor { /// /// [`SpendableOutputs`]: crate::events::Event::SpendableOutputs /// [`BumpTransaction`]: crate::events::Event::BumpTransaction - pub fn process_pending_events( + pub fn process_pending_events( &self, handler: &H, logger: &L, ) -> Result<(), ReplayEvent> where H::Target: EventHandler, - L::Target: Logger, { let mut ev; process_events_body!(Some(self), logger, ev, handler.handle_event(ev)) @@ -2200,13 +2186,10 @@ impl ChannelMonitor { pub async fn process_pending_events_async< Future: core::future::Future>, H: Fn(Event) -> Future, - L: Deref, + L: Logger, >( &self, handler: &H, logger: &L, - ) -> Result<(), ReplayEvent> - where - L::Target: Logger, - { + ) -> Result<(), ReplayEvent> { let mut ev; process_events_body!(Some(self), logger, ev, { handler(ev).await }) } @@ -2337,12 +2320,10 @@ impl ChannelMonitor { pub fn broadcast_latest_holder_commitment_txn< B: BroadcasterInterface, F: FeeEstimator, - L: Deref, + L: Logger, >( &self, broadcaster: &B, fee_estimator: &F, logger: &L, - ) where - L::Target: Logger, - { + ) { let mut inner = self.inner.lock().unwrap(); let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); @@ -2359,10 +2340,9 @@ impl ChannelMonitor { /// to bypass HolderCommitmentTransaction state update lockdown after signature and generate /// revoked commitment transaction. #[cfg(any(test, feature = "_test_utils", feature = "unsafe_revoked_tx_signing"))] - pub fn unsafe_get_latest_holder_commitment_txn(&self, logger: &L) -> Vec - where - L::Target: Logger, - { + pub fn unsafe_get_latest_holder_commitment_txn( + &self, logger: &L, + ) -> Vec { let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); inner.unsafe_get_latest_holder_commitment_txn(&logger) @@ -2380,7 +2360,7 @@ impl ChannelMonitor { /// /// [`get_outputs_to_watch`]: #method.get_outputs_to_watch #[rustfmt::skip] - pub fn block_connected( + pub fn block_connected( &self, header: &Header, txdata: &TransactionData, @@ -2388,10 +2368,7 @@ impl ChannelMonitor { broadcaster: B, fee_estimator: F, logger: &L, - ) -> Vec - where - L::Target: Logger, - { + ) -> Vec { let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); inner.block_connected( @@ -2400,11 +2377,9 @@ impl ChannelMonitor { /// Determines if the disconnected block contained any transactions of interest and updates /// appropriately. 
- pub fn blocks_disconnected( + pub fn blocks_disconnected( &self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &L, - ) where - L::Target: Logger, - { + ) { let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); inner.blocks_disconnected(fork_point, broadcaster, fee_estimator, &logger) @@ -2418,7 +2393,7 @@ impl ChannelMonitor { /// /// [`block_connected`]: Self::block_connected #[rustfmt::skip] - pub fn transactions_confirmed( + pub fn transactions_confirmed( &self, header: &Header, txdata: &TransactionData, @@ -2426,10 +2401,7 @@ impl ChannelMonitor { broadcaster: B, fee_estimator: F, logger: &L, - ) -> Vec - where - L::Target: Logger, - { + ) -> Vec { let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); @@ -2444,15 +2416,13 @@ impl ChannelMonitor { /// /// [`blocks_disconnected`]: Self::blocks_disconnected #[rustfmt::skip] - pub fn transaction_unconfirmed( + pub fn transaction_unconfirmed( &self, txid: &Txid, broadcaster: B, fee_estimator: F, logger: &L, - ) where - L::Target: Logger, - { + ) { let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); @@ -2469,17 +2439,14 @@ impl ChannelMonitor { /// /// [`block_connected`]: Self::block_connected #[rustfmt::skip] - pub fn best_block_updated( + pub fn best_block_updated( &self, header: &Header, height: u32, broadcaster: B, fee_estimator: F, logger: &L, - ) -> Vec - where - L::Target: Logger, - { + ) -> Vec { let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); @@ -2514,12 +2481,9 @@ impl ChannelMonitor { /// invoking this every 30 seconds, or lower if running in an environment with spotty /// connections, like on mobile. #[rustfmt::skip] - pub fn rebroadcast_pending_claims( + pub fn rebroadcast_pending_claims( &self, broadcaster: B, fee_estimator: F, logger: &L, - ) - where - L::Target: Logger, - { + ) { let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let mut lock = self.inner.lock().unwrap(); let inner = &mut *lock; @@ -2540,12 +2504,9 @@ impl ChannelMonitor { /// Triggers rebroadcasts of pending claims from a force-closed channel after a transaction /// signature generation failure. #[rustfmt::skip] - pub fn signer_unblocked( + pub fn signer_unblocked( &self, broadcaster: B, fee_estimator: F, logger: &L, - ) - where - L::Target: Logger, - { + ) { let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let mut lock = self.inner.lock().unwrap(); let inner = &mut *lock; @@ -3792,11 +3753,11 @@ impl ChannelMonitorImpl { /// /// Note that this is often called multiple times for the same payment and must be idempotent. #[rustfmt::skip] - fn provide_payment_preimage( + fn provide_payment_preimage( &mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, payment_info: &Option, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext - ) where L::Target: Logger { + ) { self.payment_preimages.entry(payment_hash.clone()) .and_modify(|(_, payment_infos)| { if let Some(payment_info) = payment_info { @@ -3968,13 +3929,10 @@ impl ChannelMonitorImpl { /// See also [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]. 
/// /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]: crate::chain::channelmonitor::ChannelMonitor::broadcast_latest_holder_commitment_txn - pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast( + pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast( &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, require_funding_seen: bool, - ) - where - L::Target: Logger, - { + ) { let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message: "ChannelMonitor-initiated commitment transaction broadcast".to_owned(), @@ -3994,14 +3952,11 @@ impl ChannelMonitorImpl { ); } - fn renegotiated_funding( + fn renegotiated_funding( &mut self, logger: &WithContext, channel_parameters: &ChannelTransactionParameters, alternative_holder_commitment_tx: &HolderCommitmentTransaction, alternative_counterparty_commitment_tx: &CommitmentTransaction, - ) -> Result<(), ()> - where - L::Target: Logger, - { + ) -> Result<(), ()> { let alternative_counterparty_commitment_txid = alternative_counterparty_commitment_tx.trust().txid(); @@ -4169,11 +4124,9 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn update_monitor( + fn update_monitor( &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithContext - ) -> Result<(), ()> - where L::Target: Logger, - { + ) -> Result<(), ()> { if self.latest_update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID && updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID { log_info!(logger, "Applying pre-0.1 post-force-closed update to monitor {} with {} change(s).", log_funding_info!(self), updates.updates.len()); @@ -4635,9 +4588,9 @@ impl ChannelMonitorImpl { /// Returns packages to claim the revoked output(s) and general information about the output that /// is to the counterparty in the commitment transaction. #[rustfmt::skip] - fn check_spend_counterparty_transaction(&mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) + fn check_spend_counterparty_transaction(&mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) -> (Vec, CommitmentTxCounterpartyOutputInfo) - where L::Target: Logger { + { // Most secp and related errors trying to create keys means we have no hope of constructing // a spend transaction...so we return no transactions to broadcast let mut claimable_outpoints = Vec::new(); @@ -4925,9 +4878,9 @@ impl ChannelMonitorImpl { /// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key #[rustfmt::skip] - fn check_spend_counterparty_htlc( + fn check_spend_counterparty_htlc( &mut self, tx: &Transaction, commitment_number: u64, commitment_txid: &Txid, height: u32, logger: &L - ) -> (Vec, Option) where L::Target: Logger { + ) -> (Vec, Option) { let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return (Vec::new(), None); }; let per_commitment_key = match SecretKey::from_slice(&secret) { Ok(key) => key, @@ -5068,13 +5021,10 @@ impl ChannelMonitorImpl { /// revoked using data in holder_claimable_outpoints. /// Should not be used if check_spend_revoked_transaction succeeds. /// Returns None unless the transaction is definitely one of our commitment transactions. 
- fn check_spend_holder_transaction( + fn check_spend_holder_transaction( &mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L, - ) -> Option<(Vec, TransactionOutputs)> - where - L::Target: Logger, - { + ) -> Option<(Vec, TransactionOutputs)> { let funding_spent = get_confirmed_funding_scope!(self); // HTLCs set may differ between last and previous holder commitment txn, in case of one them hitting chain, ensure we cancel all HTLCs backward @@ -5137,9 +5087,9 @@ impl ChannelMonitorImpl { /// Cancels any existing pending claims for a commitment that previously confirmed and has now /// been replaced by another. #[rustfmt::skip] - pub fn cancel_prev_commitment_claims( + pub fn cancel_prev_commitment_claims( &mut self, logger: &L, confirmed_commitment_txid: &Txid - ) where L::Target: Logger { + ) { for (counterparty_commitment_txid, _) in &self.counterparty_commitment_txn_on_chain { // Cancel any pending claims for counterparty commitments we've seen confirm. if counterparty_commitment_txid == confirmed_commitment_txid { @@ -5211,9 +5161,9 @@ impl ChannelMonitorImpl { #[cfg(any(test, feature = "_test_utils", feature = "unsafe_revoked_tx_signing"))] /// Note that this includes possibly-locktimed-in-the-future transactions! #[rustfmt::skip] - fn unsafe_get_latest_holder_commitment_txn( + fn unsafe_get_latest_holder_commitment_txn( &mut self, logger: &WithContext - ) -> Vec where L::Target: Logger { + ) -> Vec { log_debug!(logger, "Getting signed copy of latest holder commitment transaction!"); let commitment_tx = { let sig = self.onchain_tx_handler.signer.unsafe_sign_holder_commitment( @@ -5263,10 +5213,10 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn block_connected( + fn block_connected( &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: &WithContext, - ) -> Vec where L::Target: Logger, { + ) -> Vec { let block_hash = header.block_hash(); self.best_block = BestBlock::new(block_hash, height); @@ -5275,17 +5225,14 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn best_block_updated( + fn best_block_updated( &mut self, header: &Header, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, - ) -> Vec - where - L::Target: Logger, - { + ) -> Vec { let block_hash = header.block_hash(); if height > self.best_block.height { @@ -5305,7 +5252,7 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn transactions_confirmed( + fn transactions_confirmed( &mut self, header: &Header, txdata: &TransactionData, @@ -5313,10 +5260,7 @@ impl ChannelMonitorImpl { broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, - ) -> Vec - where - L::Target: Logger, - { + ) -> Vec { let funding_seen_before = self.funding_seen_onchain; let txn_matched = self.filter_block(txdata); @@ -5588,7 +5532,7 @@ impl ChannelMonitorImpl { /// `conf_height` should be set to the height at which any new transaction(s)/block(s) were /// confirmed at, even if it is not the current best height. 
#[rustfmt::skip] - fn block_confirmed( + fn block_confirmed( &mut self, conf_height: u32, conf_hash: BlockHash, @@ -5598,10 +5542,7 @@ impl ChannelMonitorImpl { broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, - ) -> Vec - where - L::Target: Logger, - { + ) -> Vec { log_trace!(logger, "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height); debug_assert!(self.best_block.height >= conf_height); @@ -5814,10 +5755,9 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn blocks_disconnected( + fn blocks_disconnected( &mut self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &WithContext - ) where L::Target: Logger, - { + ) { let new_height = fork_point.height; log_trace!(logger, "Block(s) disconnected to height {}", new_height); assert!(self.best_block.height > fork_point.height, @@ -5861,15 +5801,13 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn transaction_unconfirmed( + fn transaction_unconfirmed( &mut self, txid: &Txid, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, - ) where - L::Target: Logger, - { + ) { let mut removed_height = None; for entry in self.onchain_events_awaiting_threshold_conf.iter() { if entry.txid == *txid { @@ -5974,9 +5912,9 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn should_broadcast_holder_commitment_txn( + fn should_broadcast_holder_commitment_txn( &self, logger: &WithContext - ) -> Option where L::Target: Logger { + ) -> Option { // There's no need to broadcast our commitment transaction if we've seen one confirmed (even // with 1 confirmation) as it'll be rejected as duplicate/conflicting. if self.funding_spend_confirmed.is_some() || @@ -6041,9 +5979,9 @@ impl ChannelMonitorImpl { /// Check if any transaction broadcasted is resolving HTLC output by a success or timeout on a holder /// or counterparty commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC #[rustfmt::skip] - fn is_resolving_htlc_output( + fn is_resolving_htlc_output( &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithContext, - ) where L::Target: Logger { + ) { let funding_spent = get_confirmed_funding_scope!(self); 'outer_loop: for input in &tx.input { @@ -6298,9 +6236,9 @@ impl ChannelMonitorImpl { /// Checks if the confirmed transaction is paying funds back to some address we can assume to /// own. 
#[rustfmt::skip] - fn check_tx_and_push_spendable_outputs( + fn check_tx_and_push_spendable_outputs( &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithContext, - ) where L::Target: Logger { + ) { let funding_spent = get_confirmed_funding_scope!(self); for spendable_output in self.get_spendable_outputs(funding_spent, tx) { let entry = OnchainEventEntry { @@ -6320,10 +6258,8 @@ impl ChannelMonitorImpl { } } -impl chain::Listen +impl chain::Listen for (ChannelMonitor, T, F, L) -where - L::Target: Logger, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { self.0.block_connected(header, txdata, height, &self.1, &self.2, &self.3); @@ -6334,11 +6270,10 @@ where } } -impl +impl chain::Confirm for (M, T, F, L) where M: Deref>, - L::Target: Logger, { fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { self.0.transactions_confirmed(header, txdata, height, &self.1, &self.2, &self.3); diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index b45b65940ee..bc912124410 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -442,10 +442,9 @@ pub trait WalletSource { /// /// This is not exported to bindings users as async is only supported in Rust. // Note that updates to documentation on this struct should be copied to the synchronous version. -pub struct Wallet +pub struct Wallet where W::Target: WalletSource + MaybeSend, - L::Target: Logger + MaybeSend, { source: W, logger: L, @@ -455,10 +454,9 @@ where locked_utxos: Mutex>, } -impl Wallet +impl Wallet where W::Target: WalletSource + MaybeSend, - L::Target: Logger + MaybeSend, { /// Returns a new instance backed by the given [`WalletSource`] that serves as an implementation /// of [`CoinSelectionSource`]. @@ -617,11 +615,10 @@ where } } -impl CoinSelectionSource +impl CoinSelectionSource for Wallet where W::Target: WalletSource + MaybeSend + MaybeSync, - L::Target: Logger + MaybeSend + MaybeSync, { fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], @@ -694,11 +691,10 @@ where /// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction // Note that updates to documentation on this struct should be copied to the synchronous version. -pub struct BumpTransactionEventHandler +pub struct BumpTransactionEventHandler where C::Target: CoinSelectionSource, SP::Target: SignerProvider, - L::Target: Logger, { broadcaster: B, utxo_source: C, @@ -707,12 +703,11 @@ where secp: Secp256k1, } -impl +impl BumpTransactionEventHandler where C::Target: CoinSelectionSource, SP::Target: SignerProvider, - L::Target: Logger, { /// Returns a new instance capable of handling [`Event::BumpTransaction`] events. /// diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index bf0668ccba3..e19ab3d7804 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -100,18 +100,16 @@ where /// /// For an asynchronous version of this wrapper, see [`Wallet`]. // Note that updates to documentation on this struct should be copied to the asynchronous version. 
-pub struct WalletSync +pub struct WalletSync where W::Target: WalletSourceSync + MaybeSend, - L::Target: Logger + MaybeSend, { wallet: Wallet, L>, } -impl WalletSync +impl WalletSync where W::Target: WalletSourceSync + MaybeSend, - L::Target: Logger + MaybeSend, { /// Constructs a new [`WalletSync`] instance. pub fn new(source: W, logger: L) -> Self { @@ -119,11 +117,10 @@ where } } -impl CoinSelectionSourceSync +impl CoinSelectionSourceSync for WalletSync where W::Target: WalletSourceSync + MaybeSend + MaybeSync, - L::Target: Logger + MaybeSend + MaybeSync, { fn select_confirmed_utxos( &self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &[TxOut], @@ -267,22 +264,20 @@ where /// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction // Note that updates to documentation on this struct should be copied to the synchronous version. -pub struct BumpTransactionEventHandlerSync +pub struct BumpTransactionEventHandlerSync where C::Target: CoinSelectionSourceSync, SP::Target: SignerProvider, - L::Target: Logger, { bump_transaction_event_handler: BumpTransactionEventHandler, SP, L>, } -impl +impl BumpTransactionEventHandlerSync where C::Target: CoinSelectionSourceSync, SP::Target: SignerProvider, - L::Target: Logger, { /// Constructs a new instance of [`BumpTransactionEventHandlerSync`]. pub fn new(broadcaster: B, utxo_source: C, signer_provider: SP, logger: L) -> Self { diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 56317e774c0..fc20708009c 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -985,20 +985,14 @@ impl ChannelError { } } -pub(super) struct WithChannelContext<'a, L: Deref> -where - L::Target: Logger, -{ +pub(super) struct WithChannelContext<'a, L: Logger> { pub logger: &'a L, pub peer_id: Option, pub channel_id: Option, pub payment_hash: Option, } -impl<'a, L: Deref> Logger for WithChannelContext<'a, L> -where - L::Target: Logger, -{ +impl<'a, L: Logger> Logger for WithChannelContext<'a, L> { fn log(&self, mut record: Record) { record.peer_id = self.peer_id; record.channel_id = self.channel_id; @@ -1007,10 +1001,7 @@ where } } -impl<'a, 'b, L: Deref> WithChannelContext<'a, L> -where - L::Target: Logger, -{ +impl<'a, 'b, L: Logger> WithChannelContext<'a, L> { pub(super) fn from( logger: &'a L, context: &'b ChannelContext, payment_hash: Option, ) -> Self @@ -1294,11 +1285,10 @@ impl HolderCommitmentPoint { /// If we are pending advancing the next commitment point, this method tries asking the signer /// again. - pub fn try_resolve_pending( + pub fn try_resolve_pending( &mut self, signer: &ChannelSignerType, secp_ctx: &Secp256k1, logger: &L, ) where SP::Target: SignerProvider, - L::Target: Logger, { if !self.can_advance() { let pending_next_point = signer @@ -1331,12 +1321,11 @@ impl HolderCommitmentPoint { /// /// If our signer is ready to provide the next commitment point, the next call to `advance` will /// succeed. 
- pub fn advance( + pub fn advance( &mut self, signer: &ChannelSignerType, secp_ctx: &Secp256k1, logger: &L, ) -> Result<(), ()> where SP::Target: SignerProvider, - L::Target: Logger, { if let Some(next_point) = self.pending_next_point { *self = Self { @@ -1619,9 +1608,9 @@ where } #[rustfmt::skip] - pub fn signer_maybe_unblocked( + pub fn signer_maybe_unblocked( &mut self, chain_hash: ChainHash, logger: &L, path_for_release_htlc: CBP - ) -> Result, ChannelError> where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { + ) -> Result, ChannelError> where CBP: Fn(u64) -> BlindedMessagePath { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(chan) => chan.signer_maybe_unblocked(logger, path_for_release_htlc).map(|r| Some(r)), @@ -1664,10 +1653,7 @@ where /// Should be called when the peer is disconnected. Returns true if the channel can be resumed /// when the peer reconnects (via [`Self::peer_connected_get_handshake`]). If not, the channel /// must be immediately closed. - pub fn peer_disconnected_is_resumable(&mut self, logger: &L) -> DisconnectResult - where - L::Target: Logger, - { + pub fn peer_disconnected_is_resumable(&mut self, logger: &L) -> DisconnectResult { let is_resumable = match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(chan) => { @@ -1721,9 +1707,9 @@ where /// Should be called when the peer re-connects, returning an initial message which we should /// send our peer to begin the channel reconnection process. #[rustfmt::skip] - pub fn peer_connected_get_handshake( + pub fn peer_connected_get_handshake( &mut self, chain_hash: ChainHash, logger: &L, - ) -> ReconnectionMsg where L::Target: Logger { + ) -> ReconnectionMsg { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(chan) => @@ -1757,13 +1743,10 @@ where } #[rustfmt::skip] - pub fn maybe_handle_error_without_close( + pub fn maybe_handle_error_without_close( &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, user_config: &UserConfig, their_features: &InitFeatures, - ) -> Result, ()> - where - L::Target: Logger, - { + ) -> Result, ()> { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(_) => Ok(None), @@ -1796,12 +1779,9 @@ where } } - fn fail_interactive_tx_negotiation( + fn fail_interactive_tx_negotiation( &mut self, reason: AbortReason, logger: &L, - ) -> (ChannelError, Option) - where - L::Target: Logger, - { + ) -> (ChannelError, Option) { let logger = WithChannelContext::from(logger, &self.context(), None); log_info!(logger, "Failed interactive transaction negotiation: {reason}"); @@ -1825,12 +1805,9 @@ where (ChannelError::Abort(reason), splice_funding_failed) } - pub fn tx_add_input( + pub fn tx_add_input( &mut self, msg: &msgs::TxAddInput, logger: &L, - ) -> Result)> - where - L::Target: Logger, - { + ) -> Result)> { match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_add_input(msg) @@ -1844,12 +1821,9 @@ where } } - pub fn tx_add_output( + pub fn tx_add_output( &mut self, msg: &msgs::TxAddOutput, logger: &L, - ) -> Result)> - where - L::Target: Logger, - { + ) -> Result)> { match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_add_output(msg) @@ -1863,12 +1837,9 @@ where } } - pub fn tx_remove_input( + pub fn tx_remove_input( &mut self, msg: &msgs::TxRemoveInput, logger: &L, - ) -> Result)> - 
where - L::Target: Logger, - { + ) -> Result)> { match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_remove_input(msg) @@ -1882,12 +1853,9 @@ where } } - pub fn tx_remove_output( + pub fn tx_remove_output( &mut self, msg: &msgs::TxRemoveOutput, logger: &L, - ) -> Result)> - where - L::Target: Logger, - { + ) -> Result)> { match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_remove_output(msg) @@ -1901,12 +1869,9 @@ where } } - pub fn tx_complete( + pub fn tx_complete( &mut self, msg: &msgs::TxComplete, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result)> - where - L::Target: Logger, - { + ) -> Result)> { let tx_complete_action = match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_complete(msg) @@ -1971,12 +1936,9 @@ where Ok(TxCompleteResult { interactive_tx_msg_send, event_unsigned_tx, funding_tx_signed }) } - pub fn tx_abort( + pub fn tx_abort( &mut self, msg: &msgs::TxAbort, logger: &L, - ) -> Result<(Option, Option), ChannelError> - where - L::Target: Logger, - { + ) -> Result<(Option, Option), ChannelError> { // If we have not sent a `tx_abort` message for this negotiation previously, we need to echo // back a tx_abort message according to the spec: // https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L560-L561 @@ -2043,12 +2005,9 @@ where } #[rustfmt::skip] - pub fn funding_signed( + pub fn funding_signed( &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(&mut FundedChannel, ChannelMonitor<::EcdsaSigner>), ChannelError> - where - L::Target: Logger - { + ) -> Result<(&mut FundedChannel, ChannelMonitor<::EcdsaSigner>), ChannelError> { let phase = core::mem::replace(&mut self.phase, ChannelPhase::Undefined); let result = if let ChannelPhase::UnfundedOutboundV1(chan) = phase { let channel_state = chan.context.channel_state; @@ -2142,13 +2101,10 @@ where Ok(()) } - pub fn funding_transaction_signed( + pub fn funding_transaction_signed( &mut self, funding_txid_signed: Txid, witnesses: Vec, best_block_height: u32, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result - where - L::Target: Logger, - { + ) -> Result { let (context, funding, pending_splice) = match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::UnfundedV2(channel) => (&mut channel.context, &channel.funding, None), @@ -2319,12 +2275,9 @@ where } #[rustfmt::skip] - pub fn commitment_signed( + pub fn commitment_signed( &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) -> Result<(Option::EcdsaSigner>>, Option), ChannelError> - where - L::Target: Logger - { + ) -> Result<(Option::EcdsaSigner>>, Option), ChannelError> { let phase = core::mem::replace(&mut self.phase, ChannelPhase::Undefined); match phase { ChannelPhase::UnfundedV2(chan) => { @@ -3342,9 +3295,9 @@ where fn received_msg(&self) -> &'static str; #[rustfmt::skip] - fn check_counterparty_commitment_signature( + fn check_counterparty_commitment_signature( &self, sig: &Signature, holder_commitment_point: &HolderCommitmentPoint, logger: &L - ) -> Result where L::Target: Logger { + ) -> Result { let funding_script = self.funding().get_funding_redeemscript(); let commitment_data = self.context().build_commitment_transaction(self.funding(), @@ 
-3365,13 +3318,10 @@ where } #[rustfmt::skip] - fn initial_commitment_signed( + fn initial_commitment_signed( &mut self, channel_id: ChannelId, counterparty_signature: Signature, holder_commitment_point: &mut HolderCommitmentPoint, best_block: BestBlock, signer_provider: &SP, logger: &L, - ) -> Result<(ChannelMonitor<::EcdsaSigner>, CommitmentTransaction), ChannelError> - where - L::Target: Logger - { + ) -> Result<(ChannelMonitor<::EcdsaSigner>, CommitmentTransaction), ChannelError> { let initial_commitment_tx = match self.check_counterparty_commitment_signature(&counterparty_signature, holder_commitment_point, logger) { Ok(res) => res, Err(ChannelError::Close(e)) => { @@ -3560,7 +3510,7 @@ where SP::Target: SignerProvider, { #[rustfmt::skip] - fn new_for_inbound_channel<'a, ES: EntropySource, F: FeeEstimator, L: Deref>( + fn new_for_inbound_channel<'a, ES: EntropySource, F: FeeEstimator, L: Logger>( fee_estimator: &'a LowerBoundedFeeEstimator, entropy_source: &'a ES, signer_provider: &'a SP, @@ -3580,7 +3530,6 @@ where open_channel_fields: msgs::CommonOpenChannelFields, ) -> Result<(FundingScope, ChannelContext), ChannelError> where - L::Target: Logger, SP::Target: SignerProvider, { let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id), None); @@ -3903,7 +3852,7 @@ where } #[rustfmt::skip] - fn new_for_outbound_channel<'a, ES: EntropySource, F: FeeEstimator, L: Deref>( + fn new_for_outbound_channel<'a, ES: EntropySource, F: FeeEstimator, L: Logger>( fee_estimator: &'a LowerBoundedFeeEstimator, entropy_source: &'a ES, signer_provider: &'a SP, @@ -3923,7 +3872,6 @@ where ) -> Result<(FundingScope, ChannelContext), APIError> where SP::Target: SignerProvider, - L::Target: Logger, { // This will be updated with the counterparty contribution if this is a dual-funded channel let channel_value_satoshis = funding_satoshis; @@ -5121,16 +5069,13 @@ where Ok(()) } - fn validate_commitment_signed( + fn validate_commitment_signed( &self, funding: &FundingScope, transaction_number: u64, commitment_point: PublicKey, msg: &msgs::CommitmentSigned, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result< (HolderCommitmentTransaction, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>), ChannelError, - > - where - L::Target: Logger, - { + > { let funding_script = funding.get_funding_redeemscript(); let commitment_data = self.build_commitment_transaction( @@ -5252,13 +5197,10 @@ where Ok((holder_commitment_tx, commitment_data.htlcs_included)) } - fn can_send_update_fee( + fn can_send_update_fee( &self, funding: &FundingScope, feerate_per_kw: u32, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> bool - where - L::Target: Logger, - { + ) -> bool { // Before proposing a feerate update, check that we can actually afford the new fee. let dust_exposure_limiting_feerate = self.get_dust_exposure_limiting_feerate(&fee_estimator, funding.get_channel_type()); @@ -5333,12 +5275,9 @@ where return true; } - fn can_accept_incoming_htlc( + fn can_accept_incoming_htlc( &self, funding: &FundingScope, dust_exposure_limiting_feerate: Option, logger: &L, - ) -> Result<(), LocalHTLCFailureReason> - where - L::Target: Logger, - { + ) -> Result<(), LocalHTLCFailureReason> { // The fee spike buffer (an additional nondust HTLC) we keep for the remote if the channel // is not zero fee. This deviates from the spec because the fee spike buffer requirement // doesn't exist on the receiver's side, only on the sender's. 
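// A minimal, self-contained sketch of the signature change repeated in every
// hunk above (explicit in `new_for_inbound_channel` and
// `new_for_outbound_channel`): the indirect `L: Deref` bound plus a
// `where L::Target: Logger` clause becomes a direct `L: Logger` bound.
// `Logger`, `Record` and `StdoutLogger` below are local stand-ins, not LDK's
// real definitions:

use core::ops::Deref;

struct Record;

trait Logger {
	fn log(&self, record: Record);
}

// Old style: generic over any smart pointer whose target implements `Logger`.
fn do_log_old<L: Deref>(logger: &L)
where
	L::Target: Logger,
{
	logger.log(Record);
}

// New style, as in this patch: bound directly on the trait. The `where`
// clause disappears, which is what shrinks nearly every signature in this diff.
fn do_log_new<L: Logger>(logger: &L) {
	logger.log(Record);
}

struct StdoutLogger;

impl Logger for StdoutLogger {
	fn log(&self, _record: Record) {
		println!("got a log record");
	}
}

fn main() {
	let logger = StdoutLogger;
	do_log_old(&&logger); // `&StdoutLogger` derefs to `StdoutLogger`
	do_log_new(&logger);
}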
@@ -5464,9 +5403,7 @@ where /// which peer generated this transaction and "to whom" this transaction flows. #[inline] #[rustfmt::skip] - fn build_commitment_transaction(&self, funding: &FundingScope, commitment_number: u64, per_commitment_point: &PublicKey, local: bool, generated_by_local: bool, logger: &L) -> CommitmentData<'_> - where L::Target: Logger - { + fn build_commitment_transaction(&self, funding: &FundingScope, commitment_number: u64, per_commitment_point: &PublicKey, local: bool, generated_by_local: bool, logger: &L) -> CommitmentData<'_> { let broadcaster_dust_limit_sat = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis }; let feerate_per_kw = self.get_commitment_feerate(funding, generated_by_local); @@ -6319,10 +6256,10 @@ where /// Only allowed after [`FundingScope::channel_transaction_parameters`] is set. #[rustfmt::skip] - fn get_funding_signed_msg( + fn get_funding_signed_msg( &mut self, channel_parameters: &ChannelTransactionParameters, logger: &L, counterparty_initial_commitment_tx: CommitmentTransaction, - ) -> Option where L::Target: Logger { + ) -> Option { let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", @@ -6424,12 +6361,11 @@ where } } - fn get_initial_counterparty_commitment_signatures( + fn get_initial_counterparty_commitment_signatures( &self, funding: &FundingScope, logger: &L, ) -> Option<(Signature, Vec)> where SP::Target: SignerProvider, - L::Target: Logger, { let mut commitment_number = self.counterparty_next_commitment_transaction_number; let mut commitment_point = self.counterparty_next_commitment_point.unwrap(); @@ -6469,12 +6405,11 @@ where } } - fn get_initial_commitment_signed_v2( + fn get_initial_commitment_signed_v2( &self, funding: &FundingScope, logger: &L, ) -> Option where SP::Target: SignerProvider, - L::Target: Logger, { let signatures = self.get_initial_counterparty_commitment_signatures(funding, logger); if let Some((signature, htlc_signatures)) = signatures { @@ -6521,13 +6456,10 @@ where } #[rustfmt::skip] - fn check_for_funding_tx_confirmed( + fn check_for_funding_tx_confirmed( &mut self, funding: &mut FundingScope, block_hash: &BlockHash, height: u32, index_in_block: usize, tx: &mut ConfirmedTransaction, logger: &L, - ) -> Result - where - L::Target: Logger, - { + ) -> Result { let funding_txo = match funding.get_funding_txo() { Some(funding_txo) => funding_txo, None => { @@ -7306,10 +7238,10 @@ where } #[rustfmt::skip] - fn check_remote_fee( + fn check_remote_fee( channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator, feerate_per_kw: u32, cur_feerate_per_kw: Option, logger: &L - ) -> Result<(), ChannelError> where L::Target: Logger { + ) -> Result<(), ChannelError> { if channel_type.supports_anchor_zero_fee_commitments() { if feerate_per_kw != 0 { let err = "Zero Fee Channels must never attempt to use a fee".to_owned(); @@ -7459,11 +7391,9 @@ where /// /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is /// disconnected). 
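// A sketch of the mixed-bound case seen in
// `get_initial_counterparty_commitment_signatures` above: only the logger
// bound moves into the generic list, while `SP::Target: SignerProvider`
// keeps its `Deref`-style form (presumably converted by another patch in
// this series). All trait and type names below are local stand-ins:

use core::ops::Deref;

trait Logger {
	fn log(&self, msg: &str);
}

trait SignerProvider {
	fn name(&self) -> &'static str;
}

fn sign_and_log<SP: Deref, L: Logger>(signer_provider: &SP, logger: &L)
where
	SP::Target: SignerProvider, // unchanged by this patch
{
	logger.log(signer_provider.name());
}

struct Keys;
impl SignerProvider for Keys {
	fn name(&self) -> &'static str {
		"keys"
	}
}

struct PrintLogger;
impl Logger for PrintLogger {
	fn log(&self, msg: &str) {
		println!("{msg}");
	}
}

fn main() {
	sign_and_log(&&Keys, &PrintLogger);
}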
- pub fn claim_htlc_while_disconnected_dropping_mon_update_legacy( + pub fn claim_htlc_while_disconnected_dropping_mon_update_legacy( &mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L, - ) where - L::Target: Logger, - { + ) { // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc` // (see equivalent if condition there). assert!(!self.context.channel_state.can_generate_new_commitment()); @@ -7476,14 +7406,11 @@ where } } - fn get_update_fulfill_htlc( + fn get_update_fulfill_htlc( &mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, payment_info: Option, attribution_data: Option, logger: &L, - ) -> UpdateFulfillFetch - where - L::Target: Logger, - { + ) -> UpdateFulfillFetch { // Either ChannelReady got set (which means it won't be unset) or there is no way any // caller thought we could have something claimed (cause we wouldn't have accepted in an // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us, @@ -7630,14 +7557,11 @@ where UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, update_blocked: false } } - pub fn get_update_fulfill_htlc_and_commit( + pub fn get_update_fulfill_htlc_and_commit( &mut self, htlc_id: u64, payment_preimage: PaymentPreimage, payment_info: Option, attribution_data: Option, logger: &L, - ) -> UpdateFulfillCommitFetch - where - L::Target: Logger, - { + ) -> UpdateFulfillCommitFetch { let release_cs_monitor = self.context.blocked_monitor_updates.is_empty(); match self.get_update_fulfill_htlc( htlc_id, @@ -7697,12 +7621,9 @@ where /// Returns `Err` (always with [`ChannelError::Ignore`]) if the HTLC could not be failed (e.g. /// if it was already resolved). Otherwise returns `Ok`. - pub fn queue_fail_htlc( + pub fn queue_fail_htlc( &mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L, - ) -> Result<(), ChannelError> - where - L::Target: Logger, - { + ) -> Result<(), ChannelError> { self.fail_htlc(htlc_id_arg, err_packet, true, logger) .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?")) } @@ -7711,12 +7632,9 @@ where /// want to fail blinded HTLCs where we are not the intro node. /// /// See [`Self::queue_fail_htlc`] for more info. - pub fn queue_fail_malformed_htlc( + pub fn queue_fail_malformed_htlc( &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L, - ) -> Result<(), ChannelError> - where - L::Target: Logger, - { + ) -> Result<(), ChannelError> { self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger) .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?")) } @@ -7724,10 +7642,10 @@ where /// Returns `Err` (always with [`ChannelError::Ignore`]) if the HTLC could not be failed (e.g. /// if it was already resolved). Otherwise returns `Ok`. #[rustfmt::skip] - fn fail_htlc( + fn fail_htlc( &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool, logger: &L - ) -> Result, ChannelError> where L::Target: Logger { + ) -> Result, ChannelError> { if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { panic!("Was asked to fail an HTLC when channel was not in an operational state"); } @@ -7834,13 +7752,10 @@ where /// and the channel is now usable (and public), this may generate an announcement_signatures to /// reply with. 
#[rustfmt::skip] - pub fn channel_ready( + pub fn channel_ready( &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock, logger: &L - ) -> Result, ChannelError> - where - L::Target: Logger - { + ) -> Result, ChannelError> { if self.context.channel_state.is_peer_disconnected() { self.context.workaround_lnd_bug_4006 = Some(msg.clone()); return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned())); @@ -8069,13 +7984,10 @@ where Ok(()) } - pub fn initial_commitment_signed_v2( + pub fn initial_commitment_signed_v2( &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, logger: &L, - ) -> Result::EcdsaSigner>, ChannelError> - where - L::Target: Logger, - { + ) -> Result::EcdsaSigner>, ChannelError> { if let Some(signing_session) = self.context.interactive_tx_signing_session.as_ref() { if signing_session.has_received_tx_signatures() { let msg = "Received initial commitment_signed after peer's tx_signatures received!"; @@ -8139,13 +8051,10 @@ where /// Note that our `commitment_signed` send did not include a monitor update. This is due to: /// 1. Updates cannot be made since the state machine is paused until `tx_signatures`. /// 2. We're still able to abort negotiation until `tx_signatures`. - fn splice_initial_commitment_signed( + fn splice_initial_commitment_signed( &mut self, msg: &msgs::CommitmentSigned, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result, ChannelError> - where - L::Target: Logger, - { + ) -> Result, ChannelError> { debug_assert!(self .context .interactive_tx_signing_session @@ -8256,13 +8165,10 @@ where (nondust_htlc_sources, dust_htlcs) } - pub fn commitment_signed( + pub fn commitment_signed( &mut self, msg: &msgs::CommitmentSigned, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result, ChannelError> - where - L::Target: Logger, - { + ) -> Result, ChannelError> { self.commitment_signed_check_state()?; if !self.pending_funding().is_empty() { @@ -8299,13 +8205,10 @@ where self.commitment_signed_update_monitor(update, logger) } - pub fn commitment_signed_batch( + pub fn commitment_signed_batch( &mut self, batch: Vec, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result, ChannelError> - where - L::Target: Logger, - { + ) -> Result, ChannelError> { self.commitment_signed_check_state()?; let mut messages = BTreeMap::new(); @@ -8403,12 +8306,9 @@ where Ok(()) } - fn commitment_signed_update_monitor( + fn commitment_signed_update_monitor( &mut self, mut update: ChannelMonitorUpdateStep, logger: &L, - ) -> Result, ChannelError> - where - L::Target: Logger, - { + ) -> Result, ChannelError> { if self .holder_commitment_point .advance(&self.context.holder_signer, &self.context.secp_ctx, logger) @@ -8552,12 +8452,9 @@ where /// Public version of the below, checking relevant preconditions first. /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and /// returns `(None, Vec::new())`. 
- pub fn maybe_free_holding_cell_htlcs( + pub fn maybe_free_holding_cell_htlcs( &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> (Option, Vec<(HTLCSource, PaymentHash)>) - where - L::Target: Logger, - { + ) -> (Option, Vec<(HTLCSource, PaymentHash)>) { if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() { @@ -8569,12 +8466,9 @@ where /// Frees any pending commitment updates in the holding cell, generating the relevant messages /// for our counterparty. - fn free_holding_cell_htlcs( + fn free_holding_cell_htlcs( &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> (Option, Vec<(HTLCSource, PaymentHash)>) - where - L::Target: Logger, - { + ) -> (Option, Vec<(HTLCSource, PaymentHash)>) { assert!(matches!(self.context.channel_state, ChannelState::ChannelReady(_))); assert!(!self.context.channel_state.is_monitor_update_in_progress()); assert!(!self.context.channel_state.is_quiescent()); @@ -8777,7 +8671,7 @@ where /// /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc - pub fn revoke_and_ack( + pub fn revoke_and_ack( &mut self, msg: &msgs::RevokeAndACK, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, hold_mon_update: bool, ) -> Result< @@ -8787,10 +8681,7 @@ where Option, ), ChannelError, - > - where - L::Target: Logger, - { + > { if self.context.channel_state.is_quiescent() { return Err(ChannelError::WarnAndDisconnect( "Got revoke_and_ack message while quiescent".to_owned(), @@ -9201,13 +9092,10 @@ where } } - fn on_tx_signatures_exchange<'a, L: Deref>( + fn on_tx_signatures_exchange<'a, L: Logger>( &mut self, funding_tx: Transaction, best_block_height: u32, logger: &WithChannelContext<'a, L>, - ) -> (Option, Option) - where - L::Target: Logger, - { + ) -> (Option, Option) { debug_assert!(!self.context.channel_state.is_monitor_update_in_progress()); debug_assert!(!self.context.channel_state.is_awaiting_remote_revoke()); @@ -9259,12 +9147,9 @@ where } } - pub fn tx_signatures( + pub fn tx_signatures( &mut self, msg: &msgs::TxSignatures, best_block_height: u32, logger: &L, - ) -> Result - where - L::Target: Logger, - { + ) -> Result { let signing_session = if let Some(signing_session) = self.context.interactive_tx_signing_session.as_mut() { @@ -9336,11 +9221,9 @@ where /// Queues up an outbound update fee by placing it in the holding cell. You should call /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the /// commitment update. - pub fn queue_update_fee( + pub fn queue_update_fee( &mut self, feerate_per_kw: u32, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) where - L::Target: Logger, - { + ) { let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger); assert!(msg_opt.is_none(), "We forced holding cell?"); } @@ -9353,10 +9236,10 @@ where /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this /// [`FundedChannel`] if `force_holding_cell` is false. #[rustfmt::skip] - fn send_update_fee( + fn send_update_fee( &mut self, feerate_per_kw: u32, mut force_holding_cell: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) -> Option where L::Target: Logger { + ) -> Option { if !self.funding.is_outbound() { panic!("Cannot send fee from inbound channel"); } @@ -9406,7 +9289,7 @@ where /// completed. 
/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately. #[rustfmt::skip] - fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger { + fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) -> Result<(), ()> { assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete)); if !self.context.can_resume_on_reconnect() { return Err(()) @@ -9492,14 +9375,12 @@ where /// [`ChannelManager`]: super::channelmanager::ChannelManager /// [`chain::Watch`]: crate::chain::Watch /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress - fn monitor_updating_paused( + fn monitor_updating_paused( &mut self, resend_raa: bool, resend_commitment: bool, resend_channel_ready: bool, pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, logger: &L, - ) where - L::Target: Logger, - { + ) { log_trace!(logger, "Pausing channel monitor updates"); self.context.monitor_pending_revoke_and_ack |= resend_raa; @@ -9515,12 +9396,11 @@ where /// successfully and we should restore normal operation. Returns messages which should be sent /// to the remote side. #[rustfmt::skip] - pub fn monitor_updating_restored( + pub fn monitor_updating_restored( &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block_height: u32, path_for_release_htlc: CBP ) -> MonitorRestoreUpdates where - L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { assert!(self.context.channel_state.is_monitor_update_in_progress()); @@ -9668,9 +9548,7 @@ where } #[rustfmt::skip] - pub fn update_fee(&mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError> - where L::Target: Logger - { + pub fn update_fee(&mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError> { if self.funding.is_outbound() { return Err(ChannelError::close("Non-funding remote tried to update channel fee".to_owned())); } @@ -9696,9 +9574,9 @@ where /// Indicates that the signer may have some signatures for us, so we should retry if we're /// blocked. #[rustfmt::skip] - pub fn signer_maybe_unblocked( + pub fn signer_maybe_unblocked( &mut self, logger: &L, path_for_release_htlc: CBP - ) -> Result where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { + ) -> Result where CBP: Fn(u64) -> BlindedMessagePath { if let Some((commitment_number, commitment_secret)) = self.context.signer_pending_stale_state_verification.clone() { if let Ok(expected_point) = self.context.holder_signer.as_ref() .get_per_commitment_point(commitment_number, &self.context.secp_ctx) @@ -9808,11 +9686,10 @@ where }) } - fn get_last_revoke_and_ack( + fn get_last_revoke_and_ack( &mut self, path_for_release_htlc: CBP, logger: &L, ) -> Option where - L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath, { debug_assert!( @@ -9867,12 +9744,9 @@ where } /// Gets the last commitment update for immediate sending to our peer. 
- fn get_last_commitment_update_for_send( + fn get_last_commitment_update_for_send( &mut self, logger: &L, - ) -> Result - where - L::Target: Logger, - { + ) -> Result { let mut update_add_htlcs = Vec::new(); let mut update_fulfill_htlcs = Vec::new(); let mut update_fail_htlcs = Vec::new(); @@ -9984,10 +9858,7 @@ where } } - fn panic_on_stale_state(logger: &L) - where - L::Target: Logger, - { + fn panic_on_stale_state(logger: &L) { macro_rules! log_and_panic { ($err_msg: expr) => { log_error!(logger, $err_msg); @@ -10006,13 +9877,12 @@ where /// May panic if some calls other than message-handling calls (which will all Err immediately) /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call. #[rustfmt::skip] - pub fn channel_reestablish( + pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock, path_for_release_htlc: CBP, ) -> Result where - L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { if !self.context.channel_state.is_peer_disconnected() { @@ -10480,11 +10350,9 @@ where Ok(()) } - pub fn maybe_propose_closing_signed( + pub fn maybe_propose_closing_signed( &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result<(Option, Option<(Transaction, ShutdownResult)>), ChannelError> - where - L::Target: Logger, { // If we're waiting on a monitor persistence, that implies we're also waiting to send some // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't @@ -10564,16 +10432,13 @@ where } } - pub fn shutdown( + pub fn shutdown( &mut self, logger: &L, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown, ) -> Result< (Option, Option, Vec<(HTLCSource, PaymentHash)>), ChannelError, - > - where - L::Target: Logger, - { + > { if self.context.channel_state.is_peer_disconnected() { return Err(ChannelError::close( "Peer sent shutdown when we needed a channel_reestablish".to_owned(), @@ -10745,13 +10610,10 @@ where tx } - fn get_closing_signed_msg( + fn get_closing_signed_msg( &mut self, closing_tx: &ClosingTransaction, skip_remote_output: bool, fee_satoshis: u64, min_fee_satoshis: u64, max_fee_satoshis: u64, logger: &L, - ) -> Option - where - L::Target: Logger, - { + ) -> Option { let sig = match &self.context.holder_signer { ChannelSignerType::Ecdsa(ecdsa) => ecdsa .sign_closing_transaction( @@ -10806,12 +10668,10 @@ where } } - pub fn closing_signed( + pub fn closing_signed( &mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::ClosingSigned, logger: &L, ) -> Result<(Option, Option<(Transaction, ShutdownResult)>), ChannelError> - where - L::Target: Logger, { if self.is_shutdown_pending_signature() { return Err(ChannelError::Warn(String::from("Remote end sent us a closing_signed while fully shutdown and just waiting on the final closing signature"))); @@ -11055,9 +10915,9 @@ where /// When this function is called, the HTLC is already irrevocably committed to the channel; /// this function determines whether to fail the HTLC, or forward / claim it. 
#[rustfmt::skip] - pub fn can_accept_incoming_htlc( + pub fn can_accept_incoming_htlc( &self, fee_estimator: &LowerBoundedFeeEstimator, logger: L - ) -> Result<(), LocalHTLCFailureReason> where L::Target: Logger { + ) -> Result<(), LocalHTLCFailureReason> { if self.context.channel_state.is_local_shutdown_sent() { return Err(LocalHTLCFailureReason::ChannelClosed) } @@ -11268,9 +11128,7 @@ where } #[rustfmt::skip] - fn check_get_channel_ready(&mut self, height: u32, logger: &L) -> Option - where L::Target: Logger - { + fn check_get_channel_ready(&mut self, height: u32, logger: &L) -> Option { // Called: // * always when a new block/transactions are confirmed with the new height // * when funding is signed with a height of 0 @@ -11327,9 +11185,9 @@ where } #[rustfmt::skip] - fn get_channel_ready( + fn get_channel_ready( &mut self, logger: &L - ) -> Option where L::Target: Logger { + ) -> Option { if self.holder_commitment_point.can_advance() { self.context.signer_pending_channel_ready = false; Some(msgs::ChannelReady { @@ -11349,13 +11207,10 @@ where } /// Returns `Some` if a splice [`FundingScope`] was promoted. - fn maybe_promote_splice_funding( + fn maybe_promote_splice_funding( &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, block_height: u32, logger: &L, - ) -> Option - where - L::Target: Logger, - { + ) -> Option { debug_assert!(self.pending_splice.is_some()); let pending_splice = self.pending_splice.as_mut().unwrap(); @@ -11469,13 +11324,10 @@ where /// In the first case, we store the confirmation height and calculating the short channel id. /// In the second, we simply return an Err indicating we need to be force-closed now. #[rustfmt::skip] - pub fn transactions_confirmed( + pub fn transactions_confirmed( &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData, chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L - ) -> Result<(Option, Option), ClosureReason> - where - L::Target: Logger - { + ) -> Result<(Option, Option), ClosureReason> { for &(index_in_block, tx) in txdata.iter() { let mut confirmed_tx = ConfirmedTransaction::from(tx); @@ -11566,13 +11418,10 @@ where /// /// May return some HTLCs (and their payment_hash) which have timed out and should be failed /// back. - pub fn best_block_updated( + pub fn best_block_updated( &mut self, height: u32, highest_header_time: Option, chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L, - ) -> Result - where - L::Target: Logger, - { + ) -> Result { self.do_best_block_updated( height, highest_header_time, @@ -11582,13 +11431,10 @@ where } #[rustfmt::skip] - fn do_best_block_updated( + fn do_best_block_updated( &mut self, height: u32, highest_header_time: Option, chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L - ) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason> - where - L::Target: Logger - { + ) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason> { let mut timed_out_htlcs = Vec::new(); // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to // forward an HTLC when our counterparty should almost certainly just fail it for expiring @@ -11764,12 +11610,9 @@ where /// before the channel has reached channel_ready or splice_locked, and we can just wait for more /// blocks. 
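// `can_accept_incoming_htlc` above now takes `logger: L` by value under
// `L: Logger`, and later hunks in this patch pass plain references (for
// example `&self.logger`) into such parameters. That only type-checks if a
// reference to a logger is itself a logger, so the crate presumably carries
// a blanket impl along these lines (local stand-ins below, not LDK's real
// code):

trait Logger {
	fn log(&self, msg: &str);
}

// Assumed blanket impl: `&L` is a `Logger` wherever `L` is.
impl<T: Logger + ?Sized> Logger for &T {
	fn log(&self, msg: &str) {
		(**self).log(msg)
	}
}

struct PrintLogger;
impl Logger for PrintLogger {
	fn log(&self, msg: &str) {
		println!("{msg}");
	}
}

fn takes_logger_by_value<L: Logger>(logger: L) {
	logger.log("hello");
}

fn main() {
	let logger = PrintLogger;
	takes_logger_by_value(&logger); // `&PrintLogger: Logger` via the blanket impl
	takes_logger_by_value(logger); // owned loggers work too
}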
#[rustfmt::skip] - pub fn transaction_unconfirmed( + pub fn transaction_unconfirmed( &mut self, txid: &Txid, logger: &L, - ) -> Result<(), ClosureReason> - where - L::Target: Logger, - { + ) -> Result<(), ClosureReason> { let unconfirmed_funding = self .funding_and_pending_funding_iter_mut() .find(|funding| funding.get_funding_txid() == Some(*txid)); @@ -11846,13 +11689,10 @@ where } #[rustfmt::skip] - fn get_announcement_sigs( + fn get_announcement_sigs( &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block_height: u32, logger: &L - ) -> Option - where - L::Target: Logger - { + ) -> Option { if self.funding.funding_tx_confirmation_height == 0 || self.funding.funding_tx_confirmation_height + 5 > best_block_height { return None; } @@ -12059,7 +11899,7 @@ where /// May panic if called on a channel that wasn't immediately-previously /// self.remove_uncommitted_htlcs_and_mark_paused()'d #[rustfmt::skip] - fn get_channel_reestablish(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger { + fn get_channel_reestablish(&mut self, logger: &L) -> msgs::ChannelReestablish { assert!(self.context.channel_state.is_peer_disconnected()); assert_ne!(self.context.counterparty_next_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); // This is generally the first function which gets called on any given channel once we're @@ -12116,13 +11956,10 @@ where /// Includes the witness weight for this input (e.g. P2WPKH_WITNESS_WEIGHT=109 for typical P2WPKH inputs). /// - `change_script`: an option change output script. If `None` and needed, one will be /// generated by `SignerProvider::get_destination_script`. - pub fn splice_channel( + pub fn splice_channel( &mut self, contribution: SpliceContribution, funding_feerate_per_kw: u32, locktime: u32, logger: &L, - ) -> Result, APIError> - where - L::Target: Logger, - { + ) -> Result, APIError> { if self.holder_commitment_point.current_point().is_none() { return Err(APIError::APIMisuseError { err: format!( @@ -12465,13 +12302,10 @@ where Ok(()) } - pub(crate) fn splice_init( + pub(crate) fn splice_init( &mut self, msg: &msgs::SpliceInit, our_funding_contribution_satoshis: i64, signer_provider: &SP, entropy_source: &ES, holder_node_id: &PublicKey, logger: &L, - ) -> Result - where - L::Target: Logger, - { + ) -> Result { let our_funding_contribution = SignedAmount::from_sat(our_funding_contribution_satoshis); let splice_funding = self.validate_splice_init(msg, our_funding_contribution)?; @@ -12535,13 +12369,10 @@ where }) } - pub(crate) fn splice_ack( + pub(crate) fn splice_ack( &mut self, msg: &msgs::SpliceAck, signer_provider: &SP, entropy_source: &ES, holder_node_id: &PublicKey, logger: &L, - ) -> Result, ChannelError> - where - L::Target: Logger, - { + ) -> Result, ChannelError> { let splice_funding = self.validate_splice_ack(msg)?; log_info!( @@ -12688,13 +12519,10 @@ where Ok((holder_balance_floor, counterparty_balance_floor)) } - pub fn splice_locked( + pub fn splice_locked( &mut self, msg: &msgs::SpliceLocked, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, block_height: u32, logger: &L, - ) -> Result, ChannelError> - where - L::Target: Logger, - { + ) -> Result, ChannelError> { log_info!(logger, "Received splice_locked txid {} from our peer", msg.splice_txid,); let pending_splice = match self.pending_splice.as_mut() { @@ -12735,15 +12563,12 @@ where /// Queues up an outbound HTLC to send by placing it in the holding cell. 
You should call /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the /// commitment update. - pub fn queue_add_htlc( + pub fn queue_add_htlc( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option, blinding_point: Option, accountable: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result<(), (LocalHTLCFailureReason, String)> - where - L::Target: Logger, - { + ) -> Result<(), (LocalHTLCFailureReason, String)> { self.send_htlc( amount_msat, payment_hash, @@ -12783,15 +12608,12 @@ where /// on this [`FundedChannel`] if `force_holding_cell` is false. /// /// `Err`'s will always be temporary channel failures. - fn send_htlc( + fn send_htlc( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, skimmed_fee_msat: Option, blinding_point: Option, hold_htlc: bool, accountable: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result - where - L::Target: Logger, - { + ) -> Result { if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) || self.context.channel_state.is_local_shutdown_sent() || self.context.channel_state.is_remote_shutdown_sent() @@ -12916,10 +12738,7 @@ where .expect("At least one FundingScope is always provided") } - fn build_commitment_no_status_check(&mut self, logger: &L) -> ChannelMonitorUpdate - where - L::Target: Logger, - { + fn build_commitment_no_status_check(&mut self, logger: &L) -> ChannelMonitorUpdate { log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed..."); // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we // fail to generate this, we still are at least at a position where upgrading their status @@ -13037,12 +12856,9 @@ where } #[rustfmt::skip] - fn build_commitment_no_state_update( + fn build_commitment_no_state_update( &self, funding: &FundingScope, logger: &L, - ) -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction) - where - L::Target: Logger, - { + ) -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction) { let commitment_data = self.context.build_commitment_transaction( funding, self.context.counterparty_next_commitment_transaction_number, &self.context.counterparty_next_commitment_point.unwrap(), false, true, logger, @@ -13054,12 +12870,9 @@ where /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed /// generation when we shouldn't change HTLC/channel state. 
- fn send_commitment_no_state_update( + fn send_commitment_no_state_update( &self, logger: &L, - ) -> Result, ChannelError> - where - L::Target: Logger, - { + ) -> Result, ChannelError> { core::iter::once(&self.funding) .chain(self.pending_funding().iter()) .map(|funding| self.send_commitment_no_state_update_for_funding(funding, logger)) @@ -13067,12 +12880,9 @@ where } #[rustfmt::skip] - fn send_commitment_no_state_update_for_funding( + fn send_commitment_no_state_update_for_funding( &self, funding: &FundingScope, logger: &L, - ) -> Result - where - L::Target: Logger, - { + ) -> Result { // Get the fee tests from `build_commitment_no_state_update` #[cfg(any(test, fuzzing))] self.build_commitment_no_state_update(funding, logger); @@ -13135,15 +12945,12 @@ where /// /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info. - pub fn send_htlc_and_commit( + pub fn send_htlc_and_commit( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option, hold_htlc: bool, accountable: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result, ChannelError> - where - L::Target: Logger, - { + ) -> Result, ChannelError> { let send_res = self.send_htlc( amount_msat, payment_hash, @@ -13196,17 +13003,14 @@ where /// Begins the shutdown process, getting a message for the remote peer and returning all /// holding cell HTLCs for payment failure. - pub fn get_shutdown( + pub fn get_shutdown( &mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option, override_shutdown_script: Option, logger: &L, ) -> Result< (msgs::Shutdown, Option, Vec<(HTLCSource, PaymentHash)>), APIError, - > - where - L::Target: Logger, - { + > { let logger = WithChannelContext::from(logger, &self.context, None); if self.context.channel_state.is_local_stfu_sent() @@ -13358,12 +13162,9 @@ where } #[rustfmt::skip] - pub fn propose_quiescence( + pub fn propose_quiescence( &mut self, logger: &L, action: QuiescentAction, - ) -> Result, &'static str> - where - L::Target: Logger, - { + ) -> Result, &'static str> { log_debug!(logger, "Attempting to initiate quiescence"); if !self.context.is_usable() { @@ -13399,10 +13200,7 @@ where // Assumes we are either awaiting quiescence or our counterparty has requested quiescence. #[rustfmt::skip] - pub fn send_stfu(&mut self, logger: &L) -> Result - where - L::Target: Logger, - { + pub fn send_stfu(&mut self, logger: &L) -> Result { debug_assert!(!self.context.channel_state.is_local_stfu_sent()); debug_assert!( self.context.channel_state.is_awaiting_quiescence() @@ -13437,9 +13235,9 @@ where } #[rustfmt::skip] - pub fn stfu( + pub fn stfu( &mut self, msg: &msgs::Stfu, logger: &L - ) -> Result, ChannelError> where L::Target: Logger { + ) -> Result, ChannelError> { if self.context.channel_state.is_quiescent() { return Err(ChannelError::Warn("Channel is already quiescent".to_owned())); } @@ -13540,12 +13338,9 @@ where Ok(None) } - pub fn try_send_stfu( + pub fn try_send_stfu( &mut self, logger: &L, - ) -> Result, ChannelError> - where - L::Target: Logger, - { + ) -> Result, ChannelError> { // We must never see both stfu flags set, we always set the quiescent flag instead. 
debug_assert!( !(self.context.channel_state.is_local_stfu_sent() @@ -13639,11 +13434,11 @@ where #[allow(dead_code)] // TODO(dual_funding): Remove once opending V2 channels is enabled. #[rustfmt::skip] - pub fn new( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64, temporary_channel_id: Option, logger: L - ) -> Result, APIError> where L::Target: Logger { + ) -> Result, APIError> { let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { // Protocol level safety check in place, although it should never happen because @@ -13690,7 +13485,7 @@ where /// Only allowed after [`FundingScope::channel_transaction_parameters`] is set. #[rustfmt::skip] - fn get_funding_created_msg(&mut self, logger: &L) -> Option where L::Target: Logger { + fn get_funding_created_msg(&mut self, logger: &L) -> Option { let commitment_data = self.context.build_commitment_transaction(&self.funding, self.context.counterparty_next_commitment_transaction_number, &self.context.counterparty_next_commitment_point.unwrap(), false, false, logger); @@ -13735,8 +13530,8 @@ where /// Do NOT broadcast the funding transaction until after a successful funding_signed call! /// If an Err is returned, it is a ChannelError::Close. #[rustfmt::skip] - pub fn get_funding_created(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L) - -> Result, (Self, ChannelError)> where L::Target: Logger { + pub fn get_funding_created(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L) + -> Result, (Self, ChannelError)> { if !self.funding.is_outbound() { panic!("Tried to create outbound funding_created message on an inbound channel!"); } @@ -13775,10 +13570,10 @@ where /// not of our ability to open any channel at all. Thus, on error, we should first call this /// and see if we get a new `OpenChannel` message, otherwise the channel is failed. #[rustfmt::skip] - pub(crate) fn maybe_handle_error_without_close( + pub(crate) fn maybe_handle_error_without_close( &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, user_config: &UserConfig, their_features: &InitFeatures, - ) -> Result where L::Target: Logger, { + ) -> Result { self.context.maybe_downgrade_channel_features( &mut self.funding, fee_estimator, user_config, their_features, )?; @@ -13792,9 +13587,9 @@ where } #[rustfmt::skip] - pub fn get_open_channel( + pub fn get_open_channel( &mut self, chain_hash: ChainHash, _logger: &L - ) -> Option where L::Target: Logger { + ) -> Option { if !self.funding.is_outbound() { panic!("Tried to open a channel for an inbound channel?"); } @@ -13864,16 +13659,13 @@ where /// Handles a funding_signed message from the remote end. /// If this call is successful, broadcast the funding transaction (and not before!) 
- pub fn funding_signed( + pub fn funding_signed( mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L, ) -> Result< (FundedChannel, ChannelMonitor<::EcdsaSigner>), (OutboundV1Channel, ChannelError), - > - where - L::Target: Logger, - { + > { if !self.funding.is_outbound() { let err = "Received funding_signed for an inbound channel?"; return Err((self, ChannelError::close(err.to_owned()))); @@ -13937,9 +13729,9 @@ where /// Indicates that the signer may have some signatures for us, so we should retry if we're /// blocked. #[rustfmt::skip] - pub fn signer_maybe_unblocked( + pub fn signer_maybe_unblocked( &mut self, chain_hash: ChainHash, logger: &L - ) -> (Option, Option) where L::Target: Logger { + ) -> (Option, Option) { // If we were pending a commitment point, retry the signer and advance to an // available state. if self.unfunded_context.holder_commitment_point.is_none() { @@ -14023,12 +13815,12 @@ where /// Creates a new channel from a remote sides' request for one. /// Assumes chain_hash has already been checked and corresponds with what we expect! #[rustfmt::skip] - pub fn new( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig, current_chain_height: u32, logger: &L, is_0conf: bool, - ) -> Result, ChannelError> where L::Target: Logger { + ) -> Result, ChannelError> { let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id), None); // First check the channel type is known, failing before we do anything else if we don't @@ -14076,10 +13868,7 @@ where /// should be sent back to the counterparty node. /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - pub fn accept_inbound_channel(&mut self, logger: &L) -> Option - where - L::Target: Logger, - { + pub fn accept_inbound_channel(&mut self, logger: &L) -> Option { if self.funding.is_outbound() { panic!("Tried to send accept_channel for an outbound channel?"); } @@ -14102,9 +13891,9 @@ where /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel #[rustfmt::skip] - fn generate_accept_channel_message( + fn generate_accept_channel_message( &mut self, _logger: &L - ) -> Option where L::Target: Logger { + ) -> Option { let first_per_commitment_point = match self.unfunded_context.holder_commitment_point { Some(holder_commitment_point) if holder_commitment_point.can_advance() => { self.signer_pending_accept_channel = false; @@ -14150,16 +13939,13 @@ where /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel #[cfg(test)] - pub fn get_accept_channel_message( + pub fn get_accept_channel_message( &mut self, logger: &L, - ) -> Option - where - L::Target: Logger, - { + ) -> Option { self.generate_accept_channel_message(logger) } - pub fn funding_created( + pub fn funding_created( mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L, ) -> Result< @@ -14169,10 +13955,7 @@ where ChannelMonitor<::EcdsaSigner>, ), (Self, ChannelError), - > - where - L::Target: Logger, - { + > { if self.funding.is_outbound() { let err = "Received funding_created for an outbound channel?"; return Err((self, ChannelError::close(err.to_owned()))); @@ -14256,9 +14039,9 @@ where /// Indicates that the signer may have some signatures for us, so we should retry if we're /// blocked. 
#[rustfmt::skip] - pub fn signer_maybe_unblocked( + pub fn signer_maybe_unblocked( &mut self, logger: &L - ) -> Option where L::Target: Logger { + ) -> Option { if self.unfunded_context.holder_commitment_point.is_none() { self.unfunded_context.holder_commitment_point = HolderCommitmentPoint::new(&self.context.holder_signer, &self.context.secp_ctx); } @@ -14293,13 +14076,13 @@ where { #[allow(dead_code)] // TODO(dual_funding): Remove once creating V2 channels is enabled. #[rustfmt::skip] - pub fn new_outbound( + pub fn new_outbound( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64, funding_inputs: Vec, user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64, funding_confirmation_target: ConfirmationTarget, logger: L, - ) -> Result where L::Target: Logger { + ) -> Result { let channel_keys_id = signer_provider.generate_channel_keys_id(false, user_id); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); @@ -14435,12 +14218,12 @@ where /// TODO(dual_funding): Allow contributions, pass intended amount and inputs #[allow(dead_code)] // TODO(dual_funding): Remove once V2 channels is enabled. #[rustfmt::skip] - pub fn new_inbound( + pub fn new_inbound( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, holder_node_id: PublicKey, counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, their_features: &InitFeatures, msg: &msgs::OpenChannelV2, user_id: u128, config: &UserConfig, current_chain_height: u32, logger: &L, - ) -> Result where L::Target: Logger, { + ) -> Result { // TODO(dual_funding): Take these as input once supported let (our_funding_contribution, our_funding_contribution_sats) = (SignedAmount::ZERO, 0u64); let our_funding_inputs = Vec::new(); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 1d3f8ecca6d..7ee7b6de02f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1163,12 +1163,10 @@ impl ClaimablePayments { /// /// If no payment is found, `Err(Vec::new())` is returned. #[rustfmt::skip] - fn begin_claiming_payment( + fn begin_claiming_payment( &mut self, payment_hash: PaymentHash, node_signer: &S, logger: &L, inbound_payment_id_secret: &[u8; 32], custom_tlvs_known: bool, - ) -> Result<(Vec, ClaimingPayment), Vec> - where L::Target: Logger, - { + ) -> Result<(Vec, ClaimingPayment), Vec> { match self.claimable_payments.remove(&payment_hash) { Some(payment) => { let mut receiver_node_id = node_signer.get_node_id(Recipient::Node) @@ -1807,9 +1805,7 @@ pub trait AChannelManager { /// A type implementing [`MessageRouter`]. type MessageRouter: MessageRouter; /// A type implementing [`Logger`]. - type Logger: Logger + ?Sized; - /// A type that may be dereferenced to [`Self::Logger`]. - type L: Deref; + type Logger: Logger; /// Returns a reference to the actual [`ChannelManager`] object. 
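// A sketch of the `AChannelManager` simplification above: with `Deref`
// bounds the trait needed two associated items, the logger type itself and
// the pointer type `L` that dereferences to it; with a direct `L: Logger`
// bound a single associated type is enough, and call sites drop the
// `&*self.logger` reborrow for a plain `&self.logger`. Toy before/after,
// with a local `Logger` stand-in:

trait Logger {
	fn log(&self, msg: &str);
}

mod before {
	use core::ops::Deref;

	pub trait AManager {
		type Logger: super::Logger + ?Sized;
		// A second associated item existed only to name the pointer type.
		type L: Deref<Target = Self::Logger>;
	}
}

mod after {
	pub trait AManager {
		type Logger: super::Logger;
	}
}

struct PrintLogger;
impl Logger for PrintLogger {
	fn log(&self, msg: &str) {
		println!("{msg}");
	}
}

struct Manager;
impl after::AManager for Manager {
	type Logger = PrintLogger; // no pointer type left to name
}

fn main() {}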
fn get_cm( &self, @@ -1822,7 +1818,7 @@ pub trait AChannelManager { Self::FeeEstimator, Self::Router, Self::MessageRouter, - Self::L, + Self::Logger, >; } @@ -1835,12 +1831,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > AChannelManager for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { type Watch = M::Target; type M = M; @@ -1853,8 +1848,7 @@ where type FeeEstimator = F; type Router = R; type MessageRouter = MR; - type Logger = L::Target; - type L = L; + type Logger = L; fn get_cm(&self) -> &ChannelManager { self } @@ -2608,11 +2602,10 @@ pub struct ChannelManager< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { config: RwLock, chain_hash: ChainHash, @@ -3392,12 +3385,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { /// Constructs a new `ChannelManager` to hold several channels and route between them. /// @@ -3635,7 +3627,7 @@ where }; match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config, - self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &*self.logger) + self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &self.logger) { Ok(res) => res, Err(e) => { @@ -6913,7 +6905,7 @@ where match decode_incoming_update_add_htlc_onion( &update_add_htlc, &self.node_signer, - &*self.logger, + &self.logger, &self.secp_ctx, ) { Ok(decoded_onion) => match decoded_onion { @@ -13521,12 +13513,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { #[cfg(not(c_bindings))] create_offer_builder!(self, OfferBuilder<'_, DerivedMetadata, secp256k1::All>); @@ -14392,12 +14383,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > BaseMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { fn provided_node_features(&self) -> NodeFeatures { provided_node_features(&self.config.read().unwrap()) @@ -14757,12 +14747,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > EventsProvider for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { /// Processes events that must be periodically handled. 
/// @@ -14786,12 +14775,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > chain::Listen for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { { @@ -14841,12 +14829,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > chain::Confirm for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { #[rustfmt::skip] fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { @@ -15008,12 +14995,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { /// Calls a function which handles an on-chain event (blocks dis/connected, transactions /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by @@ -15364,12 +15350,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > ChannelMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { fn handle_open_channel(&self, counterparty_node_id: PublicKey, message: &msgs::OpenChannel) { // Note that we never need to persist the updated ChannelManager for an inbound @@ -15933,12 +15918,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > OffersMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { #[rustfmt::skip] fn handle_message( @@ -16145,12 +16129,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > AsyncPaymentsMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { fn handle_offer_paths_request( &self, message: OfferPathsRequest, context: AsyncPaymentsContext, @@ -16384,12 +16367,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > DNSResolverMessageHandler for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { fn handle_dnssec_query( &self, _message: DNSSECQuery, _responder: Option, @@ -16446,12 +16428,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > NodeIdLookUp for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { fn next_node_id(&self, short_channel_id: u64) -> Option { self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey) @@ -16956,12 +16937,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref, + L: Logger, > Writeable for ChannelManager where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { #[rustfmt::skip] fn write(&self, writer: &mut W) -> Result<(), io::Error> { @@ -17317,11 +17297,10 @@ pub struct ChannelManagerReadArgs< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref + Clone, + L: Logger + Clone, > where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { /// A cryptographically secure source of entropy. 
pub entropy_source: ES, @@ -17391,12 +17370,11 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref + Clone, + L: Logger + Clone, > ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L> where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { /// Simple utility function to create a ChannelManagerReadArgs which creates the monitor /// HashMap for you. This is primarily useful for C bindings where it is not practical to @@ -17427,12 +17405,10 @@ where // If the HTLC corresponding to `prev_hop_data` is present in `decode_update_add_htlcs`, remove it // from the map as it is already being stored and processed elsewhere. -fn dedup_decode_update_add_htlcs( +fn dedup_decode_update_add_htlcs( decode_update_add_htlcs: &mut HashMap>, prev_hop_data: &HTLCPreviousHopData, removal_reason: &'static str, logger: &L, -) where - L::Target: Logger, -{ +) { match decode_update_add_htlcs.entry(prev_hop_data.prev_outbound_scid_alias) { hash_map::Entry::Occupied(mut update_add_htlcs) => { update_add_htlcs.get_mut().retain(|update_add| { @@ -17473,13 +17449,12 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref + Clone, + L: Logger + Clone, > ReadableArgs> for (BlockHash, Arc>) where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { fn read( reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, @@ -17500,13 +17475,12 @@ impl< F: FeeEstimator, R: Router, MR: MessageRouter, - L: Deref + Clone, + L: Logger + Clone, > ReadableArgs> for (BlockHash, ChannelManager) where M::Target: chain::Watch<::EcdsaSigner>, SP::Target: SignerProvider, - L::Target: Logger, { fn read( reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index c425bda2bf1..bc75407d4a3 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -740,7 +740,7 @@ pub trait NodeHolder { ::FeeEstimator, ::Router, ::MessageRouter, - ::L, + ::Logger, >; fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor<'_>>; } @@ -757,7 +757,7 @@ impl NodeHolder for &H { ::FeeEstimator, ::Router, ::MessageRouter, - ::L, + ::Logger, > { (*self).node() } diff --git a/lightning/src/ln/inbound_payment.rs b/lightning/src/ln/inbound_payment.rs index 03e271d196d..51f8b7bfce9 100644 --- a/lightning/src/ln/inbound_payment.rs +++ b/lightning/src/ln/inbound_payment.rs @@ -27,8 +27,6 @@ use crate::util::logger::Logger; #[allow(unused_imports)] use crate::prelude::*; -use core::ops::Deref; - pub(crate) const IV_LEN: usize = 16; const METADATA_LEN: usize = 16; const METADATA_KEY_LEN: usize = 32; @@ -342,13 +340,10 @@ fn construct_payment_secret( /// [`NodeSigner::get_expanded_key`]: crate::sign::NodeSigner::get_expanded_key /// [`create_inbound_payment`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment /// [`create_inbound_payment_for_hash`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment_for_hash -pub(super) fn verify( +pub(super) fn verify( payment_hash: PaymentHash, payment_data: &msgs::FinalOnionHopData, highest_seen_timestamp: u64, keys: &ExpandedKey, logger: &L, -) -> Result<(Option, Option), ()> -where - L::Target: Logger, -{ +) -> Result<(Option, Option), ()> { let (iv_bytes, metadata_bytes) = decrypt_metadata(payment_data.payment_secret, keys); let payment_type_res = diff --git 
a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index e99f53a8b88..43f80af2e90 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -22,7 +22,6 @@ use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; #[cfg(not(feature = "std"))] use core::iter::Iterator; -use core::ops::Deref; use core::time::Duration; /// Utility to create an invoice that can be paid to one of multiple nodes, or a "phantom invoice." @@ -67,15 +66,12 @@ use core::time::Duration; feature = "std", doc = "This can be used in a `no_std` environment, where [`std::time::SystemTime`] is not available and the current time is supplied by the caller." )] -pub fn create_phantom_invoice( +pub fn create_phantom_invoice( amt_msat: Option, payment_hash: Option, description: String, invoice_expiry_delta_secs: u32, phantom_route_hints: Vec, entropy_source: ES, node_signer: NS, logger: L, network: Currency, min_final_cltv_expiry_delta: Option, duration_since_epoch: Duration, -) -> Result> -where - L::Target: Logger, -{ +) -> Result> { let description = Description::new(description).map_err(SignOrCreationError::CreationError)?; let description = Bolt11InvoiceDescription::Direct(description); _create_phantom_invoice::( @@ -133,15 +129,16 @@ where feature = "std", doc = "This version can be used in a `no_std` environment, where [`std::time::SystemTime`] is not available and the current time is supplied by the caller." )] -pub fn create_phantom_invoice_with_description_hash( +pub fn create_phantom_invoice_with_description_hash< + ES: EntropySource, + NS: NodeSigner, + L: Logger, +>( amt_msat: Option, payment_hash: Option, invoice_expiry_delta_secs: u32, description_hash: Sha256, phantom_route_hints: Vec, entropy_source: ES, node_signer: NS, logger: L, network: Currency, min_final_cltv_expiry_delta: Option, duration_since_epoch: Duration, -) -> Result> -where - L::Target: Logger, -{ +) -> Result> { _create_phantom_invoice::( amt_msat, payment_hash, @@ -159,15 +156,12 @@ where const MAX_CHANNEL_HINTS: usize = 3; -fn _create_phantom_invoice( +fn _create_phantom_invoice( amt_msat: Option, payment_hash: Option, description: Bolt11InvoiceDescription, invoice_expiry_delta_secs: u32, phantom_route_hints: Vec, entropy_source: ES, node_signer: NS, logger: L, network: Currency, min_final_cltv_expiry_delta: Option, duration_since_epoch: Duration, -) -> Result> -where - L::Target: Logger, -{ +) -> Result> { if phantom_route_hints.is_empty() { return Err(SignOrCreationError::CreationError(CreationError::MissingRouteHints)); } @@ -262,12 +256,9 @@ where /// * Select one hint from each node, up to three hints or until we run out of hints. /// /// [`PhantomKeysManager`]: crate::sign::PhantomKeysManager -fn select_phantom_hints( +fn select_phantom_hints( amt_msat: Option, phantom_route_hints: Vec, logger: L, -) -> impl Iterator -where - L::Target: Logger, -{ +) -> impl Iterator { let mut phantom_hints: Vec<_> = Vec::new(); for PhantomRouteHints { channels, phantom_scid, real_node_pubkey } in phantom_route_hints { @@ -363,12 +354,9 @@ fn rotate_through_iterators>(mut vecs: Vec) -> impl /// * Limited to a total of 3 channels. /// * Sorted by lowest inbound capacity if an online channel with the minimum amount requested exists, /// otherwise sort by highest inbound capacity to give the payment the best chance of succeeding. 
-pub(super) fn sort_and_filter_channels( +pub(super) fn sort_and_filter_channels( channels: Vec, min_inbound_capacity_msat: Option, logger: &L, -) -> impl ExactSizeIterator -where - L::Target: Logger, -{ +) -> impl ExactSizeIterator { let mut filtered_channels: BTreeMap = BTreeMap::new(); let min_inbound_capacity = min_inbound_capacity_msat.unwrap_or(0); let mut min_capacity_channel_exists = false; @@ -574,20 +562,14 @@ fn prefer_current_channel( } /// Adds relevant context to a [`Record`] before passing it to the wrapped [`Logger`]. -struct WithChannelDetails<'a, 'b, L: Deref> -where - L::Target: Logger, -{ +struct WithChannelDetails<'a, 'b, L: Logger> { /// The logger to delegate to after adding context to the record. logger: &'a L, /// The [`ChannelDetails`] for adding relevant context to the logged record. details: &'b ChannelDetails, } -impl<'a, 'b, L: Deref> Logger for WithChannelDetails<'a, 'b, L> -where - L::Target: Logger, -{ +impl<'a, 'b, L: Logger> Logger for WithChannelDetails<'a, 'b, L> { fn log(&self, mut record: Record) { record.peer_id = Some(self.details.counterparty.node_id); record.channel_id = Some(self.details.channel_id); @@ -595,10 +577,7 @@ where } } -impl<'a, 'b, L: Deref> WithChannelDetails<'a, 'b, L> -where - L::Target: Logger, -{ +impl<'a, 'b, L: Logger> WithChannelDetails<'a, 'b, L> { fn from(logger: &'a L, details: &'b ChannelDetails) -> Self { Self { logger, details } } diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index ed0de3902e3..fd328e01d78 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -26,8 +26,6 @@ use crate::util::logger::Logger; #[allow(unused_imports)] use crate::prelude::*; -use core::ops::Deref; - /// Invalid inbound onion payment. 
#[derive(Clone, Debug, Hash, PartialEq, Eq)] pub struct InboundHTLCErr { @@ -487,15 +485,12 @@ pub(super) fn create_recv_pending_htlc_info( /// /// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable #[rustfmt::skip] -pub fn peel_payment_onion( +pub fn peel_payment_onion( msg: &msgs::UpdateAddHTLC, node_signer: NS, logger: L, secp_ctx: &Secp256k1, cur_height: u32, allow_skimmed_fees: bool, -) -> Result -where - L::Target: Logger, -{ +) -> Result { let (hop, next_packet_details_opt) = - decode_incoming_update_add_htlc_onion(msg, &node_signer, &*logger, secp_ctx + decode_incoming_update_add_htlc_onion(msg, &node_signer, &logger, secp_ctx ).map_err(|(msg, failure_reason)| { let (reason, err_data) = match msg { HTLCFailureMsg::Malformed(_) => (failure_reason, Vec::new()), @@ -585,12 +580,9 @@ pub(super) struct NextPacketDetails { } #[rustfmt::skip] -pub(super) fn decode_incoming_update_add_htlc_onion( +pub(super) fn decode_incoming_update_add_htlc_onion( msg: &msgs::UpdateAddHTLC, node_signer: NS, logger: L, secp_ctx: &Secp256k1, -) -> Result<(onion_utils::Hop, Option), (HTLCFailureMsg, LocalHTLCFailureReason)> -where - L::Target: Logger, -{ +) -> Result<(onion_utils::Hop, Option), (HTLCFailureMsg, LocalHTLCFailureReason)> { let encode_malformed_error = |message: &str, failure_reason: LocalHTLCFailureReason| { log_info!(logger, "Failed to accept/forward incoming HTLC: {}", message); let (sha256_of_onion, failure_reason) = if msg.blinding_point.is_some() || failure_reason == LocalHTLCFailureReason::InvalidOnionBlinding { diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 63d92fd4424..fcfac7c5e63 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -40,7 +40,6 @@ use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey}; use crate::io::{Cursor, Read}; -use core::ops::Deref; #[allow(unused_imports)] use crate::prelude::*; @@ -983,13 +982,10 @@ mod fuzzy_onion_utils { pub(crate) attribution_failed_channel: Option, } - pub fn process_onion_failure( + pub fn process_onion_failure( secp_ctx: &Secp256k1, logger: &L, htlc_source: &HTLCSource, encrypted_packet: OnionErrorPacket, - ) -> DecodedOnionFailure - where - L::Target: Logger, - { + ) -> DecodedOnionFailure { let (path, session_priv) = match htlc_source { HTLCSource::OutboundRoute { ref path, ref session_priv, .. } => (path, session_priv), _ => unreachable!(), @@ -999,13 +995,10 @@ mod fuzzy_onion_utils { } /// Decodes the attribution data that we got back from upstream on a payment we sent. - pub fn decode_fulfill_attribution_data( + pub fn decode_fulfill_attribution_data( secp_ctx: &Secp256k1, logger: &L, path: &Path, outer_session_priv: &SecretKey, mut attribution_data: AttributionData, - ) -> Vec - where - L::Target: Logger, - { + ) -> Vec { let mut hold_times = Vec::new(); // Only consider hops in the regular path for attribution data. Blinded path attribution data isn't accessible. @@ -1057,13 +1050,10 @@ pub(crate) use self::fuzzy_onion_utils::*; /// Process failure we got back from upstream on a payment we sent (implying htlc_source is an /// OutboundRoute). 
-fn process_onion_failure_inner( +fn process_onion_failure_inner( secp_ctx: &Secp256k1, logger: &L, path: &Path, session_priv: &SecretKey, trampoline_session_priv_override: Option, mut encrypted_packet: OnionErrorPacket, -) -> DecodedOnionFailure -where - L::Target: Logger, -{ +) -> DecodedOnionFailure { // Check that there is at least enough data for an hmac, otherwise none of the checking that we may do makes sense. // Also prevent slice out of bounds further down. if encrypted_packet.data.len() < 32 { @@ -2124,12 +2114,9 @@ impl HTLCFailReason { } } - pub(super) fn decode_onion_failure( + pub(super) fn decode_onion_failure( &self, secp_ctx: &Secp256k1, logger: &L, htlc_source: &HTLCSource, - ) -> DecodedOnionFailure - where - L::Target: Logger, - { + ) -> DecodedOnionFailure { match self.0 { HTLCFailReasonRepr::LightningError { ref err, .. } => { process_onion_failure(secp_ctx, logger, &htlc_source, err.clone()) diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 0bc61031a77..d366f46d3e2 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -866,7 +866,7 @@ impl OutboundPayments { impl OutboundPayments { #[rustfmt::skip] - pub(super) fn send_payment( + pub(super) fn send_payment( &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, first_hops: Vec, compute_inflight_htlcs: IH, entropy_source: &ES, @@ -877,7 +877,6 @@ impl OutboundPayments { where IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, - L::Target: Logger, { self.send_payment_for_non_bolt12_invoice(payment_id, payment_hash, recipient_onion, None, retry_strategy, route_params, router, first_hops, &compute_inflight_htlcs, entropy_source, node_signer, @@ -885,7 +884,7 @@ impl OutboundPayments { } #[rustfmt::skip] - pub(super) fn send_spontaneous_payment( + pub(super) fn send_spontaneous_payment( &self, payment_preimage: Option, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, @@ -896,7 +895,6 @@ impl OutboundPayments { where IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, - L::Target: Logger, { let preimage = payment_preimage .unwrap_or_else(|| PaymentPreimage(entropy_source.get_secure_random_bytes())); @@ -909,7 +907,7 @@ impl OutboundPayments { } #[rustfmt::skip] - pub(super) fn pay_for_bolt11_invoice( + pub(super) fn pay_for_bolt11_invoice( &self, invoice: &Bolt11Invoice, payment_id: PaymentId, amount_msats: Option, route_params_config: RouteParametersConfig, @@ -923,7 +921,6 @@ impl OutboundPayments { where IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, - L::Target: Logger, { let payment_hash = invoice.payment_hash(); @@ -955,7 +952,7 @@ impl OutboundPayments { #[rustfmt::skip] pub(super) fn send_payment_for_bolt12_invoice< - R: Router, ES: EntropySource, NS: NodeSigner, NL: Deref, IH, SP, L: Deref, + R: Router, ES: EntropySource, NS: NodeSigner, NL: Deref, IH, SP, L: Logger, >( &self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R, first_hops: Vec, features: Bolt12InvoiceFeatures, inflight_htlcs: IH, @@ -968,7 +965,6 @@ impl OutboundPayments { NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, - L::Target: Logger, { let (payment_hash, retry_strategy, 
params_config, _) = self @@ -998,7 +994,7 @@ impl OutboundPayments { #[rustfmt::skip] fn send_payment_for_bolt12_invoice_internal< - R: Router, ES: EntropySource, NS: NodeSigner, NL: Deref, IH, SP, L: Deref, + R: Router, ES: EntropySource, NS: NodeSigner, NL: Deref, IH, SP, L: Logger, >( &self, payment_id: PaymentId, payment_hash: PaymentHash, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, @@ -1013,7 +1009,6 @@ impl OutboundPayments { NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, - L::Target: Logger, { // Advance any blinded path where the introduction node is our node. if let Ok(our_node_id) = node_signer.get_node_id(Recipient::Node) { @@ -1217,7 +1212,7 @@ impl OutboundPayments { NL: Deref, IH, SP, - L: Deref, + L: Logger, >( &self, payment_id: PaymentId, hold_htlcs_at_next_hop: bool, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, @@ -1229,7 +1224,6 @@ impl OutboundPayments { NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, - L::Target: Logger, { let ( payment_hash, @@ -1300,7 +1294,7 @@ impl OutboundPayments { SP, IH, FH, - L: Deref, + L: Logger, >( &self, router: &R, first_hops: FH, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, @@ -1311,7 +1305,6 @@ impl OutboundPayments { SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, IH: Fn() -> InFlightHtlcs, FH: Fn() -> Vec, - L::Target: Logger, { let _single_thread = self.retry_lock.lock().unwrap(); let mut should_persist = false; @@ -1410,14 +1403,13 @@ impl OutboundPayments { } #[rustfmt::skip] - fn find_initial_route( + fn find_initial_route( &self, payment_id: PaymentId, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, route_params: &mut RouteParameters, router: &R, first_hops: &Vec, inflight_htlcs: &IH, node_signer: &NS, best_block_height: u32, logger: &WithContext, ) -> Result where - L::Target: Logger, IH: Fn() -> InFlightHtlcs, { #[cfg(feature = "std")] { @@ -1463,7 +1455,7 @@ impl OutboundPayments { /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed #[rustfmt::skip] - fn send_payment_for_non_bolt12_invoice( + fn send_payment_for_non_bolt12_invoice( &self, payment_id: PaymentId, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, retry_strategy: Retry, mut route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, @@ -1472,7 +1464,6 @@ impl OutboundPayments { logger: &WithContext, ) -> Result<(), RetryableSendFailure> where - L::Target: Logger, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { @@ -1506,7 +1497,7 @@ impl OutboundPayments { } #[rustfmt::skip] - fn find_route_and_send_payment( + fn find_route_and_send_payment( &self, payment_hash: PaymentHash, payment_id: PaymentId, route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, @@ -1514,7 +1505,6 @@ impl OutboundPayments { send_payment_along_path: &SP, logger: &WithContext, ) where - L::Target: Logger, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { @@ -1665,7 +1655,7 @@ impl OutboundPayments { } #[rustfmt::skip] - fn handle_pay_route_err( + fn handle_pay_route_err( 
&self, err: PaymentSendFailure, payment_id: PaymentId, payment_hash: PaymentHash, route: Route, mut route_params: RouteParameters, onion_session_privs: Vec<[u8; 32]>, router: &R, first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, @@ -1676,7 +1666,6 @@ impl OutboundPayments { where IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, - L::Target: Logger, { match err { PaymentSendFailure::AllFailedResendSafe(errs) => { @@ -1726,15 +1715,13 @@ impl OutboundPayments { fn push_path_failed_evs_and_scids< I: ExactSizeIterator + Iterator>, - L: Deref, + L: Logger, >( payment_id: PaymentId, payment_hash: PaymentHash, route_params: &mut RouteParameters, paths: Vec, path_results: I, pending_events: &Mutex)>>, logger: &WithContext, - ) where - L::Target: Logger, - { + ) { let mut events = pending_events.lock().unwrap(); debug_assert_eq!(paths.len(), path_results.len()); for (path, path_res) in paths.into_iter().zip(path_results) { @@ -2201,14 +2188,13 @@ impl OutboundPayments { } #[rustfmt::skip] - pub(super) fn claim_htlc( + pub(super) fn claim_htlc( &self, payment_id: PaymentId, payment_preimage: PaymentPreimage, bolt12_invoice: Option, session_priv: SecretKey, path: Path, from_onchain: bool, ev_completion_action: &mut Option, pending_events: &Mutex)>>, logger: &WithContext, ) where - L::Target: Logger, { let mut session_priv_bytes = [0; 32]; session_priv_bytes.copy_from_slice(&session_priv[..]); @@ -2367,15 +2353,13 @@ impl OutboundPayments { }); } - pub(super) fn fail_htlc( + pub(super) fn fail_htlc( &self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, path: &Path, session_priv: &SecretKey, payment_id: &PaymentId, probing_cookie_secret: [u8; 32], secp_ctx: &Secp256k1, pending_events: &Mutex)>>, completion_action: &mut Option, logger: &WithContext, - ) where - L::Target: Logger, - { + ) { #[cfg(any(test, feature = "_test_utils"))] let DecodedOnionFailure { network_update, @@ -2604,12 +2588,10 @@ impl OutboundPayments { invoice_requests } - pub(super) fn insert_from_monitor_on_startup( + pub(super) fn insert_from_monitor_on_startup( &self, payment_id: PaymentId, payment_hash: PaymentHash, session_priv_bytes: [u8; 32], path: &Path, best_block_height: u32, logger: &WithContext, - ) where - L::Target: Logger, - { + ) { let path_amt = path.final_value_msat(); let path_fee = path.fee_msat(); diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index c2bb0af3103..1891c52928d 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -977,8 +977,7 @@ pub trait APeerManager { type RM: Deref; type OMT: OnionMessageHandler + ?Sized; type OM: Deref; - type LT: Logger + ?Sized; - type L: Deref; + type Logger: Logger; type CMHT: CustomMessageHandler + ?Sized; type CMH: Deref; type NodeSigner: NodeSigner; @@ -992,7 +991,7 @@ pub trait APeerManager { Self::CM, Self::RM, Self::OM, - Self::L, + Self::Logger, Self::CMH, Self::NodeSigner, Self::SM, @@ -1004,7 +1003,7 @@ impl< CM: Deref, RM: Deref, OM: Deref, - L: Deref, + L: Logger, CMH: Deref, NS: NodeSigner, SM: Deref, @@ -1013,7 +1012,6 @@ where CM::Target: ChannelMessageHandler, RM::Target: RoutingMessageHandler, OM::Target: OnionMessageHandler, - L::Target: Logger, CMH::Target: CustomMessageHandler, SM::Target: SendOnlyMessageHandler, { @@ -1024,8 +1022,7 @@ where type RM = RM; type OMT = ::Target; type OM = OM; - type LT = ::Target; - type L = L; + type Logger = L; type CMHT = ::Target; type CMH = CMH; type NodeSigner = NS; 
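The `APeerManager` hunks above also collapse the two-step associated-type pattern: the separate `LT` (target) and `L` (handle) items become a single `Logger` associated type. A standalone sketch of the before and after shapes, with illustrative trait names:

    use core::ops::Deref;

    trait Logger {
        fn log(&self, msg: &str);
    }

    // Before: one associated type for the smart-pointer handle and one
    // for the Logger it dereferences to.
    trait PeerManagerBefore {
        type LT: Logger + ?Sized;
        type L: Deref<Target = Self::LT>;
    }

    // After: a single associated type bounded by Logger directly; smart
    // pointers still qualify via the blanket impl this series adds in
    // util/logger.rs.
    trait PeerManagerAfter {
        type Logger: Logger;
    }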
@@ -1060,7 +1057,7 @@ pub struct PeerManager< CM: Deref, RM: Deref, OM: Deref, - L: Deref, + L: Logger, CMH: Deref, NS: NodeSigner, SM: Deref, @@ -1068,7 +1065,6 @@ pub struct PeerManager< CM::Target: ChannelMessageHandler, RM::Target: RoutingMessageHandler, OM::Target: OnionMessageHandler, - L::Target: Logger, CMH::Target: CustomMessageHandler, SM::Target: SendOnlyMessageHandler, { @@ -1147,12 +1143,11 @@ fn encode_message(message: wire::Message) -> Vec { buffer.0 } -impl +impl PeerManager where CM::Target: ChannelMessageHandler, OM::Target: OnionMessageHandler, - L::Target: Logger, SM::Target: SendOnlyMessageHandler, { /// Constructs a new `PeerManager` with the given `ChannelMessageHandler` and @@ -1189,7 +1184,7 @@ where } } -impl +impl PeerManager< Descriptor, ErroringMessageHandler, @@ -1201,7 +1196,6 @@ impl IgnoringMessageHandler, > where RM::Target: RoutingMessageHandler, - L::Target: Logger, { /// Constructs a new `PeerManager` with the given `RoutingMessageHandler`. No channel message /// handler or onion message handler is used and onion and channel messages will be ignored (or @@ -1290,7 +1284,7 @@ impl< CM: Deref, RM: Deref, OM: Deref, - L: Deref, + L: Logger, CMH: Deref, NS: NodeSigner, SM: Deref, @@ -1299,7 +1293,6 @@ where CM::Target: ChannelMessageHandler, RM::Target: RoutingMessageHandler, OM::Target: OnionMessageHandler, - L::Target: Logger, CMH::Target: CustomMessageHandler, SM::Target: SendOnlyMessageHandler, { diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 3ee57c56c8f..0bb98777227 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -10,7 +10,6 @@ //! Provides data structures and functions for creating and managing Offers messages, //! facilitating communication, and handling BOLT12 messages and payments. -use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; use core::time::Duration; @@ -74,10 +73,7 @@ use { /// /// [`OffersMessageFlow`] is parameterized by a [`MessageRouter`], which is responsible /// for finding message paths when initiating and retrying onion messages. -pub struct OffersMessageFlow -where - L::Target: Logger, -{ +pub struct OffersMessageFlow { chain_hash: ChainHash, best_block: RwLock, @@ -106,10 +102,7 @@ where logger: L, } -impl OffersMessageFlow -where - L::Target: Logger, -{ +impl OffersMessageFlow { /// Creates a new [`OffersMessageFlow`] pub fn new( chain_hash: ChainHash, best_block: BestBlock, our_network_pubkey: PublicKey, @@ -264,10 +257,7 @@ const DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration = Duration::from_secs(365 * 2 pub(crate) const TEST_DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration = DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY; -impl OffersMessageFlow -where - L::Target: Logger, -{ +impl OffersMessageFlow { /// [`BlindedMessagePath`]s for an async recipient to communicate with this node and interactively /// build [`Offer`]s and [`StaticInvoice`]s for receiving async payments. /// @@ -427,10 +417,7 @@ pub enum HeldHtlcReplyPath { }, } -impl OffersMessageFlow -where - L::Target: Logger, -{ +impl OffersMessageFlow { /// Verifies an [`InvoiceRequest`] using the provided [`OffersContext`] or the [`InvoiceRequest::metadata`]. /// /// - If an [`OffersContext::InvoiceRequest`] with a `nonce` is provided, verification is performed using recipient context data. 
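The `OffersMessageFlow` hunks above show the mechanical shape repeated throughout this patch: each `L: Deref` parameter with an `L::Target: Logger` where-clause becomes a direct `L: Logger` bound. A hedged before-and-after sketch with illustrative names:

    trait Logger {
        fn log(&self, msg: &str);
    }

    // Before: indirection through Deref, with the real requirement
    // hidden in a where-clause.
    struct FlowBefore<L: core::ops::Deref>
    where
        L::Target: Logger,
    {
        logger: L,
    }

    // After: the requirement is stated once, directly on the parameter.
    struct FlowAfter<L: Logger> {
        logger: L,
    }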
diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index 525d3a72fee..0aadc6d6e31 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -70,9 +70,7 @@ pub trait AOnionMessenger { /// A type implementing [`NodeSigner`] type NodeSigner: NodeSigner; /// A type implementing [`Logger`] - type Logger: Logger + ?Sized; - /// A type that may be dereferenced to [`Self::Logger`] - type L: Deref; + type Logger: Logger; /// A type implementing [`NodeIdLookUp`] type NodeIdLookUp: NodeIdLookUp + ?Sized; /// A type that may be dereferenced to [`Self::NodeIdLookUp`] @@ -101,7 +99,7 @@ pub trait AOnionMessenger { ) -> &OnionMessenger< Self::EntropySource, Self::NodeSigner, - Self::L, + Self::Logger, Self::NL, Self::MessageRouter, Self::OMH, @@ -114,7 +112,7 @@ pub trait AOnionMessenger { impl< ES: EntropySource, NS: NodeSigner, - L: Deref, + L: Logger, NL: Deref, MR: MessageRouter, OMH: Deref, @@ -123,7 +121,6 @@ impl< CMH: Deref, > AOnionMessenger for OnionMessenger where - L::Target: Logger, NL::Target: NodeIdLookUp, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, @@ -132,8 +129,7 @@ where { type EntropySource = ES; type NodeSigner = NS; - type Logger = L::Target; - type L = L; + type Logger = L; type NodeIdLookUp = NL::Target; type NL = NL; type MessageRouter = MR; @@ -274,7 +270,7 @@ where pub struct OnionMessenger< ES: EntropySource, NS: NodeSigner, - L: Deref, + L: Logger, NL: Deref, MR: MessageRouter, OMH: Deref, @@ -282,7 +278,6 @@ pub struct OnionMessenger< DRH: Deref, CMH: Deref, > where - L::Target: Logger, NL::Target: NodeIdLookUp, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, @@ -555,10 +550,7 @@ impl> MessageRouter for R { /// node. Otherwise, there is no way to find a path to the introduction node in order to send a /// message, and thus an `Err` is returned. The impact of this may be somewhat muted when /// additional dummy hops are added to the blinded path, but this protection is not complete. -pub struct DefaultMessageRouter>, L: Deref, ES: EntropySource> -where - L::Target: Logger, -{ +pub struct DefaultMessageRouter>, L: Logger, ES: EntropySource> { network_graph: G, entropy_source: ES, } @@ -574,9 +566,8 @@ pub(crate) const DUMMY_HOPS_PATH_LENGTH: usize = 4; // We add dummy hops until the path reaches this length (including the recipient). pub(crate) const QR_CODED_DUMMY_HOPS_PATH_LENGTH: usize = 2; -impl>, L: Deref, ES: EntropySource> DefaultMessageRouter -where - L::Target: Logger, +impl>, L: Logger, ES: EntropySource> + DefaultMessageRouter { /// Creates a [`DefaultMessageRouter`] using the given [`NetworkGraph`]. pub fn new(network_graph: G, entropy_source: ES) -> Self { @@ -742,10 +733,8 @@ where } } -impl>, L: Deref, ES: EntropySource> MessageRouter +impl>, L: Logger, ES: EntropySource> MessageRouter for DefaultMessageRouter -where - L::Target: Logger, { fn find_path( &self, sender: PublicKey, peers: Vec, destination: Destination, @@ -787,17 +776,13 @@ where /// node. Otherwise, there is no way to find a path to the introduction node in order to send a /// message, and thus an `Err` is returned. The impact of this may be somewhat muted when /// additional dummy hops are added to the blinded path, but this protection is not complete. 
-pub struct NodeIdMessageRouter>, L: Deref, ES: EntropySource> -where - L::Target: Logger, -{ +pub struct NodeIdMessageRouter>, L: Logger, ES: EntropySource> { network_graph: G, entropy_source: ES, } -impl>, L: Deref, ES: EntropySource> NodeIdMessageRouter -where - L::Target: Logger, +impl>, L: Logger, ES: EntropySource> + NodeIdMessageRouter { /// Creates a [`NodeIdMessageRouter`] using the given [`NetworkGraph`]. pub fn new(network_graph: G, entropy_source: ES) -> Self { @@ -805,10 +790,8 @@ where } } -impl>, L: Deref, ES: EntropySource> MessageRouter +impl>, L: Logger, ES: EntropySource> MessageRouter for NodeIdMessageRouter -where - L::Target: Logger, { fn find_path( &self, sender: PublicKey, peers: Vec, destination: Destination, @@ -1167,12 +1150,11 @@ where /// /// Returns either the next layer of the onion for forwarding or the decrypted content for the /// receiver. -pub fn peel_onion_message( +pub fn peel_onion_message( msg: &OnionMessage, secp_ctx: &Secp256k1, node_signer: NS, logger: L, custom_handler: CMH, ) -> Result::Target as CustomOnionMessageHandler>::CustomMessage>, ()> where - L::Target: Logger, CMH::Target: CustomOnionMessageHandler, { let control_tlvs_ss = match node_signer.ecdh(Recipient::Node, &msg.blinding_point, None) { @@ -1203,7 +1185,7 @@ where onion_decode_ss, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, - (control_tlvs_ss, custom_handler.deref(), receiving_context_auth_key, logger.deref()), + (control_tlvs_ss, custom_handler.deref(), receiving_context_auth_key, &logger), ); // Constructs the next onion message using packet data and blinding logic. @@ -1391,7 +1373,7 @@ macro_rules! drop_handled_events_and_abort { impl< ES: EntropySource, NS: NodeSigner, - L: Deref, + L: Logger, NL: Deref, MR: MessageRouter, OMH: Deref, @@ -1400,7 +1382,6 @@ impl< CMH: Deref, > OnionMessenger where - L::Target: Logger, NL::Target: NodeIdLookUp, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, @@ -1801,7 +1782,7 @@ where msg, &self.secp_ctx, &self.node_signer, - &*self.logger, + &self.logger, &*self.custom_handler, ) } @@ -2032,7 +2013,7 @@ fn outbound_buffer_full( impl< ES: EntropySource, NS: NodeSigner, - L: Deref, + L: Logger, NL: Deref, MR: MessageRouter, OMH: Deref, @@ -2041,7 +2022,6 @@ impl< CMH: Deref, > EventsProvider for OnionMessenger where - L::Target: Logger, NL::Target: NodeIdLookUp, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, @@ -2150,7 +2130,7 @@ where impl< ES: EntropySource, NS: NodeSigner, - L: Deref, + L: Logger, NL: Deref, MR: MessageRouter, OMH: Deref, @@ -2159,7 +2139,6 @@ impl< CMH: Deref, > BaseMessageHandler for OnionMessenger where - L::Target: Logger, NL::Target: NodeIdLookUp, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, @@ -2219,7 +2198,7 @@ where impl< ES: EntropySource, NS: NodeSigner, - L: Deref, + L: Logger, NL: Deref, MR: MessageRouter, OMH: Deref, @@ -2228,7 +2207,6 @@ impl< CMH: Deref, > OnionMessageHandler for OnionMessenger where - L::Target: Logger, NL::Target: NodeIdLookUp, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index 534bebe7618..b3059e39e18 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -184,10 +184,7 @@ impl FromStr for NodeId { } /// Represents the network as nodes and channels between them -pub struct NetworkGraph -where - L::Target: Logger, -{ +pub struct 
NetworkGraph { secp_ctx: Secp256k1, last_rapid_gossip_sync_timestamp: Mutex>, chain_hash: ChainHash, @@ -322,10 +319,9 @@ impl MaybeReadable for NetworkUpdate { /// This network graph is then used for routing payments. /// Provides interface to help with initial routing sync by /// serving historical announcements. -pub struct P2PGossipSync>, U: Deref, L: Deref> +pub struct P2PGossipSync>, U: Deref, L: Logger> where U::Target: UtxoLookup, - L::Target: Logger, { network_graph: G, #[cfg(any(feature = "_test_utils", test))] @@ -337,10 +333,9 @@ where logger: L, } -impl>, U: Deref, L: Deref> P2PGossipSync +impl>, U: Deref, L: Logger> P2PGossipSync where U::Target: UtxoLookup, - L::Target: Logger, { /// Creates a new tracker of the actual state of the network of channels and nodes, /// assuming an existing [`NetworkGraph`]. @@ -426,10 +421,7 @@ where } } -impl NetworkGraph -where - L::Target: Logger, -{ +impl NetworkGraph { /// Handles any network updates originating from [`Event`]s. /// /// [`Event`]: crate::events::Event @@ -542,11 +534,10 @@ pub fn verify_channel_announcement( Ok(()) } -impl>, U: Deref, L: Deref> RoutingMessageHandler +impl>, U: Deref, L: Logger> RoutingMessageHandler for P2PGossipSync where U::Target: UtxoLookup, - L::Target: Logger, { fn handle_node_announcement( &self, _their_node_id: Option, msg: &msgs::NodeAnnouncement, @@ -770,11 +761,10 @@ where } } -impl>, U: Deref, L: Deref> BaseMessageHandler +impl>, U: Deref, L: Logger> BaseMessageHandler for P2PGossipSync where U::Target: UtxoLookup, - L::Target: Logger, { /// Initiates a stateless sync of routing gossip information with a peer /// using [`gossip_queries`]. The default strategy used by this implementation @@ -1644,10 +1634,7 @@ impl Readable for NodeInfo { const SERIALIZATION_VERSION: u8 = 1; const MIN_SERIALIZATION_VERSION: u8 = 1; -impl Writeable for NetworkGraph -where - L::Target: Logger, -{ +impl Writeable for NetworkGraph { fn write(&self, writer: &mut W) -> Result<(), io::Error> { self.test_node_counter_consistency(); @@ -1675,10 +1662,7 @@ where } } -impl ReadableArgs for NetworkGraph -where - L::Target: Logger, -{ +impl ReadableArgs for NetworkGraph { fn read(reader: &mut R, logger: L) -> Result, DecodeError> { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); @@ -1745,10 +1729,7 @@ where } } -impl fmt::Display for NetworkGraph -where - L::Target: Logger, -{ +impl fmt::Display for NetworkGraph { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { writeln!(f, "Network map\n[Channels]")?; for (key, val) in self.channels.read().unwrap().unordered_iter() { @@ -1762,11 +1743,8 @@ where } } -impl Eq for NetworkGraph where L::Target: Logger {} -impl PartialEq for NetworkGraph -where - L::Target: Logger, -{ +impl Eq for NetworkGraph {} +impl PartialEq for NetworkGraph { fn eq(&self, other: &Self) -> bool { // For a total lockorder, sort by position in memory and take the inner locks in that order. // (Assumes that we can't move within memory while a lock is held). @@ -1796,10 +1774,7 @@ const CHAN_COUNT_ESTIMATE: usize = 63_000; /// too low. const NODE_COUNT_ESTIMATE: usize = 20_000; -impl NetworkGraph -where - L::Target: Logger, -{ +impl NetworkGraph { /// Creates a new, empty, network graph. 
pub fn new(network: Network, logger: L) -> NetworkGraph { let (node_map_cap, chan_map_cap) = if matches!(network, Network::Bitcoin) { diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 0a235880858..42d569415af 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -57,13 +57,12 @@ pub use lightning_types::routing::{RouteHint, RouteHintHop}; /// payment, and thus an `Err` is returned. pub struct DefaultRouter< G: Deref>, - L: Deref, + L: Logger, ES: EntropySource, S: Deref, SP: Sized, Sc: ScoreLookUp, > where - L::Target: Logger, S::Target: for<'a> LockableScore<'a, ScoreLookUp = Sc>, { network_graph: G, @@ -78,14 +77,13 @@ pub const DEFAULT_PAYMENT_DUMMY_HOPS: usize = 3; impl< G: Deref>, - L: Deref, + L: Logger, ES: EntropySource, S: Deref, SP: Sized, Sc: ScoreLookUp, > DefaultRouter where - L::Target: Logger, S::Target: for<'a> LockableScore<'a, ScoreLookUp = Sc>, { /// Creates a new router. @@ -98,14 +96,13 @@ where impl< G: Deref>, - L: Deref, + L: Logger, ES: EntropySource, S: Deref, SP: Sized, Sc: ScoreLookUp, > Router for DefaultRouter where - L::Target: Logger, S::Target: for<'a> LockableScore<'a, ScoreLookUp = Sc>, { #[rustfmt::skip] @@ -118,7 +115,7 @@ where ) -> Result { let random_seed_bytes = self.entropy_source.get_secure_random_bytes(); find_route( - payer, params, &self.network_graph, first_hops, &*self.logger, + payer, params, &self.network_graph, first_hops, &self.logger, &ScorerAccountingForInFlightHtlcs::new(self.scorer.read_lock(), &inflight_htlcs), &self.score_params, &random_seed_bytes @@ -1984,12 +1981,11 @@ impl<'a> NodeCounters<'a> { /// Calculates the introduction point for each blinded path in the given [`PaymentParameters`], if /// they can be found. #[rustfmt::skip] -fn calculate_blinded_path_intro_points<'a, L: Deref>( +fn calculate_blinded_path_intro_points<'a, L: Logger>( payment_params: &PaymentParameters, node_counters: &'a NodeCounters, network_graph: &ReadOnlyNetworkGraph, logger: &L, our_node_id: NodeId, first_hop_targets: &HashMap, u32)>, -) -> Result>, &'static str> -where L::Target: Logger { +) -> Result>, &'static str> { let introduction_node_id_cache = payment_params.payee.blinded_route_hints().iter() .map(|path| { match path.introduction_node() { @@ -2490,12 +2486,11 @@ fn sort_first_hop_channels( /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph #[rustfmt::skip] -pub fn find_route( +pub fn find_route( our_node_pubkey: &PublicKey, route_params: &RouteParameters, network_graph: &NetworkGraph, first_hops: Option<&[&ChannelDetails]>, logger: L, scorer: &S, score_params: &S::ScoreParams, random_seed_bytes: &[u8; 32] -) -> Result -where L::Target: Logger, GL::Target: Logger { +) -> Result { let graph_lock = network_graph.read_only(); let mut route = get_route(our_node_pubkey, &route_params, &graph_lock, first_hops, logger, scorer, score_params, random_seed_bytes)?; @@ -2504,12 +2499,11 @@ where L::Target: Logger, GL::Target: Logger { } #[rustfmt::skip] -pub(crate) fn get_route( +pub(crate) fn get_route( our_node_pubkey: &PublicKey, route_params: &RouteParameters, network_graph: &ReadOnlyNetworkGraph, first_hops: Option<&[&ChannelDetails]>, logger: L, scorer: &S, score_params: &S::ScoreParams, _random_seed_bytes: &[u8; 32] -) -> Result -where L::Target: Logger { +) -> Result { let payment_params = &route_params.payment_params; let max_path_length = core::cmp::min(payment_params.max_path_length, 
MAX_PATH_LENGTH_ESTIMATE); @@ -3893,11 +3887,10 @@ fn add_random_cltv_offset(route: &mut Route, payment_params: &PaymentParameters, /// /// Re-uses logic from `find_route`, so the restrictions described there also apply here. #[rustfmt::skip] -pub fn build_route_from_hops( +pub fn build_route_from_hops( our_node_pubkey: &PublicKey, hops: &[PublicKey], route_params: &RouteParameters, network_graph: &NetworkGraph, logger: L, random_seed_bytes: &[u8; 32] -) -> Result -where L::Target: Logger, GL::Target: Logger { +) -> Result { let graph_lock = network_graph.read_only(); let mut route = build_route_from_hops_internal(our_node_pubkey, hops, &route_params, &graph_lock, logger, random_seed_bytes)?; @@ -3906,10 +3899,10 @@ where L::Target: Logger, GL::Target: Logger { } #[rustfmt::skip] -fn build_route_from_hops_internal( +fn build_route_from_hops_internal( our_node_pubkey: &PublicKey, hops: &[PublicKey], route_params: &RouteParameters, network_graph: &ReadOnlyNetworkGraph, logger: L, random_seed_bytes: &[u8; 32], -) -> Result where L::Target: Logger { +) -> Result { struct HopScorer { our_node_id: NodeId, diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index d741adf58d3..47621e37380 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -479,10 +479,7 @@ impl ReadableArgs for FixedPenaltyScorer { /// [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life /// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_multiplier_msat /// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat -pub struct ProbabilisticScorer>, L: Deref> -where - L::Target: Logger, -{ +pub struct ProbabilisticScorer>, L: Logger> { decay_params: ProbabilisticScoringDecayParameters, network_graph: G, logger: L, @@ -964,10 +961,7 @@ struct DirectedChannelLiquidity< last_datapoint_time: T, } -impl>, L: Deref> ProbabilisticScorer -where - L::Target: Logger, -{ +impl>, L: Logger> ProbabilisticScorer { /// Creates a new scorer using the given scoring parameters for sending payments from a node /// through a network graph. pub fn new( @@ -1593,9 +1587,9 @@ impl< { /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`. #[rustfmt::skip] - fn failed_at_channel( + fn failed_at_channel( &mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log - ) where Log::Target: Logger { + ) { let existing_max_msat = self.max_liquidity_msat(); if amount_msat < existing_max_msat { log_debug!(logger, "Setting max liquidity of {} from {} to {}", chan_descr, existing_max_msat, amount_msat); @@ -1610,9 +1604,9 @@ impl< /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream. #[rustfmt::skip] - fn failed_downstream( + fn failed_downstream( &mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log - ) where Log::Target: Logger { + ) { let existing_min_msat = self.min_liquidity_msat(); if amount_msat > existing_min_msat { log_debug!(logger, "Setting min liquidity of {} from {} to {}", existing_min_msat, chan_descr, amount_msat); @@ -1627,9 +1621,9 @@ impl< /// Adjusts the channel liquidity balance bounds when successfully routing `amount_msat`. 
#[rustfmt::skip] - fn successful(&mut self, + fn successful(&mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log - ) where Log::Target: Logger { + ) { let max_liquidity_msat = self.max_liquidity_msat().checked_sub(amount_msat).unwrap_or(0); log_debug!(logger, "Subtracting {} from max liquidity of {} (setting it to {})", amount_msat, chan_descr, max_liquidity_msat); self.set_max_liquidity_msat(max_liquidity_msat, duration_since_epoch); @@ -1669,10 +1663,7 @@ impl< } } -impl>, L: Deref> ScoreLookUp for ProbabilisticScorer -where - L::Target: Logger, -{ +impl>, L: Logger> ScoreLookUp for ProbabilisticScorer { type ScoreParams = ProbabilisticScoringFeeParameters; #[rustfmt::skip] fn channel_penalty_msat( @@ -1735,10 +1726,7 @@ where } } -impl>, L: Deref> ScoreUpdate for ProbabilisticScorer -where - L::Target: Logger, -{ +impl>, L: Logger> ScoreUpdate for ProbabilisticScorer { #[rustfmt::skip] fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) { let amount_msat = path.final_value_msat(); @@ -1836,18 +1824,12 @@ where /// /// Note that only the locally acquired data is persisted. After a restart, the external scores will be lost and must be /// resupplied. -pub struct CombinedScorer>, L: Deref> -where - L::Target: Logger, -{ +pub struct CombinedScorer>, L: Logger> { local_only_scorer: ProbabilisticScorer, scorer: ProbabilisticScorer, } -impl> + Clone, L: Deref + Clone> CombinedScorer -where - L::Target: Logger, -{ +impl> + Clone, L: Logger + Clone> CombinedScorer { /// Create a new combined scorer with the given local scorer. #[rustfmt::skip] pub fn new(local_scorer: ProbabilisticScorer) -> Self { @@ -1889,10 +1871,7 @@ where } } -impl>, L: Deref> ScoreLookUp for CombinedScorer -where - L::Target: Logger, -{ +impl>, L: Logger> ScoreLookUp for CombinedScorer { type ScoreParams = ProbabilisticScoringFeeParameters; fn channel_penalty_msat( @@ -1903,10 +1882,7 @@ where } } -impl>, L: Deref> ScoreUpdate for CombinedScorer -where - L::Target: Logger, -{ +impl>, L: Logger> ScoreUpdate for CombinedScorer { fn payment_path_failed( &mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration, ) { @@ -1935,20 +1911,14 @@ where } } -impl>, L: Deref> Writeable for CombinedScorer -where - L::Target: Logger, -{ +impl>, L: Logger> Writeable for CombinedScorer { fn write(&self, writer: &mut W) -> Result<(), crate::io::Error> { self.local_only_scorer.write(writer) } } #[cfg(c_bindings)] -impl>, L: Deref> Score for ProbabilisticScorer where - L::Target: Logger -{ -} +impl>, L: Logger> Score for ProbabilisticScorer {} #[cfg(feature = "std")] #[inline] @@ -2520,20 +2490,15 @@ mod bucketed_history { } } -impl>, L: Deref> Writeable for ProbabilisticScorer -where - L::Target: Logger, -{ +impl>, L: Logger> Writeable for ProbabilisticScorer { #[inline] fn write(&self, w: &mut W) -> Result<(), io::Error> { self.channel_liquidities.write(w) } } -impl>, L: Deref> +impl>, L: Logger> ReadableArgs<(ProbabilisticScoringDecayParameters, G, L)> for ProbabilisticScorer -where - L::Target: Logger, { #[inline] #[rustfmt::skip] diff --git a/lightning/src/routing/utxo.rs b/lightning/src/routing/utxo.rs index ab653b1ea74..089c536ca60 100644 --- a/lightning/src/routing/utxo.rs +++ b/lightning/src/routing/utxo.rs @@ -491,12 +491,10 @@ impl PendingChecks { } } - fn resolve_single_future( + fn resolve_single_future( &self, graph: &NetworkGraph, entry: Arc>, new_messages: &mut Vec, - ) where - L::Target: Logger, - { + ) { let 
(announcement, result, announce_a, announce_b, update_a, update_b); { let mut state = entry.lock().unwrap(); @@ -581,12 +579,9 @@ impl PendingChecks { } } - pub(super) fn check_resolved_futures( + pub(super) fn check_resolved_futures( &self, graph: &NetworkGraph, - ) -> Vec - where - L::Target: Logger, - { + ) -> Vec { let mut completed_states = Vec::new(); { let mut lck = self.internal.lock().unwrap(); diff --git a/lightning/src/sign/tx_builder.rs b/lightning/src/sign/tx_builder.rs index 74941ec8a87..27b8b1a9a2b 100644 --- a/lightning/src/sign/tx_builder.rs +++ b/lightning/src/sign/tx_builder.rs @@ -2,7 +2,6 @@ #![allow(dead_code)] use core::cmp; -use core::ops::Deref; use bitcoin::secp256k1::{self, PublicKey, Secp256k1}; @@ -169,14 +168,12 @@ pub(crate) trait TxBuilder { &self, is_outbound_from_holder: bool, value_to_self_after_htlcs: u64, value_to_remote_after_htlcs: u64, channel_type: &ChannelTypeFeatures, ) -> (u64, u64); - fn build_commitment_transaction( + fn build_commitment_transaction( &self, local: bool, commitment_number: u64, per_commitment_point: &PublicKey, channel_parameters: &ChannelTransactionParameters, secp_ctx: &Secp256k1, value_to_self_msat: u64, htlcs_in_tx: Vec, feerate_per_kw: u32, broadcaster_dust_limit_satoshis: u64, logger: &L, - ) -> (CommitmentTransaction, CommitmentStats) - where - L::Target: Logger; + ) -> (CommitmentTransaction, CommitmentStats); } pub(crate) struct SpecTxBuilder {} @@ -322,15 +319,12 @@ impl TxBuilder for SpecTxBuilder { (local_balance_before_fee_msat, remote_balance_before_fee_msat) } - fn build_commitment_transaction( + fn build_commitment_transaction( &self, local: bool, commitment_number: u64, per_commitment_point: &PublicKey, channel_parameters: &ChannelTransactionParameters, secp_ctx: &Secp256k1, value_to_self_msat: u64, mut htlcs_in_tx: Vec, feerate_per_kw: u32, broadcaster_dust_limit_satoshis: u64, logger: &L, - ) -> (CommitmentTransaction, CommitmentStats) - where - L::Target: Logger, - { + ) -> (CommitmentTransaction, CommitmentStats) { let mut local_htlc_total_msat = 0; let mut remote_htlc_total_msat = 0; let channel_type = &channel_parameters.channel_type_features; diff --git a/lightning/src/util/anchor_channel_reserves.rs b/lightning/src/util/anchor_channel_reserves.rs index 92c51975e5c..3e9945f7b87 100644 --- a/lightning/src/util/anchor_channel_reserves.rs +++ b/lightning/src/util/anchor_channel_reserves.rs @@ -275,10 +275,10 @@ pub fn can_support_additional_anchor_channel< FilterRef: Deref, B: BroadcasterInterface, FE: FeeEstimator, - LoggerRef: Deref, + L: Logger, PersistRef: Deref, ES: EntropySource, - ChainMonitorRef: Deref>, + ChainMonitorRef: Deref>, >( context: &AnchorChannelReserveContext, utxos: &[Utxo], a_channel_manager: AChannelManagerRef, chain_monitor: ChainMonitorRef, @@ -286,7 +286,6 @@ pub fn can_support_additional_anchor_channel< where AChannelManagerRef::Target: AChannelManager, FilterRef::Target: Filter, - LoggerRef::Target: Logger, PersistRef::Target: Persist, { let mut anchor_channels = new_hash_set(); diff --git a/lightning/src/util/logger.rs b/lightning/src/util/logger.rs index 0d2eb47fa62..c8b6715ae7c 100644 --- a/lightning/src/util/logger.rs +++ b/lightning/src/util/logger.rs @@ -294,14 +294,17 @@ pub trait Logger { fn log(&self, record: Record); } +impl> Logger for L { + fn log(&self, record: Record) { + self.deref().log(record) + } +} + /// Adds relevant context to a [`Record`] before passing it to the wrapped [`Logger`]. 
/// /// This is not exported to bindings users as lifetimes are problematic and there's little reason /// for this to be used downstream anyway. -pub struct WithContext<'a, L: Deref> -where - L::Target: Logger, -{ +pub struct WithContext<'a, L: Logger> { logger: &'a L, peer_id: Option, channel_id: Option, @@ -309,10 +312,7 @@ where payment_id: Option, } -impl<'a, L: Deref> Logger for WithContext<'a, L> -where - L::Target: Logger, -{ +impl<'a, L: Logger> Logger for WithContext<'a, L> { fn log(&self, mut record: Record) { if self.peer_id.is_some() && record.peer_id.is_none() { record.peer_id = self.peer_id @@ -330,10 +330,7 @@ where } } -impl<'a, L: Deref> WithContext<'a, L> -where - L::Target: Logger, -{ +impl<'a, L: Logger> WithContext<'a, L> { /// Wraps the given logger, providing additional context to any logged records. pub fn from( logger: &'a L, peer_id: Option, channel_id: Option, diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 3a94732fa46..a71e634699e 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -589,7 +589,7 @@ fn poll_sync_future(future: F) -> F::Output { /// [`MonitorUpdatingPersister::cleanup_stale_updates`] function. pub struct MonitorUpdatingPersister< K: Deref, - L: Deref, + L: Logger, ES: EntropySource, SP: Deref, BI: BroadcasterInterface, @@ -597,12 +597,11 @@ pub struct MonitorUpdatingPersister< >(MonitorUpdatingPersisterAsync, PanicingSpawner, L, ES, SP, BI, FE>) where K::Target: KVStoreSync, - L::Target: Logger, SP::Target: SignerProvider + Sized; impl< K: Deref, - L: Deref, + L: Logger, ES: EntropySource, SP: Deref, BI: BroadcasterInterface, @@ -610,7 +609,6 @@ impl< > MonitorUpdatingPersister where K::Target: KVStoreSync, - L::Target: Logger, SP::Target: SignerProvider + Sized, { /// Constructs a new [`MonitorUpdatingPersister`]. @@ -698,7 +696,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, K: Deref, - L: Deref, + L: Logger, ES: EntropySource, SP: Deref, BI: BroadcasterInterface, @@ -706,7 +704,6 @@ impl< > Persist for MonitorUpdatingPersister where K::Target: KVStoreSync, - L::Target: Logger, SP::Target: SignerProvider + Sized, { /// Persists a new channel. This means writing the entire monitor to the @@ -781,7 +778,7 @@ where pub struct MonitorUpdatingPersisterAsync< K: Deref, S: FutureSpawner, - L: Deref, + L: Logger, ES: EntropySource, SP: Deref, BI: BroadcasterInterface, @@ -789,20 +786,18 @@ pub struct MonitorUpdatingPersisterAsync< >(Arc>) where K::Target: KVStore, - L::Target: Logger, SP::Target: SignerProvider + Sized; struct MonitorUpdatingPersisterAsyncInner< K: Deref, S: FutureSpawner, - L: Deref, + L: Logger, ES: EntropySource, SP: Deref, BI: BroadcasterInterface, FE: FeeEstimator, > where K::Target: KVStore, - L::Target: Logger, SP::Target: SignerProvider + Sized, { kv_store: K, @@ -819,7 +814,7 @@ struct MonitorUpdatingPersisterAsyncInner< impl< K: Deref, S: FutureSpawner, - L: Deref, + L: Logger, ES: EntropySource, SP: Deref, BI: BroadcasterInterface, @@ -827,7 +822,6 @@ impl< > MonitorUpdatingPersisterAsync where K::Target: KVStore, - L::Target: Logger, SP::Target: SignerProvider + Sized, { /// Constructs a new [`MonitorUpdatingPersisterAsync`]. 
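The blanket impl added to `util/logger.rs` above is what keeps every existing call site compiling: any reference or smart pointer whose target implements `Logger` now implements `Logger` itself. A self-contained sketch of how it composes, using `log(&self, msg: &str)` as a stand-in for the real `Record`-based signature (whether the real impl needs `?Sized` is an assumption here):

    use std::ops::Deref;
    use std::sync::Arc;

    trait Logger {
        fn log(&self, msg: &str);
    }

    // Blanket impl: anything that derefs to a Logger is itself a Logger.
    impl<T: Logger + ?Sized, L: Deref<Target = T>> Logger for L {
        fn log(&self, msg: &str) {
            self.deref().log(msg)
        }
    }

    struct ConsoleLogger;
    impl Logger for ConsoleLogger {
        fn log(&self, msg: &str) {
            println!("{msg}");
        }
    }

    fn takes_logger<L: Logger>(logger: L) {
        logger.log("hello");
    }

    fn main() {
        let arc = Arc::new(ConsoleLogger);
        takes_logger(&arc); // &Arc<ConsoleLogger>, via the blanket impl
        takes_logger(arc); // Arc<ConsoleLogger>, via the blanket impl
        takes_logger(ConsoleLogger); // the direct impl
    }

This is why the rewritten bounds are a drop-in change: `Arc<MyLogger>`, `&MyLogger`, and plain `MyLogger` all satisfy `L: Logger`, so callers do not need to change how they pass loggers in.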
@@ -967,7 +961,7 @@ where impl< K: Deref + MaybeSend + MaybeSync + 'static, S: FutureSpawner, - L: Deref + MaybeSend + MaybeSync + 'static, + L: Logger + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: Deref + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, @@ -975,7 +969,6 @@ impl< > MonitorUpdatingPersisterAsync where K::Target: KVStore + MaybeSync, - L::Target: Logger, SP::Target: SignerProvider + Sized, ::EcdsaSigner: MaybeSend + 'static, { @@ -1056,7 +1049,7 @@ impl> + MaybeSend> MaybeSendableFuture impl< K: Deref, S: FutureSpawner, - L: Deref, + L: Logger, ES: EntropySource, SP: Deref, BI: BroadcasterInterface, @@ -1064,7 +1057,6 @@ impl< > MonitorUpdatingPersisterAsyncInner where K::Target: KVStore, - L::Target: Logger, SP::Target: SignerProvider + Sized, { pub async fn read_channel_monitor_with_updates( diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index a4088331f68..f7cf2771d3c 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -343,13 +343,12 @@ pub struct OutputSweeper< E: FeeEstimator, F: Deref, K: Deref, - L: Deref, + L: Logger, O: Deref, > where D::Target: ChangeDestinationSource, F::Target: Filter, K::Target: KVStore, - L::Target: Logger, O::Target: OutputSpender, { sweeper_state: Mutex, @@ -369,14 +368,13 @@ impl< E: FeeEstimator, F: Deref, K: Deref, - L: Deref, + L: Logger, O: Deref, > OutputSweeper where D::Target: ChangeDestinationSource, F::Target: Filter, K::Target: KVStore, - L::Target: Logger, O::Target: OutputSpender, { /// Constructs a new [`OutputSweeper`]. @@ -726,14 +724,13 @@ impl< E: FeeEstimator, F: Deref, K: Deref, - L: Deref, + L: Logger, O: Deref, > Listen for OutputSweeper where D::Target: ChangeDestinationSource, F::Target: Filter + Sync + Send, K::Target: KVStore, - L::Target: Logger, O::Target: OutputSpender, { fn filtered_block_connected( @@ -772,14 +769,13 @@ impl< E: FeeEstimator, F: Deref, K: Deref, - L: Deref, + L: Logger, O: Deref, > Confirm for OutputSweeper where D::Target: ChangeDestinationSource, F::Target: Filter + Sync + Send, K::Target: KVStore, - L::Target: Logger, O::Target: OutputSpender, { fn transactions_confirmed( @@ -874,14 +870,13 @@ impl< E: FeeEstimator, F: Deref, K: Deref, - L: Deref, + L: Logger, O: Deref, > ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeper) where D::Target: ChangeDestinationSource, F::Target: Filter + Sync + Send, K::Target: KVStore, - L::Target: Logger, O::Target: OutputSpender, { #[inline] @@ -949,13 +944,12 @@ pub struct OutputSweeperSync< E: FeeEstimator, F: Deref, K: Deref, - L: Deref, + L: Logger, O: Deref, > where D::Target: ChangeDestinationSourceSync, F::Target: Filter, K::Target: KVStoreSync, - L::Target: Logger, O::Target: OutputSpender, { sweeper: @@ -968,14 +962,13 @@ impl< E: FeeEstimator, F: Deref, K: Deref, - L: Deref, + L: Logger, O: Deref, > OutputSweeperSync where D::Target: ChangeDestinationSourceSync, F::Target: Filter, K::Target: KVStoreSync, - L::Target: Logger, O::Target: OutputSpender, { /// Constructs a new [`OutputSweeperSync`] instance. 
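Decorator loggers like `WithContext` (in `util/logger.rs` above) and `WithChannelDetails` (in `invoice_utils.rs` earlier) keep the same shape under the new bound: borrow the inner logger, enrich the record, delegate. A minimal sketch of the pattern, with the record simplified to a string prefix:

    trait Logger {
        fn log(&self, msg: &str);
    }

    // Decorator in the style of WithContext: wrap a reference to the
    // inner logger, add context, then forward the call. The plain
    // `L: Logger` bound is all that is required.
    struct WithPrefix<'a, L: Logger> {
        logger: &'a L,
        prefix: &'a str,
    }

    impl<'a, L: Logger> Logger for WithPrefix<'a, L> {
        fn log(&self, msg: &str) {
            let enriched = format!("[{}] {}", self.prefix, msg);
            self.logger.log(&enriched);
        }
    }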
@@ -1093,14 +1086,13 @@ impl<
 	E: FeeEstimator,
 	F: Deref,
 	K: Deref,
-	L: Deref,
+	L: Logger,
 	O: Deref,
 > Listen for OutputSweeperSync<B, D, E, F, K, L, O>
 where
 	D::Target: ChangeDestinationSourceSync,
 	F::Target: Filter + Sync + Send,
 	K::Target: KVStoreSync,
-	L::Target: Logger,
 	O::Target: OutputSpender,
 {
 	fn filtered_block_connected(
@@ -1120,14 +1112,13 @@ impl<
 	E: FeeEstimator,
 	F: Deref,
 	K: Deref,
-	L: Deref,
+	L: Logger,
 	O: Deref,
 > Confirm for OutputSweeperSync<B, D, E, F, K, L, O>
 where
 	D::Target: ChangeDestinationSourceSync,
 	F::Target: Filter + Sync + Send,
 	K::Target: KVStoreSync,
-	L::Target: Logger,
 	O::Target: OutputSpender,
 {
 	fn transactions_confirmed(
@@ -1155,7 +1146,7 @@ impl<
 	E: FeeEstimator,
 	F: Deref,
 	K: Deref,
-	L: Deref,
+	L: Logger,
 	O: Deref,
 > ReadableArgs<(B, E, Option<F>, O, D, K, L)>
 	for (BestBlock, OutputSweeperSync<B, D, E, F, K, L, O>)
@@ -1163,7 +1154,6 @@ where
 	D::Target: ChangeDestinationSourceSync,
 	F::Target: Filter + Sync + Send,
 	K::Target: KVStoreSync,
-	L::Target: Logger,
 	O::Target: OutputSpender,
 {
 	#[inline]

From 9432adbc4f2f3f455971595d503567142b63bfca Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Thu, 15 Jan 2026 12:37:55 -0500
Subject: [PATCH 146/242] Drop Deref indirection for SignerProvider

Reduces generics and verbosity across the codebase, should provide
equivalent behavior.

Co-Authored-By: Claude Opus 4.5
---
 lightning/src/chain/chainmonitor.rs           |  35 +---
 lightning/src/events/bump_transaction/mod.rs  |  12 +-
 lightning/src/events/bump_transaction/sync.rs |  12 +-
 lightning/src/ln/channel.rs                   | 198 +++++-------------
 lightning/src/ln/channel_state.rs             |   9 +-
 lightning/src/ln/channelmanager.rs            | 123 ++++-------
 lightning/src/sign/mod.rs                     |  29 +++
 lightning/src/sign/type_resolver.rs           |  27 +--
 lightning/src/util/persist.rs                 |  85 +++-----
 9 files changed, 189 insertions(+), 341 deletions(-)

diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 87943bdf910..8835e9c8185 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -260,12 +260,11 @@ pub struct AsyncPersister<
 	S: FutureSpawner,
 	L: Logger + MaybeSend + MaybeSync + 'static,
 	ES: EntropySource + MaybeSend + MaybeSync + 'static,
-	SP: Deref + MaybeSend + MaybeSync + 'static,
+	SP: SignerProvider + MaybeSend + MaybeSync + 'static,
 	BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static,
 	FE: FeeEstimator + MaybeSend + MaybeSync + 'static,
 > where
 	K::Target: KVStore + MaybeSync,
-	SP::Target: SignerProvider + Sized,
 {
 	persister: MonitorUpdatingPersisterAsync<K, S, L, ES, SP, BI, FE>,
 	event_notifier: Arc<Notifier>,
@@ -276,13 +275,12 @@ impl<
 	S: FutureSpawner,
 	L: Logger + MaybeSend + MaybeSync + 'static,
 	ES: EntropySource + MaybeSend + MaybeSync + 'static,
-	SP: Deref + MaybeSend + MaybeSync + 'static,
+	SP: SignerProvider + MaybeSend + MaybeSync + 'static,
 	BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static,
 	FE: FeeEstimator + MaybeSend + MaybeSync + 'static,
 > Deref for AsyncPersister<K, S, L, ES, SP, BI, FE>
 where
 	K::Target: KVStore + MaybeSync,
-	SP::Target: SignerProvider + Sized,
 {
 	type Target = Self;
 	fn deref(&self) -> &Self {
@@ -295,18 +293,16 @@ impl<
 	S: FutureSpawner,
 	L: Logger + MaybeSend + MaybeSync + 'static,
 	ES: EntropySource + MaybeSend + MaybeSync + 'static,
-	SP: Deref + MaybeSend + MaybeSync + 'static,
+	SP: SignerProvider + MaybeSend + MaybeSync + 'static,
 	BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static,
 	FE: FeeEstimator + MaybeSend + MaybeSync + 'static,
-> Persist<<SP::Target as SignerProvider>::EcdsaSigner> for AsyncPersister<K, S, L, ES, SP, BI, FE>
+> Persist<SP::EcdsaSigner> for AsyncPersister<K, S, L, ES, SP, BI, FE>
 where
 	K::Target: KVStore + MaybeSync,
-	SP::Target: SignerProvider + Sized,
-	<SP::Target as SignerProvider>
::EcdsaSigner: MaybeSend + 'static, + SP::EcdsaSigner: MaybeSend + 'static, { fn persist_new_channel( - &self, monitor_name: MonitorName, - monitor: &ChannelMonitor<::EcdsaSigner>, + &self, monitor_name: MonitorName, monitor: &ChannelMonitor, ) -> ChannelMonitorUpdateStatus { let notifier = Arc::clone(&self.event_notifier); self.persister.spawn_async_persist_new_channel(monitor_name, monitor, notifier); @@ -315,7 +311,7 @@ where fn update_persisted_channel( &self, monitor_name: MonitorName, monitor_update: Option<&ChannelMonitorUpdate>, - monitor: &ChannelMonitor<::EcdsaSigner>, + monitor: &ChannelMonitor, ) -> ChannelMonitorUpdateStatus { let notifier = Arc::clone(&self.event_notifier); self.persister.spawn_async_update_channel(monitor_name, monitor_update, monitor, notifier); @@ -386,26 +382,17 @@ pub struct ChainMonitor< impl< K: Deref + MaybeSend + MaybeSync + 'static, S: FutureSpawner, - SP: Deref + MaybeSend + MaybeSync + 'static, + SP: SignerProvider + MaybeSend + MaybeSync + 'static, C: Deref, T: BroadcasterInterface + MaybeSend + MaybeSync + 'static, F: FeeEstimator + MaybeSend + MaybeSync + 'static, L: Logger + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, - > - ChainMonitor< - ::EcdsaSigner, - C, - T, - F, - L, - AsyncPersister, - ES, - > where + > ChainMonitor, ES> +where K::Target: KVStore + MaybeSync, - SP::Target: SignerProvider + Sized, C::Target: chain::Filter, - ::EcdsaSigner: MaybeSend + 'static, + SP::EcdsaSigner: MaybeSend + 'static, { /// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels. /// diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index bc912124410..1b3496c5eab 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -691,10 +691,13 @@ where /// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction // Note that updates to documentation on this struct should be copied to the synchronous version. -pub struct BumpTransactionEventHandler -where +pub struct BumpTransactionEventHandler< + B: BroadcasterInterface, + C: Deref, + SP: SignerProvider, + L: Logger, +> where C::Target: CoinSelectionSource, - SP::Target: SignerProvider, { broadcaster: B, utxo_source: C, @@ -703,11 +706,10 @@ where secp: Secp256k1, } -impl +impl BumpTransactionEventHandler where C::Target: CoinSelectionSource, - SP::Target: SignerProvider, { /// Returns a new instance capable of handling [`Event::BumpTransaction`] events. /// diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index e19ab3d7804..f4245cd5194 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -264,20 +264,22 @@ where /// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction // Note that updates to documentation on this struct should be copied to the synchronous version. -pub struct BumpTransactionEventHandlerSync -where +pub struct BumpTransactionEventHandlerSync< + B: BroadcasterInterface, + C: Deref, + SP: SignerProvider, + L: Logger, +> where C::Target: CoinSelectionSourceSync, - SP::Target: SignerProvider, { bump_transaction_event_handler: BumpTransactionEventHandler, SP, L>, } -impl +impl BumpTransactionEventHandlerSync where C::Target: CoinSelectionSourceSync, - SP::Target: SignerProvider, { /// Constructs a new instance of [`BumpTransactionEventHandlerSync`]. 
pub fn new(broadcaster: B, utxo_source: C, signer_provider: SP, logger: L) -> Self { diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index fc20708009c..042b388e1a1 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -93,7 +93,6 @@ use crate::prelude::*; use crate::sign::type_resolver::ChannelSignerType; #[cfg(any(test, fuzzing, debug_assertions))] use crate::sync::Mutex; -use core::ops::Deref; use core::time::Duration; use core::{cmp, fmt, mem}; @@ -1002,12 +1001,9 @@ impl<'a, L: Logger> Logger for WithChannelContext<'a, L> { } impl<'a, 'b, L: Logger> WithChannelContext<'a, L> { - pub(super) fn from( + pub(super) fn from( logger: &'a L, context: &'b ChannelContext, payment_hash: Option, - ) -> Self - where - S::Target: SignerProvider, - { + ) -> Self { WithChannelContext { logger, peer_id: Some(context.counterparty_node_id), @@ -1242,9 +1238,7 @@ struct HolderCommitmentPoint { impl HolderCommitmentPoint { #[rustfmt::skip] - pub fn new(signer: &ChannelSignerType, secp_ctx: &Secp256k1) -> Option - where SP::Target: SignerProvider - { + pub fn new(signer: &ChannelSignerType, secp_ctx: &Secp256k1) -> Option { Some(HolderCommitmentPoint { next_transaction_number: INITIAL_COMMITMENT_NUMBER, previous_revoked_point: None, @@ -1285,11 +1279,9 @@ impl HolderCommitmentPoint { /// If we are pending advancing the next commitment point, this method tries asking the signer /// again. - pub fn try_resolve_pending( + pub fn try_resolve_pending( &mut self, signer: &ChannelSignerType, secp_ctx: &Secp256k1, logger: &L, - ) where - SP::Target: SignerProvider, - { + ) { if !self.can_advance() { let pending_next_point = signer .as_ref() @@ -1321,12 +1313,9 @@ impl HolderCommitmentPoint { /// /// If our signer is ready to provide the next commitment point, the next call to `advance` will /// succeed. - pub fn advance( + pub fn advance( &mut self, signer: &ChannelSignerType, secp_ctx: &Secp256k1, logger: &L, - ) -> Result<(), ()> - where - SP::Target: SignerProvider, - { + ) -> Result<(), ()> { if let Some(next_point) = self.pending_next_point { *self = Self { next_transaction_number: self.next_transaction_number - 1, @@ -1442,19 +1431,13 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, { /// A payment channel with a counterparty throughout its life-cycle, encapsulating negotiation and /// funding phases. -pub(super) struct Channel -where - SP::Target: SignerProvider, -{ +pub(super) struct Channel { phase: ChannelPhase, } /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of /// its variants containing an appropriate channel struct. 
-enum ChannelPhase -where - SP::Target: SignerProvider, -{ +enum ChannelPhase { Undefined, UnfundedOutboundV1(OutboundV1Channel), UnfundedInboundV1(InboundV1Channel), @@ -1462,10 +1445,9 @@ where Funded(FundedChannel), } -impl Channel +impl Channel where - SP::Target: SignerProvider, - ::EcdsaSigner: ChannelSigner, + SP::EcdsaSigner: ChannelSigner, { pub fn context(&self) -> &ChannelContext { match &self.phase { @@ -2007,7 +1989,7 @@ where #[rustfmt::skip] pub fn funding_signed( &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(&mut FundedChannel, ChannelMonitor<::EcdsaSigner>), ChannelError> { + ) -> Result<(&mut FundedChannel, ChannelMonitor), ChannelError> { let phase = core::mem::replace(&mut self.phase, ChannelPhase::Undefined); let result = if let ChannelPhase::UnfundedOutboundV1(chan) = phase { let channel_state = chan.context.channel_state; @@ -2277,7 +2259,7 @@ where #[rustfmt::skip] pub fn commitment_signed( &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) -> Result<(Option::EcdsaSigner>>, Option), ChannelError> { + ) -> Result<(Option>, Option), ChannelError> { let phase = core::mem::replace(&mut self.phase, ChannelPhase::Undefined); match phase { ChannelPhase::UnfundedV2(chan) => { @@ -2396,40 +2378,36 @@ where } } -impl From> for Channel +impl From> for Channel where - SP::Target: SignerProvider, - ::EcdsaSigner: ChannelSigner, + SP::EcdsaSigner: ChannelSigner, { fn from(channel: OutboundV1Channel) -> Self { Channel { phase: ChannelPhase::UnfundedOutboundV1(channel) } } } -impl From> for Channel +impl From> for Channel where - SP::Target: SignerProvider, - ::EcdsaSigner: ChannelSigner, + SP::EcdsaSigner: ChannelSigner, { fn from(channel: InboundV1Channel) -> Self { Channel { phase: ChannelPhase::UnfundedInboundV1(channel) } } } -impl From> for Channel +impl From> for Channel where - SP::Target: SignerProvider, - ::EcdsaSigner: ChannelSigner, + SP::EcdsaSigner: ChannelSigner, { fn from(channel: PendingV2Channel) -> Self { Channel { phase: ChannelPhase::UnfundedV2(channel) } } } -impl From> for Channel +impl From> for Channel where - SP::Target: SignerProvider, - ::EcdsaSigner: ChannelSigner, + SP::EcdsaSigner: ChannelSigner, { fn from(channel: FundedChannel) -> Self { Channel { phase: ChannelPhase::Funded(channel) } @@ -2681,14 +2659,11 @@ impl FundingScope { } /// Constructs a `FundingScope` for splicing a channel. 
- fn for_splice( + fn for_splice( prev_funding: &Self, context: &ChannelContext, our_funding_contribution: SignedAmount, their_funding_contribution: SignedAmount, counterparty_funding_pubkey: PublicKey, our_new_holder_keys: ChannelPublicKeys, - ) -> Self - where - SP::Target: SignerProvider, - { + ) -> Self { debug_assert!(our_funding_contribution.abs() <= SignedAmount::MAX_MONEY); debug_assert!(their_funding_contribution.abs() <= SignedAmount::MAX_MONEY); @@ -2877,12 +2852,9 @@ impl FundingNegotiation { } impl PendingFunding { - fn check_get_splice_locked( + fn check_get_splice_locked( &mut self, context: &ChannelContext, confirmed_funding_index: usize, height: u32, - ) -> Option - where - SP::Target: SignerProvider, - { + ) -> Option { debug_assert!(confirmed_funding_index < self.negotiated_candidates.len()); let funding = &self.negotiated_candidates[confirmed_funding_index]; @@ -2988,10 +2960,7 @@ impl<'a> From<&'a Transaction> for ConfirmedTransaction<'a> { /// Contains everything about the channel including state, and various flags. #[cfg_attr(test, derive(Debug))] -pub(super) struct ChannelContext -where - SP::Target: SignerProvider, -{ +pub(super) struct ChannelContext { config: LegacyChannelConfig, // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were @@ -3280,10 +3249,7 @@ where /// A channel struct implementing this trait can receive an initial counterparty commitment /// transaction signature. -trait InitialRemoteCommitmentReceiver -where - SP::Target: SignerProvider, -{ +trait InitialRemoteCommitmentReceiver { fn context(&self) -> &ChannelContext; fn context_mut(&mut self) -> &mut ChannelContext; @@ -3321,7 +3287,7 @@ where fn initial_commitment_signed( &mut self, channel_id: ChannelId, counterparty_signature: Signature, holder_commitment_point: &mut HolderCommitmentPoint, best_block: BestBlock, signer_provider: &SP, logger: &L, - ) -> Result<(ChannelMonitor<::EcdsaSigner>, CommitmentTransaction), ChannelError> { + ) -> Result<(ChannelMonitor, CommitmentTransaction), ChannelError> { let initial_commitment_tx = match self.check_counterparty_commitment_signature(&counterparty_signature, holder_commitment_point, logger) { Ok(res) => res, Err(ChannelError::Close(e)) => { @@ -3409,10 +3375,7 @@ where fn is_v2_established(&self) -> bool; } -impl InitialRemoteCommitmentReceiver for OutboundV1Channel -where - SP::Target: SignerProvider, -{ +impl InitialRemoteCommitmentReceiver for OutboundV1Channel { fn context(&self) -> &ChannelContext { &self.context } @@ -3438,10 +3401,7 @@ where } } -impl InitialRemoteCommitmentReceiver for InboundV1Channel -where - SP::Target: SignerProvider, -{ +impl InitialRemoteCommitmentReceiver for InboundV1Channel { fn context(&self) -> &ChannelContext { &self.context } @@ -3467,10 +3427,7 @@ where } } -impl InitialRemoteCommitmentReceiver for FundedChannel -where - SP::Target: SignerProvider, -{ +impl InitialRemoteCommitmentReceiver for FundedChannel { fn context(&self) -> &ChannelContext { &self.context } @@ -3505,10 +3462,7 @@ where } } -impl ChannelContext -where - SP::Target: SignerProvider, -{ +impl ChannelContext { #[rustfmt::skip] fn new_for_inbound_channel<'a, ES: EntropySource, F: FeeEstimator, L: Logger>( fee_estimator: &'a LowerBoundedFeeEstimator, @@ -3528,10 +3482,7 @@ where msg_channel_reserve_satoshis: u64, msg_push_msat: u64, open_channel_fields: msgs::CommonOpenChannelFields, - ) -> Result<(FundingScope, ChannelContext), ChannelError> - where - SP::Target: SignerProvider, - { + ) -> Result<(FundingScope, 
ChannelContext), ChannelError> { let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id), None); let announce_for_forwarding = if (open_channel_fields.channel_flags & 1) == 1 { true } else { false }; @@ -3867,12 +3818,9 @@ where temporary_channel_id_fn: Option ChannelId>, holder_selected_channel_reserve_satoshis: u64, channel_keys_id: [u8; 32], - holder_signer: ::EcdsaSigner, + holder_signer: SP::EcdsaSigner, _logger: L, - ) -> Result<(FundingScope, ChannelContext), APIError> - where - SP::Target: SignerProvider, - { + ) -> Result<(FundingScope, ChannelContext), APIError> { // This will be updated with the counterparty contribution if this is a dual-funded channel let channel_value_satoshis = funding_satoshis; @@ -6363,10 +6311,7 @@ where fn get_initial_counterparty_commitment_signatures( &self, funding: &FundingScope, logger: &L, - ) -> Option<(Signature, Vec)> - where - SP::Target: SignerProvider, - { + ) -> Option<(Signature, Vec)> { let mut commitment_number = self.counterparty_next_commitment_transaction_number; let mut commitment_point = self.counterparty_next_commitment_point.unwrap(); @@ -6407,10 +6352,7 @@ where fn get_initial_commitment_signed_v2( &self, funding: &FundingScope, logger: &L, - ) -> Option - where - SP::Target: SignerProvider, - { + ) -> Option { let signatures = self.get_initial_counterparty_commitment_signatures(funding, logger); if let Some((signature, htlc_signatures)) = signatures { log_info!(logger, "Generated commitment_signed for peer",); @@ -6750,13 +6692,10 @@ pub(super) struct FundingNegotiationContext { impl FundingNegotiationContext { /// Prepare and start interactive transaction negotiation. /// If error occurs, it is caused by our side, not the counterparty. - fn into_interactive_tx_constructor( + fn into_interactive_tx_constructor( mut self, context: &ChannelContext, funding: &FundingScope, signer_provider: &SP, entropy_source: &ES, holder_node_id: PublicKey, - ) -> Result - where - SP::Target: SignerProvider, - { + ) -> Result { debug_assert_eq!( self.shared_funding_input.is_some(), funding.channel_transaction_parameters.splice_parent_funding_txid.is_some(), @@ -6860,10 +6799,7 @@ impl FundingNegotiationContext { // Holder designates channel data owned for the benefit of the user client. // Counterparty designates channel data owned by the another channel participant entity. #[cfg_attr(test, derive(Debug))] -pub(super) struct FundedChannel -where - SP::Target: SignerProvider, -{ +pub(super) struct FundedChannel { pub funding: FundingScope, pub context: ChannelContext, holder_commitment_point: HolderCommitmentPoint, @@ -7068,10 +7004,9 @@ pub struct SpliceFundingPromotion { pub discarded_funding: Vec, } -impl FundedChannel +impl FundedChannel where - SP::Target: SignerProvider, - ::EcdsaSigner: EcdsaChannelSigner, + SP::EcdsaSigner: EcdsaChannelSigner, { pub fn context(&self) -> &ChannelContext { &self.context @@ -7987,7 +7922,7 @@ where pub fn initial_commitment_signed_v2( &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, logger: &L, - ) -> Result::EcdsaSigner>, ChannelError> { + ) -> Result, ChannelError> { if let Some(signing_session) = self.context.interactive_tx_signing_session.as_ref() { if signing_session.has_received_tx_signatures() { let msg = "Received initial commitment_signed after peer's tx_signatures received!"; @@ -13411,10 +13346,7 @@ where } /// A not-yet-funded outbound (from holder) channel using V1 channel establishment. 
-pub(super) struct OutboundV1Channel -where - SP::Target: SignerProvider, -{ +pub(super) struct OutboundV1Channel { pub funding: FundingScope, pub context: ChannelContext, pub unfunded_context: UnfundedChannelContext, @@ -13424,10 +13356,7 @@ where pub signer_pending_open_channel: bool, } -impl OutboundV1Channel -where - SP::Target: SignerProvider, -{ +impl OutboundV1Channel { pub fn abandon_unfunded_chan(&mut self, closure_reason: ClosureReason) -> ShutdownResult { self.context.force_shutdown(&self.funding, closure_reason) } @@ -13663,7 +13592,7 @@ where mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L, ) -> Result< - (FundedChannel, ChannelMonitor<::EcdsaSigner>), + (FundedChannel, ChannelMonitor), (OutboundV1Channel, ChannelError), > { if !self.funding.is_outbound() { @@ -13767,10 +13696,7 @@ where } /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment. -pub(super) struct InboundV1Channel -where - SP::Target: SignerProvider, -{ +pub(super) struct InboundV1Channel { pub funding: FundingScope, pub context: ChannelContext, pub unfunded_context: UnfundedChannelContext, @@ -13808,10 +13734,7 @@ pub(super) fn channel_type_from_open_channel( Ok(channel_type.clone()) } -impl InboundV1Channel -where - SP::Target: SignerProvider, -{ +impl InboundV1Channel { /// Creates a new channel from a remote sides' request for one. /// Assumes chain_hash has already been checked and corresponds with what we expect! #[rustfmt::skip] @@ -13949,11 +13872,7 @@ where mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L, ) -> Result< - ( - FundedChannel, - Option, - ChannelMonitor<::EcdsaSigner>, - ), + (FundedChannel, Option, ChannelMonitor), (Self, ChannelError), > { if self.funding.is_outbound() { @@ -14058,10 +13977,7 @@ where } // A not-yet-funded channel using V2 channel establishment. -pub(super) struct PendingV2Channel -where - SP::Target: SignerProvider, -{ +pub(super) struct PendingV2Channel { pub funding: FundingScope, pub context: ChannelContext, pub unfunded_context: UnfundedChannelContext, @@ -14070,10 +13986,7 @@ where pub interactive_tx_constructor: Option, } -impl PendingV2Channel -where - SP::Target: SignerProvider, -{ +impl PendingV2Channel { #[allow(dead_code)] // TODO(dual_funding): Remove once creating V2 channels is enabled. #[rustfmt::skip] pub fn new_outbound( @@ -14480,10 +14393,7 @@ impl Readable for AnnouncementSigsState { } } -impl Writeable for FundedChannel -where - SP::Target: SignerProvider, -{ +impl Writeable for FundedChannel { fn write(&self, writer: &mut W) -> Result<(), io::Error> { // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been // called. @@ -14972,10 +14882,8 @@ where } } -impl<'a, 'b, 'c, ES: EntropySource, SP: Deref> +impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP, &'c ChannelTypeFeatures)> for FundedChannel -where - SP::Target: SignerProvider, { fn read( reader: &mut R, args: (&'a ES, &'b SP, &'c ChannelTypeFeatures), diff --git a/lightning/src/ln/channel_state.rs b/lightning/src/ln/channel_state.rs index 7c591ff2c3b..86e53ba3262 100644 --- a/lightning/src/ln/channel_state.rs +++ b/lightning/src/ln/channel_state.rs @@ -22,8 +22,6 @@ use crate::types::features::{ChannelTypeFeatures, InitFeatures}; use crate::types::payment::PaymentHash; use crate::util::config::ChannelConfig; -use core::ops::Deref; - /// Exposes the state of pending inbound HTLCs. 
/// /// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes @@ -524,13 +522,10 @@ impl ChannelDetails { } } - pub(super) fn from_channel( + pub(super) fn from_channel( channel: &Channel, best_block_height: u32, latest_features: InitFeatures, fee_estimator: &LowerBoundedFeeEstimator, - ) -> Self - where - SP::Target: SignerProvider, - { + ) -> Self { let context = channel.context(); let funding = channel.funding(); let balance = channel.get_available_balances(fee_estimator); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 7ee7b6de02f..cd61a595fc5 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1549,10 +1549,7 @@ impl Readable for Option { } /// State we hold per-peer. -pub(super) struct PeerState -where - SP::Target: SignerProvider, -{ +pub(super) struct PeerState { /// `channel_id` -> `Channel` /// /// Holds all channels where the peer is the counterparty. @@ -1627,10 +1624,7 @@ where peer_storage: Vec, } -impl PeerState -where - SP::Target: SignerProvider, -{ +impl PeerState { /// Indicates that a peer meets the criteria where we're ok to remove it from our storage. /// If true is passed for `require_disconnected`, the function will return false if we haven't /// disconnected from the node already, ie. `PeerState::is_connected` is set to `true`. @@ -1795,9 +1789,7 @@ pub trait AChannelManager { /// A type implementing [`EcdsaChannelSigner`]. type Signer: EcdsaChannelSigner + Sized; /// A type implementing [`SignerProvider`] for [`Self::Signer`]. - type SignerProvider: SignerProvider + ?Sized; - /// A type that may be dereferenced to [`Self::SignerProvider`]. - type SP: Deref; + type SP: SignerProvider; /// A type implementing [`FeeEstimator`]. type FeeEstimator: FeeEstimator; /// A type implementing [`Router`]. @@ -1827,23 +1819,21 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > AChannelManager for ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { type Watch = M::Target; type M = M; type Broadcaster = T; type EntropySource = ES; type NodeSigner = NS; - type Signer = ::EcdsaSigner; - type SignerProvider = SP::Target; + type Signer = SP::EcdsaSigner; type SP = SP; type FeeEstimator = F; type Router = R; @@ -2598,14 +2588,13 @@ pub struct ChannelManager< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { config: RwLock, chain_hash: ChainHash, @@ -3381,15 +3370,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { /// Constructs a new `ChannelManager` to hold several channels and route between them. /// @@ -4423,10 +4411,7 @@ where /// The same closure semantics as described in [`ChannelManager::locked_handle_force_close`] apply. 
fn locked_handle_unfunded_close( &self, err: ChannelError, chan: &mut Channel, - ) -> (bool, MsgHandleErrInternal) - where - SP::Target: SignerProvider, - { + ) -> (bool, MsgHandleErrInternal) { let chan_id = chan.context().channel_id(); convert_channel_err_internal(err, chan_id, |reason, msg| { let logger = WithChannelContext::from(&self.logger, chan.context(), None); @@ -13509,15 +13494,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { #[cfg(not(c_bindings))] create_offer_builder!(self, OfferBuilder<'_, DerivedMetadata, secp256k1::All>); @@ -14379,15 +14363,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > BaseMessageHandler for ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { fn provided_node_features(&self) -> NodeFeatures { provided_node_features(&self.config.read().unwrap()) @@ -14743,15 +14726,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > EventsProvider for ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { /// Processes events that must be periodically handled. /// @@ -14771,15 +14753,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > chain::Listen for ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { { @@ -14825,15 +14806,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > chain::Confirm for ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { #[rustfmt::skip] fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { @@ -14991,15 +14971,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { /// Calls a function which handles an on-chain event (blocks dis/connected, transactions /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by @@ -15346,15 +15325,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > ChannelMessageHandler for ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { fn handle_open_channel(&self, counterparty_node_id: PublicKey, message: &msgs::OpenChannel) { // Note that we never need to persist the updated ChannelManager for an inbound @@ -15914,15 +15892,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: 
NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > OffersMessageHandler for ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { #[rustfmt::skip] fn handle_message( @@ -16125,15 +16102,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > AsyncPaymentsMessageHandler for ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { fn handle_offer_paths_request( &self, message: OfferPathsRequest, context: AsyncPaymentsContext, @@ -16363,15 +16339,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > DNSResolverMessageHandler for ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { fn handle_dnssec_query( &self, _message: DNSSECQuery, _responder: Option, @@ -16424,15 +16399,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > NodeIdLookUp for ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { fn next_node_id(&self, short_channel_id: u64) -> Option { self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey) @@ -16933,15 +16907,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger, > Writeable for ChannelManager where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { #[rustfmt::skip] fn write(&self, writer: &mut W) -> Result<(), io::Error> { @@ -17293,14 +17266,13 @@ pub struct ChannelManagerReadArgs< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger + Clone, > where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { /// A cryptographically secure source of entropy. pub entropy_source: ES, @@ -17356,8 +17328,7 @@ pub struct ChannelManagerReadArgs< /// this struct. /// /// This is not exported to bindings users because we have no HashMap bindings - pub channel_monitors: - HashMap::EcdsaSigner>>, + pub channel_monitors: HashMap>, } impl< @@ -17366,15 +17337,14 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, L: Logger + Clone, > ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L> where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { /// Simple utility function to create a ChannelManagerReadArgs which creates the monitor /// HashMap for you. 
This is primarily useful for C bindings where it is not practical to @@ -17382,8 +17352,7 @@ where pub fn new( entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L, - config: UserConfig, - mut channel_monitors: Vec<&'a ChannelMonitor<::EcdsaSigner>>, + config: UserConfig, mut channel_monitors: Vec<&'a ChannelMonitor>, ) -> Self { Self { entropy_source, @@ -17445,7 +17414,7 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, @@ -17453,8 +17422,7 @@ impl< > ReadableArgs> for (BlockHash, Arc>) where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { fn read( reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, @@ -17471,7 +17439,7 @@ impl< T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, - SP: Deref, + SP: SignerProvider, F: FeeEstimator, R: Router, MR: MessageRouter, @@ -17479,8 +17447,7 @@ impl< > ReadableArgs> for (BlockHash, ChannelManager) where - M::Target: chain::Watch<::EcdsaSigner>, - SP::Target: SignerProvider, + M::Target: chain::Watch, { fn read( reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index fea20625f0b..f4f4c5cd4e2 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -1082,6 +1082,13 @@ pub type DynSignerProvider = pub type DynSignerProvider = dyn SignerProvider; /// A trait that can return signer instances for individual channels. +/// +/// Instantiations of this trait should generally be shared by reference across the lightning +/// node's components. E.g., it would be unsafe to provide a different [`SignerProvider`] to +/// [`ChannelManager`] vs [`MonitorUpdatingPersister`]. +/// +/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager +/// [`MonitorUpdatingPersister`]: crate::util::persist::MonitorUpdatingPersister pub trait SignerProvider { /// A type which implements [`EcdsaChannelSigner`] which will be returned by [`Self::derive_channel_signer`]. type EcdsaSigner: EcdsaChannelSigner; @@ -1125,6 +1132,28 @@ pub trait SignerProvider { fn get_shutdown_scriptpubkey(&self) -> Result; } +impl> SignerProvider for SP { + type EcdsaSigner = T::EcdsaSigner; + #[cfg(taproot)] + type TaprootSigner = T::TaprootSigner; + + fn generate_channel_keys_id(&self, inbound: bool, user_channel_id: u128) -> [u8; 32] { + self.deref().generate_channel_keys_id(inbound, user_channel_id) + } + + fn derive_channel_signer(&self, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner { + self.deref().derive_channel_signer(channel_keys_id) + } + + fn get_destination_script(&self, channel_keys_id: [u8; 32]) -> Result { + self.deref().get_destination_script(channel_keys_id) + } + + fn get_shutdown_scriptpubkey(&self) -> Result { + self.deref().get_shutdown_scriptpubkey() + } +} + /// A helper trait that describes an on-chain wallet capable of returning a (change) destination /// script. 
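The blanket impl added above is the piece that keeps existing call sites compiling: anything that derefs to a `SignerProvider` is now itself a `SignerProvider`, so one instance can be shared by reference across components exactly as the new doc comment advises. A toy model of the mechanics (illustrative names, not LDK's types):

    use std::ops::Deref;
    use std::sync::Arc;

    trait Provider {
        fn keys_id(&self) -> [u8; 32];
    }

    // Blanket impl: anything that derefs to a Provider is itself a Provider.
    impl<T: Provider + ?Sized, P: Deref<Target = T>> Provider for P {
        fn keys_id(&self) -> [u8; 32] {
            self.deref().keys_id()
        }
    }

    struct MyProvider;
    impl Provider for MyProvider {
        fn keys_id(&self) -> [u8; 32] {
            [0; 32]
        }
    }

    // Components now bound directly on the trait, no `Deref` indirection.
    fn component<P: Provider>(p: P) -> [u8; 32] {
        p.keys_id()
    }

    fn main() {
        let provider = Arc::new(MyProvider);
        // One instance shared across components, per the doc comment above.
        assert_eq!(component(&provider), component(Arc::clone(&provider)));
    }

This is also why the bound can tighten from `SP: Deref where SP::Target: SignerProvider` to a plain `SP: SignerProvider` without breaking users who pass `&provider` or `Arc<provider>`.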
/// diff --git a/lightning/src/sign/type_resolver.rs b/lightning/src/sign/type_resolver.rs index a84886cdee0..405e346dda6 100644 --- a/lightning/src/sign/type_resolver.rs +++ b/lightning/src/sign/type_resolver.rs @@ -1,32 +1,21 @@ use crate::sign::{ChannelSigner, SignerProvider}; -use core::ops::Deref; -pub(crate) enum ChannelSignerType -where - SP::Target: SignerProvider, -{ +pub(crate) enum ChannelSignerType { // in practice, this will only ever be an EcdsaChannelSigner (specifically, Writeable) - Ecdsa(::EcdsaSigner), + Ecdsa(SP::EcdsaSigner), #[cfg(taproot)] #[allow(unused)] - Taproot(::TaprootSigner), + Taproot(SP::TaprootSigner), } #[cfg(test)] -impl std::fmt::Debug for ChannelSignerType -where - SP: Deref, - SP::Target: SignerProvider, -{ +impl std::fmt::Debug for ChannelSignerType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ChannelSignerType").finish() } } -impl ChannelSignerType -where - SP::Target: SignerProvider, -{ +impl ChannelSignerType { pub(crate) fn as_ref(&self) -> &dyn ChannelSigner { match self { ChannelSignerType::Ecdsa(ecs) => ecs, @@ -37,7 +26,7 @@ where } #[allow(unused)] - pub(crate) fn as_ecdsa(&self) -> Option<&::EcdsaSigner> { + pub(crate) fn as_ecdsa(&self) -> Option<&SP::EcdsaSigner> { match self { ChannelSignerType::Ecdsa(ecs) => Some(ecs), _ => None, @@ -45,9 +34,7 @@ where } #[allow(unused)] - pub(crate) fn as_mut_ecdsa( - &mut self, - ) -> Option<&mut ::EcdsaSigner> { + pub(crate) fn as_mut_ecdsa(&mut self) -> Option<&mut SP::EcdsaSigner> { match self { ChannelSignerType::Ecdsa(ecs) => Some(ecs), _ => None, diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index a71e634699e..7742abf4400 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -445,12 +445,11 @@ impl Persist( +pub fn read_channel_monitors( kv_store: K, entropy_source: ES, signer_provider: SP, -) -> Result::EcdsaSigner>)>, io::Error> +) -> Result)>, io::Error> where K::Target: KVStoreSync, - SP::Target: SignerProvider + Sized, { let mut res = Vec::new(); @@ -458,13 +457,13 @@ where CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, )? { - match ::EcdsaSigner>)>>::read( + match )>>::read( &mut io::Cursor::new(kv_store.read( CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key, )?), - (&entropy_source, &*signer_provider), + (&entropy_source, &signer_provider), ) { Ok(Some((block_hash, channel_monitor))) => { let monitor_name = MonitorName::from_str(&stored_key)?; @@ -591,25 +590,23 @@ pub struct MonitorUpdatingPersister< K: Deref, L: Logger, ES: EntropySource, - SP: Deref, + SP: SignerProvider, BI: BroadcasterInterface, FE: FeeEstimator, >(MonitorUpdatingPersisterAsync, PanicingSpawner, L, ES, SP, BI, FE>) where - K::Target: KVStoreSync, - SP::Target: SignerProvider + Sized; + K::Target: KVStoreSync; impl< K: Deref, L: Logger, ES: EntropySource, - SP: Deref, + SP: SignerProvider, BI: BroadcasterInterface, FE: FeeEstimator, > MonitorUpdatingPersister where K::Target: KVStoreSync, - SP::Target: SignerProvider + Sized, { /// Constructs a new [`MonitorUpdatingPersister`]. /// @@ -653,10 +650,7 @@ where /// Reads all stored channel monitors, along with any stored updates for them. 
pub fn read_all_channel_monitors_with_updates( &self, - ) -> Result< - Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, - io::Error, - > { + ) -> Result)>, io::Error> { poll_sync_future(self.0.read_all_channel_monitors_with_updates()) } @@ -677,8 +671,7 @@ where /// function to accomplish this. Take care to limit the number of parallel readers. pub fn read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> - { + ) -> Result<(BlockHash, ChannelMonitor), io::Error> { poll_sync_future(self.0.read_channel_monitor_with_updates(monitor_key)) } @@ -698,13 +691,12 @@ impl< K: Deref, L: Logger, ES: EntropySource, - SP: Deref, + SP: SignerProvider, BI: BroadcasterInterface, FE: FeeEstimator, > Persist for MonitorUpdatingPersister where K::Target: KVStoreSync, - SP::Target: SignerProvider + Sized, { /// Persists a new channel. This means writing the entire monitor to the /// parametrized [`KVStoreSync`]. @@ -780,25 +772,23 @@ pub struct MonitorUpdatingPersisterAsync< S: FutureSpawner, L: Logger, ES: EntropySource, - SP: Deref, + SP: SignerProvider, BI: BroadcasterInterface, FE: FeeEstimator, >(Arc>) where - K::Target: KVStore, - SP::Target: SignerProvider + Sized; + K::Target: KVStore; struct MonitorUpdatingPersisterAsyncInner< K: Deref, S: FutureSpawner, L: Logger, ES: EntropySource, - SP: Deref, + SP: SignerProvider, BI: BroadcasterInterface, FE: FeeEstimator, > where K::Target: KVStore, - SP::Target: SignerProvider + Sized, { kv_store: K, async_completed_updates: Mutex>, @@ -816,13 +806,12 @@ impl< S: FutureSpawner, L: Logger, ES: EntropySource, - SP: Deref, + SP: SignerProvider, BI: BroadcasterInterface, FE: FeeEstimator, > MonitorUpdatingPersisterAsync where K::Target: KVStore, - SP::Target: SignerProvider + Sized, { /// Constructs a new [`MonitorUpdatingPersisterAsync`]. /// @@ -855,10 +844,7 @@ where /// deserialization as well. pub async fn read_all_channel_monitors_with_updates( &self, - ) -> Result< - Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, - io::Error, - > { + ) -> Result)>, io::Error> { let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; let monitor_list = self.0.kv_store.list(primary, secondary).await?; @@ -889,10 +875,7 @@ where /// `Arc` that can live for `'static` and be sent and accessed across threads. pub async fn read_all_channel_monitors_with_updates_parallel( self: &Arc, - ) -> Result< - Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, - io::Error, - > + ) -> Result)>, io::Error> where K: MaybeSend + MaybeSync + 'static, L: MaybeSend + MaybeSync + 'static, @@ -900,7 +883,7 @@ where SP: MaybeSend + MaybeSync + 'static, BI: MaybeSend + MaybeSync + 'static, FE: MaybeSend + MaybeSync + 'static, - ::EcdsaSigner: MaybeSend, + SP::EcdsaSigner: MaybeSend, { let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; @@ -942,8 +925,7 @@ where /// function to accomplish this. Take care to limit the number of parallel readers. 
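One way to honor that warning, sketched with plain threads rather than the `FutureSpawner` machinery this file actually uses (the key and read shapes below are stand-ins, not LDK API): process monitor keys in fixed-size batches so that at most a handful of reads are ever in flight.

    use std::io;
    use std::thread;

    const MAX_PARALLEL_READS: usize = 4;

    // Stand-in for reading and deserializing one monitor by key.
    fn read_one(key: &str) -> Result<Vec<u8>, io::Error> {
        Ok(key.as_bytes().to_vec())
    }

    // Read all keys, but never run more than MAX_PARALLEL_READS at once.
    fn read_all(keys: &[String]) -> Result<Vec<Vec<u8>>, io::Error> {
        let mut out = Vec::with_capacity(keys.len());
        for batch in keys.chunks(MAX_PARALLEL_READS) {
            let results: Vec<_> = thread::scope(|s| {
                let handles: Vec<_> =
                    batch.iter().map(|k| s.spawn(move || read_one(k))).collect();
                handles.into_iter().map(|h| h.join().unwrap()).collect()
            });
            for res in results {
                out.push(res?);
            }
        }
        Ok(out)
    }

    fn main() -> Result<(), io::Error> {
        let keys: Vec<String> = (0..10).map(|i| format!("monitor_{i}")).collect();
        assert_eq!(read_all(&keys)?.len(), 10);
        Ok(())
    }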
pub async fn read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> - { + ) -> Result<(BlockHash, ChannelMonitor), io::Error> { self.0.read_channel_monitor_with_updates(monitor_key).await } @@ -963,18 +945,16 @@ impl< S: FutureSpawner, L: Logger + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, - SP: Deref + MaybeSend + MaybeSync + 'static, + SP: SignerProvider + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, FE: FeeEstimator + MaybeSend + MaybeSync + 'static, > MonitorUpdatingPersisterAsync where K::Target: KVStore + MaybeSync, - SP::Target: SignerProvider + Sized, - ::EcdsaSigner: MaybeSend + 'static, + SP::EcdsaSigner: MaybeSend + 'static, { pub(crate) fn spawn_async_persist_new_channel( - &self, monitor_name: MonitorName, - monitor: &ChannelMonitor<::EcdsaSigner>, + &self, monitor_name: MonitorName, monitor: &ChannelMonitor, notifier: Arc, ) { let inner = Arc::clone(&self.0); @@ -1001,8 +981,7 @@ where pub(crate) fn spawn_async_update_channel( &self, monitor_name: MonitorName, update: Option<&ChannelMonitorUpdate>, - monitor: &ChannelMonitor<::EcdsaSigner>, - notifier: Arc, + monitor: &ChannelMonitor, notifier: Arc, ) { let inner = Arc::clone(&self.0); // Note that `update_persisted_channel` is a sync method which calls all the way through to @@ -1051,18 +1030,16 @@ impl< S: FutureSpawner, L: Logger, ES: EntropySource, - SP: Deref, + SP: SignerProvider, BI: BroadcasterInterface, FE: FeeEstimator, > MonitorUpdatingPersisterAsyncInner where K::Target: KVStore, - SP::Target: SignerProvider + Sized, { pub async fn read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> - { + ) -> Result<(BlockHash, ChannelMonitor), io::Error> { match self.maybe_read_channel_monitor_with_updates(monitor_key).await? { Some(res) => Ok(res), None => Err(io::Error::new( @@ -1079,10 +1056,7 @@ where async fn maybe_read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result< - Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, - io::Error, - > { + ) -> Result)>, io::Error> { let monitor_name = MonitorName::from_str(monitor_key)?; let read_future = pin!(self.maybe_read_monitor(&monitor_name, monitor_key)); let list_future = pin!(self @@ -1126,10 +1100,7 @@ where /// Read a channel monitor. 
async fn maybe_read_monitor( &self, monitor_name: &MonitorName, monitor_key: &str, - ) -> Result< - Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, - io::Error, - > { + ) -> Result)>, io::Error> { let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; let monitor_bytes = self.kv_store.read(primary, secondary, monitor_key).await?; @@ -1138,9 +1109,9 @@ where if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) { monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64); } - match ::EcdsaSigner>)>>::read( + match )>>::read( &mut monitor_cursor, - (&self.entropy_source, &*self.signer_provider), + (&self.entropy_source, &self.signer_provider), ) { Ok(None) => Ok(None), Ok(Some((blockhash, channel_monitor))) => { From 54a8858cfb94b29d4f3e5b62085fe23aa1bbbb85 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 15 Jan 2026 13:35:50 -0500 Subject: [PATCH 147/242] Drop Deref indirection for chain::Watch Reduces generics and verbosity across the codebase, should provide equivalent behavior. Co-Authored-By: Claude Opus 4.5 --- lightning/src/chain/mod.rs | 24 ++++++ lightning/src/ln/channelmanager.rs | 89 ++++++----------------- lightning/src/ln/functional_test_utils.rs | 4 +- 3 files changed, 50 insertions(+), 67 deletions(-) diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index b4cc6a302ae..9f5c9653f65 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -25,6 +25,8 @@ use crate::ln::types::ChannelId; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::HTLCDescriptor; +use core::ops::Deref; + #[allow(unused_imports)] use crate::prelude::*; @@ -346,6 +348,28 @@ pub trait Watch { ) -> Vec<(OutPoint, ChannelId, Vec, PublicKey)>; } +impl + ?Sized, W: Deref> + Watch for W +{ + fn watch_channel( + &self, channel_id: ChannelId, monitor: ChannelMonitor, + ) -> Result { + self.deref().watch_channel(channel_id, monitor) + } + + fn update_channel( + &self, channel_id: ChannelId, update: &ChannelMonitorUpdate, + ) -> ChannelMonitorUpdateStatus { + self.deref().update_channel(channel_id, update) + } + + fn release_pending_monitor_events( + &self, + ) -> Vec<(OutPoint, ChannelId, Vec, PublicKey)> { + self.deref().release_pending_monitor_events() + } +} + /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to /// channels. /// diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index cd61a595fc5..440ca347e89 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1777,9 +1777,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, M, T, F, L> /// languages. pub trait AChannelManager { /// A type implementing [`chain::Watch`]. - type Watch: chain::Watch + ?Sized; - /// A type that may be dereferenced to [`Self::Watch`]. - type M: Deref; + type Watch: chain::Watch; /// A type implementing [`BroadcasterInterface`]. type Broadcaster: BroadcasterInterface; /// A type implementing [`EntropySource`]. 
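The blanket impl introduced in `chain/mod.rs` above is what lets the trait bound collapse from `M: Deref where M::Target: chain::Watch` to a plain `M: chain::Watch`: any `Deref` to a `Watch` implementor is itself a `Watch`, and the `?Sized` bound keeps `dyn` watchers usable. A freestanding toy of the before/after shape (illustrative names, not LDK's):

    use std::ops::Deref;

    trait Watch {
        fn update(&self) -> bool;
    }

    // Mirrors the blanket impl above: derefs to a watcher are watchers.
    impl<T: Watch + ?Sized, W: Deref<Target = T>> Watch for W {
        fn update(&self) -> bool {
            self.deref().update()
        }
    }

    struct ChainMonitor;
    impl Watch for ChainMonitor {
        fn update(&self) -> bool {
            true
        }
    }

    // Before: struct Manager<M: Deref> where M::Target: Watch { .. }
    // After: the bound names the trait directly.
    struct Manager<M: Watch> {
        monitor: M,
    }

    fn main() {
        let mon = ChainMonitor;
        // Plain references still satisfy `M: Watch` via the blanket impl,
        // and `?Sized` keeps trait objects working too.
        let by_ref = Manager { monitor: &mon };
        let by_dyn = Manager { monitor: &mon as &dyn Watch };
        assert!(by_ref.monitor.update() && by_dyn.monitor.update());
    }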
@@ -1802,7 +1800,7 @@ pub trait AChannelManager { fn get_cm( &self, ) -> &ChannelManager< - Self::M, + Self::Watch, Self::Broadcaster, Self::EntropySource, Self::NodeSigner, @@ -1815,7 +1813,7 @@ pub trait AChannelManager { } impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -1825,11 +1823,8 @@ impl< MR: MessageRouter, L: Logger, > AChannelManager for ChannelManager -where - M::Target: chain::Watch, { - type Watch = M::Target; - type M = M; + type Watch = M; type Broadcaster = T; type EntropySource = ES; type NodeSigner = NS; @@ -2584,7 +2579,7 @@ where /// [`ChannelUpdate`]: msgs::ChannelUpdate /// [`read`]: ReadableArgs::read pub struct ChannelManager< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -2593,9 +2588,7 @@ pub struct ChannelManager< R: Router, MR: MessageRouter, L: Logger, -> where - M::Target: chain::Watch, -{ +> { config: RwLock, chain_hash: ChainHash, fee_estimator: LowerBoundedFeeEstimator, @@ -3366,7 +3359,7 @@ fn create_htlc_intercepted_event( } impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -3376,8 +3369,6 @@ impl< MR: MessageRouter, L: Logger, > ChannelManager -where - M::Target: chain::Watch, { /// Constructs a new `ChannelManager` to hold several channels and route between them. /// @@ -13490,7 +13481,7 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { } } impl< - M: Deref, + M: Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -13500,8 +13491,6 @@ impl< MR: MessageRouter, L: Logger, > ChannelManager -where - M::Target: chain::Watch, { #[cfg(not(c_bindings))] create_offer_builder!(self, OfferBuilder<'_, DerivedMetadata, secp256k1::All>); @@ -14359,7 +14348,7 @@ where } impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -14369,8 +14358,6 @@ impl< MR: MessageRouter, L: Logger, > BaseMessageHandler for ChannelManager -where - M::Target: chain::Watch, { fn provided_node_features(&self) -> NodeFeatures { provided_node_features(&self.config.read().unwrap()) @@ -14722,7 +14709,7 @@ where } impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -14732,8 +14719,6 @@ impl< MR: MessageRouter, L: Logger, > EventsProvider for ChannelManager -where - M::Target: chain::Watch, { /// Processes events that must be periodically handled. 
/// @@ -14749,7 +14734,7 @@ where } impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -14759,8 +14744,6 @@ impl< MR: MessageRouter, L: Logger, > chain::Listen for ChannelManager -where - M::Target: chain::Watch, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { { @@ -14802,7 +14785,7 @@ where } impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -14812,8 +14795,6 @@ impl< MR: MessageRouter, L: Logger, > chain::Confirm for ChannelManager -where - M::Target: chain::Watch, { #[rustfmt::skip] fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { @@ -14967,7 +14948,7 @@ pub(super) enum FundingConfirmedMessage { } impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -14977,8 +14958,6 @@ impl< MR: MessageRouter, L: Logger, > ChannelManager -where - M::Target: chain::Watch, { /// Calls a function which handles an on-chain event (blocks dis/connected, transactions /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by @@ -15321,7 +15300,7 @@ where } impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -15331,8 +15310,6 @@ impl< MR: MessageRouter, L: Logger, > ChannelMessageHandler for ChannelManager -where - M::Target: chain::Watch, { fn handle_open_channel(&self, counterparty_node_id: PublicKey, message: &msgs::OpenChannel) { // Note that we never need to persist the updated ChannelManager for an inbound @@ -15888,7 +15865,7 @@ where } impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -15898,8 +15875,6 @@ impl< MR: MessageRouter, L: Logger, > OffersMessageHandler for ChannelManager -where - M::Target: chain::Watch, { #[rustfmt::skip] fn handle_message( @@ -16098,7 +16073,7 @@ where } impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -16108,8 +16083,6 @@ impl< MR: MessageRouter, L: Logger, > AsyncPaymentsMessageHandler for ChannelManager -where - M::Target: chain::Watch, { fn handle_offer_paths_request( &self, message: OfferPathsRequest, context: AsyncPaymentsContext, @@ -16335,7 +16308,7 @@ where #[cfg(feature = "dnssec")] impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -16345,8 +16318,6 @@ impl< MR: MessageRouter, L: Logger, > DNSResolverMessageHandler for ChannelManager -where - M::Target: chain::Watch, { fn handle_dnssec_query( &self, _message: DNSSECQuery, _responder: Option, @@ -16395,7 +16366,7 @@ where } impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -16405,8 +16376,6 @@ impl< MR: MessageRouter, L: Logger, > NodeIdLookUp for ChannelManager -where - M::Target: chain::Watch, { fn next_node_id(&self, short_channel_id: u64) -> Option { self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey) @@ -16903,7 +16872,7 @@ impl_writeable_tlv_based!(PendingInboundPayment, { }); impl< - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -16913,8 +16882,6 @@ impl< MR: MessageRouter, L: Logger, > Writeable for ChannelManager -where - M::Target: chain::Watch, { #[rustfmt::skip] fn write(&self, writer: &mut W) -> Result<(), io::Error> { @@ -17262,7 +17229,7 @@ impl Readable for VecDeque<(Event, Option)> { /// 
[`ChainMonitor::load_existing_monitor`]: crate::chain::chainmonitor::ChainMonitor::load_existing_monitor pub struct ChannelManagerReadArgs< 'a, - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -17271,9 +17238,7 @@ pub struct ChannelManagerReadArgs< R: Router, MR: MessageRouter, L: Logger + Clone, -> where - M::Target: chain::Watch, -{ +> { /// A cryptographically secure source of entropy. pub entropy_source: ES, @@ -17333,7 +17298,7 @@ pub struct ChannelManagerReadArgs< impl< 'a, - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -17343,8 +17308,6 @@ impl< MR: MessageRouter, L: Logger + Clone, > ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L> -where - M::Target: chain::Watch, { /// Simple utility function to create a ChannelManagerReadArgs which creates the monitor /// HashMap for you. This is primarily useful for C bindings where it is not practical to @@ -17410,7 +17373,7 @@ fn dedup_decode_update_add_htlcs( // SipmleArcChannelManager type: impl< 'a, - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -17421,8 +17384,6 @@ impl< L: Logger + Clone, > ReadableArgs> for (BlockHash, Arc>) -where - M::Target: chain::Watch, { fn read( reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, @@ -17435,7 +17396,7 @@ where impl< 'a, - M: Deref, + M: chain::Watch, T: BroadcasterInterface, ES: EntropySource, NS: NodeSigner, @@ -17446,8 +17407,6 @@ impl< L: Logger + Clone, > ReadableArgs> for (BlockHash, ChannelManager) -where - M::Target: chain::Watch, { fn read( reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index bc75407d4a3..cea9ea45428 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -732,7 +732,7 @@ pub trait NodeHolder { fn node( &self, ) -> &ChannelManager< - ::M, + ::Watch, ::Broadcaster, ::EntropySource, ::NodeSigner, @@ -749,7 +749,7 @@ impl NodeHolder for &H { fn node( &self, ) -> &ChannelManager< - ::M, + ::Watch, ::Broadcaster, ::EntropySource, ::NodeSigner, From 75b29b8c4ec259cd7d9935574e2379587df6e9e5 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 15 Jan 2026 14:23:59 -0500 Subject: [PATCH 148/242] Drop Deref indirection for KVStore Reduces generics and verbosity across the codebase, should provide equivalent behavior. Unfortunately the same improvement can't be made for KVStoreSync and Persist, due to the blanket implementation where all KVStoreSync traits implement Persist resulting in conflicting implementations. 
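As a rough illustration of the coherence problem just described, with toy traits standing in for `KVStoreSync` and `Persist`: once one blanket impl covers every store, a second `Deref`-based blanket over the same trait cannot be added.

    trait KvLike {
        fn write(&self);
    }
    trait PersistLike {
        fn persist(&self);
    }

    // Mirrors the existing blanket: every sync store is also a persister.
    impl<T: KvLike> PersistLike for T {
        fn persist(&self) {
            self.write()
        }
    }

    // A second, Deref-based blanket would be rejected with E0119
    // ("conflicting implementations"): some `D` could implement both
    // `KvLike` and `Deref` to a `PersistLike`, and then both blankets
    // would claim it.
    //
    // impl<T: PersistLike + ?Sized, D: std::ops::Deref<Target = T>> PersistLike for D {
    //     fn persist(&self) { self.deref().persist() }
    // }

    struct Store;
    impl KvLike for Store {
        fn write(&self) {}
    }

    fn main() {
        Store.persist(); // resolved through the KvLike blanket
    }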
Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 4 +- lightning-liquidity/src/events/event_queue.rs | 36 +++------- lightning-liquidity/src/lsps0/client.rs | 17 ++--- lightning-liquidity/src/lsps1/client.rs | 17 ++--- lightning-liquidity/src/lsps1/service.rs | 9 +-- lightning-liquidity/src/lsps2/client.rs | 16 ++--- lightning-liquidity/src/lsps2/service.rs | 21 +++--- lightning-liquidity/src/lsps5/client.rs | 17 ++--- lightning-liquidity/src/lsps5/service.rs | 9 +-- lightning-liquidity/src/manager.rs | 29 +++----- lightning-liquidity/src/persist.rs | 23 ++---- lightning/src/chain/chainmonitor.rs | 16 ++--- lightning/src/util/persist.rs | 70 ++++++++++++------- lightning/src/util/sweep.rs | 15 ++-- 14 files changed, 110 insertions(+), 189 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 79a3b95463e..a16933f1cde 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -474,7 +474,6 @@ pub const NO_LIQUIDITY_MANAGER: Option< CM = &DynChannelManager, Filter = dyn chain::Filter + Send + Sync, C = &(dyn chain::Filter + Send + Sync), - KVStore = DummyKVStore, K = &DummyKVStore, TimeProvider = dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync, TP = &(dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync), @@ -955,7 +954,7 @@ pub async fn process_events_async< LM: Deref, D: Deref, O: Deref, - K: Deref, + K: KVStore, OS: Deref>, S: Deref, SC: for<'b> WriteableScore<'b>, @@ -978,7 +977,6 @@ where LM::Target: ALiquidityManager, O::Target: OutputSpender, D::Target: ChangeDestinationSource, - K::Target: KVStore, { let async_event_handler = |event| { let network_graph = gossip_sync.network_graph(); diff --git a/lightning-liquidity/src/events/event_queue.rs b/lightning-liquidity/src/events/event_queue.rs index 0d6e3a0ec54..9fb8a250a9a 100644 --- a/lightning-liquidity/src/events/event_queue.rs +++ b/lightning-liquidity/src/events/event_queue.rs @@ -12,7 +12,6 @@ use alloc::collections::VecDeque; use alloc::vec::Vec; use core::future::Future; -use core::ops::Deref; use core::task::{Poll, Waker}; use lightning::ln::msgs::DecodeError; @@ -25,10 +24,7 @@ use lightning::util::wakers::Notifier; /// The maximum queue size we allow before starting to drop events. pub const MAX_EVENT_QUEUE_SIZE: usize = 1000; -pub(crate) struct EventQueue -where - K::Target: KVStore, -{ +pub(crate) struct EventQueue { state: Mutex, waker: Mutex>, #[cfg(feature = "std")] @@ -37,10 +33,7 @@ where persist_notifier: Arc, } -impl EventQueue -where - K::Target: KVStore, -{ +impl EventQueue { pub fn new( queue: VecDeque, kv_store: K, persist_notifier: Arc, ) -> Self { @@ -164,14 +157,9 @@ struct QueueState { // A guard type that will notify about new events when dropped. 
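The shape of that guard, reduced to a freestanding sketch (illustrative types; the real guard additionally decides between waking an async waker, a `std` condition variable, and the persistence notifier): enqueue freely while the guard lives, wake waiters exactly once when it drops.

    use std::sync::{Condvar, Mutex};

    struct EventQueue {
        events: Mutex<Vec<String>>,
        condvar: Condvar,
    }

    // Buffered enqueues; waiters are woken once, when the guard drops.
    #[must_use]
    struct NotifierGuard<'a>(&'a EventQueue);

    impl NotifierGuard<'_> {
        fn enqueue(&self, event: String) {
            self.0.events.lock().unwrap().push(event);
            // Deliberately no wakeup here: one guard may batch many events.
        }
    }

    impl Drop for NotifierGuard<'_> {
        fn drop(&mut self) {
            // The lock is no longer held, so woken waiters can make progress.
            self.0.condvar.notify_all();
        }
    }

    fn main() {
        let queue = EventQueue { events: Mutex::new(Vec::new()), condvar: Condvar::new() };
        {
            let guard = NotifierGuard(&queue);
            guard.enqueue("htlc_intercepted".into());
            guard.enqueue("payment_forwarded".into());
        } // <- single notification fires here
        assert_eq!(queue.events.lock().unwrap().len(), 2);
    }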
#[must_use] -pub(crate) struct EventQueueNotifierGuard<'a, K: Deref + Clone>(&'a EventQueue) -where - K::Target: KVStore; - -impl<'a, K: Deref + Clone> EventQueueNotifierGuard<'a, K> -where - K::Target: KVStore, -{ +pub(crate) struct EventQueueNotifierGuard<'a, K: KVStore + Clone>(&'a EventQueue); + +impl<'a, K: KVStore + Clone> EventQueueNotifierGuard<'a, K> { pub fn enqueue>(&self, event: E) { let mut state_lock = self.0.state.lock().unwrap(); if state_lock.queue.len() < MAX_EVENT_QUEUE_SIZE { @@ -183,10 +171,7 @@ where } } -impl<'a, K: Deref + Clone> Drop for EventQueueNotifierGuard<'a, K> -where - K::Target: KVStore, -{ +impl<'a, K: KVStore + Clone> Drop for EventQueueNotifierGuard<'a, K> { fn drop(&mut self) { let (should_notify, should_persist_notify) = { let state_lock = self.0.state.lock().unwrap(); @@ -208,14 +193,9 @@ where } } -struct EventFuture<'a, K: Deref + Clone>(&'a EventQueue) -where - K::Target: KVStore; +struct EventFuture<'a, K: KVStore + Clone>(&'a EventQueue); -impl Future for EventFuture<'_, K> -where - K::Target: KVStore, -{ +impl Future for EventFuture<'_, K> { type Output = LiquidityEvent; fn poll( diff --git a/lightning-liquidity/src/lsps0/client.rs b/lightning-liquidity/src/lsps0/client.rs index 776e9d3c9a5..298cb304b51 100644 --- a/lightning-liquidity/src/lsps0/client.rs +++ b/lightning-liquidity/src/lsps0/client.rs @@ -22,22 +22,14 @@ use lightning::util::persist::KVStore; use bitcoin::secp256k1::PublicKey; -use core::ops::Deref; - /// A message handler capable of sending and handling bLIP-50 / LSPS0 messages. -pub struct LSPS0ClientHandler -where - K::Target: KVStore, -{ +pub struct LSPS0ClientHandler { entropy_source: ES, pending_messages: Arc, pending_events: Arc>, } -impl LSPS0ClientHandler -where - K::Target: KVStore, -{ +impl LSPS0ClientHandler { /// Returns a new instance of [`LSPS0ClientHandler`]. pub(crate) fn new( entropy_source: ES, pending_messages: Arc, pending_events: Arc>, @@ -87,9 +79,8 @@ where } } -impl LSPSProtocolMessageHandler for LSPS0ClientHandler -where - K::Target: KVStore, +impl LSPSProtocolMessageHandler + for LSPS0ClientHandler { type ProtocolMessage = LSPS0Message; const PROTOCOL_NUMBER: Option = None; diff --git a/lightning-liquidity/src/lsps1/client.rs b/lightning-liquidity/src/lsps1/client.rs index 1e5b2e3bef4..2cbfb04c86a 100644 --- a/lightning-liquidity/src/lsps1/client.rs +++ b/lightning-liquidity/src/lsps1/client.rs @@ -30,8 +30,6 @@ use lightning::util::persist::KVStore; use bitcoin::secp256k1::PublicKey; use bitcoin::Address; -use core::ops::Deref; - /// Client-side configuration options for bLIP-51 / LSPS1 channel requests. #[derive(Clone, Debug)] pub struct LSPS1ClientConfig { @@ -47,10 +45,7 @@ struct PeerState { } /// The main object allowing to send and receive bLIP-51 / LSPS1 messages. -pub struct LSPS1ClientHandler -where - K::Target: KVStore, -{ +pub struct LSPS1ClientHandler { entropy_source: ES, pending_messages: Arc, pending_events: Arc>, @@ -58,10 +53,7 @@ where config: LSPS1ClientConfig, } -impl LSPS1ClientHandler -where - K::Target: KVStore, -{ +impl LSPS1ClientHandler { /// Constructs an `LSPS1ClientHandler`. 
pub(crate) fn new( entropy_source: ES, pending_messages: Arc, @@ -430,9 +422,8 @@ where } } -impl LSPSProtocolMessageHandler for LSPS1ClientHandler -where - K::Target: KVStore, +impl LSPSProtocolMessageHandler + for LSPS1ClientHandler { type ProtocolMessage = LSPS1Message; const PROTOCOL_NUMBER: Option = Some(1); diff --git a/lightning-liquidity/src/lsps1/service.rs b/lightning-liquidity/src/lsps1/service.rs index 76a9a437b0b..154c6f5d527 100644 --- a/lightning-liquidity/src/lsps1/service.rs +++ b/lightning-liquidity/src/lsps1/service.rs @@ -132,11 +132,10 @@ impl PeerState { } /// The main object allowing to send and receive bLIP-51 / LSPS1 messages. -pub struct LSPS1ServiceHandler +pub struct LSPS1ServiceHandler where CM::Target: AChannelManager, C::Target: Filter, - K::Target: KVStore, { entropy_source: ES, channel_manager: CM, @@ -147,12 +146,11 @@ where config: LSPS1ServiceConfig, } -impl +impl LSPS1ServiceHandler where CM::Target: AChannelManager, C::Target: Filter, - K::Target: KVStore, { /// Constructs a `LSPS1ServiceHandler`. pub(crate) fn new( @@ -419,12 +417,11 @@ where } } -impl LSPSProtocolMessageHandler +impl LSPSProtocolMessageHandler for LSPS1ServiceHandler where CM::Target: AChannelManager, C::Target: Filter, - K::Target: KVStore, { type ProtocolMessage = LSPS1Message; const PROTOCOL_NUMBER: Option = Some(1); diff --git a/lightning-liquidity/src/lsps2/client.rs b/lightning-liquidity/src/lsps2/client.rs index 2e9fca2d444..21b57162010 100644 --- a/lightning-liquidity/src/lsps2/client.rs +++ b/lightning-liquidity/src/lsps2/client.rs @@ -13,7 +13,6 @@ use alloc::string::{String, ToString}; use lightning::util::persist::KVStore; use core::default::Default; -use core::ops::Deref; use crate::events::EventQueue; use crate::lsps0::ser::{LSPSProtocolMessageHandler, LSPSRequestId, LSPSResponseError}; @@ -68,10 +67,7 @@ impl PeerState { /// opened. Please refer to the [`bLIP-52 / LSPS2 specification`] for more information. /// /// [`bLIP-52 / LSPS2 specification`]: https://github.com/lightning/blips/blob/master/blip-0052.md#trust-models -pub struct LSPS2ClientHandler -where - K::Target: KVStore, -{ +pub struct LSPS2ClientHandler { entropy_source: ES, pending_messages: Arc, pending_events: Arc>, @@ -79,10 +75,7 @@ where config: LSPS2ClientConfig, } -impl LSPS2ClientHandler -where - K::Target: KVStore, -{ +impl LSPS2ClientHandler { /// Constructs an `LSPS2ClientHandler`. pub(crate) fn new( entropy_source: ES, pending_messages: Arc, @@ -373,9 +366,8 @@ where } } -impl LSPSProtocolMessageHandler for LSPS2ClientHandler -where - K::Target: KVStore, +impl LSPSProtocolMessageHandler + for LSPS2ClientHandler { type ProtocolMessage = LSPS2Message; const PROTOCOL_NUMBER: Option = Some(2); diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index 756e8b32bc8..00f68aff696 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -702,10 +702,9 @@ macro_rules! get_or_insert_peer_state_entry { } /// The main object allowing to send and receive bLIP-52 / LSPS2 messages. -pub struct LSPS2ServiceHandler +pub struct LSPS2ServiceHandler where CM::Target: AChannelManager, - K::Target: KVStore, { channel_manager: CM, kv_store: K, @@ -720,10 +719,9 @@ where persistence_in_flight: AtomicUsize, } -impl LSPS2ServiceHandler +impl LSPS2ServiceHandler where CM::Target: AChannelManager, - K::Target: KVStore, { /// Constructs a `LSPS2ServiceHandler`. 
pub(crate) fn new( @@ -2042,11 +2040,10 @@ where } } -impl LSPSProtocolMessageHandler +impl LSPSProtocolMessageHandler for LSPS2ServiceHandler where CM::Target: AChannelManager, - K::Target: KVStore, { type ProtocolMessage = LSPS2Message; const PROTOCOL_NUMBER: Option = Some(2); @@ -2116,19 +2113,21 @@ fn calculate_amount_to_forward_per_htlc( /// A synchroneous wrapper around [`LSPS2ServiceHandler`] to be used in contexts where async is not /// available. -pub struct LSPS2ServiceHandlerSync<'a, CM: Deref, K: Deref + Clone, T: BroadcasterInterface + Clone> -where +pub struct LSPS2ServiceHandlerSync< + 'a, + CM: Deref, + K: KVStore + Clone, + T: BroadcasterInterface + Clone, +> where CM::Target: AChannelManager, - K::Target: KVStore, { inner: &'a LSPS2ServiceHandler, } -impl<'a, CM: Deref, K: Deref + Clone, T: BroadcasterInterface + Clone> +impl<'a, CM: Deref, K: KVStore + Clone, T: BroadcasterInterface + Clone> LSPS2ServiceHandlerSync<'a, CM, K, T> where CM::Target: AChannelManager, - K::Target: KVStore, { pub(crate) fn from_inner(inner: &'a LSPS2ServiceHandler) -> Self { Self { inner } diff --git a/lightning-liquidity/src/lsps5/client.rs b/lightning-liquidity/src/lsps5/client.rs index df10522077e..26c0b180421 100644 --- a/lightning-liquidity/src/lsps5/client.rs +++ b/lightning-liquidity/src/lsps5/client.rs @@ -35,8 +35,6 @@ use alloc::collections::VecDeque; use alloc::string::String; use lightning::util::persist::KVStore; -use core::ops::Deref; - impl PartialEq for (LSPSRequestId, (LSPS5AppName, LSPS5WebhookUrl)) { fn eq(&self, other: &LSPSRequestId) -> bool { &self.0 == other @@ -125,10 +123,7 @@ impl PeerState { /// [`lsps5.list_webhooks`]: super::msgs::LSPS5Request::ListWebhooks /// [`lsps5.remove_webhook`]: super::msgs::LSPS5Request::RemoveWebhook /// [`LSPS5Validator`]: super::validator::LSPS5Validator -pub struct LSPS5ClientHandler -where - K::Target: KVStore, -{ +pub struct LSPS5ClientHandler { pending_messages: Arc, pending_events: Arc>, entropy_source: ES, @@ -136,10 +131,7 @@ where _config: LSPS5ClientConfig, } -impl LSPS5ClientHandler -where - K::Target: KVStore, -{ +impl LSPS5ClientHandler { /// Constructs an `LSPS5ClientHandler`. pub(crate) fn new( entropy_source: ES, pending_messages: Arc, @@ -424,9 +416,8 @@ where } } -impl LSPSProtocolMessageHandler for LSPS5ClientHandler -where - K::Target: KVStore, +impl LSPSProtocolMessageHandler + for LSPS5ClientHandler { type ProtocolMessage = LSPS5Message; const PROTOCOL_NUMBER: Option = Some(5); diff --git a/lightning-liquidity/src/lsps5/service.rs b/lightning-liquidity/src/lsps5/service.rs index 489d543ca90..4678d38dc9a 100644 --- a/lightning-liquidity/src/lsps5/service.rs +++ b/lightning-liquidity/src/lsps5/service.rs @@ -125,10 +125,9 @@ impl Default for LSPS5ServiceConfig { /// [`LSPS5ServiceEvent::SendWebhookNotification`]: super::event::LSPS5ServiceEvent::SendWebhookNotification /// [`app_name`]: super::msgs::LSPS5AppName /// [`lsps5.webhook_registered`]: super::msgs::WebhookNotificationMethod::LSPS5WebhookRegistered -pub struct LSPS5ServiceHandler +pub struct LSPS5ServiceHandler where CM::Target: AChannelManager, - K::Target: KVStore, TP::Target: TimeProvider, { config: LSPS5ServiceConfig, @@ -143,10 +142,9 @@ where persistence_in_flight: AtomicUsize, } -impl LSPS5ServiceHandler +impl LSPS5ServiceHandler where CM::Target: AChannelManager, - K::Target: KVStore, TP::Target: TimeProvider, { /// Constructs a `LSPS5ServiceHandler` using the given time provider. 
@@ -692,11 +690,10 @@ where } } -impl LSPSProtocolMessageHandler +impl LSPSProtocolMessageHandler for LSPS5ServiceHandler where CM::Target: AChannelManager, - K::Target: KVStore, TP::Target: TimeProvider, { type ProtocolMessage = LSPS5Message; diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index 0e897dd7abe..c3e9fa48cca 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -116,9 +116,7 @@ pub trait ALiquidityManager { /// A type that may be dereferenced to [`Self::Filter`]. type C: Deref + Clone; /// A type implementing [`KVStore`]. - type KVStore: KVStore + ?Sized; - /// A type that may be dereferenced to [`Self::KVStore`]. - type K: Deref + Clone; + type K: KVStore + Clone; /// A type implementing [`TimeProvider`]. type TimeProvider: TimeProvider + ?Sized; /// A type that may be dereferenced to [`Self::TimeProvider`]. @@ -144,14 +142,13 @@ impl< NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, - K: Deref + Clone, + K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > ALiquidityManager for LiquidityManager where CM::Target: AChannelManager, C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, { type EntropySource = ES; @@ -160,7 +157,6 @@ where type CM = CM; type Filter = C::Target; type C = C; - type KVStore = K::Target; type K = K; type TimeProvider = TP::Target; type TP = TP; @@ -294,13 +290,12 @@ pub struct LiquidityManager< NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, - K: Deref + Clone, + K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > where CM::Target: AChannelManager, C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, { pending_messages: Arc, @@ -330,13 +325,12 @@ impl< NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, - K: Deref + Clone, + K: KVStore + Clone, T: BroadcasterInterface + Clone, > LiquidityManager where CM::Target: AChannelManager, C::Target: Filter, - K::Target: KVStore, { /// Constructor for the [`LiquidityManager`] using the default system clock /// @@ -368,14 +362,13 @@ impl< NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, - K: Deref + Clone, + K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > LiquidityManager where CM::Target: AChannelManager, C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, { /// Constructor for the [`LiquidityManager`] with a custom time provider. 
@@ -792,14 +785,13 @@ impl< NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, - K: Deref + Clone, + K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > CustomMessageReader for LiquidityManager where CM::Target: AChannelManager, C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, { type CustomMessage = RawLSPSMessage; @@ -821,14 +813,13 @@ impl< NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, - K: Deref + Clone, + K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > CustomMessageHandler for LiquidityManager where CM::Target: AChannelManager, C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, { fn handle_custom_message( @@ -952,14 +943,13 @@ impl< NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, - K: Deref + Clone, + K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > Listen for LiquidityManager where CM::Target: AChannelManager, C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, { fn filtered_block_connected( @@ -995,14 +985,13 @@ impl< NS: NodeSigner + Clone, CM: Deref + Clone, C: Deref + Clone, - K: Deref + Clone, + K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > Confirm for LiquidityManager where CM::Target: AChannelManager, C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, { fn transactions_confirmed( diff --git a/lightning-liquidity/src/persist.rs b/lightning-liquidity/src/persist.rs index ec0d5a6ddd3..d0199440514 100644 --- a/lightning-liquidity/src/persist.rs +++ b/lightning-liquidity/src/persist.rs @@ -22,8 +22,6 @@ use lightning::util::ser::Readable; use bitcoin::secp256k1::PublicKey; use alloc::collections::VecDeque; - -use core::ops::Deref; use core::str::FromStr; /// The primary namespace under which the [`LiquidityManager`] will be persisted. @@ -51,12 +49,9 @@ pub const LSPS2_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE: &str = "lsps2_service"; /// [`LSPS5ServiceHandler`]: crate::lsps5::service::LSPS5ServiceHandler pub const LSPS5_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE: &str = "lsps5_service"; -pub(crate) async fn read_event_queue( +pub(crate) async fn read_event_queue( kv_store: K, -) -> Result>, lightning::io::Error> -where - K::Target: KVStore, -{ +) -> Result>, lightning::io::Error> { let read_fut = kv_store.read( LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, LIQUIDITY_MANAGER_EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, @@ -85,12 +80,9 @@ where Ok(Some(queue.0)) } -pub(crate) async fn read_lsps2_service_peer_states( +pub(crate) async fn read_lsps2_service_peer_states( kv_store: K, -) -> Result>, lightning::io::Error> -where - K::Target: KVStore, -{ +) -> Result>, lightning::io::Error> { let mut res = new_hash_map(); for stored_key in kv_store @@ -129,12 +121,9 @@ where Ok(res) } -pub(crate) async fn read_lsps5_service_peer_states( +pub(crate) async fn read_lsps5_service_peer_states( kv_store: K, -) -> Result, lightning::io::Error> -where - K::Target: KVStore, -{ +) -> Result, lightning::io::Error> { let mut res = new_hash_map(); for stored_key in kv_store diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 8835e9c8185..536a1f942b0 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -256,22 +256,20 @@ impl Deref for LockedChannelMonitor<'_, Chann /// /// This is not exported to bindings users as async is not supported outside of Rust. 
pub struct AsyncPersister< - K: Deref + MaybeSend + MaybeSync + 'static, + K: KVStore + MaybeSend + MaybeSync + 'static, S: FutureSpawner, L: Logger + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, SP: SignerProvider + MaybeSend + MaybeSync + 'static, BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, FE: FeeEstimator + MaybeSend + MaybeSync + 'static, -> where - K::Target: KVStore + MaybeSync, -{ +> { persister: MonitorUpdatingPersisterAsync, event_notifier: Arc, } impl< - K: Deref + MaybeSend + MaybeSync + 'static, + K: KVStore + MaybeSend + MaybeSync + 'static, S: FutureSpawner, L: Logger + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, @@ -279,8 +277,6 @@ impl< BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, FE: FeeEstimator + MaybeSend + MaybeSync + 'static, > Deref for AsyncPersister -where - K::Target: KVStore + MaybeSync, { type Target = Self; fn deref(&self) -> &Self { @@ -289,7 +285,7 @@ where } impl< - K: Deref + MaybeSend + MaybeSync + 'static, + K: KVStore + MaybeSend + MaybeSync + 'static, S: FutureSpawner, L: Logger + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, @@ -298,7 +294,6 @@ impl< FE: FeeEstimator + MaybeSend + MaybeSync + 'static, > Persist for AsyncPersister where - K::Target: KVStore + MaybeSync, SP::EcdsaSigner: MaybeSend + 'static, { fn persist_new_channel( @@ -380,7 +375,7 @@ pub struct ChainMonitor< } impl< - K: Deref + MaybeSend + MaybeSync + 'static, + K: KVStore + MaybeSend + MaybeSync + 'static, S: FutureSpawner, SP: SignerProvider + MaybeSend + MaybeSync + 'static, C: Deref, @@ -390,7 +385,6 @@ impl< ES: EntropySource + MaybeSend + MaybeSync + 'static, > ChainMonitor, ES> where - K::Target: KVStore + MaybeSync, C::Target: chain::Filter, SP::EcdsaSigner: MaybeSend + 'static, { diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 7742abf4400..440d1d31331 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -202,16 +202,6 @@ pub struct KVStoreSyncWrapper(pub K) where K::Target: KVStoreSync; -impl Deref for KVStoreSyncWrapper -where - K::Target: KVStoreSync, -{ - type Target = Self; - fn deref(&self) -> &Self::Target { - self - } -} - /// This is not exported to bindings users as async is only supported in Rust. impl KVStore for KVStoreSyncWrapper where @@ -268,6 +258,10 @@ where /// namespace, i.e., conflicts between keys and equally named /// primary namespaces/secondary namespaces must be avoided. /// +/// Instantiations of this trait should generally be shared by reference across the lightning +/// node's components. E.g., it would be unsafe to provide a different [`KVStore`] to +/// [`OutputSweeper`] vs [`MonitorUpdatingPersister`]. +/// /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to /// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`. @@ -275,6 +269,9 @@ where /// For a synchronous version of this trait, see [`KVStoreSync`]. /// /// This is not exported to bindings users as async is only supported in Rust. +/// +/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper +/// [`MonitorUpdatingPersister`]: crate::util::persist::MonitorUpdatingPersister // Note that updates to documentation on this trait should be copied to the synchronous version. 
pub trait KVStore { /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and @@ -347,6 +344,36 @@ pub trait KVStore { ) -> impl Future, io::Error>> + 'static + MaybeSend; } +impl KVStore for K +where + K: Deref, + K::Target: KVStore, +{ + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> impl Future, io::Error>> + 'static + MaybeSend { + self.deref().read(primary_namespace, secondary_namespace, key) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> impl Future> + 'static + MaybeSend { + self.deref().write(primary_namespace, secondary_namespace, key, buf) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> impl Future> + 'static + MaybeSend { + self.deref().remove(primary_namespace, secondary_namespace, key, lazy) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> impl Future, io::Error>> + 'static + MaybeSend { + self.deref().list(primary_namespace, secondary_namespace) + } +} + /// Provides additional interface methods that are required for [`KVStore`]-to-[`KVStore`] /// data migration. pub trait MigratableKVStore: KVStoreSync { @@ -768,28 +795,24 @@ where /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor /// [`ChainMonitor::new_async_beta`]: crate::chain::chainmonitor::ChainMonitor::new_async_beta pub struct MonitorUpdatingPersisterAsync< - K: Deref, + K: KVStore, S: FutureSpawner, L: Logger, ES: EntropySource, SP: SignerProvider, BI: BroadcasterInterface, FE: FeeEstimator, ->(Arc>) -where - K::Target: KVStore; +>(Arc>); struct MonitorUpdatingPersisterAsyncInner< - K: Deref, + K: KVStore, S: FutureSpawner, L: Logger, ES: EntropySource, SP: SignerProvider, BI: BroadcasterInterface, FE: FeeEstimator, -> where - K::Target: KVStore, -{ +> { kv_store: K, async_completed_updates: Mutex>, future_spawner: S, @@ -802,7 +825,7 @@ struct MonitorUpdatingPersisterAsyncInner< } impl< - K: Deref, + K: KVStore, S: FutureSpawner, L: Logger, ES: EntropySource, @@ -810,8 +833,6 @@ impl< BI: BroadcasterInterface, FE: FeeEstimator, > MonitorUpdatingPersisterAsync -where - K::Target: KVStore, { /// Constructs a new [`MonitorUpdatingPersisterAsync`]. 
/// @@ -941,7 +962,7 @@ where } impl< - K: Deref + MaybeSend + MaybeSync + 'static, + K: KVStore + MaybeSend + MaybeSync + 'static, S: FutureSpawner, L: Logger + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, @@ -950,7 +971,6 @@ impl< FE: FeeEstimator + MaybeSend + MaybeSync + 'static, > MonitorUpdatingPersisterAsync where - K::Target: KVStore + MaybeSync, SP::EcdsaSigner: MaybeSend + 'static, { pub(crate) fn spawn_async_persist_new_channel( @@ -1026,7 +1046,7 @@ trait MaybeSendableFuture: Future> + MaybeSend {} impl> + MaybeSend> MaybeSendableFuture for F {} impl< - K: Deref, + K: KVStore, S: FutureSpawner, L: Logger, ES: EntropySource, @@ -1034,8 +1054,6 @@ impl< BI: BroadcasterInterface, FE: FeeEstimator, > MonitorUpdatingPersisterAsyncInner -where - K::Target: KVStore, { pub async fn read_channel_monitor_with_updates( &self, monitor_key: &str, diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index f7cf2771d3c..2d22244cb17 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -342,13 +342,12 @@ pub struct OutputSweeper< D: Deref, E: FeeEstimator, F: Deref, - K: Deref, + K: KVStore, L: Logger, O: Deref, > where D::Target: ChangeDestinationSource, F::Target: Filter, - K::Target: KVStore, O::Target: OutputSpender, { sweeper_state: Mutex, @@ -367,14 +366,13 @@ impl< D: Deref, E: FeeEstimator, F: Deref, - K: Deref, + K: KVStore, L: Logger, O: Deref, > OutputSweeper where D::Target: ChangeDestinationSource, F::Target: Filter, - K::Target: KVStore, O::Target: OutputSpender, { /// Constructs a new [`OutputSweeper`]. @@ -723,14 +721,13 @@ impl< D: Deref, E: FeeEstimator, F: Deref, - K: Deref, + K: KVStore, L: Logger, O: Deref, > Listen for OutputSweeper where D::Target: ChangeDestinationSource, F::Target: Filter + Sync + Send, - K::Target: KVStore, O::Target: OutputSpender, { fn filtered_block_connected( @@ -768,14 +765,13 @@ impl< D: Deref, E: FeeEstimator, F: Deref, - K: Deref, + K: KVStore, L: Logger, O: Deref, > Confirm for OutputSweeper where D::Target: ChangeDestinationSource, F::Target: Filter + Sync + Send, - K::Target: KVStore, O::Target: OutputSpender, { fn transactions_confirmed( @@ -869,14 +865,13 @@ impl< D: Deref, E: FeeEstimator, F: Deref, - K: Deref, + K: KVStore, L: Logger, O: Deref, > ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeper) where D::Target: ChangeDestinationSource, F::Target: Filter + Sync + Send, - K::Target: KVStore, O::Target: OutputSpender, { #[inline] From 9e81ce9051d1ddb17e4ce3c5a1fbcb8d433bd846 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 15 Jan 2026 15:46:02 -0500 Subject: [PATCH 149/242] Drop Deref indirection for NodeIdLookup Reduces generics and verbosity across the codebase, should provide equivalent behavior. 
Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 1 - lightning-dns-resolver/src/lib.rs | 6 ---- lightning/src/blinded_path/message.rs | 4 +-- lightning/src/blinded_path/mod.rs | 7 ++-- lightning/src/blinded_path/payment.rs | 5 +-- lightning/src/ln/outbound_payment.rs | 20 ++++------- lightning/src/onion_message/messenger.rs | 42 +++++++++-------------- 7 files changed, 27 insertions(+), 58 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index a16933f1cde..a67657be8dc 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -415,7 +415,6 @@ pub const NO_ONION_MESSENGER: Option< EntropySource = &(dyn EntropySource + Send + Sync), NodeSigner = &(dyn lightning::sign::NodeSigner + Send + Sync), Logger = &'static (dyn Logger + Send + Sync), - NodeIdLookUp = DynChannelManager, NL = &'static DynChannelManager, MessageRouter = &'static DynMessageRouter, OffersMessageHandler = lightning::ln::peer_handler::IgnoringMessageHandler, diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index 925e658ebe7..900672d0a3a 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -203,12 +203,6 @@ mod test { None } } - impl Deref for DummyNodeLookup { - type Target = DummyNodeLookup; - fn deref(&self) -> &DummyNodeLookup { - self - } - } struct DirectlyConnectedRouter {} impl MessageRouter for DirectlyConnectedRouter { diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs index 68c4a60738b..7bcbe80a965 100644 --- a/lightning/src/blinded_path/message.rs +++ b/lightning/src/blinded_path/message.rs @@ -31,7 +31,6 @@ use crate::types::payment::PaymentHash; use crate::util::scid_utils; use crate::util::ser::{FixedLengthReader, LengthReadableArgs, Readable, Writeable, Writer}; -use core::ops::Deref; use core::time::Duration; use core::{cmp, mem}; @@ -192,11 +191,10 @@ impl BlindedMessagePath { /// introduction node. /// /// Will only modify `self` when returning `Ok`. - pub fn advance_path_by_one( + pub fn advance_path_by_one( &mut self, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, ) -> Result<(), ()> where - NL::Target: NodeIdLookUp, T: secp256k1::Signing + secp256k1::Verification, { let control_tlvs_ss = node_signer.ecdh(Recipient::Node, &self.0.blinding_point, None)?; diff --git a/lightning/src/blinded_path/mod.rs b/lightning/src/blinded_path/mod.rs index 2f9b1b9a411..d1f58c8c1d9 100644 --- a/lightning/src/blinded_path/mod.rs +++ b/lightning/src/blinded_path/mod.rs @@ -88,10 +88,9 @@ impl NodeIdLookUp for EmptyNodeIdLookUp { } } -impl Deref for EmptyNodeIdLookUp { - type Target = EmptyNodeIdLookUp; - fn deref(&self) -> &Self { - self +impl> NodeIdLookUp for N { + fn next_node_id(&self, short_channel_id: u64) -> Option { + self.deref().next_node_id(short_channel_id) } } diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index df0626e0673..27292bacf4d 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -33,8 +33,6 @@ use crate::util::ser::{ Writeable, Writer, }; -use core::ops::Deref; - #[allow(unused_imports)] use crate::prelude::*; @@ -230,11 +228,10 @@ impl BlindedPaymentPath { /// introduction node. /// /// Will only modify `self` when returning `Ok`. 
- pub fn advance_path_by_one( + pub fn advance_path_by_one( &mut self, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, ) -> Result<(), ()> where - NL::Target: NodeIdLookUp, T: secp256k1::Signing + secp256k1::Verification, { let (next_node_id, control_tlvs_ss) = diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index d366f46d3e2..863a4f9beb4 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -40,7 +40,6 @@ use crate::util::ser::ReadableArgs; use crate::util::time::Instant; use core::fmt::{self, Display, Formatter}; -use core::ops::Deref; use core::sync::atomic::{AtomicBool, Ordering}; use core::time::Duration; @@ -952,7 +951,7 @@ impl OutboundPayments { #[rustfmt::skip] pub(super) fn send_payment_for_bolt12_invoice< - R: Router, ES: EntropySource, NS: NodeSigner, NL: Deref, IH, SP, L: Logger, + R: Router, ES: EntropySource, NS: NodeSigner, NL: NodeIdLookUp, IH, SP, L: Logger, >( &self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R, first_hops: Vec, features: Bolt12InvoiceFeatures, inflight_htlcs: IH, @@ -962,7 +961,6 @@ impl OutboundPayments { send_payment_along_path: SP, logger: &WithContext, ) -> Result<(), Bolt12PaymentError> where - NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { @@ -994,7 +992,7 @@ impl OutboundPayments { #[rustfmt::skip] fn send_payment_for_bolt12_invoice_internal< - R: Router, ES: EntropySource, NS: NodeSigner, NL: Deref, IH, SP, L: Logger, + R: Router, ES: EntropySource, NS: NodeSigner, NL: NodeIdLookUp, IH, SP, L: Logger, >( &self, payment_id: PaymentId, payment_hash: PaymentHash, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, @@ -1006,7 +1004,6 @@ impl OutboundPayments { send_payment_along_path: SP, logger: &WithContext, ) -> Result<(), Bolt12PaymentError> where - NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { @@ -1209,9 +1206,9 @@ impl OutboundPayments { R: Router, ES: EntropySource, NS: NodeSigner, - NL: Deref, - IH, - SP, + NL: NodeIdLookUp, + IH: Fn() -> InFlightHtlcs, + SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, L: Logger, >( &self, payment_id: PaymentId, hold_htlcs_at_next_hop: bool, router: &R, @@ -1219,12 +1216,7 @@ impl OutboundPayments { node_id_lookup: &NL, secp_ctx: &Secp256k1, best_block_height: u32, pending_events: &Mutex)>>, send_payment_along_path: SP, logger: &WithContext, - ) -> Result<(), Bolt12PaymentError> - where - NL::Target: NodeIdLookUp, - IH: Fn() -> InFlightHtlcs, - SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, - { + ) -> Result<(), Bolt12PaymentError> { let ( payment_hash, keysend_preimage, diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index 0aadc6d6e31..5e2ec2d9bb9 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -72,9 +72,7 @@ pub trait AOnionMessenger { /// A type implementing [`Logger`] type Logger: Logger; /// A type implementing [`NodeIdLookUp`] - type NodeIdLookUp: NodeIdLookUp + ?Sized; - /// A type that may be dereferenced to [`Self::NodeIdLookUp`] - type NL: Deref; + type NL: NodeIdLookUp; /// A type implementing [`MessageRouter`] type MessageRouter: MessageRouter; /// A type implementing [`OffersMessageHandler`] @@ -113,7 +111,7 @@ impl< ES: EntropySource, NS: NodeSigner, L: Logger, - NL: Deref, + NL: NodeIdLookUp, MR: MessageRouter, OMH: Deref, APH: Deref, @@ 
-121,7 +119,6 @@ impl< CMH: Deref, > AOnionMessenger for OnionMessenger where - NL::Target: NodeIdLookUp, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, DRH::Target: DNSResolverMessageHandler, @@ -130,7 +127,6 @@ where type EntropySource = ES; type NodeSigner = NS; type Logger = L; - type NodeIdLookUp = NL::Target; type NL = NL; type MessageRouter = MR; type OffersMessageHandler = OMH::Target; @@ -271,14 +267,13 @@ pub struct OnionMessenger< ES: EntropySource, NS: NodeSigner, L: Logger, - NL: Deref, + NL: NodeIdLookUp, MR: MessageRouter, OMH: Deref, APH: Deref, DRH: Deref, CMH: Deref, > where - NL::Target: NodeIdLookUp, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, DRH::Target: DNSResolverMessageHandler, @@ -1037,16 +1032,13 @@ pub enum PeeledOnion { pub fn create_onion_message_resolving_destination< ES: EntropySource, NS: NodeSigner, - NL: Deref, + NL: NodeIdLookUp, T: OnionMessageContents, >( entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, network_graph: &ReadOnlyNetworkGraph, secp_ctx: &Secp256k1, mut path: OnionMessagePath, contents: T, reply_path: Option, -) -> Result<(PublicKey, OnionMessage, Vec), SendError> -where - NL::Target: NodeIdLookUp, -{ +) -> Result<(PublicKey, OnionMessage, Vec), SendError> { path.destination.resolve(network_graph); create_onion_message( entropy_source, @@ -1070,14 +1062,16 @@ where /// - unless it can be resolved by [`NodeIdLookUp::next_node_id`]. /// Use [`create_onion_message_resolving_destination`] instead to resolve the introduction node /// first with a [`ReadOnlyNetworkGraph`]. -pub fn create_onion_message( +pub fn create_onion_message< + ES: EntropySource, + NS: NodeSigner, + NL: NodeIdLookUp, + T: OnionMessageContents, +>( entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, path: OnionMessagePath, contents: T, reply_path: Option, -) -> Result<(PublicKey, OnionMessage, Vec), SendError> -where - NL::Target: NodeIdLookUp, -{ +) -> Result<(PublicKey, OnionMessage, Vec), SendError> { let OnionMessagePath { intermediate_nodes, mut destination, first_node_addresses } = path; if let Destination::BlindedPath(ref path) = destination { if path.blinded_hops().is_empty() { @@ -1374,7 +1368,7 @@ impl< ES: EntropySource, NS: NodeSigner, L: Logger, - NL: Deref, + NL: NodeIdLookUp, MR: MessageRouter, OMH: Deref, APH: Deref, @@ -1382,7 +1376,6 @@ impl< CMH: Deref, > OnionMessenger where - NL::Target: NodeIdLookUp, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, DRH::Target: DNSResolverMessageHandler, @@ -2014,7 +2007,7 @@ impl< ES: EntropySource, NS: NodeSigner, L: Logger, - NL: Deref, + NL: NodeIdLookUp, MR: MessageRouter, OMH: Deref, APH: Deref, @@ -2022,7 +2015,6 @@ impl< CMH: Deref, > EventsProvider for OnionMessenger where - NL::Target: NodeIdLookUp, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, DRH::Target: DNSResolverMessageHandler, @@ -2131,7 +2123,7 @@ impl< ES: EntropySource, NS: NodeSigner, L: Logger, - NL: Deref, + NL: NodeIdLookUp, MR: MessageRouter, OMH: Deref, APH: Deref, @@ -2139,7 +2131,6 @@ impl< CMH: Deref, > BaseMessageHandler for OnionMessenger where - NL::Target: NodeIdLookUp, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, DRH::Target: DNSResolverMessageHandler, @@ -2199,7 +2190,7 @@ impl< ES: EntropySource, NS: NodeSigner, L: Logger, - NL: Deref, + NL: NodeIdLookUp, MR: MessageRouter, OMH: Deref, APH: Deref, @@ -2207,7 +2198,6 @@ impl< CMH: Deref, > 
OnionMessageHandler for OnionMessenger where - NL::Target: NodeIdLookUp, OMH::Target: OffersMessageHandler, APH::Target: AsyncPaymentsMessageHandler, DRH::Target: DNSResolverMessageHandler, From c35b41be989930947a2d271e3b544d02097e4846 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 15 Jan 2026 16:35:48 -0500 Subject: [PATCH 150/242] Drop Deref indirection for message handler traits Reduces generics and verbosity across the codebase, should provide equivalent behavior. This could be split into multiple commits (e.g. one for OnionMessenger message handler types, one for PeerManager message handler types) but it would require adding a new IgnoringOnionMessageHandler type for the messenger handlers, due to the IgnoringMessageHandler's Deref implementation conflicting otherwise. Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 12 +- lightning-dns-resolver/src/lib.rs | 18 +- lightning/src/ln/msgs.rs | 217 ++++++++++++++++++ lightning/src/ln/peer_handler.rs | 183 ++++++--------- lightning/src/ln/wire.rs | 20 +- lightning/src/onion_message/async_payments.rs | 40 ++++ lightning/src/onion_message/dns_resolution.rs | 18 ++ lightning/src/onion_message/messenger.rs | 132 +++++------ lightning/src/onion_message/offers.rs | 12 + 9 files changed, 436 insertions(+), 216 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index a67657be8dc..4c41a2c67be 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -417,14 +417,10 @@ pub const NO_ONION_MESSENGER: Option< Logger = &'static (dyn Logger + Send + Sync), NL = &'static DynChannelManager, MessageRouter = &'static DynMessageRouter, - OffersMessageHandler = lightning::ln::peer_handler::IgnoringMessageHandler, - OMH = &'static lightning::ln::peer_handler::IgnoringMessageHandler, - AsyncPaymentsMessageHandler = lightning::ln::peer_handler::IgnoringMessageHandler, - APH = &'static lightning::ln::peer_handler::IgnoringMessageHandler, - DNSResolverMessageHandler = lightning::ln::peer_handler::IgnoringMessageHandler, - DRH = &'static lightning::ln::peer_handler::IgnoringMessageHandler, - CustomOnionMessageHandler = lightning::ln::peer_handler::IgnoringMessageHandler, - CMH = &'static lightning::ln::peer_handler::IgnoringMessageHandler, + OMH = lightning::ln::peer_handler::IgnoringMessageHandler, + APH = lightning::ln::peer_handler::IgnoringMessageHandler, + DRH = lightning::ln::peer_handler::IgnoringMessageHandler, + CMH = lightning::ln::peer_handler::IgnoringMessageHandler, > + Send + Sync, >, diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index 925e658ebe7..e9578844cf8 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -6,7 +6,6 @@ #![deny(rustdoc::private_intra_doc_links)] use std::net::SocketAddr; -use std::ops::Deref; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; @@ -30,10 +29,7 @@ const WE_REQUIRE_32_OR_64_BIT_USIZE: u8 = 424242; /// A resolver which implements [`DNSResolverMessageHandler`] and replies to [`DNSSECQuery`] /// messages with [`DNSSECProof`]s.
-pub struct OMDomainResolver -where - PH::Target: DNSResolverMessageHandler, -{ +pub struct OMDomainResolver { state: Arc, proof_handler: Option, runtime_handle: Mutex>, @@ -56,10 +52,7 @@ impl OMDomainResolver { } } -impl OMDomainResolver -where - PH::Target: DNSResolverMessageHandler, -{ +impl OMDomainResolver { /// Creates a new [`OMDomainResolver`] given the [`SocketAddr`] of a DNS resolver listening on /// TCP (e.g. 8.8.8.8:53, 1.1.1.1:53 or your local DNS resolver). /// @@ -103,10 +96,7 @@ where } } -impl DNSResolverMessageHandler for OMDomainResolver -where - PH::Target: DNSResolverMessageHandler, -{ +impl DNSResolverMessageHandler for OMDomainResolver { fn handle_dnssec_proof(&self, proof: DNSSECProof, context: DNSResolverContext) { if let Some(proof_handler) = &self.proof_handler { proof_handler.handle_dnssec_proof(proof, context); @@ -169,7 +159,6 @@ mod test { use lightning::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, Init, OnionMessageHandler, }; - use lightning::ln::peer_handler::IgnoringMessageHandler; use lightning::offers::offer::Offer; use lightning::onion_message::dns_resolution::{HumanReadableName, OMNameResolver}; use lightning::onion_message::messenger::{ @@ -184,7 +173,6 @@ mod test { use lightning::expect_payment_claimed; use lightning_types::string::UntrustedString; - use std::ops::Deref; use std::sync::Mutex; use std::time::{Duration, Instant, SystemTime}; diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 2f7d1c46880..b11a0b815ab 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -50,6 +50,7 @@ use crate::io_extras::read_to_end; use core::fmt; use core::fmt::Debug; use core::fmt::Display; +use core::ops::Deref; #[cfg(feature = "std")] use core::str::FromStr; #[cfg(feature = "std")] @@ -2074,6 +2075,26 @@ pub trait BaseMessageHandler { -> Result<(), ()>; } +impl> BaseMessageHandler for B { + fn get_and_clear_pending_msg_events(&self) -> Vec { + self.deref().get_and_clear_pending_msg_events() + } + fn peer_disconnected(&self, their_node_id: PublicKey) { + self.deref().peer_disconnected(their_node_id) + } + fn provided_node_features(&self) -> NodeFeatures { + self.deref().provided_node_features() + } + fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures { + self.deref().provided_init_features(their_node_id) + } + fn peer_connected( + &self, their_node_id: PublicKey, msg: &Init, inbound: bool, + ) -> Result<(), ()> { + self.deref().peer_connected(their_node_id, msg, inbound) + } +} + /// A trait to describe an object which can receive channel messages. 
/// /// Messages MAY be called in parallel when they originate from different `their_node_ids`, however @@ -2214,6 +2235,137 @@ pub trait ChannelMessageHandler: BaseMessageHandler { fn message_received(&self); } +impl> ChannelMessageHandler for C { + fn handle_open_channel(&self, their_node_id: PublicKey, msg: &OpenChannel) { + self.deref().handle_open_channel(their_node_id, msg) + } + fn handle_open_channel_v2(&self, their_node_id: PublicKey, msg: &OpenChannelV2) { + self.deref().handle_open_channel_v2(their_node_id, msg) + } + fn handle_accept_channel(&self, their_node_id: PublicKey, msg: &AcceptChannel) { + self.deref().handle_accept_channel(their_node_id, msg) + } + fn handle_accept_channel_v2(&self, their_node_id: PublicKey, msg: &AcceptChannelV2) { + self.deref().handle_accept_channel_v2(their_node_id, msg) + } + fn handle_funding_created(&self, their_node_id: PublicKey, msg: &FundingCreated) { + self.deref().handle_funding_created(their_node_id, msg) + } + fn handle_funding_signed(&self, their_node_id: PublicKey, msg: &FundingSigned) { + self.deref().handle_funding_signed(their_node_id, msg) + } + fn handle_channel_ready(&self, their_node_id: PublicKey, msg: &ChannelReady) { + self.deref().handle_channel_ready(their_node_id, msg) + } + fn handle_peer_storage(&self, their_node_id: PublicKey, msg: PeerStorage) { + self.deref().handle_peer_storage(their_node_id, msg) + } + fn handle_peer_storage_retrieval(&self, their_node_id: PublicKey, msg: PeerStorageRetrieval) { + self.deref().handle_peer_storage_retrieval(their_node_id, msg) + } + fn handle_shutdown(&self, their_node_id: PublicKey, msg: &Shutdown) { + self.deref().handle_shutdown(their_node_id, msg) + } + fn handle_closing_signed(&self, their_node_id: PublicKey, msg: &ClosingSigned) { + self.deref().handle_closing_signed(their_node_id, msg) + } + #[cfg(simple_close)] + fn handle_closing_complete(&self, their_node_id: PublicKey, msg: ClosingComplete) { + self.deref().handle_closing_complete(their_node_id, msg) + } + #[cfg(simple_close)] + fn handle_closing_sig(&self, their_node_id: PublicKey, msg: ClosingSig) { + self.deref().handle_closing_sig(their_node_id, msg) + } + fn handle_stfu(&self, their_node_id: PublicKey, msg: &Stfu) { + self.deref().handle_stfu(their_node_id, msg) + } + fn handle_splice_init(&self, their_node_id: PublicKey, msg: &SpliceInit) { + self.deref().handle_splice_init(their_node_id, msg) + } + fn handle_splice_ack(&self, their_node_id: PublicKey, msg: &SpliceAck) { + self.deref().handle_splice_ack(their_node_id, msg) + } + fn handle_splice_locked(&self, their_node_id: PublicKey, msg: &SpliceLocked) { + self.deref().handle_splice_locked(their_node_id, msg) + } + fn handle_tx_add_input(&self, their_node_id: PublicKey, msg: &TxAddInput) { + self.deref().handle_tx_add_input(their_node_id, msg) + } + fn handle_tx_add_output(&self, their_node_id: PublicKey, msg: &TxAddOutput) { + self.deref().handle_tx_add_output(their_node_id, msg) + } + fn handle_tx_remove_input(&self, their_node_id: PublicKey, msg: &TxRemoveInput) { + self.deref().handle_tx_remove_input(their_node_id, msg) + } + fn handle_tx_remove_output(&self, their_node_id: PublicKey, msg: &TxRemoveOutput) { + self.deref().handle_tx_remove_output(their_node_id, msg) + } + fn handle_tx_complete(&self, their_node_id: PublicKey, msg: &TxComplete) { + self.deref().handle_tx_complete(their_node_id, msg) + } + fn handle_tx_signatures(&self, their_node_id: PublicKey, msg: &TxSignatures) { + self.deref().handle_tx_signatures(their_node_id, msg) + } + fn 
handle_tx_init_rbf(&self, their_node_id: PublicKey, msg: &TxInitRbf) { + self.deref().handle_tx_init_rbf(their_node_id, msg) + } + fn handle_tx_ack_rbf(&self, their_node_id: PublicKey, msg: &TxAckRbf) { + self.deref().handle_tx_ack_rbf(their_node_id, msg) + } + fn handle_tx_abort(&self, their_node_id: PublicKey, msg: &TxAbort) { + self.deref().handle_tx_abort(their_node_id, msg) + } + fn handle_update_add_htlc(&self, their_node_id: PublicKey, msg: &UpdateAddHTLC) { + self.deref().handle_update_add_htlc(their_node_id, msg) + } + fn handle_update_fulfill_htlc(&self, their_node_id: PublicKey, msg: UpdateFulfillHTLC) { + self.deref().handle_update_fulfill_htlc(their_node_id, msg) + } + fn handle_update_fail_htlc(&self, their_node_id: PublicKey, msg: &UpdateFailHTLC) { + self.deref().handle_update_fail_htlc(their_node_id, msg) + } + fn handle_update_fail_malformed_htlc( + &self, their_node_id: PublicKey, msg: &UpdateFailMalformedHTLC, + ) { + self.deref().handle_update_fail_malformed_htlc(their_node_id, msg) + } + fn handle_commitment_signed(&self, their_node_id: PublicKey, msg: &CommitmentSigned) { + self.deref().handle_commitment_signed(their_node_id, msg) + } + fn handle_commitment_signed_batch( + &self, their_node_id: PublicKey, channel_id: ChannelId, batch: Vec, + ) { + self.deref().handle_commitment_signed_batch(their_node_id, channel_id, batch) + } + fn handle_revoke_and_ack(&self, their_node_id: PublicKey, msg: &RevokeAndACK) { + self.deref().handle_revoke_and_ack(their_node_id, msg) + } + fn handle_update_fee(&self, their_node_id: PublicKey, msg: &UpdateFee) { + self.deref().handle_update_fee(their_node_id, msg) + } + fn handle_announcement_signatures( + &self, their_node_id: PublicKey, msg: &AnnouncementSignatures, + ) { + self.deref().handle_announcement_signatures(their_node_id, msg) + } + fn handle_channel_reestablish(&self, their_node_id: PublicKey, msg: &ChannelReestablish) { + self.deref().handle_channel_reestablish(their_node_id, msg) + } + fn handle_channel_update(&self, their_node_id: PublicKey, msg: &ChannelUpdate) { + self.deref().handle_channel_update(their_node_id, msg) + } + fn handle_error(&self, their_node_id: PublicKey, msg: &ErrorMessage) { + self.deref().handle_error(their_node_id, msg) + } + fn get_chain_hashes(&self) -> Option> { + self.deref().get_chain_hashes() + } + fn message_received(&self) { + self.deref().message_received() + } +} + /// A trait to describe an object which can receive routing messages. 
/// /// # Implementor DoS Warnings @@ -2288,6 +2440,57 @@ pub trait RoutingMessageHandler: BaseMessageHandler { fn processing_queue_high(&self) -> bool; } +impl> RoutingMessageHandler for R { + fn handle_node_announcement( + &self, their_node_id: Option, msg: &NodeAnnouncement, + ) -> Result { + self.deref().handle_node_announcement(their_node_id, msg) + } + fn handle_channel_announcement( + &self, their_node_id: Option, msg: &ChannelAnnouncement, + ) -> Result { + self.deref().handle_channel_announcement(their_node_id, msg) + } + fn handle_channel_update( + &self, their_node_id: Option, msg: &ChannelUpdate, + ) -> Result, LightningError> { + self.deref().handle_channel_update(their_node_id, msg) + } + fn get_next_channel_announcement( + &self, starting_point: u64, + ) -> Option<(ChannelAnnouncement, Option, Option)> { + self.deref().get_next_channel_announcement(starting_point) + } + fn get_next_node_announcement( + &self, starting_point: Option<&NodeId>, + ) -> Option { + self.deref().get_next_node_announcement(starting_point) + } + fn handle_reply_channel_range( + &self, their_node_id: PublicKey, msg: ReplyChannelRange, + ) -> Result<(), LightningError> { + self.deref().handle_reply_channel_range(their_node_id, msg) + } + fn handle_reply_short_channel_ids_end( + &self, their_node_id: PublicKey, msg: ReplyShortChannelIdsEnd, + ) -> Result<(), LightningError> { + self.deref().handle_reply_short_channel_ids_end(their_node_id, msg) + } + fn handle_query_channel_range( + &self, their_node_id: PublicKey, msg: QueryChannelRange, + ) -> Result<(), LightningError> { + self.deref().handle_query_channel_range(their_node_id, msg) + } + fn handle_query_short_channel_ids( + &self, their_node_id: PublicKey, msg: QueryShortChannelIds, + ) -> Result<(), LightningError> { + self.deref().handle_query_short_channel_ids(their_node_id, msg) + } + fn processing_queue_high(&self) -> bool { + self.deref().processing_queue_high() + } +} + /// A handler for received [`OnionMessage`]s and for providing generated ones to send. pub trait OnionMessageHandler: BaseMessageHandler { /// Handle an incoming `onion_message` message from the given peer. @@ -2304,6 +2507,18 @@ pub trait OnionMessageHandler: BaseMessageHandler { fn timer_tick_occurred(&self); } +impl> OnionMessageHandler for O { + fn handle_onion_message(&self, peer_node_id: PublicKey, msg: &OnionMessage) { + self.deref().handle_onion_message(peer_node_id, msg) + } + fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option { + self.deref().next_onion_message_for_peer(peer_node_id) + } + fn timer_tick_occurred(&self) { + self.deref().timer_tick_occurred() + } +} + /// A handler which can only be used to send messages. /// /// This is implemented by [`ChainMonitor`]. @@ -2311,6 +2526,8 @@ pub trait OnionMessageHandler: BaseMessageHandler { /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor pub trait SendOnlyMessageHandler: BaseMessageHandler {} +impl> SendOnlyMessageHandler for S {} + #[derive(Clone, Debug, PartialEq, Eq)] /// Information communicated in the onion to the recipient for multi-part tracking and proof that /// the payment is associated with an invoice. 
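All of the blanket impls added above follow one pattern: implement the trait for any `Deref` type whose target implements it, forwarding each method through `self.deref()`. The following minimal, self-contained sketch illustrates why this works; the `Handler` trait and every name in it are illustrative stand-ins, not part of this patch. With the blanket impl in place, a plain `H: Handler` bound accepts `&T`, `Box<T>`, or `Arc<T>` directly, where the old style needed the two-part `H: Deref, H::Target: Handler` bound.

use std::ops::Deref;
use std::sync::Arc;

trait Handler {
    fn handle(&self, msg: &str);
}

// The pattern used throughout this patch: any smart pointer (or reference)
// whose target implements the trait implements it too, by delegation.
impl<H: Deref> Handler for H
where
    H::Target: Handler,
{
    fn handle(&self, msg: &str) {
        // `self.deref()` yields `&H::Target`, so this resolves to the
        // target's own impl rather than recursing into this one.
        self.deref().handle(msg)
    }
}

struct MyHandler;
impl Handler for MyHandler {
    fn handle(&self, msg: &str) {
        println!("handled: {msg}");
    }
}

// Consumers can now take a single `H: Handler` bound instead of
// `H: Deref` plus `H::Target: Handler`.
fn drive<H: Handler>(handler: H) {
    handler.handle("ping");
}

fn main() {
    let h = MyHandler;
    drive(&h); // `&MyHandler` implements `Handler` via the blanket impl
    drive(Arc::new(MyHandler)); // so does `Arc<MyHandler>`
}

This is also why the patch deletes the self-referential `Deref` impls on `IgnoringMessageHandler` and `ErroringMessageHandler` in the next file: a type implementing both a handler trait and `Deref<Target = Self>` would be covered by the blanket impl as well as by its own direct impl, and Rust's coherence rules reject the overlap (the commit message notes the same conflict).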
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 1891c52928d..759a1e7d887 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -53,7 +53,6 @@ use crate::util::ser::{VecWriter, Writeable, Writer}; #[allow(unused_imports)] use crate::prelude::*; -use super::wire::CustomMessageReader; use crate::io; use crate::sync::{FairRwLock, Mutex, MutexGuard}; use core::convert::Infallible; @@ -126,6 +125,31 @@ pub trait CustomMessageHandler: wire::CustomMessageReader { fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures; } +impl> CustomMessageHandler for C { + fn handle_custom_message( + &self, msg: Self::CustomMessage, sender_node_id: PublicKey, + ) -> Result<(), LightningError> { + self.deref().handle_custom_message(msg, sender_node_id) + } + fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { + self.deref().get_and_clear_pending_msg() + } + fn peer_disconnected(&self, their_node_id: PublicKey) { + self.deref().peer_disconnected(their_node_id) + } + fn peer_connected( + &self, their_node_id: PublicKey, msg: &Init, inbound: bool, + ) -> Result<(), ()> { + self.deref().peer_connected(their_node_id, msg, inbound) + } + fn provided_node_features(&self) -> NodeFeatures { + self.deref().provided_node_features() + } + fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures { + self.deref().provided_init_features(their_node_id) + } +} + /// A dummy struct which implements `RoutingMessageHandler` without storing any routing information /// or doing any processing. You can provide one of these as the route_handler in a MessageHandler. pub struct IgnoringMessageHandler {} @@ -288,13 +312,6 @@ impl OnionMessageContents for Infallible { } } -impl Deref for IgnoringMessageHandler { - type Target = IgnoringMessageHandler; - fn deref(&self) -> &Self { - self - } -} - // Implement Type for Infallible, note that it cannot be constructed, and thus you can never call a // method that takes self for it. impl wire::Type for Infallible { @@ -568,22 +585,14 @@ impl ChannelMessageHandler for ErroringMessageHandler { fn message_received(&self) {} } -impl Deref for ErroringMessageHandler { - type Target = ErroringMessageHandler; - fn deref(&self) -> &Self { - self - } -} - /// Provides references to trait impls which handle different types of messages. -pub struct MessageHandler -where - CM::Target: ChannelMessageHandler, - RM::Target: RoutingMessageHandler, - OM::Target: OnionMessageHandler, - CustomM::Target: CustomMessageHandler, - SM::Target: SendOnlyMessageHandler, -{ +pub struct MessageHandler< + CM: ChannelMessageHandler, + RM: RoutingMessageHandler, + OM: OnionMessageHandler, + CustomM: CustomMessageHandler, + SM: SendOnlyMessageHandler, +> { /// A message handler which handles messages specific to channels. Usually this is just a /// [`ChannelManager`] object or an [`ErroringMessageHandler`]. 
/// @@ -971,18 +980,13 @@ pub type SimpleRefPeerManager< #[allow(missing_docs)] pub trait APeerManager { type Descriptor: SocketDescriptor; - type CMT: ChannelMessageHandler + ?Sized; - type CM: Deref; - type RMT: RoutingMessageHandler + ?Sized; - type RM: Deref; - type OMT: OnionMessageHandler + ?Sized; - type OM: Deref; + type CM: ChannelMessageHandler; + type RM: RoutingMessageHandler; + type OM: OnionMessageHandler; type Logger: Logger; - type CMHT: CustomMessageHandler + ?Sized; - type CMH: Deref; + type CMH: CustomMessageHandler; type NodeSigner: NodeSigner; - type SMT: SendOnlyMessageHandler + ?Sized; - type SM: Deref; + type SM: SendOnlyMessageHandler; /// Gets a reference to the underlying [`PeerManager`]. fn as_ref( &self, @@ -1000,33 +1004,22 @@ pub trait APeerManager { impl< Descriptor: SocketDescriptor, - CM: Deref, - RM: Deref, - OM: Deref, + CM: ChannelMessageHandler, + RM: RoutingMessageHandler, + OM: OnionMessageHandler, L: Logger, - CMH: Deref, + CMH: CustomMessageHandler, NS: NodeSigner, - SM: Deref, + SM: SendOnlyMessageHandler, > APeerManager for PeerManager -where - CM::Target: ChannelMessageHandler, - RM::Target: RoutingMessageHandler, - OM::Target: OnionMessageHandler, - CMH::Target: CustomMessageHandler, - SM::Target: SendOnlyMessageHandler, { type Descriptor = Descriptor; - type CMT = ::Target; type CM = CM; - type RMT = ::Target; type RM = RM; - type OMT = ::Target; type OM = OM; type Logger = L; - type CMHT = ::Target; type CMH = CMH; type NodeSigner = NS; - type SMT = ::Target; type SM = SM; fn as_ref(&self) -> &PeerManager { self @@ -1054,20 +1047,14 @@ where /// [`read_event`]: PeerManager::read_event pub struct PeerManager< Descriptor: SocketDescriptor, - CM: Deref, - RM: Deref, - OM: Deref, + CM: ChannelMessageHandler, + RM: RoutingMessageHandler, + OM: OnionMessageHandler, L: Logger, - CMH: Deref, + CMH: CustomMessageHandler, NS: NodeSigner, - SM: Deref, -> where - CM::Target: ChannelMessageHandler, - RM::Target: RoutingMessageHandler, - OM::Target: OnionMessageHandler, - CMH::Target: CustomMessageHandler, - SM::Target: SendOnlyMessageHandler, -{ + SM: SendOnlyMessageHandler, +> { message_handler: MessageHandler, /// Connection state for each connected peer - we have an outer read-write lock which is taken /// as read while we're doing processing for a peer and taken write when a peer is being added @@ -1143,12 +1130,14 @@ fn encode_message(message: wire::Message) -> Vec { buffer.0 } -impl - PeerManager -where - CM::Target: ChannelMessageHandler, - OM::Target: OnionMessageHandler, - SM::Target: SendOnlyMessageHandler, +impl< + Descriptor: SocketDescriptor, + CM: ChannelMessageHandler, + OM: OnionMessageHandler, + L: Logger, + NS: NodeSigner, + SM: SendOnlyMessageHandler, + > PeerManager { /// Constructs a new `PeerManager` with the given `ChannelMessageHandler` and /// `OnionMessageHandler`. No routing message handler is used and network graph messages are @@ -1184,7 +1173,7 @@ where } } -impl +impl PeerManager< Descriptor, ErroringMessageHandler, @@ -1194,8 +1183,7 @@ impl IgnoringMessageHandler, NS, IgnoringMessageHandler, - > where - RM::Target: RoutingMessageHandler, + > { /// Constructs a new `PeerManager` with the given `RoutingMessageHandler`. 
No channel message /// handler or onion message handler is used and onion and channel messages will be ignored (or @@ -1281,20 +1269,14 @@ fn filter_addresses(ip_address: Option) -> Option impl< Descriptor: SocketDescriptor, - CM: Deref, - RM: Deref, - OM: Deref, + CM: ChannelMessageHandler, + RM: RoutingMessageHandler, + OM: OnionMessageHandler, L: Logger, - CMH: Deref, + CMH: CustomMessageHandler, NS: NodeSigner, - SM: Deref, + SM: SendOnlyMessageHandler, > PeerManager -where - CM::Target: ChannelMessageHandler, - RM::Target: RoutingMessageHandler, - OM::Target: OnionMessageHandler, - CMH::Target: CustomMessageHandler, - SM::Target: SendOnlyMessageHandler, { /// Constructs a new `PeerManager` with the given message handlers. /// @@ -1721,10 +1703,7 @@ where } /// Append a message to a peer's pending outbound/write buffer - fn enqueue_message( - &self, peer: &mut Peer, - message: Message<::CustomMessage>, - ) { + fn enqueue_message(&self, peer: &mut Peer, message: Message) { let their_node_id = peer.their_node_id.map(|p| p.0); if their_node_id.is_some() { let logger = WithContext::from(&self.logger, their_node_id, None, None); @@ -1940,7 +1919,7 @@ where let message_result = wire::read( &mut &peer.pending_read_buffer [..peer.pending_read_buffer.len() - 16], - &*self.message_handler.custom_message_handler, + &self.message_handler.custom_message_handler, ); // Reset read buffer @@ -2067,7 +2046,7 @@ where /// Returns the message back if it needs to be broadcasted to all other peers. fn handle_message( &self, peer_mutex: &Mutex, peer_lock: MutexGuard, - message: Message<<::Target as wire::CustomMessageReader>::CustomMessage>, + message: Message, ) -> Result, MessageHandlingError> { let their_node_id = peer_lock .their_node_id @@ -2107,15 +2086,9 @@ where // Returns `None` if the message was fully processed and otherwise returns the message back to // allow it to be subsequently processed by `do_handle_message_without_peer_lock`. fn do_handle_message_holding_peer_lock<'a>( - &self, mut peer_lock: MutexGuard, - message: Message<<::Target as wire::CustomMessageReader>::CustomMessage>, + &self, mut peer_lock: MutexGuard, message: Message, their_node_id: PublicKey, logger: &WithContext<'a, L>, - ) -> Result< - Option< - LogicalMessage<<::Target as wire::CustomMessageReader>::CustomMessage>, - >, - MessageHandlingError, - > { + ) -> Result>, MessageHandlingError> { peer_lock.received_message_since_timer_tick = true; // Need an Init as first message @@ -2387,8 +2360,7 @@ where // // Returns the message back if it needs to be broadcasted to all other peers. 
fn do_handle_message_without_peer_lock<'a>( - &self, peer_mutex: &Mutex, - message: Message<<::Target as wire::CustomMessageReader>::CustomMessage>, + &self, peer_mutex: &Mutex, message: Message, their_node_id: PublicKey, logger: &WithContext<'a, L>, ) -> Result, MessageHandlingError> { if is_gossip_msg(message.type_id()) { @@ -2657,8 +2629,7 @@ where let scid = msg.contents.short_channel_id; let node_id_1 = msg.contents.node_id_1; let node_id_2 = msg.contents.node_id_2; - let msg: Message<::CustomMessage> = - Message::ChannelAnnouncement(msg); + let msg: Message = Message::ChannelAnnouncement(msg); let encoded_msg = encode_message(msg); for (_, peer_mutex) in peers.iter() { let mut peer = peer_mutex.lock().unwrap(); @@ -2704,8 +2675,7 @@ where let our_announcement = self.our_node_id == msg.contents.node_id; let msg_node_id = msg.contents.node_id; - let msg: Message<::CustomMessage> = - Message::NodeAnnouncement(msg); + let msg: Message = Message::NodeAnnouncement(msg); let encoded_msg = encode_message(msg); for (_, peer_mutex) in peers.iter() { let mut peer = peer_mutex.lock().unwrap(); @@ -2750,8 +2720,7 @@ where ); let our_channel = self.our_node_id == node_id_1 || self.our_node_id == node_id_2; let scid = msg.contents.short_channel_id; - let msg: Message<::CustomMessage> = - Message::ChannelUpdate(msg); + let msg: Message = Message::ChannelUpdate(msg); let encoded_msg = encode_message(msg); for (_, peer_mutex) in peers.iter() { let mut peer = peer_mutex.lock().unwrap(); @@ -3285,9 +3254,8 @@ where // We do not have the peers write lock, so we just store that we're // about to disconnect the peer and do it after we finish // processing most messages. - let msg = msg.map(|msg| { - Message::<<::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg) - }); + let msg = + msg.map(|msg| Message::::Error(msg)); peers_to_disconnect.insert(node_id, msg); }, msgs::ErrorAction::DisconnectPeerWithWarning { msg } => { @@ -3557,8 +3525,7 @@ where if peer.awaiting_pong_timer_tick_intervals == 0 { peer.awaiting_pong_timer_tick_intervals = -1; let ping = msgs::Ping { ponglen: 0, byteslen: 64 }; - let msg: Message<::CustomMessage> = - Message::Ping(ping); + let msg: Message = Message::Ping(ping); self.enqueue_message(peer, msg); } } diff --git a/lightning/src/ln/wire.rs b/lightning/src/ln/wire.rs index 9065c49c676..a2078ce4256 100644 --- a/lightning/src/ln/wire.rs +++ b/lightning/src/ln/wire.rs @@ -15,6 +15,7 @@ use crate::io; use crate::ln::msgs; use crate::util::ser::{LengthLimitedRead, LengthReadable, Readable, Writeable, Writer}; +use core::ops::Deref; /// Trait to be implemented by custom message (unrelated to the channel/gossip LN layers) /// decoders. @@ -30,6 +31,15 @@ pub trait CustomMessageReader { ) -> Result, msgs::DecodeError>; } +impl> CustomMessageReader for C { + type CustomMessage = T::CustomMessage; + fn read( + &self, message_type: u16, buffer: &mut R, + ) -> Result, msgs::DecodeError> { + self.deref().read(message_type, buffer) + } +} + // TestEq is a dummy trait which requires PartialEq when built in testing, and otherwise is // blanket-implemented for all types. @@ -244,23 +254,21 @@ impl Message { /// # Errors /// /// Returns an error if the message payload could not be decoded as the specified type. 
-pub(crate) fn read( +pub(crate) fn read>( buffer: &mut R, custom_reader: H, ) -> Result, (msgs::DecodeError, Option)> where T: core::fmt::Debug + Type + Writeable, - H::Target: CustomMessageReader, { let message_type = ::read(buffer).map_err(|e| (e, None))?; do_read(buffer, message_type, custom_reader).map_err(|e| (e, Some(message_type))) } -fn do_read( +fn do_read>( buffer: &mut R, message_type: u16, custom_reader: H, ) -> Result, msgs::DecodeError> where T: core::fmt::Debug + Type + Writeable, - H::Target: CustomMessageReader, { match message_type { msgs::Init::TYPE => { @@ -876,7 +884,7 @@ mod tests { #[test] fn read_custom_message() { let buffer = [35, 40]; - let decoded_msg = read(&mut &buffer[..], &TestCustomMessageReader {}).unwrap(); + let decoded_msg = read(&mut &buffer[..], TestCustomMessageReader {}).unwrap(); match decoded_msg { Message::Custom(custom) => { assert_eq!(custom.type_id(), CUSTOM_MESSAGE_TYPE); @@ -889,7 +897,7 @@ mod tests { #[test] fn read_with_custom_reader_unknown_message_type() { let buffer = [35, 42]; - let decoded_msg = read(&mut &buffer[..], &TestCustomMessageReader {}).unwrap(); + let decoded_msg = read(&mut &buffer[..], TestCustomMessageReader {}).unwrap(); match decoded_msg { Message::Unknown(_) => {}, _ => panic!("Expected unknown message, found message type: {}", decoded_msg.type_id()), diff --git a/lightning/src/onion_message/async_payments.rs b/lightning/src/onion_message/async_payments.rs index 127126e150f..41108cdccd7 100644 --- a/lightning/src/onion_message/async_payments.rs +++ b/lightning/src/onion_message/async_payments.rs @@ -17,6 +17,7 @@ use crate::onion_message::messenger::{MessageSendInstructions, Responder, Respon use crate::onion_message::packet::OnionMessageContents; use crate::prelude::*; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; +use core::ops::Deref; // TLV record types for the `onionmsg_tlv` TLV stream as defined in BOLT 4. 
const OFFER_PATHS_REQ_TLV_TYPE: u64 = 75540; @@ -89,6 +90,45 @@ pub trait AsyncPaymentsMessageHandler { } } +impl> AsyncPaymentsMessageHandler + for A +{ + fn handle_offer_paths_request( + &self, message: OfferPathsRequest, context: AsyncPaymentsContext, + responder: Option, + ) -> Option<(OfferPaths, ResponseInstruction)> { + self.deref().handle_offer_paths_request(message, context, responder) + } + fn handle_offer_paths( + &self, message: OfferPaths, context: AsyncPaymentsContext, responder: Option, + ) -> Option<(ServeStaticInvoice, ResponseInstruction)> { + self.deref().handle_offer_paths(message, context, responder) + } + fn handle_serve_static_invoice( + &self, message: ServeStaticInvoice, context: AsyncPaymentsContext, + responder: Option, + ) { + self.deref().handle_serve_static_invoice(message, context, responder) + } + fn handle_static_invoice_persisted( + &self, message: StaticInvoicePersisted, context: AsyncPaymentsContext, + ) { + self.deref().handle_static_invoice_persisted(message, context) + } + fn handle_held_htlc_available( + &self, message: HeldHtlcAvailable, context: AsyncPaymentsContext, + responder: Option, + ) -> Option<(ReleaseHeldHtlc, ResponseInstruction)> { + self.deref().handle_held_htlc_available(message, context, responder) + } + fn handle_release_held_htlc(&self, message: ReleaseHeldHtlc, context: AsyncPaymentsContext) { + self.deref().handle_release_held_htlc(message, context) + } + fn release_pending_messages(&self) -> Vec<(AsyncPaymentsMessage, MessageSendInstructions)> { + self.deref().release_pending_messages() + } +} + /// Possible async payment messages sent and received via an [`OnionMessage`]. /// /// [`OnionMessage`]: crate::ln::msgs::OnionMessage diff --git a/lightning/src/onion_message/dns_resolution.rs b/lightning/src/onion_message/dns_resolution.rs index 47d4bc09e04..e857a359c78 100644 --- a/lightning/src/onion_message/dns_resolution.rs +++ b/lightning/src/onion_message/dns_resolution.rs @@ -37,6 +37,7 @@ use dnssec_prover::rr::Name; use lightning_types::features::NodeFeatures; use core::fmt; +use core::ops::Deref; use crate::blinded_path::message::DNSResolverContext; use crate::io; @@ -89,6 +90,23 @@ pub trait DNSResolverMessageHandler { } } +impl> DNSResolverMessageHandler for D { + fn handle_dnssec_query( + &self, message: DNSSECQuery, responder: Option, + ) -> Option<(DNSResolverMessage, ResponseInstruction)> { + self.deref().handle_dnssec_query(message, responder) + } + fn handle_dnssec_proof(&self, message: DNSSECProof, context: DNSResolverContext) { + self.deref().handle_dnssec_proof(message, context) + } + fn provided_node_features(&self) -> NodeFeatures { + self.deref().provided_node_features() + } + fn release_pending_messages(&self) -> Vec<(DNSResolverMessage, MessageSendInstructions)> { + self.deref().release_pending_messages() + } +} + #[derive(Clone, Debug, Hash, PartialEq, Eq)] /// An enum containing the possible onion messages which are used to request and receive /// DNSSEC proofs.
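Forwarding `AsyncPaymentsMessageHandler` and `DNSResolverMessageHandler` above is purely mechanical, but traits that carry an associated type, such as `CustomMessageReader` in `wire.rs` or `CustomOnionMessageHandler` in the `messenger.rs` diff that follows, must forward the associated type as well (`type CustomMessage = T::CustomMessage;`). Here is a minimal sketch of that wrinkle, using a hypothetical `Reader` trait rather than the real LDK traits:

use std::ops::Deref;
use std::sync::Arc;

// Stand-in for a trait like `CustomMessageReader`: it has an associated type.
trait Reader {
    type Message;
    fn read_msg(&self, bytes: &[u8]) -> Self::Message;
}

// Delegation must pin the associated type to the target's, mirroring
// `type CustomMessage = T::CustomMessage;` in the patch.
impl<R: Deref> Reader for R
where
    R::Target: Reader,
{
    type Message = <R::Target as Reader>::Message;
    fn read_msg(&self, bytes: &[u8]) -> Self::Message {
        self.deref().read_msg(bytes)
    }
}

struct LenReader;
impl Reader for LenReader {
    type Message = usize;
    fn read_msg(&self, bytes: &[u8]) -> usize {
        bytes.len()
    }
}

fn main() {
    // `Arc<LenReader>` is itself a `Reader`, with `Message = usize`
    // inherited from `LenReader` through the blanket impl.
    let reader = Arc::new(LenReader);
    let n: usize = reader.read_msg(&[1, 2, 3]);
    assert_eq!(n, 3);
}

The associated-type equation is what makes the pointer's `Message` the same type as the target's; had the impl picked anything else, the delegated body would not type-check, since `self.deref().read_msg(bytes)` necessarily returns the target's `Message`.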
diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index 5e2ec2d9bb9..e688c020ac6 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -76,21 +76,13 @@ pub trait AOnionMessenger { /// A type implementing [`MessageRouter`] type MessageRouter: MessageRouter; /// A type implementing [`OffersMessageHandler`] - type OffersMessageHandler: OffersMessageHandler + ?Sized; - /// A type that may be dereferenced to [`Self::OffersMessageHandler`] - type OMH: Deref; + type OMH: OffersMessageHandler; /// A type implementing [`AsyncPaymentsMessageHandler`] - type AsyncPaymentsMessageHandler: AsyncPaymentsMessageHandler + ?Sized; - /// A type that may be dereferenced to [`Self::AsyncPaymentsMessageHandler`] - type APH: Deref; + type APH: AsyncPaymentsMessageHandler; /// A type implementing [`DNSResolverMessageHandler`] - type DNSResolverMessageHandler: DNSResolverMessageHandler + ?Sized; - /// A type that may be dereferenced to [`Self::DNSResolverMessageHandler`] - type DRH: Deref; + type DRH: DNSResolverMessageHandler; /// A type implementing [`CustomOnionMessageHandler`] - type CustomOnionMessageHandler: CustomOnionMessageHandler + ?Sized; - /// A type that may be dereferenced to [`Self::CustomOnionMessageHandler`] - type CMH: Deref; + type CMH: CustomOnionMessageHandler; /// Returns a reference to the actual [`OnionMessenger`] object. fn get_om( &self, @@ -113,29 +105,20 @@ impl< L: Logger, NL: NodeIdLookUp, MR: MessageRouter, - OMH: Deref, - APH: Deref, - DRH: Deref, - CMH: Deref, + OMH: OffersMessageHandler, + APH: AsyncPaymentsMessageHandler, + DRH: DNSResolverMessageHandler, + CMH: CustomOnionMessageHandler, > AOnionMessenger for OnionMessenger -where - OMH::Target: OffersMessageHandler, - APH::Target: AsyncPaymentsMessageHandler, - DRH::Target: DNSResolverMessageHandler, - CMH::Target: CustomOnionMessageHandler, { type EntropySource = ES; type NodeSigner = NS; type Logger = L; type NL = NL; type MessageRouter = MR; - type OffersMessageHandler = OMH::Target; type OMH = OMH; - type AsyncPaymentsMessageHandler = APH::Target; type APH = APH; - type DNSResolverMessageHandler = DRH::Target; type DRH = DRH; - type CustomOnionMessageHandler = CMH::Target; type CMH = CMH; fn get_om(&self) -> &OnionMessenger { self @@ -269,16 +252,11 @@ pub struct OnionMessenger< L: Logger, NL: NodeIdLookUp, MR: MessageRouter, - OMH: Deref, - APH: Deref, - DRH: Deref, - CMH: Deref, -> where - OMH::Target: OffersMessageHandler, - APH::Target: AsyncPaymentsMessageHandler, - DRH::Target: DNSResolverMessageHandler, - CMH::Target: CustomOnionMessageHandler, -{ + OMH: OffersMessageHandler, + APH: AsyncPaymentsMessageHandler, + DRH: DNSResolverMessageHandler, + CMH: CustomOnionMessageHandler, +> { entropy_source: ES, #[cfg(test)] pub(super) node_signer: NS, @@ -1007,6 +985,25 @@ pub trait CustomOnionMessageHandler { ) -> Vec<(Self::CustomMessage, MessageSendInstructions)>; } +impl> CustomOnionMessageHandler for C { + type CustomMessage = T::CustomMessage; + fn handle_custom_message( + &self, message: Self::CustomMessage, context: Option>, responder: Option, + ) -> Option<(Self::CustomMessage, ResponseInstruction)> { + self.deref().handle_custom_message(message, context, responder) + } + fn read_custom_message( + &self, message_type: u64, buffer: &mut R, + ) -> Result, msgs::DecodeError> { + self.deref().read_custom_message(message_type, buffer) + } + fn release_pending_custom_messages( + &self, + ) -> Vec<(Self::CustomMessage, 
MessageSendInstructions)> { + self.deref().release_pending_custom_messages() + } +} + /// A processed incoming onion message, containing either a Forward (another onion message) /// or a Receive payload with decrypted contents. #[derive(Clone, Debug)] @@ -1144,13 +1141,10 @@ pub fn create_onion_message< /// /// Returns either the next layer of the onion for forwarding or the decrypted content for the /// receiver. -pub fn peel_onion_message( +pub fn peel_onion_message( msg: &OnionMessage, secp_ctx: &Secp256k1, node_signer: NS, logger: L, custom_handler: CMH, -) -> Result::Target as CustomOnionMessageHandler>::CustomMessage>, ()> -where - CMH::Target: CustomOnionMessageHandler, -{ +) -> Result, ()> { let control_tlvs_ss = match node_signer.ecdh(Recipient::Node, &msg.blinding_point, None) { Ok(ss) => ss, Err(e) => { @@ -1179,7 +1173,7 @@ where onion_decode_ss, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, - (control_tlvs_ss, custom_handler.deref(), receiving_context_auth_key, &logger), + (control_tlvs_ss, &custom_handler, receiving_context_auth_key, &logger), ); // Constructs the next onion message using packet data and blinding logic. @@ -1370,16 +1364,11 @@ impl< L: Logger, NL: NodeIdLookUp, MR: MessageRouter, - OMH: Deref, - APH: Deref, - DRH: Deref, - CMH: Deref, + OMH: OffersMessageHandler, + APH: AsyncPaymentsMessageHandler, + DRH: DNSResolverMessageHandler, + CMH: CustomOnionMessageHandler, > OnionMessenger -where - OMH::Target: OffersMessageHandler, - APH::Target: AsyncPaymentsMessageHandler, - DRH::Target: DNSResolverMessageHandler, - CMH::Target: CustomOnionMessageHandler, { /// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to /// their respective handlers. @@ -1770,13 +1759,13 @@ where pub(crate) fn peel_onion_message( &self, msg: &OnionMessage, - ) -> Result::Target as CustomOnionMessageHandler>::CustomMessage>, ()> { + ) -> Result, ()> { peel_onion_message( msg, &self.secp_ctx, &self.node_signer, &self.logger, - &*self.custom_handler, + &self.custom_handler, ) } @@ -2009,16 +1998,11 @@ impl< L: Logger, NL: NodeIdLookUp, MR: MessageRouter, - OMH: Deref, - APH: Deref, - DRH: Deref, - CMH: Deref, + OMH: OffersMessageHandler, + APH: AsyncPaymentsMessageHandler, + DRH: DNSResolverMessageHandler, + CMH: CustomOnionMessageHandler, > EventsProvider for OnionMessenger -where - OMH::Target: OffersMessageHandler, - APH::Target: AsyncPaymentsMessageHandler, - DRH::Target: DNSResolverMessageHandler, - CMH::Target: CustomOnionMessageHandler, { fn process_pending_events(&self, handler: H) where @@ -2125,16 +2109,11 @@ impl< L: Logger, NL: NodeIdLookUp, MR: MessageRouter, - OMH: Deref, - APH: Deref, - DRH: Deref, - CMH: Deref, + OMH: OffersMessageHandler, + APH: AsyncPaymentsMessageHandler, + DRH: DNSResolverMessageHandler, + CMH: CustomOnionMessageHandler, > BaseMessageHandler for OnionMessenger -where - OMH::Target: OffersMessageHandler, - APH::Target: AsyncPaymentsMessageHandler, - DRH::Target: DNSResolverMessageHandler, - CMH::Target: CustomOnionMessageHandler, { fn provided_node_features(&self) -> NodeFeatures { let mut features = NodeFeatures::empty(); @@ -2192,16 +2171,11 @@ impl< L: Logger, NL: NodeIdLookUp, MR: MessageRouter, - OMH: Deref, - APH: Deref, - DRH: Deref, - CMH: Deref, + OMH: OffersMessageHandler, + APH: AsyncPaymentsMessageHandler, + DRH: DNSResolverMessageHandler, + CMH: CustomOnionMessageHandler, > OnionMessageHandler for OnionMessenger -where - OMH::Target: OffersMessageHandler, - APH::Target: 
AsyncPaymentsMessageHandler, - DRH::Target: DNSResolverMessageHandler, - CMH::Target: CustomOnionMessageHandler, { fn handle_onion_message(&self, peer_node_id: PublicKey, msg: &OnionMessage) { let logger = WithContext::from(&self.logger, Some(peer_node_id), None, None); diff --git a/lightning/src/onion_message/offers.rs b/lightning/src/onion_message/offers.rs index 06988d4db8f..8e3afdfa977 100644 --- a/lightning/src/onion_message/offers.rs +++ b/lightning/src/onion_message/offers.rs @@ -22,6 +22,7 @@ use crate::onion_message::packet::OnionMessageContents; use crate::util::logger::Logger; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use core::fmt; +use core::ops::Deref; use crate::prelude::*; @@ -63,6 +64,17 @@ pub trait OffersMessageHandler { } } +impl> OffersMessageHandler for O { + fn handle_message( + &self, message: OffersMessage, context: Option, responder: Option, + ) -> Option<(OffersMessage, ResponseInstruction)> { + self.deref().handle_message(message, context, responder) + } + fn release_pending_messages(&self) -> Vec<(OffersMessage, MessageSendInstructions)> { + self.deref().release_pending_messages() + } +} + /// Possible BOLT 12 Offers messages sent and received via an [`OnionMessage`]. /// /// [`OnionMessage`]: crate::ln::msgs::OnionMessage From f60a33f3cd5ab93c4eb223aa6133eb96947baf53 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 16 Jan 2026 20:33:09 -0500 Subject: [PATCH 151/242] Drop Deref indirection for chain::Filter Reduces generics and verbosity across the codebase, should provide equivalent behavior. Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 11 +--- lightning-liquidity/src/lsps1/service.rs | 9 +-- lightning-liquidity/src/manager.rs | 58 ++++++------------- lightning/src/chain/chainmonitor.rs | 29 ++++------ lightning/src/chain/channelmonitor.rs | 4 +- lightning/src/chain/mod.rs | 10 ++++ lightning/src/util/anchor_channel_reserves.rs | 5 +- lightning/src/util/sweep.rs | 30 ++++------ 8 files changed, 57 insertions(+), 99 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 4c41a2c67be..905782cf2d5 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -467,7 +467,6 @@ pub const NO_LIQUIDITY_MANAGER: Option< NodeSigner = &(dyn lightning::sign::NodeSigner + Send + Sync), AChannelManager = DynChannelManager, CM = &DynChannelManager, - Filter = dyn chain::Filter + Send + Sync, C = &(dyn chain::Filter + Send + Sync), K = &DummyKVStore, TimeProvider = dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync, @@ -490,7 +489,6 @@ pub const NO_LIQUIDITY_MANAGER_SYNC: Option< NodeSigner = &(dyn lightning::sign::NodeSigner + Send + Sync), AChannelManager = DynChannelManager, CM = &DynChannelManager, - Filter = dyn chain::Filter + Send + Sync, C = &(dyn chain::Filter + Send + Sync), KVStoreSync = dyn lightning::util::persist::KVStoreSync + Send + Sync, KS = &(dyn lightning::util::persist::KVStoreSync + Send + Sync), @@ -931,7 +929,7 @@ use futures_util::{dummy_waker, Joiner, OptionalSelector, Selector, SelectorOutp pub async fn process_events_async< 'a, UL: Deref, - CF: Deref, + CF: chain::Filter, T: BroadcasterInterface, F: FeeEstimator, G: Deref>, @@ -964,7 +962,6 @@ pub async fn process_events_async< ) -> Result<(), lightning::io::Error> where UL::Target: UtxoLookup, - CF::Target: chain::Filter, P::Target: Persist<::Signer>, CM::Target: AChannelManager, OM::Target: AOnionMessenger, 
@@ -1427,7 +1424,7 @@ fn check_and_reset_sleeper< /// synchronous background persistence. pub async fn process_events_async_with_kv_store_sync< UL: Deref, - CF: Deref, + CF: chain::Filter, T: BroadcasterInterface, F: FeeEstimator, G: Deref>, @@ -1460,7 +1457,6 @@ pub async fn process_events_async_with_kv_store_sync< ) -> Result<(), lightning::io::Error> where UL::Target: UtxoLookup, - CF::Target: chain::Filter, P::Target: Persist<::Signer>, CM::Target: AChannelManager, OM::Target: AOnionMessenger, @@ -1537,7 +1533,7 @@ impl BackgroundProcessor { pub fn start< 'a, UL: 'static + Deref, - CF: 'static + Deref, + CF: 'static + chain::Filter, T: 'static + BroadcasterInterface, F: 'static + FeeEstimator + Send, G: 'static + Deref>, @@ -1570,7 +1566,6 @@ impl BackgroundProcessor { ) -> Self where UL::Target: 'static + UtxoLookup, - CF::Target: 'static + chain::Filter, L::Target: 'static + Logger, P::Target: 'static + Persist<::Signer>, CM::Target: AChannelManager, diff --git a/lightning-liquidity/src/lsps1/service.rs b/lightning-liquidity/src/lsps1/service.rs index 154c6f5d527..d7010652c37 100644 --- a/lightning-liquidity/src/lsps1/service.rs +++ b/lightning-liquidity/src/lsps1/service.rs @@ -132,10 +132,9 @@ impl PeerState { } /// The main object allowing to send and receive bLIP-51 / LSPS1 messages. -pub struct LSPS1ServiceHandler +pub struct LSPS1ServiceHandler where CM::Target: AChannelManager, - C::Target: Filter, { entropy_source: ES, channel_manager: CM, @@ -146,11 +145,10 @@ where config: LSPS1ServiceConfig, } -impl +impl LSPS1ServiceHandler where CM::Target: AChannelManager, - C::Target: Filter, { /// Constructs a `LSPS1ServiceHandler`. pub(crate) fn new( @@ -417,11 +415,10 @@ where } } -impl LSPSProtocolMessageHandler +impl LSPSProtocolMessageHandler for LSPS1ServiceHandler where CM::Target: AChannelManager, - C::Target: Filter, { type ProtocolMessage = LSPS1Message; const PROTOCOL_NUMBER: Option = Some(1); diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index c3e9fa48cca..1f11fc8add7 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -112,9 +112,7 @@ pub trait ALiquidityManager { /// A type that may be dereferenced to [`Self::AChannelManager`]. type CM: Deref + Clone; /// A type implementing [`Filter`]. - type Filter: Filter + ?Sized; - /// A type that may be dereferenced to [`Self::Filter`]. - type C: Deref + Clone; + type C: Filter + Clone; /// A type implementing [`KVStore`]. type K: KVStore + Clone; /// A type implementing [`TimeProvider`]. @@ -141,21 +139,19 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > ALiquidityManager for LiquidityManager where CM::Target: AChannelManager, - C::Target: Filter, TP::Target: TimeProvider, { type EntropySource = ES; type NodeSigner = NS; type AChannelManager = CM::Target; type CM = CM; - type Filter = C::Target; type C = C; type K = K; type TimeProvider = TP::Target; @@ -180,9 +176,7 @@ pub trait ALiquidityManagerSync { /// A type that may be dereferenced to [`Self::AChannelManager`]. type CM: Deref + Clone; /// A type implementing [`Filter`]. - type Filter: Filter + ?Sized; - /// A type that may be dereferenced to [`Self::Filter`]. - type C: Deref + Clone; + type C: Filter + Clone; /// A type implementing [`KVStoreSync`]. type KVStoreSync: KVStoreSync + ?Sized; /// A type that may be dereferenced to [`Self::KVStoreSync`]. 
@@ -224,14 +218,13 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > ALiquidityManagerSync for LiquidityManagerSync where CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, { @@ -239,7 +232,6 @@ where type NodeSigner = NS; type AChannelManager = CM::Target; type CM = CM; - type Filter = C::Target; type C = C; type KVStoreSync = KS::Target; type KS = KS; @@ -289,13 +281,12 @@ pub struct LiquidityManager< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > where CM::Target: AChannelManager, - C::Target: Filter, TP::Target: TimeProvider, { pending_messages: Arc, @@ -324,13 +315,12 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, K: KVStore + Clone, T: BroadcasterInterface + Clone, > LiquidityManager where CM::Target: AChannelManager, - C::Target: Filter, { /// Constructor for the [`LiquidityManager`] using the default system clock /// @@ -361,14 +351,13 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > LiquidityManager where CM::Target: AChannelManager, - C::Target: Filter, TP::Target: TimeProvider, { /// Constructor for the [`LiquidityManager`] with a custom time provider. @@ -784,14 +773,13 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > CustomMessageReader for LiquidityManager where CM::Target: AChannelManager, - C::Target: Filter, TP::Target: TimeProvider, { type CustomMessage = RawLSPSMessage; @@ -812,14 +800,13 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > CustomMessageHandler for LiquidityManager where CM::Target: AChannelManager, - C::Target: Filter, TP::Target: TimeProvider, { fn handle_custom_message( @@ -942,14 +929,13 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > Listen for LiquidityManager where CM::Target: AChannelManager, - C::Target: Filter, TP::Target: TimeProvider, { fn filtered_block_connected( @@ -984,14 +970,13 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, K: KVStore + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > Confirm for LiquidityManager where CM::Target: AChannelManager, - C::Target: Filter, TP::Target: TimeProvider, { fn transactions_confirmed( @@ -1026,13 +1011,12 @@ pub struct LiquidityManagerSync< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > where CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, { @@ -1044,14 +1028,13 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: 
Filter + Clone, KS: Deref + Clone, T: BroadcasterInterface + Clone, > LiquidityManagerSync where CM::Target: AChannelManager, KS::Target: KVStoreSync, - C::Target: Filter, { /// Constructor for the [`LiquidityManagerSync`] using the default system clock /// @@ -1093,14 +1076,13 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > LiquidityManagerSync where CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, { @@ -1260,14 +1242,13 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > CustomMessageReader for LiquidityManagerSync where CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, { @@ -1284,14 +1265,13 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > CustomMessageHandler for LiquidityManagerSync where CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, { @@ -1328,14 +1308,13 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > Listen for LiquidityManagerSync where CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, { @@ -1355,14 +1334,13 @@ impl< ES: EntropySource + Clone, NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, T: BroadcasterInterface + Clone, > Confirm for LiquidityManagerSync where CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, { diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 536a1f942b0..2db34738737 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -37,7 +37,7 @@ use crate::chain::channelmonitor::{ WithChannelMonitor, }; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Filter, WatchedOutput}; +use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, WatchedOutput}; use crate::events::{self, Event, EventHandler, ReplayEvent}; use crate::ln::channel_state::ChannelDetails; #[cfg(peer_storage)] @@ -340,14 +340,13 @@ where /// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims pub struct ChainMonitor< ChannelSigner: EcdsaChannelSigner, - C: Deref, + C: chain::Filter, T: BroadcasterInterface, F: FeeEstimator, L: Logger, P: Deref, ES: EntropySource, > where - C::Target: chain::Filter, P::Target: Persist, { monitors: RwLock>>, @@ -378,14 +377,13 @@ impl< K: KVStore + MaybeSend + MaybeSync + 'static, S: FutureSpawner, SP: SignerProvider + MaybeSend + MaybeSync + 'static, - C: Deref, + C: chain::Filter, T: BroadcasterInterface + MaybeSend + MaybeSync + 'static, F: FeeEstimator + MaybeSend + MaybeSync + 'static, L: Logger + MaybeSend + MaybeSync + 'static, ES: EntropySource + MaybeSend + MaybeSync + 'static, > ChainMonitor, ES> where - C::Target: chain::Filter, SP::EcdsaSigner: MaybeSend + 'static, { /// Creates a new `ChainMonitor` used to 
watch on-chain activity pertaining to channels. @@ -422,7 +420,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, + C: chain::Filter, T: BroadcasterInterface, F: FeeEstimator, L: Logger, @@ -430,7 +428,6 @@ impl< ES: EntropySource, > ChainMonitor where - C::Target: chain::Filter, P::Target: Persist, { /// Dispatches to per-channel monitors, which are responsible for updating their on-chain view @@ -1065,7 +1062,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, + C: chain::Filter, T: BroadcasterInterface, F: FeeEstimator, L: Logger, @@ -1073,7 +1070,6 @@ impl< ES: EntropySource, > BaseMessageHandler for ChainMonitor where - C::Target: chain::Filter, P::Target: Persist, { fn get_and_clear_pending_msg_events(&self) -> Vec { @@ -1100,7 +1096,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, + C: chain::Filter, T: BroadcasterInterface, F: FeeEstimator, L: Logger, @@ -1108,14 +1104,13 @@ impl< ES: EntropySource, > SendOnlyMessageHandler for ChainMonitor where - C::Target: chain::Filter, P::Target: Persist, { } impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, + C: chain::Filter, T: BroadcasterInterface, F: FeeEstimator, L: Logger, @@ -1123,7 +1118,6 @@ impl< ES: EntropySource, > chain::Listen for ChainMonitor where - C::Target: chain::Filter, P::Target: Persist, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { @@ -1175,7 +1169,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, + C: chain::Filter, T: BroadcasterInterface, F: FeeEstimator, L: Logger, @@ -1183,7 +1177,6 @@ impl< ES: EntropySource, > chain::Confirm for ChainMonitor where - C::Target: chain::Filter, P::Target: Persist, { fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { @@ -1266,7 +1259,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, + C: chain::Filter, T: BroadcasterInterface, F: FeeEstimator, L: Logger, @@ -1274,7 +1267,6 @@ impl< ES: EntropySource, > chain::Watch for ChainMonitor where - C::Target: chain::Filter, P::Target: Persist, { fn watch_channel( @@ -1458,7 +1450,7 @@ where impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, + C: chain::Filter, T: BroadcasterInterface, F: FeeEstimator, L: Logger, @@ -1466,7 +1458,6 @@ impl< ES: EntropySource, > events::EventsProvider for ChainMonitor where - C::Target: chain::Filter, P::Target: Persist, { /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity. diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 015cae73282..80d0ef125fc 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -42,7 +42,6 @@ use crate::chain::package::{ HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedHTLCOutput, RevokedOutput, }; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::chain::Filter; use crate::chain::{BestBlock, WatchedOutput}; use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent}; use crate::events::{ClosureReason, Event, EventHandler, ReplayEvent}; @@ -2125,8 +2124,7 @@ impl ChannelMonitor { /// calling `chain::Filter::register_output` and `chain::Filter::register_tx` until all outputs /// have been registered. 
#[rustfmt::skip] - pub fn load_outputs_to_watch(&self, filter: &F, logger: &L) - where F::Target: chain::Filter { + pub fn load_outputs_to_watch(&self, filter: &F, logger: &L) { let lock = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*lock, None); for funding in core::iter::once(&lock.funding).chain(&lock.pending_funding) { diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index 9f5c9653f65..e8baa7aad1f 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -412,6 +412,16 @@ pub trait Filter { fn register_output(&self, output: WatchedOutput); } +impl> Filter for F { + fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { + self.deref().register_tx(txid, script_pubkey) + } + + fn register_output(&self, output: WatchedOutput) { + self.deref().register_output(output) + } +} + /// A transaction output watched by a [`ChannelMonitor`] for spends on-chain. /// /// Used to convey to a [`Filter`] such an output with a given spending condition. Any transaction diff --git a/lightning/src/util/anchor_channel_reserves.rs b/lightning/src/util/anchor_channel_reserves.rs index 3e9945f7b87..8026af03d58 100644 --- a/lightning/src/util/anchor_channel_reserves.rs +++ b/lightning/src/util/anchor_channel_reserves.rs @@ -272,20 +272,19 @@ pub fn get_supportable_anchor_channels( pub fn can_support_additional_anchor_channel< AChannelManagerRef: Deref, ChannelSigner: EcdsaChannelSigner, - FilterRef: Deref, + FI: Filter, B: BroadcasterInterface, FE: FeeEstimator, L: Logger, PersistRef: Deref, ES: EntropySource, - ChainMonitorRef: Deref>, + ChainMonitorRef: Deref>, >( context: &AnchorChannelReserveContext, utxos: &[Utxo], a_channel_manager: AChannelManagerRef, chain_monitor: ChainMonitorRef, ) -> bool where AChannelManagerRef::Target: AChannelManager, - FilterRef::Target: Filter, PersistRef::Target: Persist, { let mut anchor_channels = new_hash_set(); diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 2d22244cb17..e69b3a9f3a8 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -341,13 +341,12 @@ pub struct OutputSweeper< B: BroadcasterInterface, D: Deref, E: FeeEstimator, - F: Deref, + F: Filter, K: KVStore, L: Logger, O: Deref, > where D::Target: ChangeDestinationSource, - F::Target: Filter, O::Target: OutputSpender, { sweeper_state: Mutex, @@ -365,14 +364,13 @@ impl< B: BroadcasterInterface, D: Deref, E: FeeEstimator, - F: Deref, + F: Filter, K: KVStore, L: Logger, O: Deref, > OutputSweeper where D::Target: ChangeDestinationSource, - F::Target: Filter, O::Target: OutputSpender, { /// Constructs a new [`OutputSweeper`]. 
@@ -720,14 +718,13 @@ impl< B: BroadcasterInterface, D: Deref, E: FeeEstimator, - F: Deref, + F: Filter + Sync + Send, K: KVStore, L: Logger, O: Deref, > Listen for OutputSweeper where D::Target: ChangeDestinationSource, - F::Target: Filter + Sync + Send, O::Target: OutputSpender, { fn filtered_block_connected( @@ -764,14 +761,13 @@ impl< B: BroadcasterInterface, D: Deref, E: FeeEstimator, - F: Deref, + F: Filter + Sync + Send, K: KVStore, L: Logger, O: Deref, > Confirm for OutputSweeper where D::Target: ChangeDestinationSource, - F::Target: Filter + Sync + Send, O::Target: OutputSpender, { fn transactions_confirmed( @@ -864,14 +860,13 @@ impl< B: BroadcasterInterface, D: Deref, E: FeeEstimator, - F: Deref, + F: Filter + Sync + Send, K: KVStore, L: Logger, O: Deref, > ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeper) where D::Target: ChangeDestinationSource, - F::Target: Filter + Sync + Send, O::Target: OutputSpender, { #[inline] @@ -937,13 +932,12 @@ pub struct OutputSweeperSync< B: BroadcasterInterface, D: Deref, E: FeeEstimator, - F: Deref, + F: Filter, K: Deref, L: Logger, O: Deref, > where D::Target: ChangeDestinationSourceSync, - F::Target: Filter, K::Target: KVStoreSync, O::Target: OutputSpender, { @@ -955,14 +949,13 @@ impl< B: BroadcasterInterface, D: Deref, E: FeeEstimator, - F: Deref, + F: Filter, K: Deref, L: Logger, O: Deref, > OutputSweeperSync where D::Target: ChangeDestinationSourceSync, - F::Target: Filter, K::Target: KVStoreSync, O::Target: OutputSpender, { @@ -1079,14 +1072,13 @@ impl< B: BroadcasterInterface, D: Deref, E: FeeEstimator, - F: Deref, + F: Filter + Sync + Send, K: Deref, L: Logger, O: Deref, > Listen for OutputSweeperSync where D::Target: ChangeDestinationSourceSync, - F::Target: Filter + Sync + Send, K::Target: KVStoreSync, O::Target: OutputSpender, { @@ -1105,14 +1097,13 @@ impl< B: BroadcasterInterface, D: Deref, E: FeeEstimator, - F: Deref, + F: Filter + Sync + Send, K: Deref, L: Logger, O: Deref, > Confirm for OutputSweeperSync where D::Target: ChangeDestinationSourceSync, - F::Target: Filter + Sync + Send, K::Target: KVStoreSync, O::Target: OutputSpender, { @@ -1139,7 +1130,7 @@ impl< B: BroadcasterInterface, D: Deref, E: FeeEstimator, - F: Deref, + F: Filter + Sync + Send, K: Deref, L: Logger, O: Deref, @@ -1147,7 +1138,6 @@ impl< for (BestBlock, OutputSweeperSync) where D::Target: ChangeDestinationSourceSync, - F::Target: Filter + Sync + Send, K::Target: KVStoreSync, O::Target: OutputSpender, { From 644ca0c6a25f332b18990db8dcae8124e31533da Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 20 Jan 2026 10:42:27 -0500 Subject: [PATCH 152/242] Drop Deref indirection for OutputSpender Reduces generics and verbosity across the codebase, should provide equivalent behavior. 
Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 9 +++---- lightning/src/sign/mod.rs | 17 +++++++++++++ lightning/src/util/sweep.rs | 30 ++++++++--------------- 3 files changed, 30 insertions(+), 26 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 905782cf2d5..d765ccac77f 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -946,7 +946,7 @@ pub async fn process_events_async< PM: Deref, LM: Deref, D: Deref, - O: Deref, + O: OutputSpender, K: KVStore, OS: Deref>, S: Deref, @@ -967,7 +967,6 @@ where OM::Target: AOnionMessenger, PM::Target: APeerManager, LM::Target: ALiquidityManager, - O::Target: OutputSpender, D::Target: ChangeDestinationSource, { let async_event_handler = |event| { @@ -1441,7 +1440,7 @@ pub async fn process_events_async_with_kv_store_sync< PM: Deref, LM: Deref, D: Deref, - O: Deref, + O: OutputSpender, K: Deref, OS: Deref>, S: Deref, @@ -1462,7 +1461,6 @@ where OM::Target: AOnionMessenger, PM::Target: APeerManager, LM::Target: ALiquidityManager, - O::Target: OutputSpender, D::Target: ChangeDestinationSourceSync, K::Target: KVStoreSync, { @@ -1556,7 +1554,7 @@ impl BackgroundProcessor { S: 'static + Deref + Send + Sync, SC: for<'b> WriteableScore<'b>, D: 'static + Deref, - O: 'static + Deref, + O: 'static + OutputSpender, K: 'static + Deref + Send, OS: 'static + Deref> + Send, >( @@ -1573,7 +1571,6 @@ impl BackgroundProcessor { PM::Target: APeerManager, LM::Target: ALiquidityManagerSync, D::Target: ChangeDestinationSourceSync, - O::Target: 'static + OutputSpender, K::Target: 'static + KVStoreSync, { let stop_thread = Arc::new(AtomicBool::new(false)); diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index f4f4c5cd4e2..84bfbb902ea 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -1063,6 +1063,23 @@ pub trait OutputSpender { ) -> Result; } +impl> OutputSpender for O { + fn spend_spendable_outputs( + &self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec, + change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32, + locktime: Option, secp_ctx: &Secp256k1, + ) -> Result { + self.deref().spend_spendable_outputs( + descriptors, + outputs, + change_destination_script, + feerate_sat_per_1000_weight, + locktime, + secp_ctx, + ) + } +} + // Primarily needed in doctests because of https://github.com/rust-lang/rust/issues/67295 /// A dynamic [`SignerProvider`] temporarily needed for doc tests. /// diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index e69b3a9f3a8..2aef2186323 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -344,10 +344,9 @@ pub struct OutputSweeper< F: Filter, K: KVStore, L: Logger, - O: Deref, + O: OutputSpender, > where D::Target: ChangeDestinationSource, - O::Target: OutputSpender, { sweeper_state: Mutex, pending_sweep: AtomicBool, @@ -367,11 +366,10 @@ impl< F: Filter, K: KVStore, L: Logger, - O: Deref, + O: OutputSpender, > OutputSweeper where D::Target: ChangeDestinationSource, - O::Target: OutputSpender, { /// Constructs a new [`OutputSweeper`]. 
/// @@ -721,11 +719,10 @@ impl< F: Filter + Sync + Send, K: KVStore, L: Logger, - O: Deref, + O: OutputSpender, > Listen for OutputSweeper where D::Target: ChangeDestinationSource, - O::Target: OutputSpender, { fn filtered_block_connected( &self, header: &Header, txdata: &chain::transaction::TransactionData, height: u32, @@ -764,11 +761,10 @@ impl< F: Filter + Sync + Send, K: KVStore, L: Logger, - O: Deref, + O: OutputSpender, > Confirm for OutputSweeper where D::Target: ChangeDestinationSource, - O::Target: OutputSpender, { fn transactions_confirmed( &self, header: &Header, txdata: &chain::transaction::TransactionData, height: u32, @@ -863,11 +859,10 @@ impl< F: Filter + Sync + Send, K: KVStore, L: Logger, - O: Deref, + O: OutputSpender, > ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeper) where D::Target: ChangeDestinationSource, - O::Target: OutputSpender, { #[inline] fn read( @@ -935,11 +930,10 @@ pub struct OutputSweeperSync< F: Filter, K: Deref, L: Logger, - O: Deref, + O: OutputSpender, > where D::Target: ChangeDestinationSourceSync, K::Target: KVStoreSync, - O::Target: OutputSpender, { sweeper: OutputSweeper, E, F, KVStoreSyncWrapper, L, O>, @@ -952,12 +946,11 @@ impl< F: Filter, K: Deref, L: Logger, - O: Deref, + O: OutputSpender, > OutputSweeperSync where D::Target: ChangeDestinationSourceSync, K::Target: KVStoreSync, - O::Target: OutputSpender, { /// Constructs a new [`OutputSweeperSync`] instance. /// @@ -1075,12 +1068,11 @@ impl< F: Filter + Sync + Send, K: Deref, L: Logger, - O: Deref, + O: OutputSpender, > Listen for OutputSweeperSync where D::Target: ChangeDestinationSourceSync, K::Target: KVStoreSync, - O::Target: OutputSpender, { fn filtered_block_connected( &self, header: &Header, txdata: &chain::transaction::TransactionData, height: u32, @@ -1100,12 +1092,11 @@ impl< F: Filter + Sync + Send, K: Deref, L: Logger, - O: Deref, + O: OutputSpender, > Confirm for OutputSweeperSync where D::Target: ChangeDestinationSourceSync, K::Target: KVStoreSync, - O::Target: OutputSpender, { fn transactions_confirmed( &self, header: &Header, txdata: &chain::transaction::TransactionData, height: u32, @@ -1133,13 +1124,12 @@ impl< F: Filter + Sync + Send, K: Deref, L: Logger, - O: Deref, + O: OutputSpender, > ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeperSync) where D::Target: ChangeDestinationSourceSync, K::Target: KVStoreSync, - O::Target: OutputSpender, { #[inline] fn read( From 101d2062d1f9f4fa0e6c0bf044fe90b1d2050881 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 20 Jan 2026 11:22:03 -0500 Subject: [PATCH 153/242] Drop Deref indirection for UtxoLookup Reduces generics and verbosity across the codebase, should provide equivalent behavior. Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 23 ++++-------- lightning-block-sync/src/gossip.rs | 10 ----- lightning/src/routing/gossip.rs | 46 ++++++----------------- lightning/src/routing/utxo.rs | 16 +++++--- 4 files changed, 30 insertions(+), 65 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index d765ccac77f..3bd39504aab 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -200,11 +200,9 @@ pub enum GossipSync< P: Deref>, R: Deref>, G: Deref>, - U: Deref, + U: UtxoLookup, L: Logger, -> where - U::Target: UtxoLookup, -{ +> { /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7. 
P2P(P), /// Rapid gossip sync from a trusted server. @@ -217,11 +215,9 @@ impl< P: Deref>, R: Deref>, G: Deref>, - U: Deref, + U: UtxoLookup, L: Logger, > GossipSync -where - U::Target: UtxoLookup, { fn network_graph(&self) -> Option<&G> { match self { @@ -258,11 +254,9 @@ where impl< P: Deref>, G: Deref>, - U: Deref, + U: UtxoLookup, L: Logger, > GossipSync, G, U, L> -where - U::Target: UtxoLookup, { /// Initializes a new [`GossipSync::P2P`] variant. pub fn p2p(gossip_sync: P) -> Self { @@ -928,7 +922,7 @@ use futures_util::{dummy_waker, Joiner, OptionalSelector, Selector, SelectorOutp ///``` pub async fn process_events_async< 'a, - UL: Deref, + UL: UtxoLookup, CF: chain::Filter, T: BroadcasterInterface, F: FeeEstimator, @@ -961,7 +955,6 @@ pub async fn process_events_async< sleeper: Sleeper, mobile_interruptable_platform: bool, fetch_time: FetchTime, ) -> Result<(), lightning::io::Error> where - UL::Target: UtxoLookup, P::Target: Persist<::Signer>, CM::Target: AChannelManager, OM::Target: AOnionMessenger, @@ -1422,7 +1415,7 @@ fn check_and_reset_sleeper< /// Async events processor that is based on [`process_events_async`] but allows for [`KVStoreSync`] to be used for /// synchronous background persistence. pub async fn process_events_async_with_kv_store_sync< - UL: Deref, + UL: UtxoLookup, CF: chain::Filter, T: BroadcasterInterface, F: FeeEstimator, @@ -1455,7 +1448,6 @@ pub async fn process_events_async_with_kv_store_sync< sleeper: Sleeper, mobile_interruptable_platform: bool, fetch_time: FetchTime, ) -> Result<(), lightning::io::Error> where - UL::Target: UtxoLookup, P::Target: Persist<::Signer>, CM::Target: AChannelManager, OM::Target: AOnionMessenger, @@ -1530,7 +1522,7 @@ impl BackgroundProcessor { /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable pub fn start< 'a, - UL: 'static + Deref, + UL: 'static + UtxoLookup, CF: 'static + chain::Filter, T: 'static + BroadcasterInterface, F: 'static + FeeEstimator + Send, @@ -1563,7 +1555,6 @@ impl BackgroundProcessor { liquidity_manager: Option, sweeper: Option, logger: L, scorer: Option, ) -> Self where - UL::Target: 'static + UtxoLookup, L::Target: 'static + Logger, P::Target: 'static + Persist<::Signer>, CM::Target: AChannelManager, diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs index 263fa4027ff..477e2785782 100644 --- a/lightning-block-sync/src/gossip.rs +++ b/lightning-block-sync/src/gossip.rs @@ -239,16 +239,6 @@ where } } -impl Deref for GossipVerifier -where - Blocks::Target: UtxoSource, -{ - type Target = Self; - fn deref(&self) -> &Self { - self - } -} - impl UtxoLookup for GossipVerifier where Blocks::Target: UtxoSource, diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index b3059e39e18..d0b348dc50a 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -319,10 +319,7 @@ impl MaybeReadable for NetworkUpdate { /// This network graph is then used for routing payments. /// Provides interface to help with initial routing sync by /// serving historical announcements. 
-pub struct P2PGossipSync>, U: Deref, L: Logger> -where - U::Target: UtxoLookup, -{ +pub struct P2PGossipSync>, U: UtxoLookup, L: Logger> { network_graph: G, #[cfg(any(feature = "_test_utils", test))] pub(super) utxo_lookup: Option, @@ -333,10 +330,7 @@ where logger: L, } -impl>, U: Deref, L: Logger> P2PGossipSync -where - U::Target: UtxoLookup, -{ +impl>, U: UtxoLookup, L: Logger> P2PGossipSync { /// Creates a new tracker of the actual state of the network of channels and nodes, /// assuming an existing [`NetworkGraph`]. /// @@ -534,10 +528,8 @@ pub fn verify_channel_announcement( Ok(()) } -impl>, U: Deref, L: Logger> RoutingMessageHandler +impl>, U: UtxoLookup, L: Logger> RoutingMessageHandler for P2PGossipSync -where - U::Target: UtxoLookup, { fn handle_node_announcement( &self, _their_node_id: Option, msg: &msgs::NodeAnnouncement, @@ -761,10 +753,8 @@ where } } -impl>, U: Deref, L: Logger> BaseMessageHandler +impl>, U: UtxoLookup, L: Logger> BaseMessageHandler for P2PGossipSync -where - U::Target: UtxoLookup, { /// Initiates a stateless sync of routing gossip information with a peer /// using [`gossip_queries`]. The default strategy used by this implementation @@ -1972,12 +1962,9 @@ impl NetworkGraph { /// /// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify /// the corresponding UTXO exists on chain and is correctly-formatted. - pub fn update_channel_from_announcement( + pub fn update_channel_from_announcement( &self, msg: &msgs::ChannelAnnouncement, utxo_lookup: &Option, - ) -> Result<(), LightningError> - where - U::Target: UtxoLookup, - { + ) -> Result<(), LightningError> { self.pre_channel_announcement_validation_check(&msg.contents, utxo_lookup)?; verify_channel_announcement(msg, &self.secp_ctx)?; self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), utxo_lookup) @@ -2002,12 +1989,9 @@ impl NetworkGraph { /// /// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify /// the corresponding UTXO exists on chain and is correctly-formatted. - pub fn update_channel_from_unsigned_announcement( + pub fn update_channel_from_unsigned_announcement( &self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option, - ) -> Result<(), LightningError> - where - U::Target: UtxoLookup, - { + ) -> Result<(), LightningError> { self.pre_channel_announcement_validation_check(&msg, utxo_lookup)?; self.update_channel_from_unsigned_announcement_intern(msg, None, utxo_lookup) } @@ -2126,12 +2110,9 @@ impl NetworkGraph { /// /// In those cases, this will return an `Err` that we can return immediately. Otherwise it will /// return an `Ok(())`. - fn pre_channel_announcement_validation_check( + fn pre_channel_announcement_validation_check( &self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option, - ) -> Result<(), LightningError> - where - U::Target: UtxoLookup, - { + ) -> Result<(), LightningError> { let channels = self.channels.read().unwrap(); if let Some(chan) = channels.get(&msg.short_channel_id) { @@ -2170,13 +2151,10 @@ impl NetworkGraph { /// /// Generally [`Self::pre_channel_announcement_validation_check`] should have been called /// first. 
- fn update_channel_from_unsigned_announcement_intern( + fn update_channel_from_unsigned_announcement_intern( &self, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, utxo_lookup: &Option, - ) -> Result<(), LightningError> - where - U::Target: UtxoLookup, - { + ) -> Result<(), LightningError> { if msg.node_id_1 == msg.node_id_2 || msg.bitcoin_key_1 == msg.bitcoin_key_2 { return Err(LightningError { err: "Channel announcement node had a channel with itself".to_owned(), diff --git a/lightning/src/routing/utxo.rs b/lightning/src/routing/utxo.rs index 089c536ca60..466b9416f41 100644 --- a/lightning/src/routing/utxo.rs +++ b/lightning/src/routing/utxo.rs @@ -75,6 +75,15 @@ pub trait UtxoLookup { ) -> UtxoResult; } +impl> UtxoLookup for U { + fn get_utxo( + &self, chain_hash: &ChainHash, short_channel_id: u64, + async_completion_notifier: Arc, + ) -> UtxoResult { + self.deref().get_utxo(chain_hash, short_channel_id, async_completion_notifier) + } +} + enum ChannelAnnouncement { Full(msgs::ChannelAnnouncement), Unsigned(msgs::UnsignedChannelAnnouncement), @@ -352,13 +361,10 @@ impl PendingChecks { Ok(()) } - pub(super) fn check_channel_announcement( + pub(super) fn check_channel_announcement( &self, utxo_lookup: &Option, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, - ) -> Result, msgs::LightningError> - where - U::Target: UtxoLookup, - { + ) -> Result, msgs::LightningError> { let handle_result = |res| match res { Ok(TxOut { value, script_pubkey }) => { let expected_script = make_funding_redeemscript_from_slices( From e9c6bbccc3ccd4cb121a092229f50e29b3345552 Mon Sep 17 00:00:00 2001 From: shaavan Date: Fri, 23 Jan 2026 18:11:54 +0530 Subject: [PATCH 154/242] Centralize custom TLV validation behind `CustomTlvs` Introduce a `CustomTlvs` wrapper to move sorting and validation of custom TLVs out of `RecipientOnionFields` and into a dedicated type. This makes TLV validity an explicit construction-time concern, allowing `RecipientOnionFields` to assume correctness and remain a simple data carrier. In turn, custom TLV usage becomes easier to extend without duplicating protocol checks. 
--- lightning/src/ln/blinded_payment_tests.rs | 5 +- .../src/ln/max_payment_path_len_tests.rs | 16 +-- lightning/src/ln/msgs.rs | 2 +- lightning/src/ln/outbound_payment.rs | 97 ++++++++++++------- lightning/src/ln/payment_tests.rs | 7 +- 5 files changed, 79 insertions(+), 48 deletions(-) diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 5a7c326ebaa..80da76452c0 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -22,7 +22,7 @@ use crate::ln::msgs::{ }; use crate::ln::onion_payment; use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; -use crate::ln::outbound_payment::{Retry, IDEMPOTENCY_TIMEOUT_TICKS}; +use crate::ln::outbound_payment::{RecipientCustomTlvs, Retry, IDEMPOTENCY_TIMEOUT_TICKS}; use crate::ln::types::ChannelId; use crate::offers::invoice::UnsignedBolt12Invoice; use crate::prelude::*; @@ -1431,8 +1431,7 @@ fn custom_tlvs_to_blinded_path() { ); let recipient_onion_fields = RecipientOnionFields::spontaneous_empty() - .with_custom_tlvs(vec![((1 << 16) + 1, vec![42, 42])]) - .unwrap(); + .with_custom_tlvs(RecipientCustomTlvs::new(vec![((1 << 16) + 1, vec![42, 42])]).unwrap()); nodes[0].node.send_payment(payment_hash, recipient_onion_fields.clone(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap(); check_added_monitors(&nodes[0], 1); diff --git a/lightning/src/ln/max_payment_path_len_tests.rs b/lightning/src/ln/max_payment_path_len_tests.rs index fa7e8d8f132..b947273115e 100644 --- a/lightning/src/ln/max_payment_path_len_tests.rs +++ b/lightning/src/ln/max_payment_path_len_tests.rs @@ -23,7 +23,9 @@ use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, OnionMessageHandler}; use crate::ln::onion_utils; use crate::ln::onion_utils::MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY; -use crate::ln::outbound_payment::{RecipientOnionFields, Retry, RetryableSendFailure}; +use crate::ln::outbound_payment::{ + RecipientCustomTlvs, RecipientOnionFields, Retry, RetryableSendFailure, +}; use crate::prelude::*; use crate::routing::router::{ PaymentParameters, RouteParameters, DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, @@ -259,9 +261,9 @@ fn one_hop_blinded_path_with_custom_tlv() { - final_payload_len_without_custom_tlv; // Check that we can send the maximum custom TLV with 1 blinded hop. - let max_sized_onion = RecipientOnionFields::spontaneous_empty() - .with_custom_tlvs(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])]) - .unwrap(); + let max_sized_onion = RecipientOnionFields::spontaneous_empty().with_custom_tlvs( + RecipientCustomTlvs::new(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])]).unwrap(), + ); let id = PaymentId(payment_hash.0); let no_retry = Retry::Attempts(0); nodes[1] @@ -385,9 +387,9 @@ fn blinded_path_with_custom_tlv() { - reserved_packet_bytes_without_custom_tlv; // Check that we can send the maximum custom TLV size with 0 intermediate unblinded hops. 
- let max_sized_onion = RecipientOnionFields::spontaneous_empty() - .with_custom_tlvs(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])]) - .unwrap(); + let max_sized_onion = RecipientOnionFields::spontaneous_empty().with_custom_tlvs( + RecipientCustomTlvs::new(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])]).unwrap(), + ); let no_retry = Retry::Attempts(0); let id = PaymentId(payment_hash.0); nodes[1] diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 2bb2b244ccb..994443dc0d5 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -3537,7 +3537,7 @@ impl<'a> Writeable for OutboundOnionPayload<'a> { ref invoice_request, ref custom_tlvs, } => { - // We need to update [`ln::outbound_payment::RecipientOnionFields::with_custom_tlvs`] + // We need to update [`ln::outbound_payments::RecipientCustomTlvs::new`] // to reject any reserved types in the experimental range if new ones are ever // standardized. let invoice_request_tlv = invoice_request.map(|invreq| (77_777, invreq.encode())); // TODO: update TLV type once the async payments spec is merged diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 65493829635..67dba864004 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -677,6 +677,54 @@ pub enum ProbeSendFailure { DuplicateProbe, } +/// A validated, sorted set of custom TLVs for payment recipient onion fields. +#[derive(Clone)] +pub struct RecipientCustomTlvs(Vec<(u64, Vec)>); + +impl RecipientCustomTlvs { + /// Each TLV is provided as a `(u64, Vec)` for the type number and + /// serialized value respectively. TLV type numbers must be unique and + /// within the range reserved for custom types, i.e. >= 2^16, otherwise + /// this method will return `Err(())`. + /// + /// This method will also error for TLV types in the experimental range + /// which have since been standardized within the protocol. This currently + /// includes 5482373484 (keysend) and 77_777 (invoice requests for async + /// payments). + pub fn new(mut tlvs: Vec<(u64, Vec)>) -> Result { + tlvs.sort_unstable_by_key(|(typ, _)| *typ); + let mut prev_type = None; + for (typ, _) in tlvs.iter() { + if *typ < 1 << 16 { + return Err(()); + } + if *typ == 5482373484 { + return Err(()); + } // keysend + if *typ == 77_777 { + return Err(()); + } // invoice requests for async payments + match prev_type { + Some(prev) if prev >= *typ => return Err(()), + _ => {}, + } + prev_type = Some(*typ); + } + + Ok(Self(tlvs)) + } + + /// Returns the inner TLV list. + pub(super) fn into_inner(self) -> Vec<(u64, Vec)> { + self.0 + } + + /// Borrow the inner TLV list. + pub fn as_slice(&self) -> &[(u64, Vec)] { + &self.0 + } +} + /// Information which is provided, encrypted, to the payment recipient when sending HTLCs. /// /// This should generally be constructed with data communicated to us from the recipient (via a @@ -739,31 +787,13 @@ impl RecipientOnionFields { Self { payment_secret: None, payment_metadata: None, custom_tlvs: Vec::new() } } - /// Creates a new [`RecipientOnionFields`] from an existing one, adding custom TLVs. Each - /// TLV is provided as a `(u64, Vec)` for the type number and serialized value - /// respectively. TLV type numbers must be unique and within the range - /// reserved for custom types, i.e. >= 2^16, otherwise this method will return `Err(())`. 
- /// - /// This method will also error for types in the experimental range which have been - /// standardized within the protocol, which only includes 5482373484 (keysend) for now. + /// Creates a new [`RecipientOnionFields`] from an existing one, adding validated custom TLVs. /// /// See [`Self::custom_tlvs`] for more info. #[rustfmt::skip] - pub fn with_custom_tlvs(mut self, mut custom_tlvs: Vec<(u64, Vec)>) -> Result { - custom_tlvs.sort_unstable_by_key(|(typ, _)| *typ); - let mut prev_type = None; - for (typ, _) in custom_tlvs.iter() { - if *typ < 1 << 16 { return Err(()); } - if *typ == 5482373484 { return Err(()); } // keysend - if *typ == 77_777 { return Err(()); } // invoice requests for async payments - match prev_type { - Some(prev) if prev >= *typ => return Err(()), - _ => {}, - } - prev_type = Some(*typ); - } - self.custom_tlvs = custom_tlvs; - Ok(self) + pub fn with_custom_tlvs(mut self, custom_tlvs: RecipientCustomTlvs) -> Self { + self.custom_tlvs = custom_tlvs.into_inner(); + self } /// Gets the custom TLVs that will be sent or have been received. @@ -2815,8 +2845,8 @@ mod tests { use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; use crate::ln::inbound_payment::ExpandedKey; use crate::ln::outbound_payment::{ - Bolt12PaymentError, OutboundPayments, PendingOutboundPayment, Retry, RetryableSendFailure, - StaleExpiration, + Bolt12PaymentError, OutboundPayments, PendingOutboundPayment, RecipientCustomTlvs, Retry, + RetryableSendFailure, StaleExpiration, }; #[cfg(feature = "std")] use crate::offers::invoice::DEFAULT_RELATIVE_EXPIRY; @@ -2843,22 +2873,23 @@ mod tests { fn test_recipient_onion_fields_with_custom_tlvs() { let onion_fields = RecipientOnionFields::spontaneous_empty(); - let bad_type_range_tlvs = vec![ + let bad_type_range_tlvs = RecipientCustomTlvs::new(vec![ (0, vec![42]), (1, vec![42; 32]), - ]; - assert!(onion_fields.clone().with_custom_tlvs(bad_type_range_tlvs).is_err()); + ]); + assert!(bad_type_range_tlvs.is_err()); - let keysend_tlv = vec![ + let keysend_tlv = RecipientCustomTlvs::new(vec![ (5482373484, vec![42; 32]), - ]; - assert!(onion_fields.clone().with_custom_tlvs(keysend_tlv).is_err()); + ]); + assert!(keysend_tlv.is_err()); - let good_tlvs = vec![ + let good_tlvs = RecipientCustomTlvs::new(vec![ ((1 << 16) + 1, vec![42]), ((1 << 16) + 3, vec![42; 32]), - ]; - assert!(onion_fields.with_custom_tlvs(good_tlvs).is_ok()); + ]); + assert!(good_tlvs.is_ok()); + onion_fields.with_custom_tlvs(good_tlvs.unwrap()); } #[test] diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 14446239a31..d3be6652237 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -32,7 +32,7 @@ use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::{ - ProbeSendFailure, Retry, RetryableSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, + ProbeSendFailure, RecipientCustomTlvs, Retry, RetryableSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, }; use crate::ln::types::ChannelId; use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; @@ -4539,7 +4539,7 @@ fn test_retry_custom_tlvs() { let custom_tlvs = vec![((1 << 16) + 1, vec![0x42u8; 16])]; let onion = RecipientOnionFields::secret_only(payment_secret); - let onion = onion.with_custom_tlvs(custom_tlvs.clone()).unwrap(); + let onion = onion.with_custom_tlvs(RecipientCustomTlvs::new(custom_tlvs.clone()).unwrap()); 
nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); nodes[0].node.send_payment(hash, onion, id, route_params.clone(), Retry::Attempts(1)).unwrap(); @@ -5079,8 +5079,7 @@ fn peel_payment_onion_custom_tlvs() { let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap(); let mut recipient_onion = RecipientOnionFields::spontaneous_empty() - .with_custom_tlvs(vec![(414141, vec![42; 1200])]) - .unwrap(); + .with_custom_tlvs(RecipientCustomTlvs::new(vec![(414141, vec![42; 1200])]).unwrap()); let prng_seed = chanmon_cfgs[0].keys_manager.get_secure_random_bytes(); let session_priv = SecretKey::from_slice(&prng_seed[..]).expect("RNG is busted"); let keysend_preimage = PaymentPreimage([42; 32]); From 6b20feeeb2feb479d2f6628864b4e25164936e94 Mon Sep 17 00:00:00 2001 From: shaavan Date: Fri, 5 Dec 2025 18:10:19 +0530 Subject: [PATCH 155/242] Introduce custom TLVs in `pay_for_bolt11_invoice` Custom TLVs let the payer attach arbitrary data to the onion packet, enabling everything from richer metadata to custom authentication on the payee's side. Until now, this flexibility existed only through `send_payment`. The simpler `pay_for_bolt11_invoice` API offered no way to pass custom TLVs, limiting its usefulness in flows that rely on additional context. This commit adds custom TLV support to `pay_for_bolt11_invoice`, bringing it to feature parity. --- .../tests/lsps2_integration_tests.rs | 15 ++---- lightning/src/ln/bolt11_payment_tests.rs | 15 ++---- lightning/src/ln/channelmanager.rs | 47 +++++++++++++++---- lightning/src/ln/invoice_utils.rs | 12 +++-- lightning/src/ln/outbound_payment.rs | 15 +++--- lightning/src/ln/payment_tests.rs | 5 +- 6 files changed, 66 insertions(+), 43 deletions(-) diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index 45c2891227d..8dc907ae7fd 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -9,8 +9,7 @@ use common::{ use lightning::events::{ClosureReason, Event}; use lightning::get_event_msg; -use lightning::ln::channelmanager::PaymentId; -use lightning::ln::channelmanager::Retry; +use lightning::ln::channelmanager::{OptionalBolt11PaymentParams, PaymentId}; use lightning::ln::functional_test_utils::*; use lightning::ln::msgs::BaseMessageHandler; use lightning::ln::msgs::ChannelMessageHandler; @@ -1214,8 +1213,7 @@ fn client_trusts_lsp_end_to_end_test() { &invoice, PaymentId(invoice.payment_hash().0), None, - Default::default(), - Retry::Attempts(3), + OptionalBolt11PaymentParams::default(), ) .unwrap(); @@ -1687,8 +1685,7 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { &invoice, PaymentId(invoice.payment_hash().0), None, - Default::default(), - Retry::Attempts(3), + OptionalBolt11PaymentParams::default(), ) .unwrap(); @@ -1878,8 +1875,7 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { &invoice, PaymentId(invoice.payment_hash().0), None, - Default::default(), - Retry::Attempts(3), + OptionalBolt11PaymentParams::default(), ) .unwrap(); @@ -2215,8 +2211,7 @@ fn client_trusts_lsp_partial_fee_does_not_trigger_broadcast() { &invoice, PaymentId(invoice.payment_hash().0), None, - Default::default(), - Retry::Attempts(3), + OptionalBolt11PaymentParams::default(), ) .unwrap(); diff --git a/lightning/src/ln/bolt11_payment_tests.rs 
b/lightning/src/ln/bolt11_payment_tests.rs
index 63c5576e333..690335e034d 100644
--- a/lightning/src/ln/bolt11_payment_tests.rs
+++ b/lightning/src/ln/bolt11_payment_tests.rs
@@ -10,11 +10,10 @@
 //! Tests for verifying the correct end-to-end handling of BOLT11 payments, including metadata propagation.

 use crate::events::Event;
-use crate::ln::channelmanager::{PaymentId, Retry};
+use crate::ln::channelmanager::{OptionalBolt11PaymentParams, PaymentId};
 use crate::ln::functional_test_utils::*;
 use crate::ln::msgs::ChannelMessageHandler;
 use crate::ln::outbound_payment::Bolt11PaymentError;
-use crate::routing::router::RouteParametersConfig;
 use crate::sign::{NodeSigner, Recipient};
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::Hash;
@@ -55,8 +54,7 @@ fn payment_metadata_end_to_end_for_invoice_with_amount() {
 		&invoice,
 		PaymentId(payment_hash.0),
 		Some(100),
-		RouteParametersConfig::default(),
-		Retry::Attempts(0),
+		OptionalBolt11PaymentParams::default(),
 	) {
 		Err(Bolt11PaymentError::InvalidAmount) => (),
 		_ => panic!("Unexpected result"),
@@ -68,8 +66,7 @@
 		&invoice,
 		PaymentId(payment_hash.0),
 		None,
-		RouteParametersConfig::default(),
-		Retry::Attempts(0),
+		OptionalBolt11PaymentParams::default(),
 	)
 	.unwrap();

@@ -123,8 +120,7 @@ fn payment_metadata_end_to_end_for_invoice_with_no_amount() {
 		&invoice,
 		PaymentId(payment_hash.0),
 		None,
-		RouteParametersConfig::default(),
-		Retry::Attempts(0),
+		OptionalBolt11PaymentParams::default(),
 	) {
 		Err(Bolt11PaymentError::InvalidAmount) => (),
 		_ => panic!("Unexpected result"),
@@ -136,8 +132,7 @@
 		&invoice,
 		PaymentId(payment_hash.0),
 		Some(50_000),
-		RouteParametersConfig::default(),
-		Retry::Attempts(0),
+		OptionalBolt11PaymentParams::default(),
 	)
 	.unwrap();

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index fd5e5d15b9f..890831126e7 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -85,8 +85,8 @@ use crate::ln::our_peer_storage::{EncryptedOurPeerStorage, PeerStorageMonitorHol
 #[cfg(test)]
 use crate::ln::outbound_payment;
 use crate::ln::outbound_payment::{
-	OutboundPayments, PendingOutboundPayment, RetryableInvoiceRequest, SendAlongPathArgs,
-	StaleExpiration,
+	OutboundPayments, PendingOutboundPayment, RecipientCustomTlvs, RetryableInvoiceRequest,
+	SendAlongPathArgs, StaleExpiration,
 };
 use crate::ln::types::ChannelId;
 use crate::offers::async_receive_offer_cache::AsyncReceiveOfferCache;
@@ -674,6 +674,36 @@ impl Readable for InterceptId {
 	}
 }

+/// Optional arguments to [`ChannelManager::pay_for_bolt11_invoice`]
+///
+/// These fields will often not need to be set, and the provided [`Self::default`] can be used.
+pub struct OptionalBolt11PaymentParams {
+	/// A set of custom TLVs the user can send along with the payment.
+	pub custom_tlvs: RecipientCustomTlvs,
+	/// Pathfinding options which tweak how the path is constructed to the recipient.
+	pub route_params_config: RouteParametersConfig,
+	/// The number of tries or time during which we'll retry this payment if some paths to the
+	/// recipient fail.
+	///
+	/// Once the retry limit is reached, further path failures will not be retried and the payment
+	/// will ultimately fail once all pending paths have failed (generating an
+	/// [`Event::PaymentFailed`]).
+	pub retry_strategy: Retry,
+}
+
+impl Default for OptionalBolt11PaymentParams {
+	fn default() -> Self {
+		Self {
+			custom_tlvs: RecipientCustomTlvs::new(vec![]).unwrap(),
+			route_params_config: Default::default(),
+			#[cfg(feature = "std")]
+			retry_strategy: Retry::Timeout(core::time::Duration::from_secs(2)),
+			#[cfg(not(feature = "std"))]
+			retry_strategy: Retry::Attempts(3),
+		}
+	}
+}
+
 /// Optional arguments to [`ChannelManager::pay_for_offer`]
 #[cfg_attr(
 	feature = "dnssec",
@@ -2277,19 +2307,18 @@
 /// # use bitcoin::hashes::Hash;
 /// # use lightning::events::{Event, EventsProvider};
 /// # use lightning::types::payment::PaymentHash;
-/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
-/// # use lightning::routing::router::RouteParametersConfig;
+/// # use lightning::ln::channelmanager::{AChannelManager, OptionalBolt11PaymentParams, PaymentId, RecentPaymentDetails};
 /// # use lightning_invoice::Bolt11Invoice;
 /// #
 /// # fn example<T: AChannelManager>(
-/// #     channel_manager: T, invoice: &Bolt11Invoice, route_params_config: RouteParametersConfig,
-/// #     retry: Retry
+/// #     channel_manager: T, invoice: &Bolt11Invoice, optional_params: OptionalBolt11PaymentParams,
 /// # ) {
 /// #     let channel_manager = channel_manager.get_cm();
 /// #     let payment_id = PaymentId([42; 32]);
 /// #     let payment_hash = invoice.payment_hash();
+///
 /// match channel_manager.pay_for_bolt11_invoice(
-///     invoice, payment_id, None, route_params_config, retry
+///     invoice, payment_id, None, optional_params
 /// ) {
 ///     Ok(()) => println!("Sending payment with hash {}", payment_hash),
 ///     Err(e) => println!("Failed sending payment with hash {}: {:?}", payment_hash, e),
@@ -5498,7 +5528,7 @@ where
-	/// To use default settings, call the function with [`RouteParametersConfig::default`].
+	/// To use default settings, call the function with [`OptionalBolt11PaymentParams::default`].
pub fn pay_for_bolt11_invoice( &self, invoice: &Bolt11Invoice, payment_id: PaymentId, amount_msats: Option, - route_params_config: RouteParametersConfig, retry_strategy: Retry, + optional_params: OptionalBolt11PaymentParams, ) -> Result<(), Bolt11PaymentError> { let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -5506,8 +5536,7 @@ where invoice, payment_id, amount_msats, - route_params_config, - retry_strategy, + optional_params, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(), diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index e72ea4518a4..96a62a9e822 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -615,8 +615,8 @@ mod test { use super::*; use crate::chain::channelmonitor::HTLC_FAIL_BACK_BUFFER; use crate::ln::channelmanager::{ - Bolt11InvoiceParameters, PaymentId, PhantomRouteHints, RecipientOnionFields, Retry, - MIN_FINAL_CLTV_EXPIRY_DELTA, + Bolt11InvoiceParameters, OptionalBolt11PaymentParams, PaymentId, PhantomRouteHints, + RecipientOnionFields, Retry, MIN_FINAL_CLTV_EXPIRY_DELTA, }; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; @@ -707,10 +707,14 @@ mod test { assert_eq!(invoice.route_hints()[0].0[0].htlc_minimum_msat, chan.inbound_htlc_minimum_msat); assert_eq!(invoice.route_hints()[0].0[0].htlc_maximum_msat, chan.inbound_htlc_maximum_msat); - let retry = Retry::Attempts(0); nodes[0] .node - .pay_for_bolt11_invoice(&invoice, PaymentId([42; 32]), None, Default::default(), retry) + .pay_for_bolt11_invoice( + &invoice, + PaymentId([42; 32]), + None, + OptionalBolt11PaymentParams::default(), + ) .unwrap(); check_added_monitors(&nodes[0], 1); diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 67dba864004..83977ad13a3 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -18,7 +18,8 @@ use crate::blinded_path::{IntroductionNode, NodeIdLookUp}; use crate::events::{self, PaidBolt12Invoice, PaymentFailureReason}; use crate::ln::channel_state::ChannelDetails; use crate::ln::channelmanager::{ - EventCompletionAction, HTLCSource, PaymentCompleteUpdate, PaymentId, + EventCompletionAction, HTLCSource, OptionalBolt11PaymentParams, PaymentCompleteUpdate, + PaymentId, }; use crate::ln::onion_utils; use crate::ln::onion_utils::{DecodedOnionFailure, HTLCFailReason}; @@ -949,8 +950,7 @@ where pub(super) fn pay_for_bolt11_invoice( &self, invoice: &Bolt11Invoice, payment_id: PaymentId, amount_msats: Option, - route_params_config: RouteParametersConfig, - retry_strategy: Retry, + optional_params: OptionalBolt11PaymentParams, router: &R, first_hops: Vec, compute_inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, @@ -972,19 +972,20 @@ where (None, None) => return Err(Bolt11PaymentError::InvalidAmount), }; - let mut recipient_onion = RecipientOnionFields::secret_only(*invoice.payment_secret()); + let mut recipient_onion = RecipientOnionFields::secret_only(*invoice.payment_secret()) + .with_custom_tlvs(optional_params.custom_tlvs); recipient_onion.payment_metadata = invoice.payment_metadata().map(|v| v.clone()); let payment_params = PaymentParameters::from_bolt11_invoice(invoice) - .with_user_config_ignoring_fee_limit(route_params_config); + .with_user_config_ignoring_fee_limit(optional_params.route_params_config); let 
mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amount); - if let Some(max_fee_msat) = route_params_config.max_total_routing_fee_msat { + if let Some(max_fee_msat) = optional_params.route_params_config.max_total_routing_fee_msat { route_params.max_total_routing_fee_msat = Some(max_fee_msat); } - self.send_payment_for_non_bolt12_invoice(payment_id, payment_hash, recipient_onion, None, retry_strategy, route_params, + self.send_payment_for_non_bolt12_invoice(payment_id, payment_hash, recipient_onion, None, optional_params.retry_strategy, route_params, router, first_hops, compute_inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, send_payment_along_path diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index d3be6652237..e41e60a46a7 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -5403,11 +5403,10 @@ fn max_out_mpp_path() { ..Default::default() }; let invoice = nodes[2].node.create_bolt11_invoice(invoice_params).unwrap(); - let route_params_cfg = crate::routing::router::RouteParametersConfig::default(); + let optional_params = crate::ln::channelmanager::OptionalBolt11PaymentParams::default(); let id = PaymentId([42; 32]); - let retry = Retry::Attempts(0); - nodes[0].node.pay_for_bolt11_invoice(&invoice, id, None, route_params_cfg, retry).unwrap(); + nodes[0].node.pay_for_bolt11_invoice(&invoice, id, None, optional_params).unwrap(); assert!(nodes[0].node.list_recent_payments().len() == 1); check_added_monitors(&nodes[0], 2); // one monitor update per MPP part From 53e668b47d4fee7421981d519f19a69509341e1e Mon Sep 17 00:00:00 2001 From: shaavan Date: Sat, 6 Dec 2025 18:40:30 +0530 Subject: [PATCH 156/242] Expand test to cover Bolt11 custom TLVs Extends the payment flow test to assert that custom TLVs passed to `pay_for_bolt11_invoice` are preserved and delivered correctly. 
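
As a caller-side illustration, a minimal sketch of the combined API (not part of the diff: it assumes the `OptionalBolt11PaymentParams` and `RecipientCustomTlvs` types introduced earlier in this series, uses placeholder `channel_manager` and `invoice` bindings, and relies on `outbound_payment` becoming importable downstream, which only lands later in this series):

    use lightning::ln::channelmanager::{OptionalBolt11PaymentParams, PaymentId};
    use lightning::ln::outbound_payment::RecipientCustomTlvs;

    // Custom TLV type numbers must be above 2^16, unique, and not reserved;
    // `new` validates this up front, before any HTLC is sent.
    let custom_tlvs = RecipientCustomTlvs::new(vec![(65537, b"app-data".to_vec())])
        .expect("type is in the valid custom range");
    let params = OptionalBolt11PaymentParams { custom_tlvs, ..Default::default() };
    channel_manager
        .pay_for_bolt11_invoice(&invoice, PaymentId([42; 32]), None, params)
        .expect("invoice should be payable");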
--- lightning/src/ln/invoice_utils.rs | 50 ++++++++++++++++++++----------- 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index 96a62a9e822..90b3b5c38a9 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -620,7 +620,8 @@ mod test { }; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; - use crate::routing::router::{PaymentParameters, RouteParameters}; + use crate::ln::outbound_payment::RecipientCustomTlvs; + use crate::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; use crate::sign::PhantomKeysManager; use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::config::UserConfig; @@ -663,26 +664,26 @@ mod test { } #[test] - fn create_and_pay_for_bolt11_invoice() { + fn create_and_pay_for_bolt11_invoice_with_custom_tlvs() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); - let node_a_id = nodes[0].node.get_our_node_id(); - + let amt_msat = 10_000; let description = Bolt11InvoiceDescription::Direct(Description::new("test".to_string()).unwrap()); let non_default_invoice_expiry_secs = 4200; + let invoice_params = Bolt11InvoiceParameters { - amount_msats: Some(10_000), + amount_msats: Some(amt_msat), description, invoice_expiry_delta_secs: Some(non_default_invoice_expiry_secs), ..Default::default() }; let invoice = nodes[1].node.create_bolt11_invoice(invoice_params).unwrap(); - assert_eq!(invoice.amount_milli_satoshis(), Some(10_000)); + assert_eq!(invoice.amount_milli_satoshis(), Some(amt_msat)); // If no `min_final_cltv_expiry_delta` is specified, then it should be `MIN_FINAL_CLTV_EXPIRY_DELTA`. assert_eq!(invoice.min_final_cltv_expiry_delta(), MIN_FINAL_CLTV_EXPIRY_DELTA as u64); assert_eq!( @@ -694,6 +695,10 @@ mod test { Duration::from_secs(non_default_invoice_expiry_secs.into()) ); + let (payment_hash, payment_secret) = (invoice.payment_hash(), *invoice.payment_secret()); + + let preimage = nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(); + // Invoice SCIDs should always use inbound SCID aliases over the real channel ID, if one is // available. 
 	let chan = &nodes[1].node.list_usable_channels()[0];
@@ -707,25 +712,34 @@
 	assert_eq!(invoice.route_hints()[0].0[0].htlc_minimum_msat, chan.inbound_htlc_minimum_msat);
 	assert_eq!(invoice.route_hints()[0].0[0].htlc_maximum_msat, chan.inbound_htlc_maximum_msat);

+	let custom_tlvs = RecipientCustomTlvs::new(vec![(65537, vec![42; 42])]).unwrap();
+	let optional_params = OptionalBolt11PaymentParams {
+		custom_tlvs: custom_tlvs.clone(),
+		route_params_config: RouteParametersConfig::default(),
+		retry_strategy: Retry::Attempts(0),
+	};
+
 	nodes[0]
 		.node
-		.pay_for_bolt11_invoice(
-			&invoice,
-			PaymentId([42; 32]),
-			None,
-			OptionalBolt11PaymentParams::default(),
-		)
+		.pay_for_bolt11_invoice(&invoice, PaymentId([42; 32]), None, optional_params)
 		.unwrap();
 	check_added_monitors(&nodes[0], 1);

 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
-	let payment_event = SendEvent::from_event(events.remove(0));
-	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
-	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg);
-	check_added_monitors(&nodes[1], 1);
-	let events = nodes[1].node.get_and_clear_pending_msg_events();
-	assert_eq!(events.len(), 2);
+	let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+
+	let path = &[&nodes[1]];
+	let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, ev)
+		.with_payment_preimage(preimage)
+		.with_payment_secret(payment_secret)
+		.with_custom_tlvs(custom_tlvs.clone().into_inner());
+
+	do_pass_along_path(args);
+	claim_payment_along_route(
+		ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage)
+			.with_custom_tlvs(custom_tlvs.into_inner()),
+	);
 }

 fn do_create_invoice_min_final_cltv_delta(with_custom_delta: bool) {

From f42b9f6d464083da2eff815c52e8268783e441b4 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Fri, 30 Jan 2026 20:03:19 +0000
Subject: [PATCH 157/242] Add CHANGELOG entries for 0.1.9 and 0.2.1

---
 CHANGELOG.md | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a51c5fda8bd..6e83ef2a14d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,29 @@
+# 0.2.1 - Jan 29, 2026 - "Electrum Confirmations Logged"
+
+## API Updates
+ * The `AttributionData` struct is now public, correcting an issue where it was
+   accidentally sealed, preventing construction of some messages (#4268).
+ * The async background processor now exits even if work remains to be done as
+   soon as the sleeper returns the exit flag (#4259).
+
+## Bug Fixes
+ * The presence of unconfirmed transactions no longer causes
+   `ElectrumSyncClient` to spuriously fail to sync (#4341).
+ * `ChannelManager::splice_channel` now properly fails immediately if the
+   peer does not support splicing (#4262, #4274).
+ * A spurious debug assertion was removed which could fail in cases where an
+   HTLC fails to be forwarded after being accepted (#4312).
+ * Many log calls related to outbound payments were corrected to include a
+   `payment_hash` field (#4342).
+
+
+# 0.1.9 - Jan 26, 2026 - "Electrum Confirmations"
+
+## Bug Fixes
+ * The presence of unconfirmed transactions no longer causes
+   `ElectrumSyncClient` to spuriously fail to sync (#4341).
+ + # 0.2 - Dec 2, 2025 - "Natively Asynchronous Splicing" ## API Updates From 5e32d694d4e62bf5eb847b6f8d2237f367125399 Mon Sep 17 00:00:00 2001 From: Thrishalmadasu Date: Sun, 1 Feb 2026 01:26:19 +0530 Subject: [PATCH 158/242] Ignore channel_update with dont_forward bit set in P2PGossipSync --- lightning/src/routing/gossip.rs | 67 +++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index 534bebe7618..29053bb6984 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -568,6 +568,14 @@ where fn handle_channel_update( &self, _their_node_id: Option, msg: &msgs::ChannelUpdate, ) -> Result, LightningError> { + // Ignore channel updates with dont_forward bit set - these are for private channels + // and shouldn't be gossiped or stored in the network graph + if msg.contents.message_flags & (1 << 1) != 0 { + return Err(LightningError { + err: "Ignoring channel_update with dont_forward bit set".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug), + }); + } match self.network_graph.update_channel(msg) { Ok(nodes) if msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY => Ok(nodes), Ok(_) => Ok(None), @@ -3341,6 +3349,65 @@ pub(crate) mod tests { }; } + #[test] + fn handling_channel_update_with_dont_forward_flag() { + // Test that channel updates with the dont_forward bit set are rejected + let secp_ctx = Secp256k1::new(); + let logger = test_utils::TestLogger::new(); + let chain_source = test_utils::TestChainSource::new(Network::Testnet); + let network_graph = NetworkGraph::new(Network::Testnet, &logger); + let gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger); + + let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); + let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); + let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); + + // First announce a channel so we have something to update + let good_script = get_channel_script(&secp_ctx); + *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Ok(TxOut { + value: Amount::from_sat(1000_000), + script_pubkey: good_script.clone(), + })); + + let valid_channel_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + gossip_sync + .handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement) + .unwrap(); + + // Create a channel update with dont_forward bit set (bit 1 of message_flags) + let dont_forward_update = get_signed_channel_update( + |unsigned_channel_update| { + unsigned_channel_update.message_flags = 1 | (1 << 1); // must_be_one + dont_forward + }, + node_1_privkey, + &secp_ctx, + ); + + // The update should be rejected because dont_forward is set + match gossip_sync.handle_channel_update(Some(node_1_pubkey), &dont_forward_update) { + Ok(_) => panic!("Expected channel update with dont_forward to be rejected"), + Err(e) => { + assert_eq!(e.err, "Ignoring channel_update with dont_forward bit set"); + match e.action { + crate::ln::msgs::ErrorAction::IgnoreAndLog(level) => { + assert_eq!(level, crate::util::logger::Level::Debug) + }, + _ => panic!("Expected IgnoreAndLog action"), + } + }, + }; + + // Verify the update was not applied to the network graph + let channels = network_graph.read_only(); + let channel = + channels.channels().get(&valid_channel_announcement.contents.short_channel_id).unwrap(); + assert!( + channel.one_to_two.is_none(), + "Channel update with 
dont_forward should not be stored in network graph" + ); + } + #[test] fn handling_network_update() { let logger = test_utils::TestLogger::new(); From 16a69168ce076e388a081ec3e59e9023516ab041 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 31 Jan 2026 22:49:24 +0000 Subject: [PATCH 159/242] Remove spurious universality comment on `KVStore` There would be nothing wrong with passing a different `KVStore` to `OutputSweeper` from `MonitorUpdatingPersister` (though I'm not really sure why someone would), and in fact I'm not aware of any cases where passing a different `KVStore` to different structs would be problematic. Thus, the sentence in docs is simply removed. --- lightning/src/util/persist.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 440d1d31331..1b750c63cd8 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -258,10 +258,6 @@ where /// namespace, i.e., conflicts between keys and equally named /// primary namespaces/secondary namespaces must be avoided. /// -/// Instantiations of this trait should generally be shared by reference across the lightning -/// node's components. E.g., it would be unsafe to provide a different [`KVStore`] to -/// [`OutputSweeper`] vs [`MonitorUpdatingPersister`]. -/// /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to /// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`. @@ -269,9 +265,6 @@ where /// For a synchronous version of this trait, see [`KVStoreSync`]. /// /// This is not exported to bindings users as async is only supported in Rust. -/// -/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper -/// [`MonitorUpdatingPersister`]: crate::util::persist::MonitorUpdatingPersister // Note that updates to documentation on this trait should be copied to the synchronous version. pub trait KVStore { /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and From 3deecd4a9be9454b34b99cc9539f7f7ddaf1df52 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 22 Jan 2026 10:36:15 +0100 Subject: [PATCH 160/242] Split ChannelManager::read into two stages Introduce ChannelManagerData as an intermediate DTO that holds all deserialized data from a ChannelManager before validation. This splits the read implementation into: 1. Stage 1: Pure deserialization into ChannelManagerData 2. Stage 2: Validation and reconstruction using the DTO The existing validation and reconstruction logic remains unchanged; only the deserialization portion was extracted into the DTO's ReadableArgs implementation. Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 496 +++++++++++++++++++---------- 1 file changed, 319 insertions(+), 177 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 129a4d58171..24bb16186d3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17216,6 +17216,265 @@ impl Readable for VecDeque<(Event, Option)> { } } +// Raw deserialized data from a ChannelManager, before validation or reconstruction. +// This is an internal DTO used in the two-stage deserialization process. 
+pub(super) struct ChannelManagerData<SP: SignerProvider> {
+	chain_hash: ChainHash,
+	best_block_height: u32,
+	best_block_hash: BlockHash,
+	channels: Vec<FundedChannel<SP>>,
+	// Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of
+	// regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from
+	// `Channel{Monitor}` data. See [`ChannelManager::read`].
+	forward_htlcs_legacy: HashMap<u64, Vec<HTLCForwardInfo>>,
+	claimable_htlcs_list: Vec<(PaymentHash, Vec<ClaimableHTLC>)>,
+	peer_init_features: Vec<(PublicKey, InitFeatures)>,
+	pending_events_read: VecDeque<(events::Event, Option<EventCompletionAction>)>,
+	highest_seen_timestamp: u32,
+	pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment>,
+	pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>>,
+	// Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of
+	// regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from
+	// `Channel{Monitor}` data. See [`ChannelManager::read`].
+	pending_intercepted_htlcs_legacy: Option<HashMap<InterceptId, PendingAddHTLCInfo>>,
+	pending_outbound_payments: Option<HashMap<PaymentId, PendingOutboundPayment>>,
+	pending_claiming_payments: Option<HashMap<PaymentHash, ClaimingPayment>>,
+	received_network_pubkey: Option<PublicKey>,
+	monitor_update_blocked_actions_per_peer:
+		Option<Vec<(PublicKey, BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>)>>,
+	fake_scid_rand_bytes: Option<[u8; 32]>,
+	events_override: Option<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
+	claimable_htlc_purposes: Option<Vec<events::PaymentPurpose>>,
+	legacy_in_flight_monitor_updates:
+		Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>>,
+	probing_cookie_secret: Option<[u8; 32]>,
+	claimable_htlc_onion_fields: Option<Vec<Option<RecipientOnionFields>>>,
+	// Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of
+	// regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from
+	// `Channel{Monitor}` data. See [`ChannelManager::read`].
+	decode_update_add_htlcs_legacy: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
+	inbound_payment_id_secret: Option<[u8; 32]>,
+	in_flight_monitor_updates: Option<HashMap<(PublicKey, ChannelId), Vec<ChannelMonitorUpdate>>>,
+	peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>>,
+	async_receive_offer_cache: AsyncReceiveOfferCache,
+}
+
+/// Arguments for deserializing [`ChannelManagerData`].
+struct ChannelManagerDataReadArgs<'a, ES: EntropySource, SP: SignerProvider, L: Logger> { + entropy_source: &'a ES, + signer_provider: &'a SP, + config: UserConfig, + logger: &'a L, +} + +impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> + ReadableArgs> for ChannelManagerData +{ + fn read( + reader: &mut R, args: ChannelManagerDataReadArgs<'a, ES, SP, L>, + ) -> Result { + let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); + + let chain_hash: ChainHash = Readable::read(reader)?; + let best_block_height: u32 = Readable::read(reader)?; + let best_block_hash: BlockHash = Readable::read(reader)?; + + const MAX_ALLOC_SIZE: usize = 1024 * 64; + + let channel_count: u64 = Readable::read(reader)?; + let mut channels = Vec::with_capacity(cmp::min(channel_count as usize, 128)); + for _ in 0..channel_count { + let channel: FundedChannel = FundedChannel::read( + reader, + ( + args.entropy_source, + args.signer_provider, + &provided_channel_type_features(&args.config), + ), + )?; + channels.push(channel); + } + + let forward_htlcs_count: u64 = Readable::read(reader)?; + let mut forward_htlcs_legacy: HashMap> = + hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + for _ in 0..forward_htlcs_count { + let short_channel_id = Readable::read(reader)?; + let pending_forwards_count: u64 = Readable::read(reader)?; + let mut pending_forwards = Vec::with_capacity(cmp::min( + pending_forwards_count as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); + for _ in 0..pending_forwards_count { + pending_forwards.push(Readable::read(reader)?); + } + forward_htlcs_legacy.insert(short_channel_id, pending_forwards); + } + + let claimable_htlcs_count: u64 = Readable::read(reader)?; + let mut claimable_htlcs_list = + Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); + for _ in 0..claimable_htlcs_count { + let payment_hash = Readable::read(reader)?; + let previous_hops_len: u64 = Readable::read(reader)?; + let mut previous_hops = Vec::with_capacity(cmp::min( + previous_hops_len as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); + for _ in 0..previous_hops_len { + previous_hops.push(::read(reader)?); + } + claimable_htlcs_list.push((payment_hash, previous_hops)); + } + + let peer_count: u64 = Readable::read(reader)?; + let mut peer_init_features = Vec::with_capacity(cmp::min(peer_count as usize, 128)); + for _ in 0..peer_count { + let peer_pubkey: PublicKey = Readable::read(reader)?; + let latest_features = Readable::read(reader)?; + peer_init_features.push((peer_pubkey, latest_features)); + } + + let event_count: u64 = Readable::read(reader)?; + let mut pending_events_read: VecDeque<(events::Event, Option)> = + VecDeque::with_capacity(cmp::min( + event_count as usize, + MAX_ALLOC_SIZE / mem::size_of::<(events::Event, Option)>(), + )); + for _ in 0..event_count { + match MaybeReadable::read(reader)? { + Some(event) => pending_events_read.push_back((event, None)), + None => continue, + } + } + + let background_event_count: u64 = Readable::read(reader)?; + for _ in 0..background_event_count { + match ::read(reader)? { + 0 => { + // LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here, + // however we really don't (and never did) need them - we regenerate all + // on-startup monitor updates. 
+ let _: OutPoint = Readable::read(reader)?; + let _: ChannelMonitorUpdate = Readable::read(reader)?; + }, + _ => return Err(DecodeError::InvalidValue), + } + } + + let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111 + let highest_seen_timestamp: u32 = Readable::read(reader)?; + + // The last version where a pending inbound payment may have been added was 0.0.116. + let pending_inbound_payment_count: u64 = Readable::read(reader)?; + for _ in 0..pending_inbound_payment_count { + let payment_hash: PaymentHash = Readable::read(reader)?; + let logger = WithContext::from(args.logger, None, None, Some(payment_hash)); + let inbound: PendingInboundPayment = Readable::read(reader)?; + log_warn!( + logger, + "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", + payment_hash, + inbound + ); + } + + let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; + let mut pending_outbound_payments_compat: HashMap = + hash_map_with_capacity(cmp::min( + pending_outbound_payments_count_compat as usize, + MAX_ALLOC_SIZE / 32, + )); + for _ in 0..pending_outbound_payments_count_compat { + let session_priv = Readable::read(reader)?; + let payment = PendingOutboundPayment::Legacy { + session_privs: hash_set_from_iter([session_priv]), + }; + if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() { + return Err(DecodeError::InvalidValue); + }; + } + + let mut pending_intercepted_htlcs_legacy: Option> = + None; + let mut decode_update_add_htlcs_legacy: Option>> = + None; + // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. + let mut pending_outbound_payments_no_retry: Option>> = + None; + let mut pending_outbound_payments = None; + let mut received_network_pubkey: Option = None; + let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; + let mut probing_cookie_secret: Option<[u8; 32]> = None; + let mut claimable_htlc_purposes = None; + let mut claimable_htlc_onion_fields = None; + let mut pending_claiming_payments = Some(new_hash_map()); + let mut monitor_update_blocked_actions_per_peer: Option>)>> = + Some(Vec::new()); + let mut events_override = None; + let mut legacy_in_flight_monitor_updates: Option< + HashMap<(PublicKey, OutPoint), Vec>, + > = None; + // We use this one over the legacy since they represent the same data, just with a different + // key. We still need to read the legacy one as it's an even TLV. 
+ let mut in_flight_monitor_updates: Option< + HashMap<(PublicKey, ChannelId), Vec>, + > = None; + let mut inbound_payment_id_secret = None; + let mut peer_storage_dir: Option)>> = None; + let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); + read_tlv_fields!(reader, { + (1, pending_outbound_payments_no_retry, option), + (2, pending_intercepted_htlcs_legacy, option), + (3, pending_outbound_payments, option), + (4, pending_claiming_payments, option), + (5, received_network_pubkey, option), + (6, monitor_update_blocked_actions_per_peer, option), + (7, fake_scid_rand_bytes, option), + (8, events_override, option), + (9, claimable_htlc_purposes, optional_vec), + (10, legacy_in_flight_monitor_updates, option), + (11, probing_cookie_secret, option), + (13, claimable_htlc_onion_fields, optional_vec), + (14, decode_update_add_htlcs_legacy, option), + (15, inbound_payment_id_secret, option), + (17, in_flight_monitor_updates, option), + (19, peer_storage_dir, optional_vec), + (21, async_receive_offer_cache, (default_value, async_receive_offer_cache)), + }); + + Ok(ChannelManagerData { + chain_hash, + best_block_height, + best_block_hash, + channels, + forward_htlcs_legacy, + claimable_htlcs_list, + peer_init_features, + pending_events_read, + highest_seen_timestamp, + pending_outbound_payments_compat, + pending_outbound_payments_no_retry, + pending_intercepted_htlcs_legacy, + pending_outbound_payments, + pending_claiming_payments, + received_network_pubkey, + monitor_update_blocked_actions_per_peer, + fake_scid_rand_bytes, + events_override, + claimable_htlc_purposes, + legacy_in_flight_monitor_updates, + probing_cookie_secret, + claimable_htlc_onion_fields, + decode_update_add_htlcs_legacy, + inbound_payment_id_secret, + in_flight_monitor_updates, + peer_storage_dir, + async_receive_offer_cache, + }) + } +} + /// Arguments for the creation of a ChannelManager that are not deserialized. 
/// /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation @@ -17440,11 +17699,52 @@ impl< fn read( reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, ) -> Result { - let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); + // Stage 1: Pure deserialization into DTO + let data: ChannelManagerData = ChannelManagerData::read( + reader, + ChannelManagerDataReadArgs { + entropy_source: &args.entropy_source, + signer_provider: &args.signer_provider, + config: args.config.clone(), + logger: &args.logger, + }, + )?; - let chain_hash: ChainHash = Readable::read(reader)?; - let best_block_height: u32 = Readable::read(reader)?; - let best_block_hash: BlockHash = Readable::read(reader)?; + // Stage 2: Validation and reconstruction + let ChannelManagerData { + chain_hash, + best_block_height, + best_block_hash, + channels, + mut forward_htlcs_legacy, + mut claimable_htlcs_list, + peer_init_features, + mut pending_events_read, + highest_seen_timestamp, + pending_outbound_payments_compat, + pending_outbound_payments_no_retry, + pending_intercepted_htlcs_legacy, + mut pending_outbound_payments, + pending_claiming_payments, + received_network_pubkey, + monitor_update_blocked_actions_per_peer, + mut fake_scid_rand_bytes, + events_override, + claimable_htlc_purposes, + legacy_in_flight_monitor_updates, + mut probing_cookie_secret, + claimable_htlc_onion_fields, + decode_update_add_htlcs_legacy, + mut inbound_payment_id_secret, + mut in_flight_monitor_updates, + peer_storage_dir, + async_receive_offer_cache, + } = data; + + let mut pending_intercepted_htlcs_legacy = + pending_intercepted_htlcs_legacy.unwrap_or_else(new_hash_map); + let mut decode_update_add_htlcs_legacy = + decode_update_add_htlcs_legacy.unwrap_or_else(new_hash_map); let empty_peer_state = || PeerState { channel_by_id: new_hash_map(), @@ -17459,25 +17759,18 @@ impl< is_connected: false, }; + const MAX_ALLOC_SIZE: usize = 1024 * 64; let mut failed_htlcs = Vec::new(); - let channel_count: u64 = Readable::read(reader)?; - let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); + let channel_count = channels.len(); + let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count, 128)); let mut per_peer_state = hash_map_with_capacity(cmp::min( - channel_count as usize, + channel_count, MAX_ALLOC_SIZE / mem::size_of::<(PublicKey, Mutex>)>(), )); - let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); + let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); - for _ in 0..channel_count { - let mut channel: FundedChannel = FundedChannel::read( - reader, - ( - &args.entropy_source, - &args.signer_provider, - &provided_channel_type_features(&args.config), - ), - )?; + for mut channel in channels { let logger = WithChannelContext::from(&args.logger, &channel.context, None); let channel_id = channel.context.channel_id(); channel_id_set.insert(channel_id); @@ -17726,168 +18019,15 @@ impl< } } - const MAX_ALLOC_SIZE: usize = 1024 * 64; - let forward_htlcs_count: u64 = Readable::read(reader)?; - // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of - // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from - // `Channel{Monitor}` data. See `reconstruct_manager_from_monitors` usage below. 
- let mut forward_htlcs_legacy: HashMap> = - hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); - for _ in 0..forward_htlcs_count { - let short_channel_id = Readable::read(reader)?; - let pending_forwards_count: u64 = Readable::read(reader)?; - let mut pending_forwards = Vec::with_capacity(cmp::min( - pending_forwards_count as usize, - MAX_ALLOC_SIZE / mem::size_of::(), - )); - for _ in 0..pending_forwards_count { - pending_forwards.push(Readable::read(reader)?); - } - forward_htlcs_legacy.insert(short_channel_id, pending_forwards); - } - - let claimable_htlcs_count: u64 = Readable::read(reader)?; - let mut claimable_htlcs_list = - Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); - for _ in 0..claimable_htlcs_count { - let payment_hash = Readable::read(reader)?; - let previous_hops_len: u64 = Readable::read(reader)?; - let mut previous_hops = Vec::with_capacity(cmp::min( - previous_hops_len as usize, - MAX_ALLOC_SIZE / mem::size_of::(), - )); - for _ in 0..previous_hops_len { - previous_hops.push(::read(reader)?); - } - claimable_htlcs_list.push((payment_hash, previous_hops)); - } - - let peer_count: u64 = Readable::read(reader)?; - for _ in 0..peer_count { - let peer_pubkey: PublicKey = Readable::read(reader)?; - let latest_features = Readable::read(reader)?; + // Apply peer features from deserialized data + for (peer_pubkey, latest_features) in peer_init_features { if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) { peer_state.get_mut().unwrap().latest_features = latest_features; } } - let event_count: u64 = Readable::read(reader)?; - let mut pending_events_read: VecDeque<(events::Event, Option)> = - VecDeque::with_capacity(cmp::min( - event_count as usize, - MAX_ALLOC_SIZE / mem::size_of::<(events::Event, Option)>(), - )); - for _ in 0..event_count { - match MaybeReadable::read(reader)? { - Some(event) => pending_events_read.push_back((event, None)), - None => continue, - } - } - - let background_event_count: u64 = Readable::read(reader)?; - for _ in 0..background_event_count { - match ::read(reader)? { - 0 => { - // LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here, - // however we really don't (and never did) need them - we regenerate all - // on-startup monitor updates. - let _: OutPoint = Readable::read(reader)?; - let _: ChannelMonitorUpdate = Readable::read(reader)?; - }, - _ => return Err(DecodeError::InvalidValue), - } - } - - let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111 - let highest_seen_timestamp: u32 = Readable::read(reader)?; - - // The last version where a pending inbound payment may have been added was 0.0.116. 
- let pending_inbound_payment_count: u64 = Readable::read(reader)?; - for _ in 0..pending_inbound_payment_count { - let payment_hash: PaymentHash = Readable::read(reader)?; - let logger = WithContext::from(&args.logger, None, None, Some(payment_hash)); - let inbound: PendingInboundPayment = Readable::read(reader)?; - log_warn!( - logger, - "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", - payment_hash, - inbound - ); - } - - let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; - let mut pending_outbound_payments_compat: HashMap = - hash_map_with_capacity(cmp::min( - pending_outbound_payments_count_compat as usize, - MAX_ALLOC_SIZE / 32, - )); - for _ in 0..pending_outbound_payments_count_compat { - let session_priv = Readable::read(reader)?; - let payment = PendingOutboundPayment::Legacy { - session_privs: hash_set_from_iter([session_priv]), - }; - if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() { - return Err(DecodeError::InvalidValue); - }; - } - - // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of - // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from - // `Channel{Monitor}` data. See `reconstruct_manager_from_monitors` below. - let mut pending_intercepted_htlcs_legacy: Option> = - None; - let mut decode_update_add_htlcs_legacy: Option>> = - None; - - // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. - let mut pending_outbound_payments_no_retry: Option>> = - None; - let mut pending_outbound_payments = None; - let mut received_network_pubkey: Option = None; - let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; - let mut probing_cookie_secret: Option<[u8; 32]> = None; - let mut claimable_htlc_purposes = None; - let mut claimable_htlc_onion_fields = None; - let mut pending_claiming_payments = Some(new_hash_map()); - let mut monitor_update_blocked_actions_per_peer: Option>)>> = - Some(Vec::new()); - let mut events_override = None; - let mut legacy_in_flight_monitor_updates: Option< - HashMap<(PublicKey, OutPoint), Vec>, - > = None; - // We use this one over the legacy since they represent the same data, just with a different - // key. We still need to read the legacy one as it's an even TLV. 
- let mut in_flight_monitor_updates: Option< - HashMap<(PublicKey, ChannelId), Vec>, - > = None; - let mut inbound_payment_id_secret = None; - let mut peer_storage_dir: Option)>> = None; - let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); - read_tlv_fields!(reader, { - (1, pending_outbound_payments_no_retry, option), - (2, pending_intercepted_htlcs_legacy, option), - (3, pending_outbound_payments, option), - (4, pending_claiming_payments, option), - (5, received_network_pubkey, option), - (6, monitor_update_blocked_actions_per_peer, option), - (7, fake_scid_rand_bytes, option), - (8, events_override, option), - (9, claimable_htlc_purposes, optional_vec), - (10, legacy_in_flight_monitor_updates, option), - (11, probing_cookie_secret, option), - (13, claimable_htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs_legacy, option), - (15, inbound_payment_id_secret, option), - (17, in_flight_monitor_updates, option), - (19, peer_storage_dir, optional_vec), - (21, async_receive_offer_cache, (default_value, async_receive_offer_cache)), - }); - let mut decode_update_add_htlcs_legacy = - decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map()); - let mut pending_intercepted_htlcs_legacy = - pending_intercepted_htlcs_legacy.unwrap_or_else(|| new_hash_map()); + // Post-deserialization processing let mut decode_update_add_htlcs = new_hash_map(); - let peer_storage_dir: Vec<(PublicKey, Vec)> = peer_storage_dir.unwrap_or_else(Vec::new); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); } @@ -17919,9 +18059,11 @@ impl< } let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap()); - for (peer_pubkey, peer_storage) in peer_storage_dir { - if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) { - peer_state.get_mut().unwrap().peer_storage = peer_storage; + if let Some(peer_storage_dir) = peer_storage_dir { + for (peer_pubkey, peer_storage) in peer_storage_dir { + if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) { + peer_state.get_mut().unwrap().peer_storage = peer_storage; + } } } @@ -19260,7 +19402,7 @@ impl< //TODO: Broadcast channel update for closed channels, but only after we've made a //connection or two. - Ok((best_block_hash.clone(), channel_manager)) + Ok((best_block_hash, channel_manager)) } } From fe6fd6489c1c3048d8a135846da92c875d618270 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 28 Jan 2026 10:42:35 +0100 Subject: [PATCH 161/242] Unwrap TLV fields with initialized defaults in ChannelManagerData For TLV fields that are initialized with Some(...) before reading and thus always have a value after deserialization, remove the Option wrapper from ChannelManagerData and unwrap when constructing it. This applies to pending_claiming_payments and monitor_update_blocked_actions_per_peer. Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 24bb16186d3..be3c8f54826 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17238,10 +17238,10 @@ pub(super) struct ChannelManagerData { // `Channel{Monitor}` data. See [`ChannelManager::read`]. 
pending_intercepted_htlcs_legacy: Option>, pending_outbound_payments: Option>, - pending_claiming_payments: Option>, + pending_claiming_payments: HashMap, received_network_pubkey: Option, monitor_update_blocked_actions_per_peer: - Option>)>>, + Vec<(PublicKey, BTreeMap>)>, fake_scid_rand_bytes: Option<[u8; 32]>, events_override: Option)>>, claimable_htlc_purposes: Option>, @@ -17457,9 +17457,12 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> pending_outbound_payments_no_retry, pending_intercepted_htlcs_legacy, pending_outbound_payments, - pending_claiming_payments, + // unwrap safety: pending_claiming_payments is guaranteed to be `Some` after read_tlv_fields + pending_claiming_payments: pending_claiming_payments.unwrap(), received_network_pubkey, - monitor_update_blocked_actions_per_peer, + // unwrap safety: monitor_update_blocked_actions_per_peer is guaranteed to be `Some` after read_tlv_fields + monitor_update_blocked_actions_per_peer: monitor_update_blocked_actions_per_peer + .unwrap(), fake_scid_rand_bytes, events_override, claimable_htlc_purposes, @@ -18890,9 +18893,7 @@ impl< let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator); - for (node_id, monitor_update_blocked_actions) in - monitor_update_blocked_actions_per_peer.unwrap() - { + for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer { if let Some(peer_state) = per_peer_state.get(&node_id) { for (channel_id, actions) in monitor_update_blocked_actions.iter() { let logger = @@ -19078,7 +19079,7 @@ impl< decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, - pending_claiming_payments: pending_claiming_payments.unwrap(), + pending_claiming_payments, }), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), short_to_chan_info: FairRwLock::new(short_to_chan_info), From f08f4b4fdd1638bebb6d7ff36ed35c8c6d17e9dc Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 28 Jan 2026 10:50:15 +0100 Subject: [PATCH 162/242] Extract second stage of ChannelManager::read into from_channel_manager_data Move the validation and reconstruction logic (stage 2) from the ReadableArgs::read implementation into a new pub(super) constructor `from_channel_manager_data`. This separates the pure deserialization from the complex reconstruction logic, making the code more modular and easier to test. The read function now: 1. Deserializes into ChannelManagerData (stage 1) 2. 
Calls from_channel_manager_data for validation/reconstruction (stage 2) Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index be3c8f54826..43a8e1d1735 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17700,7 +17700,7 @@ impl< for (BlockHash, ChannelManager) { fn read( - reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, + reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, ) -> Result { // Stage 1: Pure deserialization into DTO let data: ChannelManagerData = ChannelManagerData::read( @@ -17714,6 +17714,34 @@ impl< )?; // Stage 2: Validation and reconstruction + ChannelManager::from_channel_manager_data(data, args) + } +} + +impl< + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger + Clone, + > ChannelManager +{ + /// Constructs a `ChannelManager` from deserialized data and runtime dependencies. + /// + /// This is the second stage of deserialization, taking the raw [`ChannelManagerData`] and combining it with the + /// provided [`ChannelManagerReadArgs`] to produce a fully functional `ChannelManager`. + /// + /// This method performs validation, reconciliation with [`ChannelMonitor`]s, and reconstruction of internal state. + /// It may close channels if monitors are ahead of the serialized state, and will replay any pending + /// [`ChannelMonitorUpdate`]s. + pub(super) fn from_channel_manager_data( + data: ChannelManagerData, + mut args: ChannelManagerReadArgs<'_, M, T, ES, NS, SP, F, R, MR, L>, + ) -> Result<(BlockHash, Self), DecodeError> { let ChannelManagerData { chain_hash, best_block_height, From d2c55dd2d8906d9398bf23a78cf427b3197f06b3 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 28 Jan 2026 15:35:08 +0100 Subject: [PATCH 163/242] Resolve legacy TLV fields during ChannelManagerData deserialization Move the resolution of legacy/compatibility TLV fields from from_channel_manager_data (stage 2) into ChannelManagerData::read (stage 1). 
This keeps ChannelManagerData minimal by consolidating mutually exclusive fields into their final form during deserialization: - pending_outbound_payments: Merge TLV 3, TLV 1 (no_retry), and non-TLV compat fields into a single HashMap - in_flight_monitor_updates: Convert legacy TLV 10 (keyed by OutPoint) to TLV 17 format (keyed by ChannelId) - pending_events: Apply events_override (TLV 8) if present Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 97 ++++++++++++++---------------- 1 file changed, 46 insertions(+), 51 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 43a8e1d1735..516b902c5f4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17231,22 +17231,17 @@ pub(super) struct ChannelManagerData { peer_init_features: Vec<(PublicKey, InitFeatures)>, pending_events_read: VecDeque<(events::Event, Option)>, highest_seen_timestamp: u32, - pending_outbound_payments_compat: HashMap, - pending_outbound_payments_no_retry: Option>>, // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from // `Channel{Monitor}` data. See [`ChannelManager::read`]. pending_intercepted_htlcs_legacy: Option>, - pending_outbound_payments: Option>, + pending_outbound_payments: HashMap, pending_claiming_payments: HashMap, received_network_pubkey: Option, monitor_update_blocked_actions_per_peer: Vec<(PublicKey, BTreeMap>)>, fake_scid_rand_bytes: Option<[u8; 32]>, - events_override: Option)>>, claimable_htlc_purposes: Option>, - legacy_in_flight_monitor_updates: - Option>>, probing_cookie_secret: Option<[u8; 32]>, claimable_htlc_onion_fields: Option>>, // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of @@ -17443,6 +17438,49 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> (21, async_receive_offer_cache, (default_value, async_receive_offer_cache)), }); + // Merge legacy pending_outbound_payments fields into a single HashMap. + // Priority: pending_outbound_payments (TLV 3) > pending_outbound_payments_no_retry (TLV 1) + // > pending_outbound_payments_compat (non-TLV legacy) + let pending_outbound_payments = if let Some(payments) = pending_outbound_payments { + payments + } else if let Some(mut pending_outbound_payments_no_retry) = + pending_outbound_payments_no_retry + { + let mut outbounds = new_hash_map(); + for (id, session_privs) in pending_outbound_payments_no_retry.drain() { + outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs }); + } + outbounds + } else { + pending_outbound_payments_compat + }; + + // Merge legacy in-flight monitor updates (keyed by OutPoint) into the new format (keyed by + // ChannelId). + if let Some(legacy_in_flight_upds) = legacy_in_flight_monitor_updates { + // We should never serialize an empty map. + if legacy_in_flight_upds.is_empty() { + return Err(DecodeError::InvalidValue); + } + if in_flight_monitor_updates.is_none() { + let in_flight_upds = in_flight_monitor_updates.get_or_insert_with(new_hash_map); + for ((counterparty_node_id, funding_txo), updates) in legacy_in_flight_upds { + // All channels with legacy in flight monitor updates are v1 channels. 
+ let channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); + in_flight_upds.insert((counterparty_node_id, channel_id), updates); + } + } else if in_flight_monitor_updates.as_ref().unwrap().is_empty() { + // Both TLVs present - the new one takes precedence but must not be empty. + return Err(DecodeError::InvalidValue); + } + } + + // Resolve events_override: if present, it replaces pending_events. + let mut pending_events_read = pending_events_read; + if let Some(events) = events_override { + pending_events_read = events; + } + Ok(ChannelManagerData { chain_hash, best_block_height, @@ -17453,8 +17491,6 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> peer_init_features, pending_events_read, highest_seen_timestamp, - pending_outbound_payments_compat, - pending_outbound_payments_no_retry, pending_intercepted_htlcs_legacy, pending_outbound_payments, // unwrap safety: pending_claiming_payments is guaranteed to be `Some` after read_tlv_fields @@ -17464,9 +17500,7 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> monitor_update_blocked_actions_per_peer: monitor_update_blocked_actions_per_peer .unwrap(), fake_scid_rand_bytes, - events_override, claimable_htlc_purposes, - legacy_in_flight_monitor_updates, probing_cookie_secret, claimable_htlc_onion_fields, decode_update_add_htlcs_legacy, @@ -17752,17 +17786,13 @@ impl< peer_init_features, mut pending_events_read, highest_seen_timestamp, - pending_outbound_payments_compat, - pending_outbound_payments_no_retry, pending_intercepted_htlcs_legacy, - mut pending_outbound_payments, + pending_outbound_payments, pending_claiming_payments, received_network_pubkey, monitor_update_blocked_actions_per_peer, mut fake_scid_rand_bytes, - events_override, claimable_htlc_purposes, - legacy_in_flight_monitor_updates, mut probing_cookie_secret, claimable_htlc_onion_fields, decode_update_add_htlcs_legacy, @@ -18071,24 +18101,11 @@ impl< inbound_payment_id_secret = Some(args.entropy_source.get_secure_random_bytes()); } - if let Some(events) = events_override { - pending_events_read = events; - } - if !channel_closures.is_empty() { pending_events_read.append(&mut channel_closures); } - if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() { - pending_outbound_payments = Some(pending_outbound_payments_compat); - } else if pending_outbound_payments.is_none() { - let mut outbounds = new_hash_map(); - for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() { - outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs }); - } - pending_outbound_payments = Some(outbounds); - } - let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap()); + let pending_outbounds = OutboundPayments::new(pending_outbound_payments); if let Some(peer_storage_dir) = peer_storage_dir { for (peer_pubkey, peer_storage) in peer_storage_dir { @@ -18098,28 +18115,6 @@ impl< } } - // Handle transitioning from the legacy TLV to the new one on upgrades. - if let Some(legacy_in_flight_upds) = legacy_in_flight_monitor_updates { - // We should never serialize an empty map. - if legacy_in_flight_upds.is_empty() { - return Err(DecodeError::InvalidValue); - } - if in_flight_monitor_updates.is_none() { - let in_flight_upds = - in_flight_monitor_updates.get_or_insert_with(|| new_hash_map()); - for ((counterparty_node_id, funding_txo), updates) in legacy_in_flight_upds { - // All channels with legacy in flight monitor updates are v1 channels. 
- let channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); - in_flight_upds.insert((counterparty_node_id, channel_id), updates); - } - } else { - // We should never serialize an empty map. - if in_flight_monitor_updates.as_ref().unwrap().is_empty() { - return Err(DecodeError::InvalidValue); - } - } - } - // We have to replay (or skip, if they were completed after we wrote the `ChannelManager`) // each `ChannelMonitorUpdate` in `in_flight_monitor_updates`. After doing so, we have to // check that each channel we have isn't newer than the latest `ChannelMonitorUpdate`(s) we From 397cc1dcbea7dbdccab3717e419587936cf3f382 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 29 Jan 2026 11:18:27 +0100 Subject: [PATCH 164/242] Simplify legacy TLV resolution in ChannelManagerData::read The previous commit intentionally kept the code as close to a move as possible to ease review. This follow-up applies idiomatic simplifications: - Use unwrap_or for events_override resolution - Use unwrap_or_else with iterator chains for pending_outbound_payments - Use match with into_iter().collect() for in_flight_monitor_updates Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 57 ++++++++++++++++-------------- 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 516b902c5f4..52fcf69cee3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17441,19 +17441,18 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> // Merge legacy pending_outbound_payments fields into a single HashMap. // Priority: pending_outbound_payments (TLV 3) > pending_outbound_payments_no_retry (TLV 1) // > pending_outbound_payments_compat (non-TLV legacy) - let pending_outbound_payments = if let Some(payments) = pending_outbound_payments { - payments - } else if let Some(mut pending_outbound_payments_no_retry) = - pending_outbound_payments_no_retry - { - let mut outbounds = new_hash_map(); - for (id, session_privs) in pending_outbound_payments_no_retry.drain() { - outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs }); - } - outbounds - } else { - pending_outbound_payments_compat - }; + let pending_outbound_payments = pending_outbound_payments + .or_else(|| { + pending_outbound_payments_no_retry.map(|no_retry| { + no_retry + .into_iter() + .map(|(id, session_privs)| { + (id, PendingOutboundPayment::Legacy { session_privs }) + }) + .collect() + }) + }) + .unwrap_or(pending_outbound_payments_compat); // Merge legacy in-flight monitor updates (keyed by OutPoint) into the new format (keyed by // ChannelId). @@ -17462,24 +17461,30 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> if legacy_in_flight_upds.is_empty() { return Err(DecodeError::InvalidValue); } - if in_flight_monitor_updates.is_none() { - let in_flight_upds = in_flight_monitor_updates.get_or_insert_with(new_hash_map); - for ((counterparty_node_id, funding_txo), updates) in legacy_in_flight_upds { + match &in_flight_monitor_updates { + None => { + // Convert legacy format (OutPoint) to new format (ChannelId). // All channels with legacy in flight monitor updates are v1 channels. - let channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); - in_flight_upds.insert((counterparty_node_id, channel_id), updates); - } - } else if in_flight_monitor_updates.as_ref().unwrap().is_empty() { - // Both TLVs present - the new one takes precedence but must not be empty. 
- return Err(DecodeError::InvalidValue); + in_flight_monitor_updates = Some( + legacy_in_flight_upds + .into_iter() + .map(|((counterparty_node_id, funding_txo), updates)| { + let channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); + ((counterparty_node_id, channel_id), updates) + }) + .collect(), + ); + }, + Some(upds) if upds.is_empty() => { + // Both TLVs present but new one is empty - invalid. + return Err(DecodeError::InvalidValue); + }, + Some(_) => {}, // New format takes precedence, nothing to do. } } // Resolve events_override: if present, it replaces pending_events. - let mut pending_events_read = pending_events_read; - if let Some(events) = events_override { - pending_events_read = events; - } + let pending_events_read = events_override.unwrap_or(pending_events_read); Ok(ChannelManagerData { chain_hash, From 9a05daf3f9089d4d4b4c1a756cc9e7199e250bad Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 30 Jan 2026 10:08:28 +0100 Subject: [PATCH 165/242] Resolve optional hash map TLV fields during ChannelManagerData deserialization Move the unwrap_or_else(new_hash_map) resolution for pending_intercepted_htlcs and decode_update_add_htlcs from stage 2 (from_channel_manager_data) to stage 1 (ChannelManagerData::read). This changes the struct fields from Option to HashMap, making it explicit that these are always present after deserialization. Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 52fcf69cee3..21cc7d2ca1b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17234,7 +17234,7 @@ pub(super) struct ChannelManagerData { // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from // `Channel{Monitor}` data. See [`ChannelManager::read`]. - pending_intercepted_htlcs_legacy: Option<HashMap<InterceptId, PendingAddHTLCInfo>>, + pending_intercepted_htlcs_legacy: HashMap<InterceptId, PendingAddHTLCInfo>, pending_outbound_payments: HashMap<PaymentId, PendingOutboundPayment>, pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>, received_network_pubkey: Option<PublicKey>, @@ -17247,7 +17247,7 @@ // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from // `Channel{Monitor}` data. See [`ChannelManager::read`].
- decode_update_add_htlcs_legacy: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>, + decode_update_add_htlcs_legacy: HashMap<u64, Vec<msgs::UpdateAddHTLC>>, inbound_payment_id_secret: Option<[u8; 32]>, in_flight_monitor_updates: Option<BTreeMap<(PublicKey, ChannelId), Vec<ChannelMonitorUpdate>>>, peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>>, @@ -17496,7 +17496,8 @@ peer_init_features, pending_events_read, highest_seen_timestamp, - pending_intercepted_htlcs_legacy, + pending_intercepted_htlcs_legacy: pending_intercepted_htlcs_legacy + .unwrap_or_else(new_hash_map), pending_outbound_payments, // unwrap safety: pending_claiming_payments is guaranteed to be `Some` after read_tlv_fields pending_claiming_payments: pending_claiming_payments.unwrap(), @@ -17508,7 +17509,8 @@ claimable_htlc_purposes, probing_cookie_secret, claimable_htlc_onion_fields, - decode_update_add_htlcs_legacy, + decode_update_add_htlcs_legacy: decode_update_add_htlcs_legacy + .unwrap_or_else(new_hash_map), inbound_payment_id_secret, in_flight_monitor_updates, peer_storage_dir, @@ -17791,7 +17793,7 @@ peer_init_features, mut pending_events_read, highest_seen_timestamp, - pending_intercepted_htlcs_legacy, + mut pending_intercepted_htlcs_legacy, pending_outbound_payments, pending_claiming_payments, received_network_pubkey, @@ -17800,18 +17802,13 @@ claimable_htlc_purposes, mut probing_cookie_secret, claimable_htlc_onion_fields, - decode_update_add_htlcs_legacy, + mut decode_update_add_htlcs_legacy, mut inbound_payment_id_secret, mut in_flight_monitor_updates, peer_storage_dir, async_receive_offer_cache, } = data; - let mut pending_intercepted_htlcs_legacy = - pending_intercepted_htlcs_legacy.unwrap_or_else(new_hash_map); - let mut decode_update_add_htlcs_legacy = - decode_update_add_htlcs_legacy.unwrap_or_else(new_hash_map); - let empty_peer_state = || PeerState { channel_by_id: new_hash_map(), inbound_channel_request_by_id: new_hash_map(), From 0bc5c95484345c6289dffd9a379982a9b66d688f Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 2 Feb 2026 13:55:27 +0000 Subject: [PATCH 166/242] Export `outbound_payments` directly rather than via re-exports Every time we use re-exports to hide a module in the public API we end up accidentally breaking the public API due to accidental seals. We did this yet again in e9c6bbccc3ccd4cb121a092229f50e29b3345552 where we moved to using a `CustomTlvs` field in the public API for `RecipientOnionFields` but forgot to re-export it, making it impossible to use downstream. Instead, here, we just actually export `outbound_payments`. Compilation fixes by Claude.
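To make the failure mode concrete, here is a minimal sketch of how such an
accidental seal arises (hypothetical names, not code from this tree):

    mod hidden {
        // `pub` within the crate, but never given a public path.
        pub struct CustomData(pub Vec<u8>);

        pub struct Exposed {
            pub data: CustomData,
        }
    }

    // Only `Exposed` is re-exported; `CustomData` is not.
    pub use hidden::Exposed;

Downstream crates can hold an `Exposed` and read `exposed.data`, but they
cannot write `CustomData` in a signature or construct a value of it, because
no public path to the type exists: the field is accidentally sealed, and
rustc does not warn about it by default. Exporting the module itself (as
this patch does with `pub mod outbound_payment`) gives every public item a
nameable path, so nothing can be sealed by a missed re-export.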
--- fuzz/src/chanmon_consistency.rs | 2 +- fuzz/src/full_stack.rs | 3 ++- lightning/src/chain/channelmonitor.rs | 3 ++- lightning/src/events/mod.rs | 7 +++--- lightning/src/ln/accountable_tests.rs | 5 ++--- lightning/src/ln/async_payments_tests.rs | 6 ++--- lightning/src/ln/async_signer_tests.rs | 3 ++- lightning/src/ln/blinded_payment_tests.rs | 6 +++-- lightning/src/ln/chanmon_update_fail_tests.rs | 3 ++- lightning/src/ln/channelmanager.rs | 22 +++++++++---------- lightning/src/ln/functional_test_utils.rs | 3 ++- lightning/src/ln/functional_tests.rs | 5 +++-- lightning/src/ln/interception_tests.rs | 3 ++- lightning/src/ln/invoice_utils.rs | 3 ++- lightning/src/ln/mod.rs | 2 +- lightning/src/ln/monitor_tests.rs | 3 ++- lightning/src/ln/offers_tests.rs | 3 ++- lightning/src/ln/onion_payment.rs | 3 ++- lightning/src/ln/onion_route_tests.rs | 4 ++-- lightning/src/ln/onion_utils.rs | 3 ++- lightning/src/ln/outbound_payment.rs | 5 +++-- lightning/src/ln/payment_tests.rs | 5 +++-- lightning/src/ln/priv_short_conf_tests.rs | 3 ++- lightning/src/ln/quiescence_tests.rs | 2 +- lightning/src/ln/reload_tests.rs | 3 ++- lightning/src/ln/shutdown_tests.rs | 3 ++- lightning/src/ln/splicing_tests.rs | 5 ++--- lightning/src/routing/router.rs | 5 ++--- 28 files changed, 68 insertions(+), 55 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index f87af5c6ff5..5fb07431b17 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -49,7 +49,6 @@ use lightning::ln::channel::{ use lightning::ln::channel_state::ChannelDetails; use lightning::ln::channelmanager::{ ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, RecentPaymentDetails, - RecipientOnionFields, }; use lightning::ln::functional_test_utils::*; use lightning::ln::funding::{FundingTxInput, SpliceContribution}; @@ -58,6 +57,7 @@ use lightning::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, CommitmentUpdate, Init, MessageSendEvent, UpdateAddHTLC, }; +use lightning::ln::outbound_payment::RecipientOnionFields; use lightning::ln::script::ShutdownScript; use lightning::ln::types::ChannelId; use lightning::offers::invoice::UnsignedBolt12Invoice; diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index e73db74fa5d..600335b5083 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -43,13 +43,14 @@ use lightning::events::bump_transaction::sync::WalletSourceSync; use lightning::events::Event; use lightning::ln::channel_state::ChannelDetails; use lightning::ln::channelmanager::{ - ChainParameters, ChannelManager, InterceptId, PaymentId, RecipientOnionFields, Retry, + ChainParameters, ChannelManager, InterceptId, PaymentId, }; use lightning::ln::functional_test_utils::*; use lightning::ln::inbound_payment::ExpandedKey; use lightning::ln::peer_handler::{ IgnoringMessageHandler, MessageHandler, PeerManager, SocketDescriptor, }; +use lightning::ln::outbound_payment::{RecipientOnionFields, Retry}; use lightning::ln::script::ShutdownScript; use lightning::ln::types::ChannelId; use lightning::offers::invoice::UnsignedBolt12Invoice; diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 80d0ef125fc..c7dd579967a 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -6774,8 +6774,9 @@ mod tests { DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey, }; - use crate::ln::channelmanager::{HTLCSource, PaymentId, RecipientOnionFields}; + 
use crate::ln::channelmanager::{HTLCSource, PaymentId}; use crate::ln::functional_test_utils::*; + use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::script::ShutdownScript; use crate::ln::types::ChannelId; use crate::sign::{ChannelSigner, InMemorySigner}; diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index b029caa30d7..3d860e9f363 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -24,9 +24,10 @@ use crate::blinded_path::payment::{ }; use crate::chain::transaction; use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS; -use crate::ln::channelmanager::{InterceptId, PaymentId, RecipientOnionFields}; +use crate::ln::channelmanager::{InterceptId, PaymentId}; use crate::ln::msgs; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::types::ChannelId; use crate::offers::invoice::Bolt12Invoice; use crate::offers::invoice_request::InvoiceRequest; @@ -662,7 +663,7 @@ pub enum PaymentFailureReason { #[cfg_attr(feature = "std", doc = "")] #[cfg_attr( feature = "std", - doc = "[`Retry::Timeout`]: crate::ln::channelmanager::Retry::Timeout" + doc = "[`Retry::Timeout`]: crate::ln::outbound_payment::Retry::Timeout" )] RetriesExhausted, /// Either the BOLT 12 invoice was expired by the time we received it or the payment expired while @@ -1082,7 +1083,7 @@ pub enum Event { /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. /// - /// [`Retry`]: crate::ln::channelmanager::Retry + /// [`Retry`]: crate::ln::outbound_payment::Retry /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment PaymentFailed { /// The `payment_id` passed to [`ChannelManager::send_payment`]. diff --git a/lightning/src/ln/accountable_tests.rs b/lightning/src/ln/accountable_tests.rs index 442186b376a..16ca1425817 100644 --- a/lightning/src/ln/accountable_tests.rs +++ b/lightning/src/ln/accountable_tests.rs @@ -9,11 +9,10 @@ //! Tests for verifying the correct relay of accountable signals between nodes. 
-use crate::ln::channelmanager::{ - HTLCForwardInfo, PaymentId, PendingAddHTLCInfo, PendingHTLCInfo, RecipientOnionFields, Retry, -}; +use crate::ln::channelmanager::{HTLCForwardInfo, PaymentId, PendingAddHTLCInfo, PendingHTLCInfo}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::ChannelMessageHandler; +use crate::ln::outbound_payment::{RecipientOnionFields, Retry}; use crate::routing::router::{PaymentParameters, RouteParameters}; fn test_accountable_forwarding_with_override( diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index b8d23217cef..528cec44c00 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -18,10 +18,7 @@ use crate::events::{ PaymentFailureReason, PaymentPurpose, }; use crate::ln::blinded_payment_tests::{fail_blinded_htlc_backwards, get_blinded_route_parameters}; -use crate::ln::channelmanager::{ - Bolt12PaymentError, OptionalOfferPaymentParams, PaymentId, RecipientOnionFields, - MIN_CLTV_EXPIRY_DELTA, -}; +use crate::ln::channelmanager::{OptionalOfferPaymentParams, PaymentId, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::functional_test_utils::*; use crate::ln::inbound_payment; use crate::ln::msgs; @@ -30,6 +27,7 @@ use crate::ln::msgs::{ }; use crate::ln::offers_tests; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::{Bolt12PaymentError, RecipientOnionFields}; use crate::ln::outbound_payment::{ PendingOutboundPayment, Retry, TEST_ASYNC_PAYMENT_TIMEOUT_RELATIVE_EXPIRY, }; diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index f38afc41fcc..53187c14168 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -20,8 +20,9 @@ use crate::events::{ClosureReason, Event}; use crate::ln::chan_utils::ClosingTransaction; use crate::ln::channel::DISCONNECT_PEER_AWAITING_RESPONSE_TICKS; use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState}; -use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder}; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent}; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::{functional_test_utils::*, msgs}; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::SignerProvider; diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 80da76452c0..d78b9dfa4f2 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -14,7 +14,7 @@ use crate::blinded_path::payment::{ use crate::blinded_path::utils::is_padded; use crate::blinded_path::{self, BlindedHop}; use crate::events::{Event, HTLCHandlingFailureType, PaymentFailureReason}; -use crate::ln::channelmanager::{self, HTLCFailureMsg, PaymentId, RecipientOnionFields}; +use crate::ln::channelmanager::{self, HTLCFailureMsg, PaymentId}; use crate::ln::functional_test_utils::*; use crate::ln::inbound_payment::ExpandedKey; use crate::ln::msgs::{ @@ -22,7 +22,9 @@ use crate::ln::msgs::{ }; use crate::ln::onion_payment; use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; -use crate::ln::outbound_payment::{RecipientCustomTlvs, Retry, IDEMPOTENCY_TIMEOUT_TICKS}; +use crate::ln::outbound_payment::{ + RecipientCustomTlvs, RecipientOnionFields, Retry, IDEMPOTENCY_TIMEOUT_TICKS, +}; use crate::ln::types::ChannelId; use 
crate::offers::invoice::UnsignedBolt12Invoice; use crate::prelude::*; diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index ff499d049d4..3fa2073d5ba 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -19,11 +19,12 @@ use crate::chain::transaction::OutPoint; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose}; use crate::ln::channel::AnnouncementSigsState; -use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields, Retry}; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder}; use crate::ln::msgs; use crate::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, RoutingMessageHandler, }; +use crate::ln::outbound_payment::{RecipientOnionFields, Retry}; use crate::ln::types::ChannelId; use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::sign::NodeSigner; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 129a4d58171..a2bc5d109ed 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -84,9 +84,12 @@ use crate::ln::onion_utils::{process_fulfill_attribution_data, AttributionData}; use crate::ln::our_peer_storage::{EncryptedOurPeerStorage, PeerStorageMonitorHolder}; #[cfg(test)] use crate::ln::outbound_payment; +#[cfg(any(test, feature = "_externalize_tests"))] +use crate::ln::outbound_payment::PaymentSendFailure; use crate::ln::outbound_payment::{ - OutboundPayments, PendingOutboundPayment, RecipientCustomTlvs, RetryableInvoiceRequest, - SendAlongPathArgs, StaleExpiration, + Bolt11PaymentError, Bolt12PaymentError, OutboundPayments, PendingOutboundPayment, + ProbeSendFailure, RecipientCustomTlvs, RecipientOnionFields, Retry, RetryableInvoiceRequest, + RetryableSendFailure, SendAlongPathArgs, StaleExpiration, }; use crate::ln::types::ChannelId; use crate::offers::async_receive_offer_cache::AsyncReceiveOfferCache; @@ -175,6 +178,7 @@ use crate::prelude::*; use crate::sync::{Arc, FairRwLock, LockHeldState, LockTestExt, Mutex, RwLock, RwLockReadGuard}; use bitcoin::hex::impl_fmt_traits; +use crate::ln::script::ShutdownScript; use core::borrow::Borrow; use core::cell::RefCell; use core::convert::Infallible; @@ -182,14 +186,6 @@ use core::ops::Deref; use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use core::time::Duration; use core::{cmp, mem}; -// Re-export this for use in the public API. 
-#[cfg(any(test, feature = "_externalize_tests"))] -pub(crate) use crate::ln::outbound_payment::PaymentSendFailure; -pub use crate::ln::outbound_payment::{ - Bolt11PaymentError, Bolt12PaymentError, ProbeSendFailure, RecipientOnionFields, Retry, - RetryableSendFailure, -}; -use crate::ln::script::ShutdownScript; // We hold various information about HTLC relay in the HTLC objects in Channel itself: // @@ -2262,7 +2258,8 @@ impl< /// # use bitcoin::hashes::Hash; /// # use lightning::events::{Event, EventsProvider}; /// # use lightning::types::payment::PaymentHash; -/// # use lightning::ln::channelmanager::{AChannelManager, OptionalBolt11PaymentParams, PaymentId, RecentPaymentDetails, Retry}; +/// # use lightning::ln::channelmanager::{AChannelManager, OptionalBolt11PaymentParams, PaymentId, RecentPaymentDetails}; +/// # use lightning::ln::outbound_payment::Retry; /// # use lightning_invoice::Bolt11Invoice; /// # /// # fn example( @@ -2420,7 +2417,8 @@ impl< /// ``` /// # use core::time::Duration; /// # use lightning::events::{Event, EventsProvider}; -/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry}; +/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails}; +/// # use lightning::ln::outbound_payment::Retry; /// # use lightning::offers::parse::Bolt12SemanticError; /// # use lightning::routing::router::RouteParametersConfig; /// # diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index cea9ea45428..e8965752331 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -27,7 +27,7 @@ use crate::ln::chan_utils::{ }; use crate::ln::channelmanager::{ AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, - RAACommitmentOrder, RecipientOnionFields, MIN_CLTV_EXPIRY_DELTA, + RAACommitmentOrder, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::FundingTxInput; use crate::ln::msgs; @@ -35,6 +35,7 @@ use crate::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, RoutingMessageHandler, }; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::outbound_payment::Retry; use crate::ln::peer_handler::IgnoringMessageHandler; use crate::ln::types::ChannelId; diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index fcb348c690d..8e854b31150 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -33,14 +33,15 @@ use crate::ln::channel::{ MIN_CHAN_DUST_LIMIT_SATOSHIS, UNFUNDED_CHANNEL_AGE_LIMIT_TICKS, }; use crate::ln::channelmanager::{ - PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, DISABLE_GOSSIP_TICKS, - ENABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, + PaymentId, RAACommitmentOrder, BREAKDOWN_TIMEOUT, DISABLE_GOSSIP_TICKS, ENABLE_GOSSIP_TICKS, + MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::msgs; use crate::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent, RoutingMessageHandler, }; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::types::ChannelId; use crate::ln::{chan_utils, onion_utils}; use crate::routing::gossip::{NetworkGraph, NetworkUpdate}; diff --git a/lightning/src/ln/interception_tests.rs b/lightning/src/ln/interception_tests.rs index 11b5de166f6..c83ef177628 100644 --- 
a/lightning/src/ln/interception_tests.rs +++ b/lightning/src/ln/interception_tests.rs @@ -12,9 +12,10 @@ //! claim outputs on-chain. use crate::events::{Event, HTLCHandlingFailureReason, HTLCHandlingFailureType}; -use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; +use crate::ln::channelmanager::PaymentId; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler}; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::routing::router::PaymentParameters; use crate::util::config::HTLCInterceptionFlags; diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index 3026df645d1..7a93a929013 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -589,11 +589,12 @@ mod test { use crate::chain::channelmonitor::HTLC_FAIL_BACK_BUFFER; use crate::ln::channelmanager::{ Bolt11InvoiceParameters, OptionalBolt11PaymentParams, PaymentId, PhantomRouteHints, - RecipientOnionFields, Retry, MIN_FINAL_CLTV_EXPIRY_DELTA, + MIN_FINAL_CLTV_EXPIRY_DELTA, }; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::ln::outbound_payment::RecipientCustomTlvs; + use crate::ln::outbound_payment::{RecipientOnionFields, Retry}; use crate::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; use crate::sign::PhantomKeysManager; use crate::types::payment::{PaymentHash, PaymentPreimage}; diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index b077c98ae73..d6e0b92f1d0 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -42,7 +42,7 @@ pub mod channel; pub(crate) mod channel; pub mod onion_utils; -mod outbound_payment; +pub mod outbound_payment; pub mod wire; #[allow(dead_code)] // TODO(dual_funding): Remove once contribution to V2 channels is enabled. 
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 04915affa20..c3266ae317f 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -21,7 +21,8 @@ use crate::events::{Event, ClosureReason, HTLCHandlingFailureType}; use crate::ln::channel; use crate::ln::types::ChannelId; use crate::ln::chan_utils; -use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId, RecipientOnionFields}; +use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId}; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::crypto::utils::sign; use crate::util::ser::Writeable; diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 1d20d1d368e..12e631b4042 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -50,7 +50,8 @@ use crate::blinded_path::message::BlindedMessagePath; use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, DummyTlvs, PaymentContext}; use crate::blinded_path::message::OffersContext; use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaidBolt12Invoice, PaymentFailureReason, PaymentPurpose}; -use crate::ln::channelmanager::{Bolt12PaymentError, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry, self}; +use crate::ln::channelmanager::{PaymentId, RecentPaymentDetails, self}; +use crate::ln::outbound_payment::{Bolt12PaymentError, RecipientOnionFields, Retry}; use crate::types::features::Bolt12InvoiceFeatures; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, NodeAnnouncement, OnionMessage, OnionMessageHandler, RoutingMessageHandler, SocketAddress, UnsignedGossipMessage, UnsignedNodeAnnouncement}; diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index fd328e01d78..555cc7a87af 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -750,10 +750,11 @@ pub(super) fn check_incoming_htlc_cltv( #[cfg(test)] mod tests { - use crate::ln::channelmanager::{RecipientOnionFields, MIN_CLTV_EXPIRY_DELTA}; + use crate::ln::channelmanager::MIN_CLTV_EXPIRY_DELTA; use crate::ln::functional_test_utils::TEST_FINAL_CLTV; use crate::ln::msgs; use crate::ln::onion_utils::create_payment_onion; + use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::types::ChannelId; use crate::routing::router::{Path, RouteHop}; use crate::types::features::{ChannelFeatures, NodeFeatures}; diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 03557469537..27e0cfafade 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -16,8 +16,7 @@ use crate::events::{Event, HTLCHandlingFailureType, PathFailure, PaymentFailureR use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS; use crate::ln::channelmanager::{ FailureCode, HTLCForwardInfo, PaymentId, PendingAddHTLCInfo, PendingHTLCInfo, - PendingHTLCRouting, RecipientOnionFields, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, - MIN_CLTV_EXPIRY_DELTA, + PendingHTLCRouting, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::functional_test_utils::test_default_channel_config; use crate::ln::msgs; @@ -28,6 +27,7 @@ use crate::ln::msgs::{ use crate::ln::onion_utils::{ self, build_onion_payloads, construct_onion_keys, LocalHTLCFailureReason, }; +use 
crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::wire::Encode; use crate::routing::gossip::{NetworkUpdate, RoutingFees}; use crate::routing::router::{ diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index fcfac7c5e63..d48fcb25179 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -15,9 +15,10 @@ use crate::crypto::chacha20::ChaCha20; use crate::crypto::streams::ChaChaReader; use crate::events::HTLCHandlingFailureReason; use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS; -use crate::ln::channelmanager::{HTLCSource, RecipientOnionFields}; +use crate::ln::channelmanager::HTLCSource; use crate::ln::msgs::{self, DecodeError, InboundOnionDummyPayload, OnionPacket, UpdateAddHTLC}; use crate::ln::onion_payment::{HopConnector, NextPacketDetails}; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::offers::invoice_request::InvoiceRequest; use crate::routing::gossip::NetworkUpdate; use crate::routing::router::{BlindedTail, Path, RouteHop, RouteParameters, TrampolineHop}; diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index dd774a00664..ea33bb5d263 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -7,7 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. -//! Utilities to send payments and manage outbound payment information. +//! This module contains various types which are used to configure or process outbound payments. use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; @@ -2805,8 +2805,9 @@ mod tests { use crate::blinded_path::EmptyNodeIdLookUp; use crate::events::{Event, PathFailure, PaymentFailureReason}; - use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; + use crate::ln::channelmanager::PaymentId; use crate::ln::inbound_payment::ExpandedKey; + use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::outbound_payment::{ Bolt12PaymentError, OutboundPayments, PendingOutboundPayment, RecipientCustomTlvs, Retry, RetryableSendFailure, StaleExpiration, diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index e41e60a46a7..f73c55fd6c6 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -26,13 +26,14 @@ use crate::ln::channel::{ }; use crate::ln::channelmanager::{ HTLCForwardInfo, PaymentId, PendingAddHTLCInfo, PendingHTLCRouting, RecentPaymentDetails, - RecipientOnionFields, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MPP_TIMEOUT_TICKS, + BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MPP_TIMEOUT_TICKS, }; use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::{ - ProbeSendFailure, RecipientCustomTlvs, Retry, RetryableSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, + ProbeSendFailure, RecipientCustomTlvs, RecipientOnionFields, Retry, RetryableSendFailure, + IDEMPOTENCY_TIMEOUT_TICKS, }; use crate::ln::types::ChannelId; use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 83aaca24203..57ee863a71d 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -14,12 +14,13 @@ use crate::chain::ChannelMonitorUpdateStatus; use crate::events::{ClosureReason, Event, 
HTLCHandlingFailureType, PaymentFailureReason}; use crate::ln::channel::CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY; -use crate::ln::channelmanager::{PaymentId, RecipientOnionFields, MIN_CLTV_EXPIRY_DELTA}; +use crate::ln::channelmanager::{PaymentId, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::msgs; use crate::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent, RoutingMessageHandler, }; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::types::ChannelId; use crate::routing::gossip::RoutingFees; use crate::routing::router::{PaymentParameters, RouteHint, RouteHintHop}; diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index a2b14a798c4..d972fb6a5c5 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -2,10 +2,10 @@ use crate::chain::ChannelMonitorUpdateStatus; use crate::events::{Event, HTLCHandlingFailureType}; use crate::ln::channel::DISCONNECT_PEER_AWAITING_RESPONSE_TICKS; use crate::ln::channelmanager::PaymentId; -use crate::ln::channelmanager::RecipientOnionFields; use crate::ln::functional_test_utils::*; use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent}; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::test_channel_signer::SignerOp; diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 4fb2753b6be..a8206dfe850 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -18,7 +18,8 @@ use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::sign::EntropySource; use crate::chain::transaction::OutPoint; use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; -use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields, RAACommitmentOrder}; +use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder}; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::msgs; use crate::ln::types::ChannelId; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, ErrorAction, MessageSendEvent}; diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 192bc6399e4..982dc788f60 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -14,10 +14,11 @@ use crate::chain::transaction::OutPoint; use crate::chain::ChannelMonitorUpdateStatus; use crate::events::{ClosureReason, Event, HTLCHandlingFailureReason, HTLCHandlingFailureType}; use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState}; -use crate::ln::channelmanager::{self, PaymentId, RecipientOnionFields, Retry}; +use crate::ln::channelmanager::{self, PaymentId}; use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent}; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::{RecipientOnionFields, Retry}; use crate::ln::script::ShutdownScript; use crate::ln::types::ChannelId; use crate::prelude::*; diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index ef524db6be3..cf93c6243c4 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -17,12 +17,11 @@ use 
crate::events::bump_transaction::sync::WalletSourceSync; use crate::events::{ClosureReason, Event, FundingInfo, HTLCHandlingFailureType}; use crate::ln::chan_utils; use crate::ln::channel::CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY; -use crate::ln::channelmanager::{ - provided_init_features, PaymentId, RecipientOnionFields, BREAKDOWN_TIMEOUT, -}; +use crate::ln::channelmanager::{provided_init_features, PaymentId, BREAKDOWN_TIMEOUT}; use crate::ln::functional_test_utils::*; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::types::ChannelId; use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::util::errors::APIError; diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 42d569415af..b27dee1a450 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -19,9 +19,10 @@ use crate::blinded_path::payment::{ use crate::blinded_path::{BlindedHop, Direction, IntroductionNode}; use crate::crypto::chacha20::ChaCha20; use crate::ln::channel_state::ChannelDetails; -use crate::ln::channelmanager::{PaymentId, RecipientOnionFields, MIN_FINAL_CLTV_EXPIRY_DELTA}; +use crate::ln::channelmanager::{PaymentId, MIN_FINAL_CLTV_EXPIRY_DELTA}; use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT}; use crate::ln::onion_utils; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::offers::invoice::Bolt12Invoice; use crate::offers::static_invoice::StaticInvoice; use crate::routing::gossip::{ @@ -1036,8 +1037,6 @@ impl PaymentParameters { /// whether your router will be allowed to find a multi-part route for this payment. If you /// set `allow_mpp` to true, you should ensure a payment secret is set on send, likely via /// [`RecipientOnionFields::secret_only`]. 
- /// - /// [`RecipientOnionFields::secret_only`]: crate::ln::channelmanager::RecipientOnionFields::secret_only #[rustfmt::skip] pub fn for_keysend(payee_pubkey: PublicKey, final_cltv_expiry_delta: u32, allow_mpp: bool) -> Self { Self::from_node_id(payee_pubkey, final_cltv_expiry_delta) From d652d86ab6e1c89d11eb72f8b0e938713fba4869 Mon Sep 17 00:00:00 2001 From: Jon Date: Fri, 30 Jan 2026 11:02:24 -0600 Subject: [PATCH 167/242] invoice: Use PaymentHash in raw invoice types --- lightning-invoice/src/de.rs | 141 ++++++++----- lightning-invoice/src/lib.rs | 141 +++++++------ lightning-invoice/src/ser.rs | 18 +- lightning-invoice/src/test_ser_de.rs | 29 +-- lightning-invoice/tests/ser_de.rs | 192 +++++++++++++----- .../tests/lsps2_integration_tests.rs | 5 - lightning/src/ln/bolt11_payment_tests.rs | 6 +- lightning/src/ln/channelmanager.rs | 2 +- lightning/src/ln/invoice_utils.rs | 6 +- 9 files changed, 351 insertions(+), 189 deletions(-) diff --git a/lightning-invoice/src/de.rs b/lightning-invoice/src/de.rs index 0747015a457..f1bbe29440a 100644 --- a/lightning-invoice/src/de.rs +++ b/lightning-invoice/src/de.rs @@ -16,7 +16,7 @@ use crate::Bolt11Bech32; use bitcoin::hashes::sha256; use bitcoin::hashes::Hash; use bitcoin::{PubkeyHash, ScriptHash, WitnessVersion}; -use lightning_types::payment::PaymentSecret; +use lightning_types::payment::{PaymentHash, PaymentSecret}; use lightning_types::routing::{RouteHint, RouteHintHop, RoutingFees}; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; @@ -89,6 +89,18 @@ impl FromBase32 for PaymentSecret { } } +impl FromBase32 for PaymentHash { + type Err = Bolt11ParseError; + + fn from_base32(field_data: &[Fe32]) -> Result<Self, Self::Err> { + if field_data.len() != 52 { + return Err(Bolt11ParseError::InvalidSliceLength(field_data.len(), 52, "PaymentHash")); + } + let data_bytes = <[u8; 32]>::from_base32(field_data)?; + Ok(PaymentHash(data_bytes)) + } +} + impl FromBase32 for Bolt11InvoiceFeatures { type Err = Bolt11ParseError; @@ -540,7 +552,7 @@ impl FromBase32 for TaggedField { match tag.to_u8() { constants::TAG_PAYMENT_HASH => { - Ok(TaggedField::PaymentHash(Sha256::from_base32(field_data)?)) + Ok(TaggedField::PaymentHash(PaymentHash::from_base32(field_data)?)) }, constants::TAG_DESCRIPTION => { Ok(TaggedField::Description(Description::from_base32(field_data)?)) @@ -1068,8 +1080,9 @@ mod test { use crate::TaggedField::*; use crate::{ Bolt11InvoiceSignature, Currency, PositiveTimestamp, RawBolt11Invoice, RawDataPart, - RawHrp, Sha256, SiPrefix, SignedRawBolt11Invoice, + RawHrp, SiPrefix, SignedRawBolt11Invoice, }; + use bitcoin::hex::FromHex; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; use lightning_types::features::Bolt11InvoiceFeatures; @@ -1077,45 +1090,51 @@ let expected_features = Bolt11InvoiceFeatures::from_le_bytes(vec![0, 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8]); let invoice_str = "lnbc25m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5vdhkven9v5sxyetpdeessp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygs9q5sqqqqqqqqqqqqqqqpqsq67gye39hfg3zd8rgc80k32tvy9xk2xunwm5lzexnvpx6fd77en8qaq424dxgt56cag2dpt359k3ssyhetktkpqh24jqnjyw6uqd08sgptq44qu"; - let invoice = - SignedRawBolt11Invoice { - raw_invoice: RawBolt11Invoice { - hrp: RawHrp { - currency: Currency::Bitcoin, - raw_amount: Some(25), - si_prefix: Some(SiPrefix::Milli), - }, - data: RawDataPart { - timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), - tagged_fields: vec !
[ - PaymentHash(Sha256(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap())).into(), + let invoice = SignedRawBolt11Invoice { + raw_invoice: RawBolt11Invoice { + hrp: RawHrp { + currency: Currency::Bitcoin, + raw_amount: Some(25), + si_prefix: Some(SiPrefix::Milli), + }, + data: RawDataPart { + timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), + tagged_fields: vec ! [ + crate::TaggedField::PaymentHash(crate::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) + .into(), Description(crate::Description::new("coffee beans".to_owned()).unwrap()).into(), PaymentSecret(crate::PaymentSecret([17; 32])).into(), Features(expected_features).into()], - }, }, - hash: [ - 0xb1, 0x96, 0x46, 0xc3, 0xbc, 0x56, 0x76, 0x1d, 0x20, 0x65, 0x6e, 0x0e, 0x32, - 0xec, 0xd2, 0x69, 0x27, 0xb7, 0x62, 0x6e, 0x2a, 0x8b, 0xe6, 0x97, 0x71, 0x9f, - 0xf8, 0x7e, 0x44, 0x54, 0x55, 0xb9, - ], - signature: Bolt11InvoiceSignature( - RecoverableSignature::from_compact( - &[ - 0xd7, 0x90, 0x4c, 0xc4, 0xb7, 0x4a, 0x22, 0x26, 0x9c, 0x68, 0xc1, 0xdf, - 0x68, 0xa9, 0x6c, 0x21, 0x4d, 0x65, 0x1b, 0x93, 0x76, 0xe9, 0xf1, 0x64, - 0xd3, 0x60, 0x4d, 0xa4, 0xb7, 0xde, 0xcc, 0xce, 0x0e, 0x82, 0xaa, 0xab, - 0x4c, 0x85, 0xd3, 0x58, 0xea, 0x14, 0xd0, 0xae, 0x34, 0x2d, 0xa3, 0x08, - 0x12, 0xf9, 0x5d, 0x97, 0x60, 0x82, 0xea, 0xac, 0x81, 0x39, 0x11, 0xda, - 0xe0, 0x1a, 0xf3, 0xc1, - ], - RecoveryId::from_i32(1).unwrap(), - ) - .unwrap(), - ), - }; + }, + hash: [ + 0xb1, 0x96, 0x46, 0xc3, 0xbc, 0x56, 0x76, 0x1d, 0x20, 0x65, 0x6e, 0x0e, 0x32, 0xec, + 0xd2, 0x69, 0x27, 0xb7, 0x62, 0x6e, 0x2a, 0x8b, 0xe6, 0x97, 0x71, 0x9f, 0xf8, 0x7e, + 0x44, 0x54, 0x55, 0xb9, + ], + signature: Bolt11InvoiceSignature( + RecoverableSignature::from_compact( + &[ + 0xd7, 0x90, 0x4c, 0xc4, 0xb7, 0x4a, 0x22, 0x26, 0x9c, 0x68, 0xc1, 0xdf, + 0x68, 0xa9, 0x6c, 0x21, 0x4d, 0x65, 0x1b, 0x93, 0x76, 0xe9, 0xf1, 0x64, + 0xd3, 0x60, 0x4d, 0xa4, 0xb7, 0xde, 0xcc, 0xce, 0x0e, 0x82, 0xaa, 0xab, + 0x4c, 0x85, 0xd3, 0x58, 0xea, 0x14, 0xd0, 0xae, 0x34, 0x2d, 0xa3, 0x08, + 0x12, 0xf9, 0x5d, 0x97, 0x60, 0x82, 0xea, 0xac, 0x81, 0x39, 0x11, 0xda, + 0xe0, 0x1a, 0xf3, 0xc1, + ], + RecoveryId::from_i32(1).unwrap(), + ) + .unwrap(), + ), + }; assert_eq!(invoice_str, invoice.to_string()); assert_eq!(invoice_str.parse(), Ok(invoice)); } @@ -1125,8 +1144,9 @@ mod test { use crate::TaggedField::*; use crate::{ Bolt11InvoiceSignature, Currency, PositiveTimestamp, RawBolt11Invoice, RawDataPart, - RawHrp, Sha256, SignedRawBolt11Invoice, + RawHrp, SignedRawBolt11Invoice, }; + use bitcoin::hex::FromHex; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; assert_eq!( @@ -1143,9 +1163,16 @@ mod test { data: RawDataPart { timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), tagged_fields: vec ! 
[ - PaymentHash(Sha256(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap())).into(), + crate::TaggedField::PaymentHash(crate::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) + .into(), Description( crate::Description::new( "Please consider supporting this project".to_owned() @@ -1289,9 +1316,10 @@ mod test { use crate::TaggedField::*; use crate::{ Bolt11Invoice, Bolt11InvoiceFeatures, Bolt11InvoiceSignature, Currency, - PositiveTimestamp, RawBolt11Invoice, RawDataPart, RawHrp, RawTaggedField, Sha256, + PositiveTimestamp, RawBolt11Invoice, RawDataPart, RawHrp, RawTaggedField, SignedRawBolt11Invoice, }; + use bitcoin::hex::FromHex; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; use bitcoin::secp256k1::PublicKey; use lightning_types::routing::{RouteHint, RouteHintHop, RoutingFees}; @@ -1310,10 +1338,13 @@ mod test { } // Invoice fields - let payment_hash = sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102", - ) - .unwrap(); + let payment_hash = crate::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex("0001020304050607080900010203040506070809000102030405060708090102") + .unwrap(), + ) + .unwrap(), + ); let description = "A".repeat(639); let fallback_addr = crate::Fallback::SegWitProgram { version: bitcoin::WitnessVersion::V0, @@ -1346,7 +1377,7 @@ mod test { data: RawDataPart { timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), tagged_fields: vec![ - PaymentHash(Sha256(payment_hash)).into(), + crate::TaggedField::PaymentHash(payment_hash).into(), Description(crate::Description::new(description).unwrap()).into(), PayeePubKey(crate::PayeePubKey(payee_pk)).into(), ExpiryTime(crate::ExpiryTime(std::time::Duration::from_secs(u64::MAX))).into(), @@ -1414,4 +1445,18 @@ mod test { assert!(parse_is_code_length_err(&too_long)); assert!(!parse_is_code_length_err(&too_long[..too_long.len() - 1])); } + + #[test] + fn test_payment_hash_from_base32_invalid_len() { + use crate::PaymentHash; + + // PaymentHash must be 52 base32 characters (32 bytes). + // Test with 51 characters (too short). + let input = vec![Fe32::try_from(0).unwrap(); 51]; + assert!(PaymentHash::from_base32(&input).is_err()); + + // Test with 53 characters (too long). + let input = vec![Fe32::try_from(0).unwrap(); 53]; + assert!(PaymentHash::from_base32(&input).is_err()); + } } diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index a83130ab799..4ee9acb5f27 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -189,7 +189,7 @@ impl Checksum for Bolt11Bech32 { /// /// use lightning_types::payment::PaymentSecret; /// -/// use lightning_invoice::{Currency, InvoiceBuilder}; +/// use lightning_invoice::{Currency, InvoiceBuilder, PaymentHash}; /// /// # #[cfg(not(feature = "std"))] /// # fn main() {} @@ -203,7 +203,7 @@ impl Checksum for Bolt11Bech32 { /// ][..] 
/// ).unwrap(); /// -/// let payment_hash = sha256::Hash::from_slice(&[0; 32][..]).unwrap(); +/// let payment_hash = PaymentHash([0; 32]); /// let payment_secret = PaymentSecret([42u8; 32]); /// /// let invoice = InvoiceBuilder::new(Currency::Bitcoin) @@ -521,7 +521,7 @@ impl Ord for RawTaggedField { #[allow(missing_docs)] #[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)] pub enum TaggedField { - PaymentHash(Sha256), + PaymentHash(PaymentHash), Description(Description), PayeePubKey(PayeePubKey), DescriptionHash(Sha256), @@ -793,8 +793,8 @@ impl InvoiceBuilder { /// Set the payment hash. This function is only available if no payment hash was set. - pub fn payment_hash(mut self, hash: sha256::Hash) -> InvoiceBuilder { - self.tagged_fields.push(TaggedField::PaymentHash(Sha256(hash))); + pub fn payment_hash(mut self, hash: PaymentHash) -> InvoiceBuilder { + self.tagged_fields.push(TaggedField::PaymentHash(hash)); self.set_flags() } } @@ -1158,7 +1158,7 @@ impl RawBolt11Invoice { self.data.tagged_fields.iter().filter_map(match_raw) } - pub fn payment_hash(&self) -> Option<&Sha256> { + pub fn payment_hash(&self) -> Option<&PaymentHash> { find_extract!(self.known_tagged_fields(), TaggedField::PaymentHash(ref x), x) } @@ -1461,8 +1461,7 @@ impl Bolt11Invoice { /// Returns the hash to which we will receive the preimage on completion of the payment pub fn payment_hash(&self) -> PaymentHash { - let hash = self.signed_invoice.payment_hash().expect("checked by constructor").0; - PaymentHash(hash.to_byte_array()) + *self.signed_invoice.payment_hash().expect("checked by constructor") } /// Return the description or a hash of it for longer ones @@ -1925,10 +1924,7 @@ impl<'de> Deserialize<'de> for Bolt11Invoice { #[cfg(test)] mod test { - use bitcoin::hashes::sha256; use bitcoin::ScriptBuf; - use std::str::FromStr; - #[test] fn test_system_time_bounds_assumptions() { assert_eq!( @@ -1940,16 +1936,22 @@ mod test { #[test] fn test_calc_invoice_hash() { use crate::TaggedField::*; - use crate::{Currency, PositiveTimestamp, RawBolt11Invoice, RawDataPart, RawHrp}; + use crate::{ + Currency, PaymentHash, PositiveTimestamp, RawBolt11Invoice, RawDataPart, RawHrp, + }; + use bitcoin::hex::FromHex; let invoice = RawBolt11Invoice { hrp: RawHrp { currency: Currency::Bitcoin, raw_amount: None, si_prefix: None }, data: RawDataPart { timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), tagged_fields: vec![ - PaymentHash(crate::Sha256( - sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102", + crate::TaggedField::PaymentHash(PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), ) .unwrap(), )) @@ -1978,51 +1980,58 @@ mod test { fn test_check_signature() { use crate::TaggedField::*; use crate::{ - Bolt11InvoiceSignature, Currency, PositiveTimestamp, RawBolt11Invoice, RawDataPart, - RawHrp, Sha256, SignedRawBolt11Invoice, + Bolt11InvoiceSignature, Currency, PaymentHash, PositiveTimestamp, RawBolt11Invoice, + RawDataPart, RawHrp, SignedRawBolt11Invoice, }; + use bitcoin::hex::FromHex; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1::{PublicKey, SecretKey}; - let invoice = - SignedRawBolt11Invoice { - raw_invoice: RawBolt11Invoice { - hrp: RawHrp { currency: Currency::Bitcoin, raw_amount: None, si_prefix: None }, - data: RawDataPart { - timestamp: 
PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), - tagged_fields: vec ! [ - PaymentHash(Sha256(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap())).into(), + let invoice = SignedRawBolt11Invoice { + raw_invoice: RawBolt11Invoice { + hrp: RawHrp { currency: Currency::Bitcoin, raw_amount: None, si_prefix: None }, + data: RawDataPart { + timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), + tagged_fields: vec ! [ + crate::TaggedField::PaymentHash(PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) + .into(), Description( crate::Description::new( "Please consider supporting this project".to_owned() ).unwrap() ).into(), ], - }, }, - hash: [ - 0xc3, 0xd4, 0xe8, 0x3f, 0x64, 0x6f, 0xa7, 0x9a, 0x39, 0x3d, 0x75, 0x27, 0x7b, - 0x1d, 0x85, 0x8d, 0xb1, 0xd1, 0xf7, 0xab, 0x71, 0x37, 0xdc, 0xb7, 0x83, 0x5d, - 0xb2, 0xec, 0xd5, 0x18, 0xe1, 0xc9, - ], - signature: Bolt11InvoiceSignature( - RecoverableSignature::from_compact( - &[ - 0x38u8, 0xec, 0x68, 0x91, 0x34, 0x5e, 0x20, 0x41, 0x45, 0xbe, 0x8a, - 0x3a, 0x99, 0xde, 0x38, 0xe9, 0x8a, 0x39, 0xd6, 0xa5, 0x69, 0x43, 0x4e, - 0x18, 0x45, 0xc8, 0xaf, 0x72, 0x05, 0xaf, 0xcf, 0xcc, 0x7f, 0x42, 0x5f, - 0xcd, 0x14, 0x63, 0xe9, 0x3c, 0x32, 0x88, 0x1e, 0xad, 0x0d, 0x6e, 0x35, - 0x6d, 0x46, 0x7e, 0xc8, 0xc0, 0x25, 0x53, 0xf9, 0xaa, 0xb1, 0x5e, 0x57, - 0x38, 0xb1, 0x1f, 0x12, 0x7f, - ], - RecoveryId::from_i32(0).unwrap(), - ) - .unwrap(), - ), - }; + }, + hash: [ + 0xc3, 0xd4, 0xe8, 0x3f, 0x64, 0x6f, 0xa7, 0x9a, 0x39, 0x3d, 0x75, 0x27, 0x7b, 0x1d, + 0x85, 0x8d, 0xb1, 0xd1, 0xf7, 0xab, 0x71, 0x37, 0xdc, 0xb7, 0x83, 0x5d, 0xb2, 0xec, + 0xd5, 0x18, 0xe1, 0xc9, + ], + signature: Bolt11InvoiceSignature( + RecoverableSignature::from_compact( + &[ + 0x38u8, 0xec, 0x68, 0x91, 0x34, 0x5e, 0x20, 0x41, 0x45, 0xbe, 0x8a, 0x3a, + 0x99, 0xde, 0x38, 0xe9, 0x8a, 0x39, 0xd6, 0xa5, 0x69, 0x43, 0x4e, 0x18, + 0x45, 0xc8, 0xaf, 0x72, 0x05, 0xaf, 0xcf, 0xcc, 0x7f, 0x42, 0x5f, 0xcd, + 0x14, 0x63, 0xe9, 0x3c, 0x32, 0x88, 0x1e, 0xad, 0x0d, 0x6e, 0x35, 0x6d, + 0x46, 0x7e, 0xc8, 0xc0, 0x25, 0x53, 0xf9, 0xaa, 0xb1, 0x5e, 0x57, 0x38, + 0xb1, 0x1f, 0x12, 0x7f, + ], + RecoveryId::from_i32(0).unwrap(), + ) + .unwrap(), + ), + }; assert!(invoice.check_signature()); @@ -2050,9 +2059,10 @@ mod test { fn test_check_feature_bits() { use crate::TaggedField::*; use crate::{ - Bolt11Invoice, Bolt11SemanticError, Currency, PositiveTimestamp, RawBolt11Invoice, - RawDataPart, RawHrp, Sha256, + Bolt11Invoice, Bolt11SemanticError, Currency, PaymentHash, PositiveTimestamp, + RawBolt11Invoice, RawDataPart, RawHrp, }; + use bitcoin::hex::FromHex; use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1::SecretKey; use lightning_types::features::Bolt11InvoiceFeatures; @@ -2064,9 +2074,12 @@ mod test { data: RawDataPart { timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), tagged_fields: vec![ - PaymentHash(Sha256( - sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102", + crate::TaggedField::PaymentHash(PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), ) .unwrap(), )) @@ -2174,7 +2187,7 @@ mod test { let builder = InvoiceBuilder::new(Currency::Bitcoin) .description("Test".into()) - .payment_hash(sha256::Hash::from_slice(&[0; 32][..]).unwrap()) + 
.payment_hash(PaymentHash([0; 32])) .duration_since_epoch(Duration::from_secs(1234567)); let invoice = builder.clone().amount_milli_satoshis(1500).build_raw().unwrap(); @@ -2196,7 +2209,7 @@ mod test { use std::iter::FromIterator; let builder = InvoiceBuilder::new(Currency::Bitcoin) - .payment_hash(sha256::Hash::from_slice(&[0; 32][..]).unwrap()) + .payment_hash(PaymentHash([0; 32])) .duration_since_epoch(Duration::from_secs(1234567)) .min_final_cltv_expiry_delta(144); @@ -2300,7 +2313,7 @@ mod test { .private_route(route_1.clone()) .private_route(route_2.clone()) .description_hash(sha256::Hash::from_slice(&[3; 32][..]).unwrap()) - .payment_hash(sha256::Hash::from_slice(&[21; 32][..]).unwrap()) + .payment_hash(PaymentHash([21; 32])) .payment_secret(PaymentSecret([42; 32])) .basic_mpp(); @@ -2361,7 +2374,7 @@ mod test { let signed_invoice = InvoiceBuilder::new(Currency::Bitcoin) .description("Test".into()) - .payment_hash(sha256::Hash::from_slice(&[0; 32][..]).unwrap()) + .payment_hash(PaymentHash([0; 32])) .payment_secret(PaymentSecret([0; 32])) .duration_since_epoch(Duration::from_secs(1234567)) .build_raw() @@ -2387,7 +2400,7 @@ mod test { let signed_invoice = InvoiceBuilder::new(Currency::Bitcoin) .description("Test".into()) - .payment_hash(sha256::Hash::from_slice(&[0; 32][..]).unwrap()) + .payment_hash(PaymentHash([0; 32])) .payment_secret(PaymentSecret([0; 32])) .duration_since_epoch(Duration::from_secs(1234567)) .build_raw() @@ -2428,13 +2441,13 @@ mod test { #[test] fn raw_tagged_field_ordering() { - use crate::{ - sha256, Description, Fe32, RawTaggedField, Sha256, TaggedField, UntrustedString, - }; + use crate::{Description, Fe32, PaymentHash, RawTaggedField, TaggedField, UntrustedString}; + use bitcoin::hex::FromHex; - let field10 = RawTaggedField::KnownSemantics(TaggedField::PaymentHash(Sha256( - sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102", + let field10 = RawTaggedField::KnownSemantics(crate::TaggedField::PaymentHash(PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex("0001020304050607080900010203040506070809000102030405060708090102") + .unwrap(), ) .unwrap(), ))); diff --git a/lightning-invoice/src/ser.rs b/lightning-invoice/src/ser.rs index 853accdd3ca..35d5dc024ea 100644 --- a/lightning-invoice/src/ser.rs +++ b/lightning-invoice/src/ser.rs @@ -7,9 +7,9 @@ use bech32::{ByteIterExt, Fe32, Fe32IterExt}; use super::{ constants, Bolt11Invoice, Bolt11InvoiceFeatures, Bolt11InvoiceSignature, Currency, Description, - ExpiryTime, Fallback, MinFinalCltvExpiryDelta, PayeePubKey, PaymentSecret, PositiveTimestamp, - PrivateRoute, RawDataPart, RawHrp, RawTaggedField, RouteHintHop, Sha256, SiPrefix, - SignedRawBolt11Invoice, TaggedField, + ExpiryTime, Fallback, MinFinalCltvExpiryDelta, PayeePubKey, PaymentHash, PaymentSecret, + PositiveTimestamp, PrivateRoute, RawDataPart, RawHrp, RawTaggedField, RouteHintHop, Sha256, + SiPrefix, SignedRawBolt11Invoice, TaggedField, }; macro_rules! 
define_iterator_enum { @@ -95,6 +95,18 @@ impl Base32Len for PaymentSecret { } } +impl Base32Iterable for PaymentHash { + fn fe_iter<'s>(&'s self) -> impl Iterator<Item = Fe32> + 's { + self.0[..].fe_iter() + } +} + +impl Base32Len for PaymentHash { + fn base32_len(&self) -> usize { + 52 + } +} + impl Base32Iterable for Bolt11InvoiceFeatures { /// Convert to 5-bit values, by unpacking the 5 bit groups, /// putting the bytes from right-to-left, diff --git a/lightning-invoice/src/test_ser_de.rs b/lightning-invoice/src/test_ser_de.rs index e2e4d764ac2..efeed83980e 100644 --- a/lightning-invoice/src/test_ser_de.rs +++ b/lightning-invoice/src/test_ser_de.rs @@ -1,7 +1,11 @@ use crate::de::FromBase32; use crate::ser::{Base32Iterable, Base32Len}; -use crate::{sha256, PayeePubKey, PaymentSecret, PositiveTimestamp, RawDataPart, Sha256}; +use crate::{ + sha256, PayeePubKey, PaymentHash, PaymentSecret, PositiveTimestamp, RawDataPart, + RawTaggedField, Sha256, TaggedField, +}; use bech32::Fe32; +use bitcoin::hex::FromHex; use core::fmt::Debug; use std::str::FromStr; @@ -173,11 +177,12 @@ fn bolt11_invoice_features() { #[test] fn raw_tagged_field() { - use crate::TaggedField::PaymentHash; - - let field = PaymentHash(Sha256( - sha256::Hash::from_str("0001020304050607080900010203040506070809000102030405060708090102") - .unwrap(), + let field = TaggedField::PaymentHash(PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex("0001020304050607080900010203040506070809000102030405060708090102") + .unwrap(), + ) + .unwrap(), )); ser_de_test(field, "pp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypq"); } @@ -202,17 +207,15 @@ fn description() { #[test] fn raw_data_part() { - use crate::TaggedField::PaymentHash; - let raw_data_part = RawDataPart { timestamp: PositiveTimestamp::from_unix_timestamp(10000).unwrap(), - tagged_fields: vec![PaymentHash(Sha256( - sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102", + tagged_fields: vec![RawTaggedField::KnownSemantics(TaggedField::PaymentHash(PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex("0001020304050607080900010203040506070809000102030405060708090102") + .unwrap(), ) .unwrap(), - )) - .into()], + )))], }; ser_de_test(raw_data_part, "qqqqfcspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypq"); } diff --git a/lightning-invoice/tests/ser_de.rs b/lightning-invoice/tests/ser_de.rs index b4d3fa758e1..353878a9c52 100644 --- a/lightning-invoice/tests/ser_de.rs +++ b/lightning-invoice/tests/ser_de.rs @@ -18,9 +18,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { InvoiceBuilder::new(Currency::Bitcoin) .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .description("Please consider supporting this project".to_owned()) .build_raw() .unwrap() @@ -39,9 +45,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .amount_milli_satoshis(250_000_000) .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8;
32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .description("1 cup coffee".to_owned()) .expiry_time(Duration::from_secs(60)) .build_raw() @@ -61,9 +73,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .amount_milli_satoshis(250_000_000) .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .description("ナンセンス 1杯".to_owned()) .expiry_time(Duration::from_secs(60)) .build_raw() @@ -84,9 +102,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .build_raw() .unwrap() .sign(|_| { @@ -105,9 +129,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .fallback(Fallback::PubKeyHash(PubkeyHash::from_slice(&[49, 114, 181, 101, 79, 102, 131, 200, 251, 20, 105, 89, 211, 71, 206, 48, 60, 174, 76, 167]).unwrap())) .build_raw() .unwrap() @@ -127,9 +157,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .fallback(Fallback::PubKeyHash(PubkeyHash::from_slice(&[4, 182, 31, 125, 193, 234, 13, 201, 148, 36, 70, 76, 196, 6, 77, 197, 100, 217, 30, 137]).unwrap())) 
.private_route(RouteHint(vec![RouteHintHop {
				src_node_id: PublicKey::from_slice(&<Vec<u8>>::from_hex(
@@ -166,9 +202,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> {
 			.duration_since_epoch(Duration::from_secs(1496314658))
 			.description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon"))
 			.payment_secret(PaymentSecret([0x11; 32]))
-			.payment_hash(sha256::Hash::from_str(
-				"0001020304050607080900010203040506070809000102030405060708090102"
-			).unwrap())
+			.payment_hash(lightning_invoice::PaymentHash(
+				<[u8; 32]>::try_from(
+					Vec::from_hex(
+						"0001020304050607080900010203040506070809000102030405060708090102",
+					)
+					.unwrap(),
+				)
+				.unwrap(),
+			))
 			.fallback(Fallback::ScriptHash(ScriptHash::from_slice(&[143, 85, 86, 59, 154, 25, 243, 33, 194, 17, 233, 185, 243, 140, 223, 104, 110, 160, 120, 69]).unwrap()))
 			.build_raw()
 			.unwrap()
@@ -188,9 +230,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> {
 			.duration_since_epoch(Duration::from_secs(1496314658))
 			.description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon"))
 			.payment_secret(PaymentSecret([0x11; 32]))
-			.payment_hash(sha256::Hash::from_str(
-				"0001020304050607080900010203040506070809000102030405060708090102"
-			).unwrap())
+			.payment_hash(lightning_invoice::PaymentHash(
+				<[u8; 32]>::try_from(
+					Vec::from_hex(
+						"0001020304050607080900010203040506070809000102030405060708090102",
+					)
+					.unwrap(),
+				)
+				.unwrap(),
+			))
 			.fallback(Fallback::SegWitProgram { version: WitnessVersion::V0,
				program: vec![117, 30, 118, 232, 25, 145, 150, 212, 84, 148, 28, 69, 209, 179, 163, 35, 241, 67, 59, 214] })
@@ -212,9 +260,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> {
 			.duration_since_epoch(Duration::from_secs(1496314658))
 			.description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon"))
 			.payment_secret(PaymentSecret([0x11; 32]))
-			.payment_hash(sha256::Hash::from_str(
-				"0001020304050607080900010203040506070809000102030405060708090102"
-			).unwrap())
+			.payment_hash(lightning_invoice::PaymentHash(
+				<[u8; 32]>::try_from(
+					Vec::from_hex(
+						"0001020304050607080900010203040506070809000102030405060708090102",
+					)
+					.unwrap(),
+				)
+				.unwrap(),
+			))
 			.fallback(Fallback::SegWitProgram { version: WitnessVersion::V0,
				program: vec![24, 99, 20, 60, 20, 197, 22, 104, 4, 189, 25, 32, 51, 86, 218, 19, 108, 152, 86, 120, 205, 77, 39, 161, 184, 198, 50, 150, 4, 144, 50, 98] })
@@ -235,9 +289,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> {
 			.amount_milli_satoshis(967878534)
 			.duration_since_epoch(Duration::from_secs(1572468703))
 			.payment_secret(PaymentSecret([0x11; 32]))
-			.payment_hash(sha256::Hash::from_str(
-				"462264ede7e14047e9b249da94fefc47f41f7d02ee9b091815a5506bc8abf75f"
-			).unwrap())
+			.payment_hash(lightning_invoice::PaymentHash(
+				<[u8; 32]>::try_from(
+					Vec::from_hex(
+						"462264ede7e14047e9b249da94fefc47f41f7d02ee9b091815a5506bc8abf75f",
+					)
+					.unwrap(),
+				)
+				.unwrap(),
+			))
 			.expiry_time(Duration::from_secs(604800))
.min_final_cltv_expiry_delta(10)
 			.description("Blockstream Store: 88.85 USD for Blockstream Ledger Nano S x 1, \"Back In My Day\" Sticker x 2, \"I Got Lightning Working\" Sticker x 2 and 1 more items".to_owned())
@@ -267,9 +327,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> {
 			.amount_milli_satoshis(2_500_000_000)
 			.duration_since_epoch(Duration::from_secs(1496314658))
 			.payment_secret(PaymentSecret([0x11; 32]))
-			.payment_hash(sha256::Hash::from_str(
-				"0001020304050607080900010203040506070809000102030405060708090102"
-			).unwrap())
+			.payment_hash(lightning_invoice::PaymentHash(
+				<[u8; 32]>::try_from(
+					Vec::from_hex(
+						"0001020304050607080900010203040506070809000102030405060708090102",
+					)
+					.unwrap(),
+				)
+				.unwrap(),
+			))
 			.description("coffee beans".to_owned())
 			.build_raw()
 			.unwrap()
@@ -288,9 +354,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> {
 			.amount_milli_satoshis(2_500_000_000)
 			.duration_since_epoch(Duration::from_secs(1496314658))
 			.payment_secret(PaymentSecret([0x11; 32]))
-			.payment_hash(sha256::Hash::from_str(
-				"0001020304050607080900010203040506070809000102030405060708090102"
-			).unwrap())
+			.payment_hash(lightning_invoice::PaymentHash(
+				<[u8; 32]>::try_from(
+					Vec::from_hex(
+						"0001020304050607080900010203040506070809000102030405060708090102",
+					)
+					.unwrap(),
+				)
+				.unwrap(),
+			))
 			.description("coffee beans".to_owned())
 			.build_raw()
 			.unwrap()
@@ -309,9 +381,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> {
 			.amount_milli_satoshis(2_500_000_000)
 			.duration_since_epoch(Duration::from_secs(1496314658))
 			.payment_secret(PaymentSecret([0x11; 32]))
-			.payment_hash(sha256::Hash::from_str(
-				"0001020304050607080900010203040506070809000102030405060708090102"
-			).unwrap())
+			.payment_hash(lightning_invoice::PaymentHash(
+				<[u8; 32]>::try_from(
+					Vec::from_hex(
+						"0001020304050607080900010203040506070809000102030405060708090102",
+					)
+					.unwrap(),
+				)
+				.unwrap(),
+			))
 			.description("coffee beans".to_owned())
 			.build_raw()
 			.unwrap()
@@ -329,9 +407,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> {
 		InvoiceBuilder::new(Currency::Bitcoin)
 			.amount_milli_satoshis(1_000_000_000)
 			.duration_since_epoch(Duration::from_secs(1496314658))
-			.payment_hash(sha256::Hash::from_str(
-				"0001020304050607080900010203040506070809000102030405060708090102"
-			).unwrap())
+			.payment_hash(lightning_invoice::PaymentHash(
+				<[u8; 32]>::try_from(
+					Vec::from_hex(
+						"0001020304050607080900010203040506070809000102030405060708090102",
+					)
+					.unwrap(),
+				)
+				.unwrap(),
+			))
 			.description("payment metadata inside".to_owned())
 			.payment_metadata(<Vec<u8>>::from_hex("01fafaf0").unwrap())
 			.require_payment_metadata()
@@ -355,9 +439,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> {
 		InvoiceBuilder::new(Currency::Bitcoin)
 			.amount_milli_satoshis(1_000_000_000)
 			.duration_since_epoch(Duration::from_secs(1496314658))
-			.payment_hash(sha256::Hash::from_str(
-				"0001020304050607080900010203040506070809000102030405060708090102"
-			).unwrap())
+			.payment_hash(lightning_invoice::PaymentHash(
+				<[u8; 32]>::try_from(
+					Vec::from_hex(
+						"0001020304050607080900010203040506070809000102030405060708090102",
+					)
+					.unwrap(),
+				)
+				.unwrap(),
+			))
 			.description("payment metadata inside".to_owned())
 			.payment_metadata(<Vec<u8>>::from_hex("01fafaf0").unwrap())
 			.require_payment_metadata()
@@ -378,9 +468,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> {
InvoiceBuilder::new(Currency::Bitcoin) .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .description("Please consider supporting this project".to_owned()) .build_raw() .unwrap() diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index 45c2891227d..42195ac0ee1 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -47,7 +47,6 @@ use lightning_invoice::{Bolt11Invoice, InvoiceBuilder, RoutingFees}; use lightning_types::payment::PaymentHash; -use bitcoin::hashes::{sha256, Hash}; use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; use bitcoin::Network; use lightning_types::payment::PaymentPreimage; @@ -138,10 +137,6 @@ fn create_jit_invoice( htlc_maximum_msat: None, }]); - let payment_hash = sha256::Hash::from_slice(&payment_hash.0).map_err(|e| { - log_error!(node.logger, "Invalid payment hash: {:?}", e); - })?; - let currency = Network::Bitcoin.into(); let mut invoice_builder = InvoiceBuilder::new(currency) .description(description.to_string()) diff --git a/lightning/src/ln/bolt11_payment_tests.rs b/lightning/src/ln/bolt11_payment_tests.rs index 63c5576e333..63861f98945 100644 --- a/lightning/src/ln/bolt11_payment_tests.rs +++ b/lightning/src/ln/bolt11_payment_tests.rs @@ -16,8 +16,6 @@ use crate::ln::msgs::ChannelMessageHandler; use crate::ln::outbound_payment::Bolt11PaymentError; use crate::routing::router::RouteParametersConfig; use crate::sign::{NodeSigner, Recipient}; -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; use lightning_invoice::{Bolt11Invoice, Currency, InvoiceBuilder}; use std::time::SystemTime; @@ -39,7 +37,7 @@ fn payment_metadata_end_to_end_for_invoice_with_amount() { let timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); let invoice = InvoiceBuilder::new(Currency::Bitcoin) .description("test".into()) - .payment_hash(Sha256::from_slice(&payment_hash.0).unwrap()) + .payment_hash(payment_hash) .payment_secret(payment_secret) .duration_since_epoch(timestamp) .min_final_cltv_expiry_delta(144) @@ -108,7 +106,7 @@ fn payment_metadata_end_to_end_for_invoice_with_no_amount() { let timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); let invoice = InvoiceBuilder::new(Currency::Bitcoin) .description("test".into()) - .payment_hash(Sha256::from_slice(&payment_hash.0).unwrap()) + .payment_hash(payment_hash) .payment_secret(payment_secret) .duration_since_epoch(timestamp) .min_final_cltv_expiry_delta(144) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 0f9adfcc51a..41d49be6210 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -13309,7 +13309,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let mut invoice = invoice .duration_since_epoch(duration_since_epoch) .payee_pub_key(self.get_our_node_id()) - .payment_hash(Hash::from_slice(&payment_hash.0).unwrap()) + .payment_hash(payment_hash) .payment_secret(payment_secret) .basic_mpp() .min_final_cltv_expiry_delta( diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index e72ea4518a4..2cf32331873 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -18,7 +18,6 @@ use crate::sign::{EntropySource, NodeSigner, Recipient}; use crate::types::payment::PaymentHash; use crate::util::logger::{Logger, Record}; use alloc::collections::{btree_map, BTreeMap}; -use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; #[cfg(not(feature = "std"))] use core::iter::Iterator; @@ -228,7 +227,7 @@ where let mut invoice = invoice .duration_since_epoch(duration_since_epoch) - .payment_hash(Hash::from_slice(&payment_hash.0).unwrap()) + .payment_hash(payment_hash) .payment_secret(payment_secret) .min_final_cltv_expiry_delta( // Add a buffer of 3 to the delta if present, otherwise use LDK's minimum. @@ -1257,7 +1256,8 @@ mod test { Duration::from_secs(genesis_timestamp), ) .unwrap(); - let (payment_hash, payment_secret) = (invoice.payment_hash(), *invoice.payment_secret()); + let payment_hash = invoice.payment_hash(); + let payment_secret = *invoice.payment_secret(); let payment_preimage = if user_generated_pmt_hash { user_payment_preimage } else { From 988f1b1490030afe4911cefe2d2c2308dda6773c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 27 Jan 2026 12:34:53 +0100 Subject: [PATCH 168/242] Refactor `BroadcasterInterface` to include `TransactionType` Add a `TransactionType` enum to provide context about the type of transaction being broadcast. This information can be useful for logging, filtering, or prioritization purposes. 
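For illustration (not part of this patch), a downstream implementor of the
refactored interface might consume the new pairing roughly as follows.
`LoggingBroadcaster` and its `println!` sink are hypothetical stand-ins for a
real wallet's broadcaster:

	use bitcoin::Transaction;
	use lightning::chain::chaininterface::{BroadcasterInterface, TransactionType};

	/// Hypothetical wrapper that logs the class of every transaction before
	/// delegating to an inner broadcaster.
	struct LoggingBroadcaster<B: BroadcasterInterface> {
		inner: B,
	}

	impl<B: BroadcasterInterface> BroadcasterInterface for LoggingBroadcaster<B> {
		fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]) {
			for (tx, tx_type) in txs {
				// `TransactionType` derives `Debug`, so the class can be logged directly.
				println!("broadcasting {} ({:?})", tx.compute_txid(), tx_type);
			}
			self.inner.broadcast_transactions(txs);
		}
	}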
The `TransactionType` variants are:
- `Funding`: A funding transaction establishing a new channel
- `CooperativeClose`: A cooperative close transaction
- `UnilateralClose`: A force-close transaction
- `AnchorBump`: An anchor transaction for CPFP fee-bumping
- `Claim`: A transaction claiming outputs from a commitment transaction
- `Sweep`: A transaction sweeping spendable outputs to the wallet
- `Splice`: A splice transaction modifying an existing channel's funding

Co-Authored-By: HAL 9000
Signed-off-by: Elias Rohrer
---
 fuzz/src/chanmon_consistency.rs | 8 +-
 fuzz/src/full_stack.rs | 8 +-
 lightning-liquidity/src/lsps2/service.rs | 17 +--
 lightning/src/chain/chaininterface.rs | 94 +++++++++++++-
 lightning/src/chain/channelmonitor.rs | 8 +-
 lightning/src/chain/onchaintx.rs | 42 +++++--
 lightning/src/events/bump_transaction/mod.rs | 29 +++--
 lightning/src/ln/channel.rs | 26 +++-
 lightning/src/ln/channelmanager.rs | 44 +++++--
 lightning/src/util/sweep.rs | 125 +++++++++++--------
 lightning/src/util/test_utils.rs | 14 ++-
 11 files changed, 305 insertions(+), 110 deletions(-)

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index f87af5c6ff5..f363da534ac 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -36,7 +36,9 @@ use bitcoin::WPubkeyHash;
 use lightning::blinded_path::message::{BlindedMessagePath, MessageContext, MessageForwardNode};
 use lightning::blinded_path::payment::{BlindedPaymentPath, ReceiveTlvs};
 use lightning::chain;
-use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
+use lightning::chain::chaininterface::{
+	TransactionType, BroadcasterInterface, ConfirmationTarget, FeeEstimator,
+};
 use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent};
 use lightning::chain::transaction::OutPoint;
 use lightning::chain::{
@@ -159,8 +161,8 @@ pub struct TestBroadcaster {
 	txn_broadcasted: RefCell<Vec<Transaction>>,
 }
 impl BroadcasterInterface for TestBroadcaster {
-	fn broadcast_transactions(&self, txs: &[&Transaction]) {
-		for tx in txs {
+	fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]) {
+		for (tx, _broadcast_type) in txs {
 			self.txn_broadcasted.borrow_mut().push((*tx).clone());
 		}
 	}
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index e73db74fa5d..0b0f097863a 100644
--- a/fuzz/src/full_stack.rs
+++ b/fuzz/src/full_stack.rs
@@ -35,7 +35,9 @@ use lightning::ln::funding::{FundingTxInput, SpliceContribution};
 use lightning::blinded_path::message::{BlindedMessagePath, MessageContext, MessageForwardNode};
 use lightning::blinded_path::payment::{BlindedPaymentPath, ReceiveTlvs};
 use lightning::chain;
-use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
+use lightning::chain::chaininterface::{
+	TransactionType, BroadcasterInterface, ConfirmationTarget, FeeEstimator,
+};
 use lightning::chain::chainmonitor;
 use lightning::chain::transaction::OutPoint;
 use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen};
@@ -187,8 +189,8 @@ struct TestBroadcaster {
 	txn_broadcasted: Mutex<Vec<Transaction>>,
 }
 impl BroadcasterInterface for TestBroadcaster {
-	fn broadcast_transactions(&self, txs: &[&Transaction]) {
-		let owned_txs: Vec<Transaction> = txs.iter().map(|tx| (*tx).clone()).collect();
+	fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]) {
+		let owned_txs: Vec<Transaction> = txs.iter().map(|(tx, _)| (*tx).clone()).collect();
 		self.txn_broadcasted.lock().unwrap().extend(owned_txs);
 	}
 }
diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs
index
00f68aff696..4c688d39eef 100644
--- a/lightning-liquidity/src/lsps2/service.rs
+++ b/lightning-liquidity/src/lsps2/service.rs
@@ -40,7 +40,7 @@ use crate::prelude::{new_hash_map, HashMap};
 use crate::sync::{Arc, Mutex, MutexGuard, RwLock};
 use crate::utils::async_poll::dummy_waker;

-use lightning::chain::chaininterface::BroadcasterInterface;
+use lightning::chain::chaininterface::{BroadcasterInterface, TransactionType};
 use lightning::events::HTLCHandlingFailureType;
 use lightning::ln::channelmanager::{AChannelManager, FailureCode, InterceptId};
 use lightning::ln::msgs::{ErrorAction, LightningError};
@@ -2019,23 +2019,24 @@
 			// (for example when a forwarded HTLC nears expiry). Broadcasting funding after a
 			// close could then confirm the commitment and trigger unintended on‑chain handling.
 			// To avoid this, we check ChannelManager’s view (`is_channel_ready`) before broadcasting.
-			let channel_id_opt = jit_channel.get_channel_id();
-			if let Some(ch_id) = channel_id_opt {
+			if let Some(ch_id) = jit_channel.get_channel_id() {
 				let is_channel_ready = self
 					.channel_manager
 					.get_cm()
 					.list_channels()
 					.into_iter()
 					.any(|cd| cd.channel_id == ch_id && cd.is_channel_ready);
+
 				if !is_channel_ready {
 					return;
 				}
-			} else {
-				return;
-			}

-			if let Some(funding_tx) = jit_channel.get_funding_tx() {
-				self.tx_broadcaster.broadcast_transactions(&[funding_tx]);
+				if let Some(funding_tx) = jit_channel.get_funding_tx() {
+					self.tx_broadcaster.broadcast_transactions(&[(
+						funding_tx,
+						TransactionType::Funding { channel_ids: vec![ch_id] },
+					)]);
+				}
 			}
 		}
 	}
diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs
index 7e71d960e67..758fd1a74e2 100644
--- a/lightning/src/chain/chaininterface.rs
+++ b/lightning/src/chain/chaininterface.rs
@@ -15,10 +15,97 @@

 use core::{cmp, ops::Deref};

+use crate::ln::types::ChannelId;
 use crate::prelude::*;

 use bitcoin::transaction::Transaction;

+/// Represents the class of transaction being broadcast.
+///
+/// This is used to provide context about the type of transaction being broadcast, which may be
+/// useful for logging, filtering, or prioritization purposes.
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub enum TransactionType {
+	/// A funding transaction establishing a new channel.
+	///
+	/// If we initiated the channel the transaction given to
+	/// [`ChannelManager::funding_transaction_generated`] will be broadcast with this type.
+	///
+	/// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
+	Funding {
+		/// The IDs of the channels being funded.
+		///
+		/// A single funding transaction may establish multiple channels when using batch funding.
+		channel_ids: Vec<ChannelId>,
+	},
+	/// A transaction cooperatively closing a channel.
+	///
+	/// A transaction of this type will be broadcast when cooperatively closing a channel via
+	/// [`ChannelManager::close_channel`] or if the counterparty closes the channel.
+	///
+	/// [`ChannelManager::close_channel`]: crate::ln::channelmanager::ChannelManager::close_channel
+	CooperativeClose {
+		/// The ID of the channel being closed.
+		channel_id: ChannelId,
+	},
+	/// A transaction being broadcast to force-close the channel.
+	///
+	/// A transaction of this type will be broadcast when unilaterally closing a channel via
+	/// [`ChannelManager::force_close_broadcasting_latest_txn`] or if the counterparty force-closes
+	/// the channel.
+	///
+	/// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn
+	UnilateralClose {
+		/// The ID of the channel being force-closed.
+		channel_id: ChannelId,
+	},
+	/// An anchor bumping transaction used for CPFP fee-bumping a closing transaction.
+	///
+	/// This will be broadcast after an anchor channel has been closed. See
+	/// [`BumpTransactionEvent`] for more information.
+	///
+	/// [`BumpTransactionEvent`]: crate::events::bump_transaction::BumpTransactionEvent
+	AnchorBump {
+		/// The ID of the channel whose closing transaction is being fee-bumped.
+		channel_id: ChannelId,
+	},
+	/// A transaction which is resolving an output spendable by both us and our counterparty.
+	///
+	/// When a channel closes via the unilateral close path, there may be transaction outputs which
+	/// are spendable by either our counterparty or us and represent some lightning state. In order
+	/// to resolve that state, the [`ChannelMonitor`] will spend any such outputs, ensuring funds
+	/// are only available to us prior to generating an [`Event::SpendableOutputs`]. This
+	/// transaction is one such transaction - resolving in-flight HTLCs or punishing our
+	/// counterparty if they broadcasted an outdated state.
+	///
+	/// [`ChannelMonitor`]: crate::chain::ChannelMonitor
+	/// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs
+	Claim {
+		/// The ID of the channel from which outputs are being claimed.
+		channel_id: ChannelId,
+	},
+	/// A transaction generated by the [`OutputSweeper`], sweeping [`SpendableOutputDescriptor`]s
+	/// to the user's wallet.
+	///
+	/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
+	/// [`SpendableOutputDescriptor`]: crate::sign::SpendableOutputDescriptor
+	Sweep {
+		/// The IDs of the channels from which outputs are being swept, if known.
+		///
+		/// A single sweep transaction may aggregate outputs from multiple channels.
+		channel_ids: Vec<ChannelId>,
+	},
+	/// A splice transaction modifying an existing channel's funding.
+	///
+	/// A transaction of this type will be broadcast as a result of a [`ChannelManager::splice_channel`] operation.
+	///
+	/// [`ChannelManager::splice_channel`]: crate::ln::channelmanager::ChannelManager::splice_channel
+	Splice {
+		/// The ID of the channel being spliced.
+		channel_id: ChannelId,
+	},
+}
+
 // TODO: Define typed abstraction over feerates to handle their conversions.
 pub(crate) fn compute_feerate_sat_per_1000_weight(fee_sat: u64, weight: u64) -> u32 {
 	(fee_sat * 1000 / weight).try_into().unwrap_or(u32::max_value())
@@ -45,11 +132,14 @@ pub trait BroadcasterInterface {
 	///
 	/// Bitcoin transaction packages are defined in BIP 331 and here:
 	/// <https://bitcoinops.org/en/topics/package-relay/>
+	///
+	/// Each transaction is paired with a [`TransactionType`] indicating the class of transaction
+	/// being broadcast, which may be useful for logging, filtering, or prioritization.
+ fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]); } impl> BroadcasterInterface for B { - fn broadcast_transactions(&self, txs: &[&Transaction]) { + fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]) { self.deref().broadcast_transactions(txs) } } diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 80d0ef125fc..2d2fdd14ad2 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1879,8 +1879,9 @@ impl ChannelMonitor { initial_holder_commitment_tx.trust().commitment_number(); let onchain_tx_handler = OnchainTxHandler::new( - channel_parameters.channel_value_satoshis, channel_keys_id, destination_script.into(), - keys, channel_parameters.clone(), initial_holder_commitment_tx.clone(), secp_ctx + channel_id, channel_parameters.channel_value_satoshis, channel_keys_id, + destination_script.into(), keys, channel_parameters.clone(), + initial_holder_commitment_tx.clone(), secp_ctx, ); let funding_outpoint = channel_parameters.funding_outpoint @@ -6491,7 +6492,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP return Err(DecodeError::InvalidValue); } } - let onchain_tx_handler: OnchainTxHandler = ReadableArgs::read( + let mut onchain_tx_handler: OnchainTxHandler = ReadableArgs::read( reader, (entropy_source, signer_provider, channel_value_satoshis, channel_keys_id) )?; @@ -6587,6 +6588,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP } let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)); + onchain_tx_handler.set_channel_id(channel_id); let (current_holder_commitment_tx, current_holder_htlc_data) = { let holder_commitment_tx = onchain_tx_handler.current_holder_commitment_tx(); diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index cfee63beefd..8de99eb8601 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -23,7 +23,9 @@ use bitcoin::transaction::OutPoint as BitcoinOutPoint; use bitcoin::transaction::Transaction; use crate::chain::chaininterface::ConfirmationTarget; -use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator}; +use crate::chain::chaininterface::{ + BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator, TransactionType, +}; use crate::chain::channelmonitor::ANTI_REORG_DELAY; use crate::chain::package::{PackageSolvingData, PackageTemplate}; use crate::chain::transaction::MaybeSignedTransaction; @@ -33,6 +35,7 @@ use crate::ln::chan_utils::{ HTLCOutputInCommitment, HolderCommitmentTransaction, }; use crate::ln::msgs::DecodeError; +use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, HTLCDescriptor, SignerProvider}; use crate::util::logger::Logger; use crate::util::ser::{ @@ -220,6 +223,7 @@ pub(crate) enum FeerateStrategy { /// do RBF bumping if possible. #[derive(Clone)] pub struct OnchainTxHandler { + channel_id: ChannelId, channel_value_satoshis: u64, // Deprecated as of 0.2. channel_keys_id: [u8; 32], // Deprecated as of 0.2. destination_script: ScriptBuf, // Deprecated as of 0.2. @@ -282,7 +286,8 @@ impl PartialEq for OnchainTxHandler bool { // `signer`, `secp_ctx`, and `pending_claim_events` are excluded on purpose. 
-		self.channel_value_satoshis == other.channel_value_satoshis &&
+		self.channel_id == other.channel_id &&
+		self.channel_value_satoshis == other.channel_value_satoshis &&
 		self.channel_keys_id == other.channel_keys_id &&
 		self.destination_script == other.destination_script &&
 		self.holder_commitment == other.holder_commitment &&
@@ -345,6 +350,14 @@ impl OnchainTxHandler {
 		write_tlv_fields!(writer, {});
 		Ok(())
 	}
+
+	// `ChannelMonitor`s already track the `channel_id`, however, due to the deserialization order
+	// there we can't make use of `ReadableArgs` to hand it into `OnchainTxHandler`'s
+	// deserialization logic directly. Instead we opt to initialize it with 0s and override it
+	// after reading the respective field via this method.
+	pub(crate) fn set_channel_id(&mut self, channel_id: ChannelId) {
+		self.channel_id = channel_id;
+	}
 }

 impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP, u64, [u8; 32])>
@@ -366,7 +379,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 		let prev_holder_commitment = Readable::read(reader)?;
 		let _prev_holder_htlc_sigs: Option<Vec<Option<(usize, Signature)>>> = Readable::read(reader)?;

-		let channel_parameters = ReadableArgs::<Option<u64>>::read(reader, Some(channel_value_satoshis))?;
+		let channel_parameters: ChannelTransactionParameters = ReadableArgs::<Option<u64>>::read(reader, Some(channel_value_satoshis))?;

 		// Read the serialized signer bytes, but don't deserialize them, as we'll obtain our signer
 		// by re-deriving the private key material.
@@ -420,10 +433,17 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 		read_tlv_fields!(reader, {});

+		// `ChannelMonitor`s already track the `channel_id`, however, due to the deserialization
+		// order there we can't make use of `ReadableArgs` to hand it in directly. Instead we opt
+		// to initialize it with 0s and override it after reading the respective field via
+		// `OnchainTxHandler::set_channel_id`.
+ let channel_id = ChannelId([0u8; 32]); + let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); Ok(OnchainTxHandler { + channel_id, channel_value_satoshis, channel_keys_id, destination_script, @@ -443,11 +463,13 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP impl OnchainTxHandler { pub(crate) fn new( - channel_value_satoshis: u64, channel_keys_id: [u8; 32], destination_script: ScriptBuf, - signer: ChannelSigner, channel_parameters: ChannelTransactionParameters, + channel_id: ChannelId, channel_value_satoshis: u64, channel_keys_id: [u8; 32], + destination_script: ScriptBuf, signer: ChannelSigner, + channel_parameters: ChannelTransactionParameters, holder_commitment: HolderCommitmentTransaction, secp_ctx: Secp256k1, ) -> Self { OnchainTxHandler { + channel_id, channel_value_satoshis, channel_keys_id, destination_script, @@ -511,7 +533,7 @@ impl OnchainTxHandler { if tx.is_fully_signed() { let log_start = if feerate_was_bumped { "Broadcasting RBF-bumped" } else { "Rebroadcasting" }; log_info!(logger, "{} onchain {}", log_start, log_tx!(tx.0)); - broadcaster.broadcast_transactions(&[&tx.0]); + broadcaster.broadcast_transactions(&[(&tx.0, TransactionType::Claim { channel_id: self.channel_id })]); } else { log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", tx.0.compute_txid()); } @@ -853,7 +875,7 @@ impl OnchainTxHandler { OnchainClaim::Tx(tx) => { if tx.is_fully_signed() { log_info!(logger, "Broadcasting onchain {}", log_tx!(tx.0)); - broadcaster.broadcast_transactions(&[&tx.0]); + broadcaster.broadcast_transactions(&[(&tx.0, TransactionType::Claim { channel_id: self.channel_id })]); } else { log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", tx.0.compute_txid()); } @@ -1071,7 +1093,7 @@ impl OnchainTxHandler { OnchainClaim::Tx(bump_tx) => { if bump_tx.is_fully_signed() { log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx.0)); - broadcaster.broadcast_transactions(&[&bump_tx.0]); + broadcaster.broadcast_transactions(&[(&bump_tx.0, TransactionType::Claim { channel_id: self.channel_id })]); } else { log_info!(logger, "Waiting for signature of RBF-bumped unsigned onchain transaction {}", bump_tx.0.compute_txid()); @@ -1168,7 +1190,7 @@ impl OnchainTxHandler { OnchainClaim::Tx(bump_tx) => { if bump_tx.is_fully_signed() { log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx.0)); - broadcaster.broadcast_transactions(&[&bump_tx.0]); + broadcaster.broadcast_transactions(&[(&bump_tx.0, TransactionType::Claim { channel_id: self.channel_id })]); } else { log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", bump_tx.0.compute_txid()); } @@ -1262,6 +1284,7 @@ mod tests { }; use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint}; use crate::ln::functional_test_utils::create_dummy_block; + use crate::ln::types::ChannelId; use crate::sign::{ChannelDerivationParameters, ChannelSigner, HTLCDescriptor, InMemorySigner}; use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::test_utils::{TestBroadcaster, TestFeeEstimator, TestLogger}; @@ -1346,6 +1369,7 @@ mod tests { let holder_commit = HolderCommitmentTransaction::dummy(1000000, funding_outpoint, nondust_htlcs); let destination_script = ScriptBuf::new(); let mut tx_handler = OnchainTxHandler::new( + ChannelId::from_bytes([0; 32]), 1000000, [0; 32], destination_script.clone(), diff --git 
a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 1b3496c5eab..6b6581ec749 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -18,7 +18,7 @@ use core::future::Future; use core::ops::Deref; use crate::chain::chaininterface::{ - compute_feerate_sat_per_1000_weight, fee_for_weight, BroadcasterInterface, + compute_feerate_sat_per_1000_weight, fee_for_weight, BroadcasterInterface, TransactionType, }; use crate::chain::ClaimId; use crate::io_extras::sink; @@ -761,9 +761,9 @@ where /// transaction spending an anchor output of the commitment transaction to bump its fee and /// broadcasts them to the network as a package. async fn handle_channel_close( - &self, claim_id: ClaimId, package_target_feerate_sat_per_1000_weight: u32, - commitment_tx: &Transaction, commitment_tx_fee_sat: u64, - anchor_descriptor: &AnchorDescriptor, + &self, channel_id: ChannelId, claim_id: ClaimId, + package_target_feerate_sat_per_1000_weight: u32, commitment_tx: &Transaction, + commitment_tx_fee_sat: u64, anchor_descriptor: &AnchorDescriptor, ) -> Result<(), ()> { let channel_type = &anchor_descriptor .channel_derivation_parameters @@ -784,7 +784,10 @@ where log_debug!(self.logger, "Pre-signed commitment {} already has feerate {} sat/kW above required {} sat/kW, broadcasting.", commitment_tx.compute_txid(), commitment_tx_feerate_sat_per_1000_weight, package_target_feerate_sat_per_1000_weight); - self.broadcaster.broadcast_transactions(&[&commitment_tx]); + self.broadcaster.broadcast_transactions(&[( + &commitment_tx, + TransactionType::UnilateralClose { channel_id }, + )]); return Ok(()); } @@ -951,7 +954,10 @@ where anchor_txid, commitment_tx.compute_txid() ); - self.broadcaster.broadcast_transactions(&[&commitment_tx, &anchor_tx]); + self.broadcaster.broadcast_transactions(&[ + (&commitment_tx, TransactionType::UnilateralClose { channel_id }), + (&anchor_tx, TransactionType::AnchorBump { channel_id }), + ]); return Ok(()); } } @@ -959,7 +965,7 @@ where /// Handles a [`BumpTransactionEvent::HTLCResolution`] event variant by producing a /// fully-signed, fee-bumped HTLC transaction that is broadcast to the network. 
async fn handle_htlc_resolution( - &self, claim_id: ClaimId, target_feerate_sat_per_1000_weight: u32, + &self, channel_id: ChannelId, claim_id: ClaimId, target_feerate_sat_per_1000_weight: u32, htlc_descriptors: &[HTLCDescriptor], tx_lock_time: LockTime, ) -> Result<(), ()> { let channel_type = &htlc_descriptors[0] @@ -1184,7 +1190,10 @@ where } log_info!(self.logger, "Broadcasting {}", log_tx!(htlc_tx)); - self.broadcaster.broadcast_transactions(&[&htlc_tx]); + self.broadcaster.broadcast_transactions(&[( + &htlc_tx, + TransactionType::UnilateralClose { channel_id }, + )]); } Ok(()) @@ -1194,6 +1203,7 @@ where pub async fn handle_event(&self, event: &BumpTransactionEvent) { match event { BumpTransactionEvent::ChannelClose { + channel_id, claim_id, package_target_feerate_sat_per_1000_weight, commitment_tx, @@ -1208,6 +1218,7 @@ where commitment_tx.compute_txid() ); self.handle_channel_close( + *channel_id, *claim_id, *package_target_feerate_sat_per_1000_weight, commitment_tx, @@ -1224,6 +1235,7 @@ where }); }, BumpTransactionEvent::HTLCResolution { + channel_id, claim_id, target_feerate_sat_per_1000_weight, htlc_descriptors, @@ -1237,6 +1249,7 @@ where log_iter!(htlc_descriptors.iter().map(|d| d.outpoint())) ); self.handle_htlc_resolution( + *channel_id, *claim_id, *target_feerate_sat_per_1000_weight, htlc_descriptors, diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 042b388e1a1..a47241d5a98 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -28,7 +28,7 @@ use bitcoin::{secp256k1, sighash, FeeRate, Sequence, TxIn}; use crate::blinded_path::message::BlindedMessagePath; use crate::chain::chaininterface::{ - fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, + fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, TransactionType, }; use crate::chain::channelmonitor::{ ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, CommitmentHTLCData, @@ -2103,6 +2103,8 @@ where }, }; + let channel_id = context.channel_id; + let signing_session = if let Some(signing_session) = context.interactive_tx_signing_session.as_mut() { @@ -2213,6 +2215,15 @@ where (None, None) }; + let funding_tx = funding_tx.map(|tx| { + let tx_type = if splice_negotiated.is_some() { + TransactionType::Splice { channel_id } + } else { + TransactionType::Funding { channel_ids: vec![channel_id] } + }; + (tx, tx_type) + }); + // If we have a pending splice with a buffered initial commitment signed from our // counterparty, process it now that we have provided our signatures. let counterparty_initial_commitment_signed_result = @@ -6919,8 +6930,8 @@ pub(super) struct FundingTxSigned { /// Signatures that should be sent to the counterparty, if necessary. pub tx_signatures: Option, - /// The fully-signed funding transaction to be broadcast. - pub funding_tx: Option, + /// The fully-signed funding transaction to be broadcast, along with the transaction type. + pub funding_tx: Option<(Transaction, TransactionType)>, /// Information about the completed funding negotiation. 
pub splice_negotiated: Option, @@ -9143,6 +9154,15 @@ where (None, None) }; + let funding_tx = funding_tx.map(|tx| { + let tx_type = if splice_negotiated.is_some() { + TransactionType::Splice { channel_id: self.context.channel_id } + } else { + TransactionType::Funding { channel_ids: vec![self.context.channel_id] } + }; + (tx, tx_type) + }); + Ok(FundingTxSigned { commitment_signed: None, counterparty_initial_commitment_signed_result: None, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 129a4d58171..775d1ea7682 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -40,6 +40,7 @@ use crate::blinded_path::NodeIdLookUp; use crate::chain; use crate::chain::chaininterface::{ BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, + TransactionType, }; use crate::chain::channelmonitor::{ Balance, ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, MonitorEvent, @@ -6428,13 +6429,14 @@ impl< splice_negotiated, splice_locked, }) => { - if let Some(funding_tx) = funding_tx { + if let Some((funding_tx, tx_type)) = funding_tx { let funded_chan = chan.as_funded_mut().expect( "Funding transactions ready for broadcast can only exist for funded channels", ); self.broadcast_interactive_funding( funded_chan, &funding_tx, + Some(tx_type), &self.logger, ); } @@ -6562,7 +6564,8 @@ impl< } fn broadcast_interactive_funding( - &self, channel: &mut FundedChannel, funding_tx: &Transaction, logger: &L, + &self, channel: &mut FundedChannel, funding_tx: &Transaction, + transaction_type: Option, logger: &L, ) { let logger = WithChannelContext::from(logger, channel.context(), None); log_info!( @@ -6570,7 +6573,10 @@ impl< "Broadcasting signed interactive funding transaction {}", funding_tx.compute_txid() ); - self.tx_broadcaster.broadcast_transactions(&[funding_tx]); + let tx_type = transaction_type.unwrap_or_else(|| TransactionType::Funding { + channel_ids: vec![channel.context().channel_id()], + }); + self.tx_broadcaster.broadcast_transactions(&[(funding_tx, tx_type)]); { let mut pending_events = self.pending_events.lock().unwrap(); emit_channel_pending_event!(pending_events, channel); @@ -9579,6 +9585,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten(); let per_peer_state = self.per_peer_state.read().unwrap(); let mut batch_funding_tx = None; + let mut batch_channel_ids = Vec::new(); for (channel_id, counterparty_node_id, _) in removed_batch_state { if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state = peer_state_mutex.lock().unwrap(); @@ -9589,6 +9596,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ funded_chan.context.unbroadcasted_funding(&funded_chan.funding) }); funded_chan.set_batch_ready(); + batch_channel_ids.push(channel_id); let mut pending_events = self.pending_events.lock().unwrap(); emit_channel_pending_event!(pending_events, funded_chan); @@ -9597,7 +9605,10 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } if let Some(tx) = batch_funding_tx { log_info!(self.logger, "Broadcasting batch funding tx {}", tx.compute_txid()); - self.tx_broadcaster.broadcast_transactions(&[&tx]); + self.tx_broadcaster.broadcast_transactions(&[( + &tx, + TransactionType::Funding { channel_ids: batch_channel_ids }, + )]); } } } @@ -10263,7 +10274,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; } else { log_info!(logger, "Broadcasting funding transaction with txid {}", tx.compute_txid()); - self.tx_broadcaster.broadcast_transactions(&[&tx]); + self.tx_broadcaster.broadcast_transactions(&[( + &tx, + TransactionType::Funding { channel_ids: vec![channel.context.channel_id()] }, + )]); } } @@ -11390,8 +11404,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ msg: splice_locked, }); } - if let Some(ref funding_tx) = funding_tx { - self.broadcast_interactive_funding(chan, funding_tx, &self.logger); + if let Some((ref funding_tx, ref tx_type)) = funding_tx { + self.broadcast_interactive_funding(chan, funding_tx, Some(tx_type.clone()), &self.logger); } if let Some(splice_negotiated) = splice_negotiated { self.pending_events.lock().unwrap().push_back(( @@ -11702,7 +11716,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ mem::drop(per_peer_state); if let Some((broadcast_tx, err)) = tx_err { log_info!(logger, "Broadcasting {}", log_tx!(broadcast_tx)); - self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]); + self.tx_broadcaster.broadcast_transactions(&[( + &broadcast_tx, + TransactionType::CooperativeClose { channel_id: msg.channel_id }, + )]); let _ = self.handle_error(err, *counterparty_node_id); } Ok(()) @@ -12919,7 +12936,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } if let Some(broadcast_tx) = msgs.signed_closing_tx { log_info!(logger, "Broadcasting closing tx {}", log_tx!(broadcast_tx)); - self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]); + self.tx_broadcaster.broadcast_transactions(&[( + &broadcast_tx, + TransactionType::CooperativeClose { channel_id }, + )]); } } else { // We don't know how to handle a channel_ready or signed_closing_tx for a @@ -13037,6 +13057,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some((tx, shutdown_res)) = tx_shutdown_result_opt { // We're done with this channel. We got a closing_signed and sent back // a closing_signed with a closing transaction to broadcast. + let channel_id = funded_chan.context.channel_id(); let err = self.locked_handle_funded_coop_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, @@ -13046,7 +13067,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ handle_errors.push((*cp_id, Err(err))); log_info!(logger, "Broadcasting {}", log_tx!(tx)); - self.tx_broadcaster.broadcast_transactions(&[&tx]); + self.tx_broadcaster.broadcast_transactions(&[( + &tx, + TransactionType::CooperativeClose { channel_id }, + )]); false } else { true diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 2aef2186323..f0c2d6d2d15 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -8,7 +8,9 @@ //! [`SpendableOutputDescriptor`]s, i.e., persists them in a given [`KVStoreSync`] and regularly retries //! sweeping them. 
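The following changes make the sweeper collect the unique IDs of the channels
its outputs came from and attach them via `TransactionType::Sweep { channel_ids }`.
A consumer could use this to attribute a sweep back to its originating
channels; a minimal, hypothetical sketch (the helper and its `println!` output
are illustrative only, not part of this patch):

	use bitcoin::Transaction;
	use lightning::chain::chaininterface::TransactionType;

	fn log_sweep_attribution(tx: &Transaction, tx_type: &TransactionType) {
		if let TransactionType::Sweep { channel_ids } = tx_type {
			// `channel_ids` may be empty if the originating channels are unknown.
			for channel_id in channel_ids {
				println!("sweep {} spends outputs from channel {:?}", tx.compute_txid(), channel_id);
			}
		}
	}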
-use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
+use crate::chain::chaininterface::{
+	BroadcasterInterface, ConfirmationTarget, FeeEstimator, TransactionType,
+};
 use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS};
 use crate::chain::{self, BestBlock, Confirm, Filter, Listen, WatchedOutput};
 use crate::io;
@@ -525,66 +527,79 @@
 			self.change_destination_source.get_change_destination_script().await?;

 		// Sweep the outputs.
-		let spending_tx = self
-			.update_state(|sweeper_state| -> Result<(Option<Transaction>, bool), ()> {
-				let cur_height = sweeper_state.best_block.height;
-				let cur_hash = sweeper_state.best_block.block_hash;
-
-				let respend_descriptors_set: HashSet<&SpendableOutputDescriptor> = sweeper_state
-					.outputs
-					.iter()
-					.filter(|o| filter_fn(*o, cur_height))
-					.map(|o| &o.descriptor)
-					.collect();
-
-				// we first collect into a set to avoid duplicates and to "randomize" the order
-				// in which outputs are spent. Then we collect into a vec as that is what
-				// `spend_outputs` requires.
-				let respend_descriptors: Vec<&SpendableOutputDescriptor> =
-					respend_descriptors_set.into_iter().collect();
-
-				// Generate the spending transaction and broadcast it.
-				if !respend_descriptors.is_empty() {
-					let spending_tx = self
-						.spend_outputs(
-							&sweeper_state,
-							&respend_descriptors,
-							change_destination_script,
-						)
-						.map_err(|e| {
-							log_error!(self.logger, "Error spending outputs: {:?}", e);
-						})?;
-
-					log_debug!(
-						self.logger,
-						"Generating and broadcasting sweeping transaction {}",
-						spending_tx.compute_txid()
-					);
-
-					// As we didn't modify the state so far, the same filter_fn yields the same elements as
-					// above.
-					let respend_outputs =
-						sweeper_state.outputs.iter_mut().filter(|o| filter_fn(&**o, cur_height));
-					for output_info in respend_outputs {
-						if let Some(filter) = self.chain_data_source.as_ref() {
-							let watched_output = output_info.to_watched_output(cur_hash);
-							filter.register_output(watched_output);
+		let spending_tx_and_chan_id = self
+			.update_state(
+				|sweeper_state| -> Result<(Option<(Transaction, Vec<ChannelId>)>, bool), ()> {
+					let cur_height = sweeper_state.best_block.height;
+					let cur_hash = sweeper_state.best_block.block_hash;
+
+					let respend_descriptors_set: HashSet<&SpendableOutputDescriptor> =
+						sweeper_state
+							.outputs
+							.iter()
+							.filter(|o| filter_fn(*o, cur_height))
+							.map(|o| &o.descriptor)
+							.collect();
+
+					// we first collect into a set to avoid duplicates and to "randomize" the order
+					// in which outputs are spent. Then we collect into a vec as that is what
+					// `spend_outputs` requires.
+					let respend_descriptors: Vec<&SpendableOutputDescriptor> =
+						respend_descriptors_set.into_iter().collect();
+
+					// Generate the spending transaction and broadcast it.
+					if !respend_descriptors.is_empty() {
+						let spending_tx = self
+							.spend_outputs(
+								&sweeper_state,
+								&respend_descriptors,
+								change_destination_script,
+							)
+							.map_err(|e| {
+								log_error!(self.logger, "Error spending outputs: {:?}", e);
+							})?;
+
+						log_debug!(
+							self.logger,
+							"Generating and broadcasting sweeping transaction {}",
+							spending_tx.compute_txid()
+						);
+
+						// As we didn't modify the state so far, the same filter_fn yields the same elements as
+						// above.
+ let respend_outputs = sweeper_state + .outputs + .iter_mut() + .filter(|o| filter_fn(&**o, cur_height)); + let mut channel_ids = Vec::new(); + for output_info in respend_outputs { + if let Some(filter) = self.chain_data_source.as_ref() { + let watched_output = output_info.to_watched_output(cur_hash); + filter.register_output(watched_output); + } + + if let Some(channel_id) = output_info.channel_id { + if !channel_ids.contains(&channel_id) { + channel_ids.push(channel_id); + } + } + + output_info.status.broadcast(cur_hash, cur_height, spending_tx.clone()); + sweeper_state.dirty = true; } - output_info.status.broadcast(cur_hash, cur_height, spending_tx.clone()); - sweeper_state.dirty = true; + Ok((Some((spending_tx, channel_ids)), false)) + } else { + Ok((None, false)) } - - Ok((Some(spending_tx), false)) - } else { - Ok((None, false)) - } - }) + }, + ) .await?; // Persistence completely successfully. If we have a spending transaction, we broadcast it. - if let Some(spending_tx) = spending_tx { - self.broadcaster.broadcast_transactions(&[&spending_tx]); + if let Some((spending_tx, channel_ids)) = spending_tx_and_chan_id { + self.broadcaster + .broadcast_transactions(&[(&spending_tx, TransactionType::Sweep { channel_ids })]); } Ok(()) diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 34f5d5fe36e..f5c73ca4ca3 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -12,9 +12,9 @@ use crate::blinded_path::message::{BlindedMessagePath, MessageForwardNode}; use crate::blinded_path::payment::{BlindedPaymentPath, ReceiveTlvs}; use crate::chain; use crate::chain::chaininterface; -use crate::chain::chaininterface::ConfirmationTarget; #[cfg(any(test, feature = "_externalize_tests"))] use crate::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW; +use crate::chain::chaininterface::{ConfirmationTarget, TransactionType}; use crate::chain::chainmonitor::{ChainMonitor, Persist}; use crate::chain::channelmonitor::{ ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, MonitorEvent, @@ -1154,7 +1154,7 @@ impl TestBroadcaster { } impl chaininterface::BroadcasterInterface for TestBroadcaster { - fn broadcast_transactions(&self, txs: &[&Transaction]) { + fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]) { // Assert that any batch of transactions of length greater than 1 is sorted // topologically, and is a `child-with-parents` package as defined in // . @@ -1165,21 +1165,23 @@ impl chaininterface::BroadcasterInterface for TestBroadcaster { // Right now LDK only ever broadcasts packages of length 2. 
		assert!(txs.len() <= 2);
 		if txs.len() == 2 {
-			let parent_txid = txs[0].compute_txid();
+			let parent_txid = txs[0].0.compute_txid();
 			assert!(txs[1]
+				.0
 				.input
 				.iter()
 				.map(|input| input.previous_output.txid)
 				.any(|txid| txid == parent_txid));
-			let child_txid = txs[1].compute_txid();
+			let child_txid = txs[1].0.compute_txid();
 			assert!(txs[0]
+				.0
 				.input
 				.iter()
 				.map(|input| input.previous_output.txid)
 				.all(|txid| txid != child_txid));
 		}
-		for tx in txs {
+		for (tx, _broadcast_type) in txs {
 			let lock_time = tx.lock_time.to_consensus_u32();
 			assert!(lock_time < 1_500_000_000);
 			if tx.lock_time.is_block_height()
@@ -1195,7 +1197,7 @@ impl chaininterface::BroadcasterInterface for TestBroadcaster {
 				}
 			}
 		}
-		let owned_txs: Vec<Transaction> = txs.iter().map(|tx| (*tx).clone()).collect();
+		let owned_txs: Vec<Transaction> = txs.iter().map(|(tx, _)| (*tx).clone()).collect();
 		self.txn_broadcasted.lock().unwrap().extend(owned_txs);
 	}
 }

From 32a801ef18bc7d25c7c191d791ec359532bc14a4 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Mon, 2 Feb 2026 15:00:50 +0100
Subject: [PATCH 169/242] Add test coverage for `TransactionType::Splice`

Add a parallel `txn_types` vector to `TestBroadcaster` to track
`TransactionType` alongside broadcast transactions. The existing
`txn_broadcast()` API remains unchanged for backward compatibility, while the
new `txn_broadcast_with_types()` API allows tests to verify transaction types.

Also add a `clear()` helper method and update test files to use it instead
of directly manipulating `txn_broadcasted`, ensuring the two vectors stay
in sync.

Update splice tests to use the new API and verify that splice transactions
are broadcast with the correct `TransactionType`.

Co-Authored-By: HAL 9000
Signed-off-by: Elias Rohrer
---
 lightning/src/ln/chanmon_update_fail_tests.rs | 1 +
 lightning/src/ln/channel_open_tests.rs | 2 +-
 lightning/src/ln/functional_test_utils.rs | 11 ++++----
 lightning/src/ln/monitor_tests.rs | 4 +--
 lightning/src/ln/payment_tests.rs | 6 ++---
 lightning/src/ln/shutdown_tests.rs | 6 ++---
 lightning/src/ln/splicing_tests.rs | 17 ++++++++----
 lightning/src/util/test_utils.rs | 26 +++++++++++++++++--
 8 files changed, 51 insertions(+), 22 deletions(-)

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index ff499d049d4..f602bbe8d02 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -86,6 +86,7 @@ fn test_monitor_and_persister_update_fail() {
 	let persister = test_utils::TestPersister::new();
 	let tx_broadcaster = TestBroadcaster {
 		txn_broadcasted: Mutex::new(Vec::new()),
+		txn_types: Mutex::new(Vec::new()),
 		// Because we will connect a block at height 200 below, we need the TestBroadcaster to know
 		// that we are at height 200 so that it doesn't think we're violating the time lock
 		// requirements of transactions broadcasted at that point.
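For reference, a test can drain broadcasts together with their types via the
new accessor and assert on each entry. A sketch following the harness
conventions, assuming `TransactionType` is in scope (the expected variant
depends on the scenario under test; `Funding` is just an example):

	let broadcasts = nodes[0].tx_broadcaster.txn_broadcast_with_types();
	for (tx, tx_type) in &broadcasts {
		assert!(
			matches!(tx_type, TransactionType::Funding { .. }),
			"Expected TransactionType::Funding, got {:?} for {}",
			tx_type,
			tx.compute_txid()
		);
	}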
diff --git a/lightning/src/ln/channel_open_tests.rs b/lightning/src/ln/channel_open_tests.rs index 3a9c266aacd..b7965c4fb66 100644 --- a/lightning/src/ln/channel_open_tests.rs +++ b/lightning/src/ln/channel_open_tests.rs @@ -1699,7 +1699,7 @@ pub fn test_invalid_funding_tx() { assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); - nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[0].tx_broadcaster.clear(); let expected_err = "funding tx had wrong script/value or output index"; confirm_transaction_at(&nodes[1], &tx, 1); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index cea9ea45428..4739af956cb 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -861,6 +861,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { txn_broadcasted: Mutex::new( self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone(), ), + txn_types: Mutex::new(self.tx_broadcaster.txn_types.lock().unwrap().clone()), blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())), }; @@ -1538,7 +1539,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( assert_eq!(node_a.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!(node_a.tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); - node_a.tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + node_a.tx_broadcaster.clear(); // Ensure that funding_transaction_generated is idempotent. assert!(node_a @@ -1641,10 +1642,8 @@ pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( check_added_monitors(&initiator, 1); assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - assert_eq!( - initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0], - tx - ); + assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); + initiator.tx_broadcaster.clear(); as_channel_ready = get_event_msg!(initiator, MessageSendEvent::SendChannelReady, receiver_node_id); @@ -2014,7 +2013,7 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); - nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[a].tx_broadcaster.clear(); let conf_height = core::cmp::max(nodes[a].best_block_info().1 + 1, nodes[b].best_block_info().1 + 1); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 04915affa20..5caff13cf71 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -1208,7 +1208,7 @@ fn test_no_preimage_inbound_htlc_balances() { }, a_received_htlc_balance.clone(), a_sent_htlc_balance.clone()]); mine_transaction(&nodes[0], &as_txn[0]); - nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[0].tx_broadcaster.clear(); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); @@ -1255,7 +1255,7 @@ fn test_no_preimage_inbound_htlc_balances() { bs_pre_spend_claims.retain(|e| if let Balance::ClaimableAwaitingConfirmations { .. 
} = e { false } else { true }); // The next few blocks for B look the same as for A, though for the opposite HTLC - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[1].tx_broadcaster.clear(); connect_blocks(&nodes[1], TEST_FINAL_CLTV - (ANTI_REORG_DELAY - 1)); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash: to_b_failed_payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index e41e60a46a7..c44f3d107ff 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -838,7 +838,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan_id)[0].clone(); if confirm_before_reload { mine_transaction(&nodes[0], &as_commitment_tx); - nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[0].tx_broadcaster.clear(); } // The ChannelMonitor should always be the latest version, as we're required to persist it @@ -893,7 +893,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { &node_b_id)) }, &[node_a_id], 100000); check_added_monitors(&nodes[1], 1); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[1].tx_broadcaster.clear(); }, _ => panic!("Unexpected event"), } @@ -954,7 +954,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { } else { confirm_transaction(&nodes[0], &first_htlc_timeout_tx); } - nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[0].tx_broadcaster.clear(); let conditions = PaymentFailedConditions::new().from_mon_update(); expect_payment_failed_conditions(&nodes[0], payment_hash, false, conditions); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 192bc6399e4..66f8df69dce 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -495,7 +495,7 @@ fn updates_shutdown_wait() { assert!(nodes[0].node.list_channels().is_empty()); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[1].tx_broadcaster.clear(); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); assert!(nodes[1].node.list_channels().is_empty()); @@ -625,7 +625,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { assert!(nodes[0].node.list_channels().is_empty()); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[1].tx_broadcaster.clear(); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[2].node.list_channels().is_empty()); @@ -842,7 +842,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert!(nodes[0].node.list_channels().is_empty()); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[1].tx_broadcaster.clear(); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); assert!(nodes[1].node.list_channels().is_empty()); diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index ef524db6be3..05303cfa0b7 100644 --- a/lightning/src/ln/splicing_tests.rs +++ 
b/lightning/src/ln/splicing_tests.rs
@@ -9,7 +9,7 @@
 #![cfg_attr(not(test), allow(unused_imports))]
 
-use crate::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW;
+use crate::chain::chaininterface::{TransactionType, FEERATE_FLOOR_SATS_PER_KW};
 use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::transaction::OutPoint;
 use crate::chain::ChannelMonitorUpdateStatus;
@@ -333,11 +333,18 @@ pub fn sign_interactive_funding_tx<'a, 'b, 'c, 'd>(
 	check_added_monitors(&acceptor, 1);
 
 	let tx = {
-		let mut initiator_txn = initiator.tx_broadcaster.txn_broadcast();
+		let mut initiator_txn = initiator.tx_broadcaster.txn_broadcast_with_types();
 		assert_eq!(initiator_txn.len(), 1);
-		let acceptor_txn = acceptor.tx_broadcaster.txn_broadcast();
-		assert_eq!(initiator_txn, acceptor_txn,);
-		initiator_txn.remove(0)
+		let acceptor_txn = acceptor.tx_broadcaster.txn_broadcast_with_types();
+		assert_eq!(initiator_txn, acceptor_txn);
+		let (tx, tx_type) = initiator_txn.remove(0);
+		// Verify transaction type is Splice
+		assert!(
+			matches!(tx_type, TransactionType::Splice { .. }),
+			"Expected TransactionType::Splice, got {:?}",
+			tx_type
+		);
+		tx
 	};
 	(tx, splice_locked)
 }
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index f5c73ca4ca3..18d003c7993 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -1126,31 +1126,50 @@ unsafe impl Send for TestStore {}
 
 pub struct TestBroadcaster {
 	pub txn_broadcasted: Mutex<Vec<Transaction>>,
+	pub txn_types: Mutex<Vec<TransactionType>>,
 	pub blocks: Arc<Mutex<Vec<(Block, u32)>>>,
 }
 
 impl TestBroadcaster {
 	pub fn new(network: Network) -> Self {
 		let txn_broadcasted = Mutex::new(Vec::new());
+		let txn_types = Mutex::new(Vec::new());
 		let blocks = Arc::new(Mutex::new(vec![(genesis_block(network), 0)]));
-		Self { txn_broadcasted, blocks }
+		Self { txn_broadcasted, txn_types, blocks }
 	}
 
 	pub fn with_blocks(blocks: Arc<Mutex<Vec<(Block, u32)>>>) -> Self {
 		let txn_broadcasted = Mutex::new(Vec::new());
-		Self { txn_broadcasted, blocks }
+		let txn_types = Mutex::new(Vec::new());
+		Self { txn_broadcasted, txn_types, blocks }
 	}
 
 	pub fn txn_broadcast(&self) -> Vec<Transaction> {
+		self.txn_types.lock().unwrap().clear();
 		self.txn_broadcasted.lock().unwrap().split_off(0)
 	}
 
 	pub fn unique_txn_broadcast(&self) -> Vec<Transaction> {
 		let mut txn = self.txn_broadcasted.lock().unwrap().split_off(0);
+		self.txn_types.lock().unwrap().clear();
 		let mut seen = new_hash_set();
 		txn.retain(|tx| seen.insert(tx.compute_txid()));
 		txn
 	}
+
+	/// Returns all broadcast transactions with their types, clearing both internal lists.
+	pub fn txn_broadcast_with_types(&self) -> Vec<(Transaction, TransactionType)> {
+		let txn = self.txn_broadcasted.lock().unwrap().split_off(0);
+		let types = self.txn_types.lock().unwrap().split_off(0);
+		assert_eq!(txn.len(), types.len(), "Transaction and type vectors out of sync");
+		txn.into_iter().zip(types.into_iter()).collect()
+	}
+
+	/// Clears both the transaction and type vectors.
+	pub fn clear(&self) {
+		self.txn_broadcasted.lock().unwrap().clear();
+		self.txn_types.lock().unwrap().clear();
+	}
 }
 
 impl chaininterface::BroadcasterInterface for TestBroadcaster {
@@ -1198,7 +1217,10 @@ impl chaininterface::BroadcasterInterface for TestBroadcaster {
 			}
 		}
 		let owned_txs: Vec<Transaction> = txs.iter().map(|(tx, _)| (*tx).clone()).collect();
+		let owned_types: Vec<TransactionType> =
+			txs.iter().map(|(_, tx_type)| tx_type.clone()).collect();
 		self.txn_broadcasted.lock().unwrap().extend(owned_txs);
+		self.txn_types.lock().unwrap().extend(owned_types);
 	}
 }

From 0715f4aa58733b29fa062674eda240f1a1fe73da Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Mon, 2 Feb 2026 13:14:33 +0100
Subject: [PATCH 170/242] Drop unused imports

Co-Authored-By: HAL 9000
---
 lightning/src/chain/mod.rs  | 1 -
 lightning/src/util/sweep.rs | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs
index e8baa7aad1f..bc47f1b1db6 100644
--- a/lightning/src/chain/mod.rs
+++ b/lightning/src/chain/mod.rs
@@ -20,7 +20,6 @@ use bitcoin::secp256k1::PublicKey;
 
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
 use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::impl_writeable_tlv_based;
 use crate::ln::types::ChannelId;
 use crate::sign::ecdsa::EcdsaChannelSigner;
 use crate::sign::HTLCDescriptor;
diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs
index f0c2d6d2d15..6815b395d91 100644
--- a/lightning/src/util/sweep.rs
+++ b/lightning/src/util/sweep.rs
@@ -28,7 +28,7 @@ use crate::util::persist::{
 	OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE,
 };
 use crate::util::ser::{Readable, ReadableArgs, Writeable};
-use crate::{impl_writeable_tlv_based, log_debug, log_error};
+use crate::{log_debug, log_error};
 use bitcoin::block::Header;
 use bitcoin::locktime::absolute::LockTime;

From d2eeb0a993ec2a7f4a8919a66071e47d6dc608bf Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Mon, 2 Feb 2026 11:58:54 +0000
Subject: [PATCH 171/242] Use `--quiet` not `--verbose` on `cargo` calls in
 `ci-tests.sh`

I'm pretty confident I've never needed the full command strings printed by
the `--verbose` argument to `cargo` in `ci-tests.sh` and it makes the
output nontrivially larger. Further, I'm not sure there's really much
reason at all to output which thing is being built - we only care about
failures, thus we switch to `--quiet`. This should make CI output
comparatively easy to parse, as scrolling up to see which command failed
requires a lot less scrolling.
---
 ci/ci-tests.sh | 96 +++++++++++++++++++++++++-------------------------
 1 file changed, 48 insertions(+), 48 deletions(-)

diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh
index 488c5ac4826..820935f9100 100755
--- a/ci/ci-tests.sh
+++ b/ci/ci-tests.sh
@@ -14,136 +14,136 @@ function PIN_RELEASE_DEPS {
 PIN_RELEASE_DEPS # pin the release dependencies in our main workspace
 
 # The backtrace v0.3.75 crate relies on rustc 1.82
-[ "$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p backtrace --precise "0.3.74" --verbose
+[ "$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p backtrace --precise "0.3.74" --quiet
 
 # proptest 1.9.0 requires rustc 1.82.0
-[ "$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p proptest --precise "1.8.0" --verbose
+[ "$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p proptest --precise "1.8.0" --quiet
 
 # Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0.
-[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --verbose +[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --quiet export RUST_BACKTRACE=1 echo -e "\n\nChecking the workspace, except lightning-transaction-sync." -cargo check --verbose --color always +cargo check --quiet --color always WORKSPACE_MEMBERS=( $(cat Cargo.toml | tr '\n' '\r' | sed 's/\r //g' | tr '\r' '\n' | grep '^members =' | sed 's/members.*=.*\[//' | tr -d '"' | tr ',' ' ') ) echo -e "\n\nTesting the workspace, except lightning-transaction-sync." -cargo test --verbose --color always +cargo test --quiet --color always echo -e "\n\nTesting upgrade from prior versions of LDK" pushd lightning-tests -cargo test +cargo test --quiet popd echo -e "\n\nChecking and building docs for all workspace members individually..." for DIR in "${WORKSPACE_MEMBERS[@]}"; do - cargo check -p "$DIR" --verbose --color always - cargo doc -p "$DIR" --document-private-items + cargo check -p "$DIR" --quiet --color always + cargo doc -p "$DIR" --quiet --document-private-items done echo -e "\n\nChecking and testing lightning with features" -cargo test -p lightning --verbose --color always --features dnssec -cargo check -p lightning --verbose --color always --features dnssec -cargo doc -p lightning --document-private-items --features dnssec +cargo test -p lightning --quiet --color always --features dnssec +cargo check -p lightning --quiet --color always --features dnssec +cargo doc -p lightning --quiet --document-private-items --features dnssec echo -e "\n\nChecking and testing Block Sync Clients with features" -cargo test -p lightning-block-sync --verbose --color always --features rest-client -cargo check -p lightning-block-sync --verbose --color always --features rest-client -cargo test -p lightning-block-sync --verbose --color always --features rpc-client -cargo check -p lightning-block-sync --verbose --color always --features rpc-client -cargo test -p lightning-block-sync --verbose --color always --features rpc-client,rest-client -cargo check -p lightning-block-sync --verbose --color always --features rpc-client,rest-client -cargo test -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio -cargo check -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio +cargo test -p lightning-block-sync --quiet --color always --features rest-client +cargo check -p lightning-block-sync --quiet --color always --features rest-client +cargo test -p lightning-block-sync --quiet --color always --features rpc-client +cargo check -p lightning-block-sync --quiet --color always --features rpc-client +cargo test -p lightning-block-sync --quiet --color always --features rpc-client,rest-client +cargo check -p lightning-block-sync --quiet --color always --features rpc-client,rest-client +cargo test -p lightning-block-sync --quiet --color always --features rpc-client,rest-client,tokio +cargo check -p lightning-block-sync --quiet --color always --features rpc-client,rest-client,tokio echo -e "\n\nChecking Transaction Sync Clients with features." 
-cargo check -p lightning-transaction-sync --verbose --color always --features esplora-blocking -cargo check -p lightning-transaction-sync --verbose --color always --features esplora-async -cargo check -p lightning-transaction-sync --verbose --color always --features esplora-async-https -cargo check -p lightning-transaction-sync --verbose --color always --features electrum +cargo check -p lightning-transaction-sync --quiet --color always --features esplora-blocking +cargo check -p lightning-transaction-sync --quiet --color always --features esplora-async +cargo check -p lightning-transaction-sync --quiet --color always --features esplora-async-https +cargo check -p lightning-transaction-sync --quiet --color always --features electrum if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." cargo check -p lightning-transaction-sync --tests else echo -e "\n\nTesting Transaction Sync Clients with features." - cargo test -p lightning-transaction-sync --verbose --color always --features esplora-blocking - cargo test -p lightning-transaction-sync --verbose --color always --features esplora-async - cargo test -p lightning-transaction-sync --verbose --color always --features esplora-async-https - cargo test -p lightning-transaction-sync --verbose --color always --features electrum + cargo test -p lightning-transaction-sync --quiet --color always --features esplora-blocking + cargo test -p lightning-transaction-sync --quiet --color always --features esplora-async + cargo test -p lightning-transaction-sync --quiet --color always --features esplora-async-https + cargo test -p lightning-transaction-sync --quiet --color always --features electrum fi echo -e "\n\nChecking and testing lightning-persister with features" -cargo test -p lightning-persister --verbose --color always --features tokio -cargo check -p lightning-persister --verbose --color always --features tokio -cargo doc -p lightning-persister --document-private-items --features tokio +cargo test -p lightning-persister --quiet --color always --features tokio +cargo check -p lightning-persister --quiet --color always --features tokio +cargo doc -p lightning-persister --quiet --document-private-items --features tokio echo -e "\n\nTest Custom Message Macros" -cargo test -p lightning-custom-message --verbose --color always +cargo test -p lightning-custom-message --quiet --color always [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean echo -e "\n\nTest backtrace-debug builds" -cargo test -p lightning --verbose --color always --features backtrace +cargo test -p lightning --quiet --color always --features backtrace echo -e "\n\nTesting no_std builds" for DIR in lightning-invoice lightning-rapid-gossip-sync lightning-liquidity; do - cargo test -p $DIR --verbose --color always --no-default-features + cargo test -p $DIR --quiet --color always --no-default-features done -cargo test -p lightning --verbose --color always --no-default-features -cargo test -p lightning-background-processor --verbose --color always --no-default-features +cargo test -p lightning --quiet --color always --no-default-features +cargo test -p lightning-background-processor --quiet --color always --no-default-features echo -e "\n\nTesting c_bindings builds" # Note that because `$RUSTFLAGS` is not passed through to doctest builds we cannot selectively # disable doctests in `c_bindings` so we skip doctests entirely here. 
-RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test --verbose --color always --lib --bins --tests +RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test --quiet --color always --lib --bins --tests for DIR in lightning-invoice lightning-rapid-gossip-sync; do # check if there is a conflict between no_std and the c_bindings cfg - RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p $DIR --verbose --color always --no-default-features + RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p $DIR --quiet --color always --no-default-features done # Note that because `$RUSTFLAGS` is not passed through to doctest builds we cannot selectively # disable doctests in `c_bindings` so we skip doctests entirely here. -RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p lightning-background-processor --verbose --color always --no-default-features --lib --bins --tests -RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p lightning --verbose --color always --no-default-features --lib --bins --tests +RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p lightning-background-processor --quiet --color always --no-default-features --lib --bins --tests +RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p lightning --quiet --color always --no-default-features --lib --bins --tests echo -e "\n\nTesting other crate-specific builds" # Note that outbound_commitment_test only runs in this mode because of hardcoded signature values -RUSTFLAGS="$RUSTFLAGS --cfg=ldk_test_vectors" cargo test -p lightning --verbose --color always --no-default-features --features=std +RUSTFLAGS="$RUSTFLAGS --cfg=ldk_test_vectors" cargo test -p lightning --quiet --color always --no-default-features --features=std # This one only works for lightning-invoice # check that compile with no_std and serde works in lightning-invoice -cargo test -p lightning-invoice --verbose --color always --no-default-features --features serde +cargo test -p lightning-invoice --quiet --color always --no-default-features --features serde echo -e "\n\nTesting no_std build on a downstream no-std crate" # check no-std compatibility across dependencies pushd no-std-check -cargo check --verbose --color always +cargo check --quiet --color always [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean popd # Test that we can build downstream code with only the "release pins". 
pushd msrv-no-dev-deps-check PIN_RELEASE_DEPS -cargo check +cargo check --quiet [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean popd if [ -f "$(which arm-none-eabi-gcc)" ]; then pushd no-std-check - cargo build --target=thumbv7m-none-eabi + cargo build --quiet --target=thumbv7m-none-eabi [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean popd fi echo -e "\n\nTest cfg-flag builds" -RUSTFLAGS="--cfg=taproot" cargo test --verbose --color always -p lightning +RUSTFLAGS="--cfg=taproot" cargo test --quiet --color always -p lightning [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean -RUSTFLAGS="--cfg=simple_close" cargo test --verbose --color always -p lightning +RUSTFLAGS="--cfg=simple_close" cargo test --quiet --color always -p lightning [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean -RUSTFLAGS="--cfg=lsps1_service" cargo test --verbose --color always -p lightning-liquidity +RUSTFLAGS="--cfg=lsps1_service" cargo test --quiet --color always -p lightning-liquidity [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean -RUSTFLAGS="--cfg=peer_storage" cargo test --verbose --color always -p lightning +RUSTFLAGS="--cfg=peer_storage" cargo test --quiet --color always -p lightning From 1f6095b504bf528ff4e7df80793626599cc3045f Mon Sep 17 00:00:00 2001 From: Willem Van Lint Date: Fri, 30 Jan 2026 16:24:15 -0800 Subject: [PATCH 172/242] Add Utxo::new_v1_p2tr --- lightning/src/events/bump_transaction/mod.rs | 45 +++++++++++++++++++- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 1b3496c5eab..ff1bcca4b12 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -34,7 +34,8 @@ use crate::ln::types::ChannelId; use crate::prelude::*; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::{ - ChannelDerivationParameters, HTLCDescriptor, SignerProvider, P2WPKH_WITNESS_WEIGHT, + ChannelDerivationParameters, HTLCDescriptor, SignerProvider, P2TR_KEY_PATH_WITNESS_WEIGHT, + P2WPKH_WITNESS_WEIGHT, }; use crate::sync::Mutex; use crate::util::async_poll::{MaybeSend, MaybeSync}; @@ -43,6 +44,7 @@ use crate::util::logger::Logger; use bitcoin::amount::Amount; use bitcoin::consensus::Encodable; use bitcoin::constants::WITNESS_SCALE_FACTOR; +use bitcoin::key::TweakedPublicKey; use bitcoin::locktime::absolute::LockTime; use bitcoin::policy::MAX_STANDARD_TX_WEIGHT; use bitcoin::secp256k1; @@ -332,6 +334,17 @@ impl Utxo { satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT + P2WPKH_WITNESS_WEIGHT, } } + + /// Returns a `Utxo` with the `satisfaction_weight` estimate for a keypath spend of a SegWit v1 P2TR output. 
+ pub fn new_v1_p2tr( + outpoint: OutPoint, value: Amount, tweaked_public_key: TweakedPublicKey, + ) -> Self { + Self { + outpoint, + output: TxOut { value, script_pubkey: ScriptBuf::new_p2tr_tweaked(tweaked_public_key) }, + satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT + P2TR_KEY_PATH_WITNESS_WEIGHT, + } + } } /// The result of a successful coin selection attempt for a transaction requiring additional UTXOs @@ -1272,7 +1285,9 @@ mod tests { use bitcoin::hashes::Hash; use bitcoin::hex::FromHex; - use bitcoin::{Network, ScriptBuf, Transaction, Txid}; + use bitcoin::{ + Network, ScriptBuf, Transaction, Txid, WitnessProgram, WitnessVersion, XOnlyPublicKey, + }; struct TestCoinSelectionSource { // (commitment + anchor value, commitment + input weight, target feerate, result) @@ -1389,4 +1404,30 @@ mod tests { pending_htlcs: Vec::new(), }); } + + #[test] + fn test_utxo_new_v1_p2tr() { + // Transaction 33e794d097969002ee05d336686fc03c9e15a597c1b9827669460fac98799036 + let p2tr_tx: Transaction = bitcoin::consensus::deserialize(&>::from_hex("01000000000101d1f1c1f8cdf6759167b90f52c9ad358a369f95284e841d7a2536cef31c0549580100000000fdffffff020000000000000000316a2f49206c696b65205363686e6f7272207369677320616e6420492063616e6e6f74206c69652e204062697462756734329e06010000000000225120a37c3903c8d0db6512e2b40b0dffa05e5a3ab73603ce8c9c4b7771e5412328f90140a60c383f71bac0ec919b1d7dbc3eb72dd56e7aa99583615564f9f99b8ae4e837b758773a5b2e4c51348854c8389f008e05029db7f464a5ff2e01d5e6e626174affd30a00").unwrap()).unwrap(); + + let script_pubkey = &p2tr_tx.output[1].script_pubkey; + assert_eq!(script_pubkey.witness_version(), Some(WitnessVersion::V1)); + let witness_bytes = &script_pubkey.as_bytes()[2..]; + let witness_program = WitnessProgram::new(WitnessVersion::V1, witness_bytes).unwrap(); + let tweaked_key = TweakedPublicKey::dangerous_assume_tweaked( + XOnlyPublicKey::from_slice(&witness_program.program().as_bytes()).unwrap(), + ); + + let utxo = Utxo::new_v1_p2tr( + OutPoint { txid: p2tr_tx.compute_txid(), vout: 1 }, + p2tr_tx.output[1].value, + tweaked_key, + ); + assert_eq!(utxo.output, p2tr_tx.output[1]); + assert_eq!( + utxo.satisfaction_weight, + 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64 + + 1 /* witness items */ + 1 /* schnorr sig len */ + 64 /* schnorr sig */ + ); + } } From 4800a473efb97762481a2d558b8fce2cc41cf0f9 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 2 Feb 2026 15:32:07 +0100 Subject: [PATCH 173/242] Add AChainMonitor trait and use it in background processor Add a new `AChainMonitor` trait following the same pattern as `AChannelManager`. This trait provides associated types for all generic parameters of `ChainMonitor` and a `get_cm()` method to access the underlying `ChainMonitor`. Update the background processor to use `AChainMonitor` trait bounds instead of spelling out the full `ChainMonitor` generic parameters. This simplifies the function signatures by removing 5-6 explicit generic parameters (CF, T, F, P, ES) per function. This is preparation for adding a flush method to the AChainMonitor trait. 
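As a rough illustration of the ergonomics this buys, a downstream helper
can now be written against the single cover-trait bound (a hypothetical
sketch, not code from this patch):

	use core::ops::Deref;
	use lightning::chain::chainmonitor::AChainMonitor;

	fn rebroadcast_claims<M: Deref>(chain_monitor: M)
	where
		M::Target: AChainMonitor,
	{
		// `get_cm()` hands back the concrete `ChainMonitor`, so its inherent
		// methods (here `rebroadcast_pending_claims`) remain available.
		chain_monitor.get_cm().rebroadcast_pending_claims();
	}

instead of threading the full `ChainMonitor` generic parameter list through
every signature.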
Co-Authored-By: Claude Opus 4.5 --- lightning-background-processor/src/lib.rs | 101 +++++++++++++--------- lightning/src/chain/chainmonitor.rs | 60 +++++++++++++ 2 files changed, 118 insertions(+), 43 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 26e8fc64a42..bb99d65e6b5 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -30,9 +30,11 @@ mod fwd_batch; use fwd_batch::BatchDelay; +#[cfg(not(c_bindings))] use lightning::chain; +#[cfg(not(c_bindings))] use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; -use lightning::chain::chainmonitor::{ChainMonitor, Persist}; +use lightning::chain::chainmonitor::AChainMonitor; #[cfg(feature = "std")] use lightning::events::EventHandler; #[cfg(feature = "std")] @@ -50,9 +52,9 @@ use lightning::onion_message::messenger::AOnionMessenger; use lightning::routing::gossip::{NetworkGraph, P2PGossipSync}; use lightning::routing::scoring::{ScoreUpdate, WriteableScore}; use lightning::routing::utxo::UtxoLookup; -use lightning::sign::{ - ChangeDestinationSource, ChangeDestinationSourceSync, EntropySource, OutputSpender, -}; +#[cfg(not(c_bindings))] +use lightning::sign::EntropySource; +use lightning::sign::{ChangeDestinationSource, ChangeDestinationSourceSync, OutputSpender}; #[cfg(not(c_bindings))] use lightning::util::async_poll::MaybeSend; use lightning::util::logger::Logger; @@ -118,6 +120,7 @@ use alloc::vec::Vec; /// /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager /// [`ChannelManager::timer_tick_occurred`]: lightning::ln::channelmanager::ChannelManager::timer_tick_occurred +/// [`ChainMonitor::rebroadcast_pending_claims`]: lightning::chain::chainmonitor::ChainMonitor::rebroadcast_pending_claims /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor /// [`Event`]: lightning::events::Event /// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred @@ -923,16 +926,11 @@ use futures_util::{dummy_waker, Joiner, OptionalSelector, Selector, SelectorOutp pub async fn process_events_async< 'a, UL: UtxoLookup, - CF: chain::Filter, - T: BroadcasterInterface, - F: FeeEstimator, G: Deref>, L: Logger, - P: Deref, EventHandlerFuture: core::future::Future>, EventHandler: Fn(Event) -> EventHandlerFuture, - ES: EntropySource, - M: Deref::Signer, CF, T, F, L, P, ES>>, + M: Deref, CM: Deref, OM: Deref, PGS: Deref>, @@ -942,7 +940,17 @@ pub async fn process_events_async< D: Deref, O: OutputSpender, K: KVStore, - OS: Deref>, + OS: Deref< + Target = OutputSweeper< + ::Broadcaster, + D, + ::FeeEstimator, + ::Filter, + K, + L, + O, + >, + >, S: Deref, SC: for<'b> WriteableScore<'b>, SleepFuture: core::future::Future + core::marker::Unpin, @@ -955,7 +963,7 @@ pub async fn process_events_async< sleeper: Sleeper, mobile_interruptable_platform: bool, fetch_time: FetchTime, ) -> Result<(), lightning::io::Error> where - P::Target: Persist<::Signer>, + M::Target: AChainMonitor::Signer, Logger = L>, CM::Target: AChannelManager, OM::Target: AOnionMessenger, PM::Target: APeerManager, @@ -1004,7 +1012,7 @@ where log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup"); channel_manager.get_cm().timer_tick_occurred(); log_trace!(logger, "Rebroadcasting monitor's pending claims on startup"); - chain_monitor.rebroadcast_pending_claims(); + chain_monitor.get_cm().rebroadcast_pending_claims(); let mut last_freshness_call = sleeper(FRESHNESS_TIMER); 
let mut last_onion_message_handler_call = sleeper(ONION_MESSAGE_HANDLER_TIMER); @@ -1022,7 +1030,7 @@ where loop { channel_manager.get_cm().process_pending_events_async(async_event_handler).await; - chain_monitor.process_pending_events_async(async_event_handler).await; + chain_monitor.get_cm().process_pending_events_async(async_event_handler).await; if let Some(om) = &onion_messenger { om.get_om().process_pending_events_async(async_event_handler).await } @@ -1072,7 +1080,7 @@ where let fut = Selector { a: sleeper(sleep_delay), b: channel_manager.get_cm().get_event_or_persistence_needed_future(), - c: chain_monitor.get_update_future(), + c: chain_monitor.get_cm().get_update_future(), d: om_fut, e: lm_fut, f: gv_fut, @@ -1164,7 +1172,7 @@ where }; if archive_timer_elapsed { log_trace!(logger, "Archiving stale ChannelMonitors."); - chain_monitor.archive_fully_resolved_channel_monitors(); + chain_monitor.get_cm().archive_fully_resolved_channel_monitors(); have_archived = true; log_trace!(logger, "Archived stale ChannelMonitors."); } @@ -1354,7 +1362,7 @@ where match check_and_reset_sleeper(&mut last_rebroadcast_call, || sleeper(REBROADCAST_TIMER)) { Some(false) => { log_trace!(logger, "Rebroadcasting monitor's pending claims"); - chain_monitor.rebroadcast_pending_claims(); + chain_monitor.get_cm().rebroadcast_pending_claims(); }, Some(true) => break, None => {}, @@ -1416,16 +1424,11 @@ fn check_and_reset_sleeper< /// synchronous background persistence. pub async fn process_events_async_with_kv_store_sync< UL: UtxoLookup, - CF: chain::Filter, - T: BroadcasterInterface, - F: FeeEstimator, G: Deref>, L: Logger, - P: Deref, EventHandlerFuture: core::future::Future>, EventHandler: Fn(Event) -> EventHandlerFuture, - ES: EntropySource, - M: Deref::Signer, CF, T, F, L, P, ES>>, + M: Deref, CM: Deref, OM: Deref, PGS: Deref>, @@ -1435,7 +1438,17 @@ pub async fn process_events_async_with_kv_store_sync< D: Deref, O: OutputSpender, K: Deref, - OS: Deref>, + OS: Deref< + Target = OutputSweeperSync< + ::Broadcaster, + D, + ::FeeEstimator, + ::Filter, + K, + L, + O, + >, + >, S: Deref, SC: for<'b> WriteableScore<'b>, SleepFuture: core::future::Future + core::marker::Unpin, @@ -1448,7 +1461,7 @@ pub async fn process_events_async_with_kv_store_sync< sleeper: Sleeper, mobile_interruptable_platform: bool, fetch_time: FetchTime, ) -> Result<(), lightning::io::Error> where - P::Target: Persist<::Signer>, + M::Target: AChainMonitor::Signer, Logger = L>, CM::Target: AChannelManager, OM::Target: AOnionMessenger, PM::Target: APeerManager, @@ -1523,20 +1536,10 @@ impl BackgroundProcessor { pub fn start< 'a, UL: 'static + UtxoLookup, - CF: 'static + chain::Filter, - T: 'static + BroadcasterInterface, - F: 'static + FeeEstimator + Send, G: 'static + Deref>, L: 'static + Deref + Send, - P: 'static + Deref, EH: 'static + EventHandler + Send, - ES: 'static + EntropySource + Send, - M: 'static - + Deref< - Target = ChainMonitor<::Signer, CF, T, F, L, P, ES>, - > - + Send - + Sync, + M: 'static + Deref + Send + Sync, CM: 'static + Deref + Send, OM: 'static + Deref + Send, PGS: 'static + Deref> + Send, @@ -1548,7 +1551,19 @@ impl BackgroundProcessor { D: 'static + Deref, O: 'static + OutputSpender, K: 'static + Deref + Send, - OS: 'static + Deref> + Send, + OS: 'static + + Deref< + Target = OutputSweeperSync< + ::Broadcaster, + D, + ::FeeEstimator, + ::Filter, + K, + L, + O, + >, + > + + Send, >( kv_store: K, event_handler: EH, chain_monitor: M, channel_manager: CM, onion_messenger: Option, gossip_sync: GossipSync, 
peer_manager: PM, @@ -1556,7 +1571,7 @@ impl BackgroundProcessor { ) -> Self where L::Target: 'static + Logger, - P::Target: 'static + Persist<::Signer>, + M::Target: AChainMonitor::Signer, Logger = L>, CM::Target: AChannelManager, OM::Target: AOnionMessenger, PM::Target: APeerManager, @@ -1596,7 +1611,7 @@ impl BackgroundProcessor { log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup"); channel_manager.get_cm().timer_tick_occurred(); log_trace!(logger, "Rebroadcasting monitor's pending claims on startup"); - chain_monitor.rebroadcast_pending_claims(); + chain_monitor.get_cm().rebroadcast_pending_claims(); let mut last_freshness_call = Instant::now(); let mut last_onion_message_handler_call = Instant::now(); @@ -1615,7 +1630,7 @@ impl BackgroundProcessor { loop { channel_manager.get_cm().process_pending_events(&event_handler); - chain_monitor.process_pending_events(&event_handler); + chain_monitor.get_cm().process_pending_events(&event_handler); if let Some(om) = &onion_messenger { om.get_om().process_pending_events(&event_handler) }; @@ -1648,7 +1663,7 @@ impl BackgroundProcessor { let gv_fut = gossip_sync.validation_completion_future(); let always_futures = [ channel_manager.get_cm().get_event_or_persistence_needed_future(), - chain_monitor.get_update_future(), + chain_monitor.get_cm().get_update_future(), ]; let futures = always_futures.into_iter().chain(om_fut).chain(lm_fut).chain(gv_fut); let sleeper = Sleeper::from_futures(futures); @@ -1701,7 +1716,7 @@ impl BackgroundProcessor { let archive_timer_elapsed = last_archive_call.elapsed() > archive_timer; if archive_timer_elapsed { log_trace!(logger, "Archiving stale ChannelMonitors."); - chain_monitor.archive_fully_resolved_channel_monitors(); + chain_monitor.get_cm().archive_fully_resolved_channel_monitors(); have_archived = true; last_archive_call = Instant::now(); log_trace!(logger, "Archived stale ChannelMonitors."); @@ -1786,7 +1801,7 @@ impl BackgroundProcessor { } if last_rebroadcast_call.elapsed() > REBROADCAST_TIMER { log_trace!(logger, "Rebroadcasting monitor's pending claims"); - chain_monitor.rebroadcast_pending_claims(); + chain_monitor.get_cm().rebroadcast_pending_claims(); last_rebroadcast_call = Instant::now(); } } diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 2db34738737..17693f8ca7a 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -1488,6 +1488,66 @@ where } } +/// A trivial trait which describes any [`ChainMonitor`]. +/// +/// This is not exported to bindings users as general cover traits aren't useful in other +/// languages. +pub trait AChainMonitor { + /// A type implementing [`EcdsaChannelSigner`]. + type Signer: EcdsaChannelSigner + Sized; + /// A type implementing [`chain::Filter`]. + type Filter: chain::Filter; + /// A type implementing [`BroadcasterInterface`]. + type Broadcaster: BroadcasterInterface; + /// A type implementing [`FeeEstimator`]. + type FeeEstimator: FeeEstimator; + /// A type implementing [`Logger`]. + type Logger: Logger; + /// A type that derefs to [`Persist`]. + type Persister: Deref; + /// The target of [`Self::Persister`]. + type PersisterTarget: Persist + ?Sized; + /// A type implementing [`EntropySource`]. + type EntropySource: EntropySource; + /// Returns a reference to the actual [`ChainMonitor`] object. 
+	fn get_cm(
+		&self,
+	) -> &ChainMonitor<
+		Self::Signer,
+		Self::Filter,
+		Self::Broadcaster,
+		Self::FeeEstimator,
+		Self::Logger,
+		Self::Persister,
+		Self::EntropySource,
+	>;
+}
+
+impl<
+		ChannelSigner: EcdsaChannelSigner,
+		C: chain::Filter,
+		T: BroadcasterInterface,
+		F: FeeEstimator,
+		L: Logger,
+		P: Deref,
+		ES: EntropySource,
+	> AChainMonitor for ChainMonitor<ChannelSigner, C, T, F, L, P, ES>
+where
+	P::Target: Persist<ChannelSigner>,
+{
+	type Signer = ChannelSigner;
+	type Filter = C;
+	type Broadcaster = T;
+	type FeeEstimator = F;
+	type Logger = L;
+	type Persister = P;
+	type PersisterTarget = P::Target;
+	type EntropySource = ES;
+	fn get_cm(&self) -> &ChainMonitor<ChannelSigner, C, T, F, L, P, ES> {
+		self
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use crate::chain::channelmonitor::ANTI_REORG_DELAY;

From 784b85c1cf4202b5e5c8c7452c797c6472c62ba2 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 3 Feb 2026 14:51:13 +0000
Subject: [PATCH 174/242] Correct crate version numbers that have broken semver

The semver CI check is great but only checks the immediate crate in
question. It doesn't catch that many of our crates depend on `lightning`
and thus have actually broken semver as the types they use have changed to
`lightning` 0.3.

Here we bump the version of crates that have actually changed semver since
0.2. In addition to those that depend on `lightning`,
`lightning-invoice`'s API has changed (but was not being checked by the
semver CI task). Finally, `lightning-macros` was updated to 0.2.1, so the
version is changed to 0.2.2.
---
 lightning-background-processor/Cargo.toml | 8 ++++----
 lightning-custom-message/Cargo.toml       | 2 +-
 lightning-invoice/Cargo.toml              | 2 +-
 lightning-liquidity/Cargo.toml            | 6 +++---
 lightning-macros/Cargo.toml               | 2 +-
 lightning-net-tokio/Cargo.toml            | 2 +-
 lightning-persister/Cargo.toml            | 2 +-
 lightning-rapid-gossip-sync/Cargo.toml    | 2 +-
 lightning-transaction-sync/Cargo.toml     | 2 +-
 lightning/Cargo.toml                      | 2 +-
 10 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml
index 7fe68bc1933..e958919f4d8 100644
--- a/lightning-background-processor/Cargo.toml
+++ b/lightning-background-processor/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "lightning-background-processor"
-version = "0.2.0+git"
+version = "0.3.0+git"
 authors = ["Valentine Wallace "]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
@@ -25,16 +25,16 @@
 bitcoin = { version = "0.32.2", default-features = false }
 bitcoin_hashes = { version = "0.14.0", default-features = false }
 bitcoin-io = { version = "0.1.2", default-features = false }
 lightning = { version = "0.3.0", path = "../lightning", default-features = false }
-lightning-rapid-gossip-sync = { version = "0.2.0", path = "../lightning-rapid-gossip-sync", default-features = false }
+lightning-rapid-gossip-sync = { version = "0.3.0", path = "../lightning-rapid-gossip-sync", default-features = false }
 lightning-liquidity = { version = "0.3.0", path = "../lightning-liquidity", default-features = false }
 possiblyrandom = { version = "0.2", path = "../possiblyrandom", default-features = false }
 
 [dev-dependencies]
 tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] }
 lightning = { version = "0.3.0", path = "../lightning", features = ["_test_utils"] }
-lightning-invoice = { version = "0.34.0", path = "../lightning-invoice" }
+lightning-invoice = { version = "0.35.0", path = "../lightning-invoice" }
 lightning-liquidity = { version = "0.3.0", path = 
"../lightning-liquidity", default-features = false, features = ["_test_utils"] } -lightning-persister = { version = "0.2.0", path = "../lightning-persister" } +lightning-persister = { version = "0.3.0", path = "../lightning-persister" } [lints] workspace = true diff --git a/lightning-custom-message/Cargo.toml b/lightning-custom-message/Cargo.toml index ba13aef35c4..854127f9175 100644 --- a/lightning-custom-message/Cargo.toml +++ b/lightning-custom-message/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-custom-message" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Jeffrey Czyz"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml index f92d8b999df..deee8ff330a 100644 --- a/lightning-invoice/Cargo.toml +++ b/lightning-invoice/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lightning-invoice" description = "Data structures to parse and serialize BOLT11 lightning invoices" -version = "0.34.0+git" +version = "0.35.0+git" authors = ["Sebastian Geisler "] documentation = "https://docs.rs/lightning-invoice/" license = "MIT OR Apache-2.0" diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml index 67e82a5fbf8..d83d66f7570 100644 --- a/lightning-liquidity/Cargo.toml +++ b/lightning-liquidity/Cargo.toml @@ -24,7 +24,7 @@ _test_utils = [] [dependencies] lightning = { version = "0.3.0", path = "../lightning", default-features = false } lightning-types = { version = "0.3.0", path = "../lightning-types", default-features = false } -lightning-invoice = { version = "0.34.0", path = "../lightning-invoice", default-features = false, features = ["serde"] } +lightning-invoice = { version = "0.35.0", path = "../lightning-invoice", default-features = false, features = ["serde"] } lightning-macros = { version = "0.2", path = "../lightning-macros" } bitcoin = { version = "0.32.2", default-features = false, features = ["serde"] } @@ -36,8 +36,8 @@ backtrace = { version = "0.3", optional = true } [dev-dependencies] lightning = { version = "0.3.0", path = "../lightning", default-features = false, features = ["_test_utils"] } -lightning-invoice = { version = "0.34.0", path = "../lightning-invoice", default-features = false, features = ["serde", "std"] } -lightning-persister = { version = "0.2.0", path = "../lightning-persister", default-features = false } +lightning-invoice = { version = "0.35.0", path = "../lightning-invoice", default-features = false, features = ["serde", "std"] } +lightning-persister = { version = "0.3.0", path = "../lightning-persister", default-features = false } proptest = "1.0.0" tokio = { version = "1.35", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } diff --git a/lightning-macros/Cargo.toml b/lightning-macros/Cargo.toml index 8a20670bad4..822b50816df 100644 --- a/lightning-macros/Cargo.toml +++ b/lightning-macros/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-macros" -version = "0.2.0+git" +version = "0.2.2+git" authors = ["Elias Rohrer"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" diff --git a/lightning-net-tokio/Cargo.toml b/lightning-net-tokio/Cargo.toml index af4845b7397..79b227f44dc 100644 --- a/lightning-net-tokio/Cargo.toml +++ b/lightning-net-tokio/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-net-tokio" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = 
"https://github.com/lightningdevkit/rust-lightning/" diff --git a/lightning-persister/Cargo.toml b/lightning-persister/Cargo.toml index e06803c6b89..19c5ac2545e 100644 --- a/lightning-persister/Cargo.toml +++ b/lightning-persister/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-persister" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Valentine Wallace", "Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" diff --git a/lightning-rapid-gossip-sync/Cargo.toml b/lightning-rapid-gossip-sync/Cargo.toml index b2cc623ab5b..b623a5aed13 100644 --- a/lightning-rapid-gossip-sync/Cargo.toml +++ b/lightning-rapid-gossip-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-rapid-gossip-sync" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Arik Sosman "] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" diff --git a/lightning-transaction-sync/Cargo.toml b/lightning-transaction-sync/Cargo.toml index 1a5a56212ba..4bc37d7ff48 100644 --- a/lightning-transaction-sync/Cargo.toml +++ b/lightning-transaction-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-transaction-sync" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Elias Rohrer"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index b3b597029da..dbcf9f1bed2 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -35,7 +35,7 @@ default = ["std", "grind_signatures"] [dependencies] lightning-types = { version = "0.3.0", path = "../lightning-types", default-features = false } -lightning-invoice = { version = "0.34.0", path = "../lightning-invoice", default-features = false } +lightning-invoice = { version = "0.35.0", path = "../lightning-invoice", default-features = false } lightning-macros = { version = "0.2", path = "../lightning-macros" } bech32 = { version = "0.11.0", default-features = false } From a123cfa0d4a2c83b329bfea554b8aa920a2e7929 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 3 Feb 2026 15:00:50 +0000 Subject: [PATCH 175/242] Check semver of all workspace crates rather than an explicit list --- .github/workflows/semver.yml | 47 ++++++++---------------------------- 1 file changed, 10 insertions(+), 37 deletions(-) diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml index 03a8e46e8a7..de10e562f98 100644 --- a/.github/workflows/semver.yml +++ b/.github/workflows/semver.yml @@ -13,40 +13,13 @@ jobs: steps: - name: Checkout source code uses: actions/checkout@v4 - - name: Check SemVer with default features - uses: obi1kenobi/cargo-semver-checks-action@v2 - with: - feature-group: default-features - - name: Check SemVer *without* default features - uses: obi1kenobi/cargo-semver-checks-action@v2 - with: - feature-group: only-explicit-features - - name: Check lightning-background-processor SemVer - uses: obi1kenobi/cargo-semver-checks-action@v2 - with: - package: lightning-background-processor - feature-group: only-explicit-features - - name: Check lightning-block-sync SemVer - uses: obi1kenobi/cargo-semver-checks-action@v2 - with: - package: lightning-block-sync - feature-group: only-explicit-features - features: rpc-client,rest-client - - name: Check lightning-transaction-sync electrum SemVer - uses: obi1kenobi/cargo-semver-checks-action@v2 - with: - manifest-path: lightning-transaction-sync/Cargo.toml - feature-group: only-explicit-features - features: 
electrum
-      - name: Check lightning-transaction-sync esplora-blocking SemVer
-        uses: obi1kenobi/cargo-semver-checks-action@v2
-        with:
-          manifest-path: lightning-transaction-sync/Cargo.toml
-          feature-group: only-explicit-features
-          features: esplora-blocking
-      - name: Check lightning-transaction-sync esplora-async SemVer
-        uses: obi1kenobi/cargo-semver-checks-action@v2
-        with:
-          manifest-path: lightning-transaction-sync/Cargo.toml
-          feature-group: only-explicit-features
-          features: esplora-async
+      - name: Install Rust stable toolchain
+        run: |
+          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable
+          rustup override set stable
+      - name: Install SemVer Checker
+        run: cargo install cargo-semver-checks --locked
+      - name: Check SemVer with all features
+        run: cargo semver-checks
+      - name: Check SemVer without any non-default features
+        run: cargo semver-checks --only-explicit-features

From 2d948fdd33bd3f509fae90f588b27c040a15d7aa Mon Sep 17 00:00:00 2001
From: Jeffrey Czyz
Date: Wed, 4 Feb 2026 09:52:19 -0600
Subject: [PATCH 176/242] Use SignedAmount::unsigned_abs to avoid overflow

In debug mode, using SignedAmount::abs can lead to an integer overflow
when used with SignedAmount::MIN. Use SignedAmount::unsigned_abs to avoid
this.
---
 lightning/src/ln/channel.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 042b388e1a1..a50365c1148 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -2664,8 +2664,8 @@ impl FundingScope {
 		their_funding_contribution: SignedAmount, counterparty_funding_pubkey: PublicKey,
 		our_new_holder_keys: ChannelPublicKeys,
 	) -> Self {
-		debug_assert!(our_funding_contribution.abs() <= SignedAmount::MAX_MONEY);
-		debug_assert!(their_funding_contribution.abs() <= SignedAmount::MAX_MONEY);
+		debug_assert!(our_funding_contribution.unsigned_abs() <= Amount::MAX_MONEY);
+		debug_assert!(their_funding_contribution.unsigned_abs() <= Amount::MAX_MONEY);
 
 		let post_channel_value = prev_funding.compute_post_splice_value(
 			our_funding_contribution.to_sat(),
@@ -12155,7 +12155,7 @@ where
 	fn validate_splice_contributions(
 		&self, our_funding_contribution: SignedAmount, their_funding_contribution: SignedAmount,
 	) -> Result<(), String> {
-		if our_funding_contribution.abs() > SignedAmount::MAX_MONEY {
+		if our_funding_contribution.unsigned_abs() > Amount::MAX_MONEY {
 			return Err(format!(
 				"Channel {} cannot be spliced; our {} contribution exceeds the total bitcoin supply",
 				self.context.channel_id(),
 			));
 		}
 
-		if their_funding_contribution.abs() > SignedAmount::MAX_MONEY {
+		if their_funding_contribution.unsigned_abs() > Amount::MAX_MONEY {
 			return Err(format!(
 				"Channel {} cannot be spliced; their {} contribution exceeds the total bitcoin supply",
 				self.context.channel_id(),

From 05c9a036313ee34847a44c713b77d76fddc775be Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Sun, 1 Feb 2026 01:30:13 +0000
Subject: [PATCH 177/242] Add a `custom` TLV read/write variant

At various points we've been stuck in our TLV read/write variants but just
want to break out and write some damn code to initialize a field and some
more code to decide what to write for a TLV.

We added the write-side part of this with the `legacy` TLV read/write
variant, but it's useful to also be able to specify a function which is
called on the read side.
Here we add a `custom` TLV read/write variant which calls a method both on read and write to either decide what to write or to map a read value (if any) to the final field. --- lightning-macros/src/lib.rs | 6 ++--- lightning/src/util/ser_macros.rs | 43 ++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/lightning-macros/src/lib.rs b/lightning-macros/src/lib.rs index e784acf72fb..778da45ee8f 100644 --- a/lightning-macros/src/lib.rs +++ b/lightning-macros/src/lib.rs @@ -138,7 +138,7 @@ fn process_fields(group: Group) -> proc_macro::TokenStream { if let TokenTree::Group(group) = ty_info { let first_group_tok = group.stream().into_iter().next().unwrap(); if let TokenTree::Ident(ident) = first_group_tok { - if ident.to_string() == "legacy" { + if ident.to_string() == "legacy" || ident.to_string() == "custom" { continue; } } @@ -155,13 +155,13 @@ fn process_fields(group: Group) -> proc_macro::TokenStream { computed_fields } -/// Scans a match statement for legacy fields which should be skipped. +/// Scans a match statement for legacy or custom fields which should be skipped. /// /// This is used internally in LDK's TLV serialization logic and is not expected to be used by /// other crates. /// /// Wraps a `match self {..}` statement and scans the fields in the match patterns (in the form -/// `ref $field_name: $field_ty`) for types marked `legacy`, skipping those fields. +/// `ref $field_name: $field_ty`) for types marked `legacy` or `custom`, skipping those fields. /// /// Specifically, it expects input like the following, simply dropping `field3` and the /// `: $field_ty` after each field name. diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index 86b24e1b849..bd2b5d1983a 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -63,6 +63,9 @@ macro_rules! _encode_tlv { } $crate::_encode_tlv!($stream, $optional_type, value, option); } }; + ($stream: expr, $optional_type: expr, $optional_field: expr, (custom, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) => { { + $crate::_encode_tlv!($stream, $optional_type, $optional_field, (legacy, $fieldty, $write) $(, $self)?); + } }; ($stream: expr, $type: expr, $field: expr, optional_vec $(, $self: ident)?) => { if !$field.is_empty() { $crate::_encode_tlv!($stream, $type, $field, required_vec); @@ -232,6 +235,9 @@ macro_rules! _get_varint_length_prefixed_tlv_length { ($len: expr, $optional_type: expr, $optional_field: expr, (legacy, $fieldty: ty, $write: expr) $(, $self: ident)?) => { $crate::_get_varint_length_prefixed_tlv_length!($len, $optional_type, $write($($self)?), option); }; + ($len: expr, $optional_type: expr, $optional_field: expr, (custom, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) => { + $crate::_get_varint_length_prefixed_tlv_length!($len, $optional_type, $optional_field, (legacy, $fieldty, $write) $(, $self)?); + }; ($len: expr, $type: expr, $field: expr, optional_vec $(, $self: ident)?) => { if !$field.is_empty() { $crate::_get_varint_length_prefixed_tlv_length!($len, $type, $field, required_vec); @@ -317,6 +323,16 @@ macro_rules! _check_decoded_tlv_order { ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (legacy, $fieldty: ty, $write: expr)) => {{ // no-op }}; + ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (custom, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) 
=> {{ + // Note that $type may be 0 making the second comparison always false + #[allow(unused_comparisons)] + let invalid_order = + ($last_seen_type.is_none() || $last_seen_type.unwrap() < $type) && $typ.0 > $type; + if invalid_order { + let read_result: Result<_, DecodeError> = $read(None); + $field = read_result?.into(); + } + }}; ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (required, explicit_type: $fieldty: ty)) => {{ _check_decoded_tlv_order!($last_seen_type, $typ, $type, $field, required); }}; @@ -385,6 +401,15 @@ macro_rules! _check_missing_tlv { ($last_seen_type: expr, $type: expr, $field: ident, (legacy, $fieldty: ty, $write: expr)) => {{ // no-op }}; + ($last_seen_type: expr, $type: expr, $field: ident, (custom, $fieldty: ty, $read: expr, $write: expr)) => {{ + // Note that $type may be 0 making the second comparison always false + #[allow(unused_comparisons)] + let missing_req_type = $last_seen_type.is_none() || $last_seen_type.unwrap() < $type; + if missing_req_type { + let read_result: Result<_, DecodeError> = $read(None); + $field = read_result?.into(); + } + }}; ($last_seen_type: expr, $type: expr, $field: ident, (required, explicit_type: $fieldty: ty)) => {{ _check_missing_tlv!($last_seen_type, $type, $field, required); }}; @@ -441,6 +466,12 @@ macro_rules! _decode_tlv { ($outer_reader: expr, $reader: expr, $field: ident, (legacy, $fieldty: ty, $write: expr)) => {{ $crate::_decode_tlv!($outer_reader, $reader, $field, (option, explicit_type: $fieldty)); }}; + ($outer_reader: expr, $reader: expr, $field: ident, (custom, $fieldty: ty, $read: expr, $write: expr)) => {{ + let read_field: $fieldty; + $crate::_decode_tlv!($outer_reader, $reader, read_field, required); + let read_result: Result<_, DecodeError> = $read(Some(read_field)); + $field = read_result?.into(); + }}; ($outer_reader: expr, $reader: expr, $field: ident, (required, explicit_type: $fieldty: ty)) => {{ let _field: &$fieldty = &$field; _decode_tlv!($outer_reader, $reader, $field, required); @@ -830,6 +861,9 @@ macro_rules! _init_tlv_based_struct_field { ($field: ident, (legacy, $fieldty: ty, $write: expr)) => { $crate::_init_tlv_based_struct_field!($field, option) }; + ($field: ident, (custom, $fieldty: ty, $read: expr, $write: expr)) => { + $crate::_init_tlv_based_struct_field!($field, required) + }; ($field: ident, (option: $trait: ident $(, $read_arg: expr)?)) => { $crate::_init_tlv_based_struct_field!($field, option) }; @@ -896,6 +930,9 @@ macro_rules! _init_tlv_field_var { ($field: ident, (legacy, $fieldty: ty, $write: expr)) => { $crate::_init_tlv_field_var!($field, (option, explicit_type: $fieldty)); }; + ($field: ident, (custom, $fieldty: ty, $read: expr, $write: expr)) => { + $crate::_init_tlv_field_var!($field, required); + }; ($field: ident, (required, explicit_type: $fieldty: ty)) => { let mut $field = $crate::util::ser::RequiredWrapper::<$fieldty>(None); }; @@ -979,6 +1016,12 @@ macro_rules! _decode_and_build { /// called with the object being serialized and a returned `Option` and is written as a TLV if /// `Some`. When reading, an optional field of type `$ty` is read (which can be used in later /// `default_value` or `static_value` fields by referring to the value by name). +/// If `$fieldty` is `(custom, $ty, $read, $write)` then, when writing, the same behavior as +/// `legacy`, above is used. When reading, if a TLV is present, it is read as `$ty` and the +/// `$read` method is called with `Some(decoded_$ty_object)`. 
If no TLV is present, the field +/// will be initialized by calling `$read(None)`. `$read` should return a +/// `Result` (note that the processed field type may differ from `$ty`; +/// `$ty` is the type as de/serialized, not necessarily the actual field type). /// /// For example, /// ``` From 77be67b0e82932758ce28d59ad1c47c04a8703fc Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 3 Feb 2026 13:28:29 +0100 Subject: [PATCH 178/242] Group legacy fields in ChannelManagerData to deduplicate comment Reorder the struct fields to place all three `_legacy` fields together at the end, allowing the explanatory comment to appear once instead of being duplicated three times. Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bbede9589db..970ca05e9e9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17224,18 +17224,10 @@ pub(super) struct ChannelManagerData { best_block_height: u32, best_block_hash: BlockHash, channels: Vec>, - // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of - // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from - // `Channel{Monitor}` data. See [`ChannelManager::read`]. - forward_htlcs_legacy: HashMap>, claimable_htlcs_list: Vec<(PaymentHash, Vec)>, peer_init_features: Vec<(PublicKey, InitFeatures)>, pending_events_read: VecDeque<(events::Event, Option)>, highest_seen_timestamp: u32, - // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of - // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from - // `Channel{Monitor}` data. See [`ChannelManager::read`]. - pending_intercepted_htlcs_legacy: HashMap, pending_outbound_payments: HashMap, pending_claiming_payments: HashMap, received_network_pubkey: Option, @@ -17245,14 +17237,16 @@ pub(super) struct ChannelManagerData { claimable_htlc_purposes: Option>, probing_cookie_secret: Option<[u8; 32]>, claimable_htlc_onion_fields: Option>>, - // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of - // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from - // `Channel{Monitor}` data. See [`ChannelManager::read`]. - decode_update_add_htlcs_legacy: HashMap>, inbound_payment_id_secret: Option<[u8; 32]>, in_flight_monitor_updates: Option>>, peer_storage_dir: Option)>>, async_receive_offer_cache: AsyncReceiveOfferCache, + // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of + // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from + // `Channel{Monitor}` data. + forward_htlcs_legacy: HashMap>, + pending_intercepted_htlcs_legacy: HashMap, + decode_update_add_htlcs_legacy: HashMap>, } /// Arguments for deserializing [`ChannelManagerData`]. From 4a640d153a58c6d97b8b1ec17a81d599c326d5c1 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 3 Feb 2026 13:30:26 +0100 Subject: [PATCH 179/242] Use consistent unwrap_or_else pattern for optional TLV fields Initialize pending_claiming_payments and monitor_update_blocked_actions_per_peer with None and resolve with unwrap_or_else, matching the pattern used for other optional hash map fields like pending_intercepted_htlcs_legacy. 
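In isolation the pattern reads as follows (a minimal sketch; `new_hash_map`
is LDK's constructor for an empty map):

	// Start from `None` rather than eagerly allocating a default...
	let mut pending_claiming_payments = None;
	// ...`read_tlv_fields!` fills this in with `Some(map)` if the TLV is
	// present; `unwrap_or_else` supplies the empty default only when absent.
	let pending_claiming_payments = pending_claiming_payments.unwrap_or_else(new_hash_map);

Using `unwrap_or_else` rather than `unwrap_or` keeps the default lazy and,
more importantly here, keeps all the optional map fields on one idiom.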
Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 970ca05e9e9..2df6576fa26 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17398,9 +17398,9 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> let mut probing_cookie_secret: Option<[u8; 32]> = None; let mut claimable_htlc_purposes = None; let mut claimable_htlc_onion_fields = None; - let mut pending_claiming_payments = Some(new_hash_map()); + let mut pending_claiming_payments = None; let mut monitor_update_blocked_actions_per_peer: Option>)>> = - Some(Vec::new()); + None; let mut events_override = None; let mut legacy_in_flight_monitor_updates: Option< HashMap<(PublicKey, OutPoint), Vec>, @@ -17494,12 +17494,10 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> pending_intercepted_htlcs_legacy: pending_intercepted_htlcs_legacy .unwrap_or_else(new_hash_map), pending_outbound_payments, - // unwrap safety: pending_claiming_payments is guaranteed to be `Some` after read_tlv_fields - pending_claiming_payments: pending_claiming_payments.unwrap(), + pending_claiming_payments: pending_claiming_payments.unwrap_or_else(new_hash_map), received_network_pubkey, - // unwrap safety: monitor_update_blocked_actions_per_peer is guaranteed to be `Some` after read_tlv_fields monitor_update_blocked_actions_per_peer: monitor_update_blocked_actions_per_peer - .unwrap(), + .unwrap_or_else(Vec::new), fake_scid_rand_bytes, claimable_htlc_purposes, probing_cookie_secret, From 47901533a85e1ddc23c7fe8749e6a4371849d08d Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 4 Feb 2026 10:49:14 +0100 Subject: [PATCH 180/242] Simplify peer_storage_dir from Option to Vec in ChannelManagerData Since there is no semantic difference between None and Some(empty vec) for peer_storage_dir, simplify the type to Vec and use unwrap_or_default() when reading from TLV fields. 
Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 2df6576fa26..3b3b9c87415 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17239,7 +17239,7 @@ pub(super) struct ChannelManagerData { claimable_htlc_onion_fields: Option>>, inbound_payment_id_secret: Option<[u8; 32]>, in_flight_monitor_updates: Option>>, - peer_storage_dir: Option)>>, + peer_storage_dir: Vec<(PublicKey, Vec)>, async_receive_offer_cache: AsyncReceiveOfferCache, // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from @@ -17506,7 +17506,7 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> .unwrap_or_else(new_hash_map), inbound_payment_id_secret, in_flight_monitor_updates, - peer_storage_dir, + peer_storage_dir: peer_storage_dir.unwrap_or_default(), async_receive_offer_cache, }) } @@ -18102,11 +18102,9 @@ impl< let pending_outbounds = OutboundPayments::new(pending_outbound_payments); - if let Some(peer_storage_dir) = peer_storage_dir { - for (peer_pubkey, peer_storage) in peer_storage_dir { - if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) { - peer_state.get_mut().unwrap().peer_storage = peer_storage; - } + for (peer_pubkey, peer_storage) in peer_storage_dir { + if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) { + peer_state.get_mut().unwrap().peer_storage = peer_storage; } } From 07bcba9722930ce8cf38085e08a9e31650948af4 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 5 Feb 2026 10:34:48 +0100 Subject: [PATCH 181/242] Combine claimable HTLCs with purposes in ChannelManagerData Move the reconstruction of claimable_payments from the main ChannelManager::read into ChannelManagerData::read. This removes the separate claimable_htlc_purposes and claimable_htlc_onion_fields fields from ChannelManagerData, replacing them with the combined claimable_payments HashMap. This requires adding a node_signer parameter to ChannelManagerDataReadArgs to support verification of legacy hop data when reconstructing payment purposes for very old serialized data (pre-0.0.107). 
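The shape of the combining step, as a sketch with placeholder types (`u32` standing in for `PaymentHash`, `String` for the purpose, and so on; the real code additionally handles the legacy pre-0.0.107 path without purposes):

```
use std::collections::HashMap;

fn combine(
    purposes: Vec<String>, onions: Vec<Option<String>>, htlcs: Vec<(u32, Vec<u8>)>,
) -> Result<HashMap<u32, (String, Vec<u8>, Option<String>)>, ()> {
    // The three lists were serialized in parallel, so their lengths must match.
    if purposes.len() != htlcs.len() || onions.len() != htlcs.len() {
        return Err(());
    }
    let mut combined = HashMap::with_capacity(htlcs.len());
    for ((purpose, onion), (hash, htlc)) in purposes.into_iter().zip(onions).zip(htlcs) {
        // A duplicate payment hash indicates corrupt serialized data.
        if combined.insert(hash, (purpose, htlc, onion)).is_some() {
            return Err(());
        }
    }
    Ok(combined)
}
```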
Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 174 +++++++++++++++-------------- 1 file changed, 90 insertions(+), 84 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 3b3b9c87415..8f987cd13e6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17224,7 +17224,7 @@ pub(super) struct ChannelManagerData { best_block_height: u32, best_block_hash: BlockHash, channels: Vec>, - claimable_htlcs_list: Vec<(PaymentHash, Vec)>, + claimable_payments: HashMap, peer_init_features: Vec<(PublicKey, InitFeatures)>, pending_events_read: VecDeque<(events::Event, Option)>, highest_seen_timestamp: u32, @@ -17234,9 +17234,7 @@ pub(super) struct ChannelManagerData { monitor_update_blocked_actions_per_peer: Vec<(PublicKey, BTreeMap>)>, fake_scid_rand_bytes: Option<[u8; 32]>, - claimable_htlc_purposes: Option>, probing_cookie_secret: Option<[u8; 32]>, - claimable_htlc_onion_fields: Option>>, inbound_payment_id_secret: Option<[u8; 32]>, in_flight_monitor_updates: Option>>, peer_storage_dir: Vec<(PublicKey, Vec)>, @@ -17250,18 +17248,25 @@ pub(super) struct ChannelManagerData { } /// Arguments for deserializing [`ChannelManagerData`]. -struct ChannelManagerDataReadArgs<'a, ES: EntropySource, SP: SignerProvider, L: Logger> { +struct ChannelManagerDataReadArgs< + 'a, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + L: Logger, +> { entropy_source: &'a ES, + node_signer: &'a NS, signer_provider: &'a SP, config: UserConfig, logger: &'a L, } -impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> - ReadableArgs> for ChannelManagerData +impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> + ReadableArgs> for ChannelManagerData { fn read( - reader: &mut R, args: ChannelManagerDataReadArgs<'a, ES, SP, L>, + reader: &mut R, args: ChannelManagerDataReadArgs<'a, ES, NS, SP, L>, ) -> Result { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); @@ -17481,13 +17486,86 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> // Resolve events_override: if present, it replaces pending_events. let pending_events_read = events_override.unwrap_or(pending_events_read); + // Combine claimable_htlcs_list with their purposes and onion fields. For very old data + // (pre-0.0.107) that lacks purposes, reconstruct them from legacy hop data. 
+ let expanded_inbound_key = args.node_signer.get_expanded_key(); + + let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len()); + if let Some(purposes) = claimable_htlc_purposes { + if purposes.len() != claimable_htlcs_list.len() { + return Err(DecodeError::InvalidValue); + } + if let Some(onion_fields) = claimable_htlc_onion_fields { + if onion_fields.len() != claimable_htlcs_list.len() { + return Err(DecodeError::InvalidValue); + } + for (purpose, (onion, (payment_hash, htlcs))) in purposes + .into_iter() + .zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter())) + { + let claimable = ClaimablePayment { purpose, htlcs, onion_fields: onion }; + let existing_payment = claimable_payments.insert(payment_hash, claimable); + if existing_payment.is_some() { + return Err(DecodeError::InvalidValue); + } + } + } else { + for (purpose, (payment_hash, htlcs)) in + purposes.into_iter().zip(claimable_htlcs_list.into_iter()) + { + let claimable = ClaimablePayment { purpose, htlcs, onion_fields: None }; + let existing_payment = claimable_payments.insert(payment_hash, claimable); + if existing_payment.is_some() { + return Err(DecodeError::InvalidValue); + } + } + } + } else { + // LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do + // include a `_legacy_hop_data` in the `OnionPayload`. + for (payment_hash, htlcs) in claimable_htlcs_list.into_iter() { + if htlcs.is_empty() { + return Err(DecodeError::InvalidValue); + } + let purpose = match &htlcs[0].onion_payload { + OnionPayload::Invoice { _legacy_hop_data } => { + if let Some(hop_data) = _legacy_hop_data { + events::PaymentPurpose::Bolt11InvoicePayment { + payment_preimage: match inbound_payment::verify( + payment_hash, + &hop_data, + 0, + &expanded_inbound_key, + &args.logger, + ) { + Ok((payment_preimage, _)) => payment_preimage, + Err(()) => { + log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash); + return Err(DecodeError::InvalidValue); + }, + }, + payment_secret: hop_data.payment_secret, + } + } else { + return Err(DecodeError::InvalidValue); + } + }, + OnionPayload::Spontaneous(payment_preimage) => { + events::PaymentPurpose::SpontaneousPayment(*payment_preimage) + }, + }; + claimable_payments + .insert(payment_hash, ClaimablePayment { purpose, htlcs, onion_fields: None }); + } + } + Ok(ChannelManagerData { chain_hash, best_block_height, best_block_hash, channels, forward_htlcs_legacy, - claimable_htlcs_list, + claimable_payments, peer_init_features, pending_events_read, highest_seen_timestamp, @@ -17499,9 +17577,7 @@ impl<'a, ES: EntropySource, SP: SignerProvider, L: Logger> monitor_update_blocked_actions_per_peer: monitor_update_blocked_actions_per_peer .unwrap_or_else(Vec::new), fake_scid_rand_bytes, - claimable_htlc_purposes, probing_cookie_secret, - claimable_htlc_onion_fields, decode_update_add_htlcs_legacy: decode_update_add_htlcs_legacy .unwrap_or_else(new_hash_map), inbound_payment_id_secret, @@ -17741,6 +17817,7 @@ impl< reader, ChannelManagerDataReadArgs { entropy_source: &args.entropy_source, + node_signer: &args.node_signer, signer_provider: &args.signer_provider, config: args.config.clone(), logger: &args.logger, @@ -17782,7 +17859,7 @@ impl< best_block_hash, channels, mut forward_htlcs_legacy, - mut claimable_htlcs_list, + claimable_payments, peer_init_features, mut pending_events_read, highest_seen_timestamp, @@ -17792,9 +17869,7 @@ impl< 
received_network_pubkey, monitor_update_blocked_actions_per_peer, mut fake_scid_rand_bytes, - claimable_htlc_purposes, mut probing_cookie_secret, - claimable_htlc_onion_fields, mut decode_update_add_htlcs_legacy, mut inbound_payment_id_secret, mut in_flight_monitor_updates, @@ -18738,77 +18813,6 @@ impl< } } - let expanded_inbound_key = args.node_signer.get_expanded_key(); - - let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len()); - if let Some(purposes) = claimable_htlc_purposes { - if purposes.len() != claimable_htlcs_list.len() { - return Err(DecodeError::InvalidValue); - } - if let Some(onion_fields) = claimable_htlc_onion_fields { - if onion_fields.len() != claimable_htlcs_list.len() { - return Err(DecodeError::InvalidValue); - } - for (purpose, (onion, (payment_hash, htlcs))) in purposes - .into_iter() - .zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter())) - { - let claimable = ClaimablePayment { purpose, htlcs, onion_fields: onion }; - let existing_payment = claimable_payments.insert(payment_hash, claimable); - if existing_payment.is_some() { - return Err(DecodeError::InvalidValue); - } - } - } else { - for (purpose, (payment_hash, htlcs)) in - purposes.into_iter().zip(claimable_htlcs_list.into_iter()) - { - let claimable = ClaimablePayment { purpose, htlcs, onion_fields: None }; - let existing_payment = claimable_payments.insert(payment_hash, claimable); - if existing_payment.is_some() { - return Err(DecodeError::InvalidValue); - } - } - } - } else { - // LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do - // include a `_legacy_hop_data` in the `OnionPayload`. - for (payment_hash, htlcs) in claimable_htlcs_list.drain(..) { - if htlcs.is_empty() { - return Err(DecodeError::InvalidValue); - } - let purpose = match &htlcs[0].onion_payload { - OnionPayload::Invoice { _legacy_hop_data } => { - if let Some(hop_data) = _legacy_hop_data { - events::PaymentPurpose::Bolt11InvoicePayment { - payment_preimage: match inbound_payment::verify( - payment_hash, - &hop_data, - 0, - &expanded_inbound_key, - &args.logger, - ) { - Ok((payment_preimage, _)) => payment_preimage, - Err(()) => { - log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash); - return Err(DecodeError::InvalidValue); - }, - }, - payment_secret: hop_data.payment_secret, - } - } else { - return Err(DecodeError::InvalidValue); - } - }, - OnionPayload::Spontaneous(payment_preimage) => { - events::PaymentPurpose::SpontaneousPayment(*payment_preimage) - }, - }; - claimable_payments - .insert(payment_hash, ClaimablePayment { purpose, htlcs, onion_fields: None }); - } - } - // Similar to the above cases for forwarded payments, if we have any pending inbound HTLCs // which haven't yet been claimed, we may be missing counterparty_node_id info and would // panic if we attempted to claim them at this point. 
@@ -18839,6 +18843,8 @@ impl< let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes()); + let expanded_inbound_key = args.node_signer.get_expanded_key(); + let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) { Ok(key) => key, Err(()) => return Err(DecodeError::InvalidValue), From b509c4a93d5cf46df24d1d1f20b5008741d61745 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 4 Feb 2026 11:06:25 +0100 Subject: [PATCH 182/242] Simplify in_flight_monitor_updates from Option to HashMap Since there is no semantic difference between None and Some(empty map) for in_flight_monitor_updates, simplify the type to HashMap and use unwrap_or_default() when reading from TLV fields. Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channelmanager.rs | 104 +++++++++++++---------------- 1 file changed, 48 insertions(+), 56 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8f987cd13e6..c0c4aa31f2c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17236,7 +17236,7 @@ pub(super) struct ChannelManagerData { fake_scid_rand_bytes: Option<[u8; 32]>, probing_cookie_secret: Option<[u8; 32]>, inbound_payment_id_secret: Option<[u8; 32]>, - in_flight_monitor_updates: Option>>, + in_flight_monitor_updates: HashMap<(PublicKey, ChannelId), Vec>, peer_storage_dir: Vec<(PublicKey, Vec)>, async_receive_offer_cache: AsyncReceiveOfferCache, // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of @@ -17581,7 +17581,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> decode_update_add_htlcs_legacy: decode_update_add_htlcs_legacy .unwrap_or_else(new_hash_map), inbound_payment_id_secret, - in_flight_monitor_updates, + in_flight_monitor_updates: in_flight_monitor_updates.unwrap_or_default(), peer_storage_dir: peer_storage_dir.unwrap_or_default(), async_receive_offer_cache, }) @@ -18258,22 +18258,20 @@ impl< .get(chan_id) .expect("We already checked for monitor presence when loading channels"); let mut max_in_flight_update_id = monitor.get_latest_update_id(); - if let Some(in_flight_upds) = &mut in_flight_monitor_updates { - if let Some(mut chan_in_flight_upds) = - in_flight_upds.remove(&(*counterparty_id, *chan_id)) - { - max_in_flight_update_id = cmp::max( - max_in_flight_update_id, - handle_in_flight_updates!( - *counterparty_id, - chan_in_flight_upds, - monitor, - peer_state, - logger, - "" - ), - ); - } + if let Some(mut chan_in_flight_upds) = + in_flight_monitor_updates.remove(&(*counterparty_id, *chan_id)) + { + max_in_flight_update_id = cmp::max( + max_in_flight_update_id, + handle_in_flight_updates!( + *counterparty_id, + chan_in_flight_upds, + monitor, + peer_state, + logger, + "" + ), + ); } if funded_chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id @@ -18302,44 +18300,38 @@ impl< } } - if let Some(in_flight_upds) = in_flight_monitor_updates { - for ((counterparty_id, channel_id), mut chan_in_flight_updates) in in_flight_upds { - let logger = - WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None); - if let Some(monitor) = args.channel_monitors.get(&channel_id) { - // Now that we've removed all the in-flight monitor updates for channels that are - // still open, we need to replay any monitor updates that are for closed channels, - // creating the neccessary peer_state entries as we go. 
- let peer_state_mutex = per_peer_state - .entry(counterparty_id) - .or_insert_with(|| Mutex::new(empty_peer_state())); - let mut peer_state = peer_state_mutex.lock().unwrap(); - handle_in_flight_updates!( - counterparty_id, - chan_in_flight_updates, - monitor, - peer_state, - logger, - "closed " - ); - } else { - log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!"); - log_error!( - logger, - " The ChannelMonitor for channel {} is missing.", - channel_id - ); - log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); - log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); - log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); - log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); - log_error!( - logger, - " Pending in-flight updates are: {:?}", - chan_in_flight_updates - ); - return Err(DecodeError::InvalidValue); - } + for ((counterparty_id, channel_id), mut chan_in_flight_updates) in in_flight_monitor_updates + { + let logger = + WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None); + if let Some(monitor) = args.channel_monitors.get(&channel_id) { + // Now that we've removed all the in-flight monitor updates for channels that are + // still open, we need to replay any monitor updates that are for closed channels, + // creating the necessary peer_state entries as we go. + let peer_state_mutex = per_peer_state + .entry(counterparty_id) + .or_insert_with(|| Mutex::new(empty_peer_state())); + let mut peer_state = peer_state_mutex.lock().unwrap(); + handle_in_flight_updates!( + counterparty_id, + chan_in_flight_updates, + monitor, + peer_state, + logger, + "closed " + ); + } else { + log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!"); + log_error!(logger, " The ChannelMonitor for channel {} is missing.", channel_id); + log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); + log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); + log_error!( + logger, + " Without the latest ChannelMonitor we cannot continue without risking funds." + ); + log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); + log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates); + return Err(DecodeError::InvalidValue); + } } } From dde2c8225dccfe1da9cad0707c001a19c909d4e9 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 5 Feb 2026 12:24:36 +0100 Subject: [PATCH 183/242] fuzz: fix ChaCha20 encrypt_single_block to preserve data The fuzzing ChaCha20 implementation's encrypt_single_block was not copying src to dest, causing encrypted data to be lost (dest remained zeros). This broke payment flows where metadata is encrypted into payment_secret - the receiver would decrypt zeros and detect the wrong payment method (LdkPaymentHash instead of UserPaymentHash), causing payments to fail with "mismatching preimage".
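In other words, the shim's single-block primitive must act as an identity transform. A sketch of the intended contract (simplified from the fuzzy module in `lightning/src/crypto/chacha20.rs`):

```
fn encrypt_single_block(dest: &mut [u8], src: &[u8]) {
    debug_assert_eq!(dest.len(), src.len());
    debug_assert!(dest.len() <= 32);
    // Identity "encryption": without this copy, `dest` keeps its zeroed contents.
    dest.copy_from_slice(src);
}
```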
Fix by making encrypt_single_block copy src to dest (identity encryption), matching the behavior of the process() method. Co-Authored-By: Claude Opus 4.5 --- lightning/src/crypto/chacha20.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lightning/src/crypto/chacha20.rs b/lightning/src/crypto/chacha20.rs index 5b0c16c933f..67f9e93c480 100644 --- a/lightning/src/crypto/chacha20.rs +++ b/lightning/src/crypto/chacha20.rs @@ -321,6 +321,7 @@ mod fuzzy_chacha { ) { debug_assert_eq!(dest.len(), src.len()); debug_assert!(dest.len() <= 32); + dest.copy_from_slice(src); } pub fn encrypt_single_block_in_place( From ee7420929ae36e2d5568f5fd01cf9e124bd5dd77 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 27 Jan 2026 15:49:49 -0800 Subject: [PATCH 184/242] Support async signing of interactive-tx initial commitment signatures This commit allows for an async signer to immediately return upon a call to `EcdsaChannelSigner::sign_counterparty_commitment` for the initial commitment signatures of an interactively funded transaction, such that they can call back in via `ChannelManager::signer_unblocked` once the signatures are ready. This is done for both splices and dual-funded channels, though note that the latter still require more work to be integrated. Since `tx_signatures` must be sent only after exchanging `commitment_signed`, we make sure to hold them back if they're ready to be sent until our `commitment_signed` is also ready. --- lightning/src/ln/async_signer_tests.rs | 106 ++++++++++++++++ lightning/src/ln/channel.rs | 163 +++++++++++++++++-------- lightning/src/ln/channelmanager.rs | 19 ++- 3 files changed, 237 insertions(+), 51 deletions(-) diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 53187c14168..ddf17907718 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -10,9 +10,13 @@ //! Tests for asynchronous signing. These tests verify that the channel state machine behaves //! properly with a signer implementation that asynchronously derives signatures. 
+use crate::events::bump_transaction::sync::WalletSourceSync; +use crate::ln::funding::SpliceContribution; +use crate::ln::splicing_tests::negotiate_splice_tx; use crate::prelude::*; use crate::util::ser::Writeable; use bitcoin::secp256k1::Secp256k1; +use bitcoin::{Amount, TxOut}; use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS; use crate::chain::ChannelMonitorUpdateStatus; @@ -1550,3 +1554,105 @@ fn test_async_force_close_on_invalid_secret_for_stale_state() { check_closed_broadcast(&nodes[1], 1, true); check_closed_event(&nodes[1], 1, closure_reason, &[node_id_0], 100_000); } + +#[test] +fn test_async_splice_initial_commit_sig() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; + send_payment(&nodes[0], &[&nodes[1]], 1_000); + + let (initiator, acceptor) = (&nodes[0], &nodes[1]); + let initiator_node_id = initiator.node.get_our_node_id(); + let acceptor_node_id = acceptor.node.get_our_node_id(); + + initiator.disable_channel_signer_op( + &acceptor_node_id, + &channel_id, + SignerOp::SignCounterpartyCommitment, + ); + acceptor.disable_channel_signer_op( + &initiator_node_id, + &channel_id, + SignerOp::SignCounterpartyCommitment, + ); + + // Negotiate a splice up until the signature exchange. + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); + negotiate_splice_tx(initiator, acceptor, channel_id, contribution); + + assert!(initiator.node.get_and_clear_pending_msg_events().is_empty()); + assert!(acceptor.node.get_and_clear_pending_msg_events().is_empty()); + + // Have the initiator sign the funding transaction. We won't see their initial commitment signed + // go out until their signer returns. + let event = get_event!(initiator, Event::FundingTransactionReadyForSigning); + if let Event::FundingTransactionReadyForSigning { unsigned_transaction, .. } = event { + let partially_signed_tx = initiator.wallet_source.sign_tx(unsigned_transaction).unwrap(); + initiator + .node + .funding_transaction_signed(&channel_id, &acceptor_node_id, partially_signed_tx) + .unwrap(); + } + + assert!(initiator.node.get_and_clear_pending_msg_events().is_empty()); + assert!(acceptor.node.get_and_clear_pending_msg_events().is_empty()); + + initiator.enable_channel_signer_op( + &acceptor_node_id, + &channel_id, + SignerOp::SignCounterpartyCommitment, + ); + initiator.node.signer_unblocked(None); + + // Have the acceptor process the message. They should be able to send their `tx_signatures` as + // they go first, but it is held back as their initial `commitment_signed` is not ready yet. + let initiator_commit_sig = get_htlc_update_msgs(initiator, &acceptor_node_id); + acceptor + .node + .handle_commitment_signed(initiator_node_id, &initiator_commit_sig.commitment_signed[0]); + check_added_monitors(acceptor, 1); + assert!(acceptor.node.get_and_clear_pending_msg_events().is_empty()); + + // Reestablish the channel to make sure the acceptor doesn't attempt to retransmit any messages + // that are not ready yet. + initiator.node.peer_disconnected(acceptor_node_id); + acceptor.node.peer_disconnected(initiator_node_id); + reconnect_nodes(ReconnectArgs::new(initiator, acceptor)); + + // Re-enable the acceptor's signer. 
We should see both their initial `commitment_signed` and + // `tx_signatures` go out. + acceptor.enable_channel_signer_op( + &initiator_node_id, + &channel_id, + SignerOp::SignCounterpartyCommitment, + ); + acceptor.node.signer_unblocked(None); + + let msg_events = acceptor.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2, "{msg_events:?}"); + if let MessageSendEvent::UpdateHTLCs { updates, .. } = &msg_events[0] { + initiator.node.handle_commitment_signed(acceptor_node_id, &updates.commitment_signed[0]); + check_added_monitors(initiator, 1); + } else { + panic!("Unexpected event"); + } + if let MessageSendEvent::SendTxSignatures { msg, .. } = &msg_events[1] { + initiator.node.handle_tx_signatures(acceptor_node_id, &msg); + } else { + panic!("Unexpected event"); + } + + let tx_signatures = + get_event_msg!(initiator, MessageSendEvent::SendTxSignatures, acceptor_node_id); + acceptor.node.handle_tx_signatures(initiator_node_id, &tx_signatures); + + let _ = get_event!(initiator, Event::SplicePending); + let _ = get_event!(acceptor, Event::SplicePending); +} diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a50365c1148..881b81739e7 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1158,6 +1158,8 @@ pub(super) struct SignerResumeUpdates { pub accept_channel: Option, pub funding_created: Option, pub funding_signed: Option, + pub funding_commit_sig: Option, + pub tx_signatures: Option, pub channel_ready: Option, pub order: RAACommitmentOrder, pub closing_signed: Option, @@ -1605,6 +1607,8 @@ where accept_channel: None, funding_created, funding_signed: None, + funding_commit_sig: None, + tx_signatures: None, channel_ready: None, order: chan.context.resend_order.clone(), closing_signed: None, @@ -1621,6 +1625,8 @@ where accept_channel, funding_created: None, funding_signed: None, + funding_commit_sig: None, + tx_signatures: None, channel_ready: None, order: chan.context.resend_order.clone(), closing_signed: None, @@ -3046,8 +3052,9 @@ pub(super) struct ChannelContext { /// setting it again as a side-effect of [`FundedChannel::channel_reestablish`]. signer_pending_commitment_update: bool, /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a - /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is - /// outbound or inbound. + /// [`msgs::FundingCreated`] for an outbound V1 channel, [`msgs::FundingSigned`] for an inbound + /// V1 channel, or [`msgs::CommitmentSigned`] for a V2 channel (dual-funded) or a funded channel + /// with a pending splice. signer_pending_funding: bool, /// If we attempted to sign a cooperative close transaction but the signer wasn't ready, then this /// will be set to `true`. @@ -6351,7 +6358,7 @@ impl ChannelContext { } fn get_initial_commitment_signed_v2( - &self, funding: &FundingScope, logger: &L, + &mut self, funding: &FundingScope, logger: &L, ) -> Option { let signatures = self.get_initial_counterparty_commitment_signatures(funding, logger); if let Some((signature, htlc_signatures)) = signatures { @@ -6360,6 +6367,7 @@ impl ChannelContext { // We shouldn't expect any HTLCs before `ChannelReady`. 
debug_assert!(htlc_signatures.is_empty()); } + self.signer_pending_funding = false; Some(msgs::CommitmentSigned { channel_id: self.channel_id, htlc_signatures, @@ -6369,7 +6377,11 @@ impl ChannelContext { partial_signature_with_nonce: None, }) } else { - // TODO(splicing): Support async signing + log_debug!( + logger, + "Initial counterparty commitment signature not available, waiting on async signer" + ); + self.signer_pending_funding = true; None } } @@ -9352,6 +9364,11 @@ where // We want to clear that the monitor update for our `tx_signatures` has completed, but // we may still need to hold back the message until it's ready to be sent. self.context.monitor_pending_tx_signatures = false; + + if self.context.signer_pending_funding { + tx_signatures.take(); + } + let signing_session = self.context.interactive_tx_signing_session.as_ref() .expect("We have a tx_signatures message so we must have a valid signing session"); if !signing_session.holder_sends_tx_signatures_first() @@ -9527,7 +9544,12 @@ where log_trace!(logger, "Attempting to update holder per-commitment point..."); self.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger); } - let funding_signed = if self.context.signer_pending_funding && !self.funding.is_outbound() { + + let funding_signed = if self.context.signer_pending_funding + && !self.is_v2_established() + && !self.funding.is_outbound() + && self.pending_splice.is_none() + { let commitment_data = self.context.build_commitment_transaction(&self.funding, // The previous transaction number (i.e., when adding 1) is used because this field // is advanced when handling funding_created, but the point is not advanced until @@ -9537,6 +9559,43 @@ where let counterparty_initial_commitment_tx = commitment_data.tx; self.context.get_funding_signed_msg(&self.funding.channel_transaction_parameters, logger, counterparty_initial_commitment_tx) } else { None }; + + let funding_commit_sig = if self.context.signer_pending_funding + && (self.is_v2_established() || self.pending_splice.is_some()) + { + log_debug!(logger, "Attempting to generate pending initial commitment_signed..."); + let funding = self + .pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.funding_negotiation.as_ref()) + .and_then(|funding_negotiation| { + debug_assert!(matches!( + funding_negotiation, + FundingNegotiation::AwaitingSignatures { .. } + )); + funding_negotiation.as_funding() + }) + .unwrap_or(&self.funding); + self.context.get_initial_commitment_signed_v2(funding, logger) + } else { + None + }; + + let tx_signatures = if funding_commit_sig.is_some() { + if let Some(signing_session) = self.context.interactive_tx_signing_session.as_ref() { + let should_send_tx_signatures = signing_session.holder_sends_tx_signatures_first() + || signing_session.has_received_tx_signatures(); + should_send_tx_signatures + .then(|| ()) + .and_then(|_| signing_session.holder_tx_signatures().clone()) + } else { + debug_assert!(false); + None + } + } else { + None + }; + // Provide a `channel_ready` message if we need to, but only if we're _not_ still pending // funding. 
let channel_ready = if self.context.signer_pending_channel_ready && !self.context.signer_pending_funding { @@ -9595,12 +9654,14 @@ where } else { (None, None, None) } } else { (None, None, None) }; - log_trace!(logger, "Signer unblocked with {} commitment_update, {} revoke_and_ack, with resend order {:?}, {} funding_signed, {} channel_ready, - {} closing_signed, {} signed_closing_tx, and {} shutdown result", + log_trace!(logger, "Signer unblocked with {} commitment_update, {} revoke_and_ack, with resend order {:?}, {} funding_signed, \ + {} funding commit_sig, {} tx_signatures, {} channel_ready, {} closing_signed, {} signed_closing_tx, and {} shutdown result", if commitment_update.is_some() { "a" } else { "no" }, if revoke_and_ack.is_some() { "a" } else { "no" }, self.context.resend_order, if funding_signed.is_some() { "a" } else { "no" }, + if funding_commit_sig.is_some() { "a" } else { "no" }, + if tx_signatures.is_some() { "a" } else { "no" }, if channel_ready.is_some() { "a" } else { "no" }, if closing_signed.is_some() { "a" } else { "no" }, if signed_closing_tx.is_some() { "a" } else { "no" }, @@ -9613,6 +9674,8 @@ where accept_channel: None, funding_created: None, funding_signed, + funding_commit_sig, + tx_signatures, channel_ready, order: self.context.resend_order.clone(), closing_signed, @@ -9910,6 +9973,7 @@ where // A receiving node: // - if the `next_funding` TLV is set: + let mut retransmit_funding_commit_sig = None; if let Some(next_funding) = &msg.next_funding { // - if `next_funding_txid` matches the latest interactive funding transaction // or the current channel funding transaction: @@ -9932,49 +9996,7 @@ where && next_funding.should_retransmit(msgs::NextFundingFlag::CommitmentSigned) { // - MUST retransmit its `commitment_signed` for that funding transaction. - let funding = self - .pending_splice - .as_ref() - .and_then(|pending_splice| pending_splice.funding_negotiation.as_ref()) - .and_then(|funding_negotiation| { - if let FundingNegotiation::AwaitingSignatures { funding, .. } = &funding_negotiation { - Some(funding) - } else { - None - } - }) - .or_else(|| Some(&self.funding)) - .filter(|funding| funding.get_funding_txid() == Some(next_funding.txid)) - .ok_or_else(|| { - let message = "Failed to find funding for new commitment_signed".to_owned(); - ChannelError::Close( - ( - message.clone(), - ClosureReason::HolderForceClosed { message, broadcasted_latest_txn: Some(false) }, - ) - ) - })?; - - let commitment_signed = self.context.get_initial_commitment_signed_v2(&funding, logger) - // TODO(splicing): Support async signing - .ok_or_else(|| { - let message = "Failed to get signatures for new commitment_signed".to_owned(); - ChannelError::Close( - ( - message.clone(), - ClosureReason::HolderForceClosed { message, broadcasted_latest_txn: Some(false) }, - ) - ) - })?; - - commitment_update = Some(msgs::CommitmentUpdate { - commitment_signed: vec![commitment_signed], - update_add_htlcs: vec![], - update_fulfill_htlcs: vec![], - update_fail_htlcs: vec![], - update_fail_malformed_htlcs: vec![], - update_fee: None, - }); + retransmit_funding_commit_sig = Some(next_funding.txid); } // - if it has already received `commitment_signed` and it should sign first @@ -10006,6 +10028,47 @@ where "No active signing session. 
The associated funding transaction may have already been broadcast.".as_bytes().to_vec() }); } } + if let Some(funding_txid) = retransmit_funding_commit_sig { + let funding = self + .pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.funding_negotiation.as_ref()) + .and_then(|funding_negotiation| { + if let FundingNegotiation::AwaitingSignatures { funding, .. } = &funding_negotiation { + Some(funding) + } else { + None + } + }) + .or_else(|| Some(&self.funding)) + .filter(|funding| funding.get_funding_txid() == Some(funding_txid)) + .ok_or_else(|| { + let message = "Failed to find funding for new commitment_signed".to_owned(); + ChannelError::Close( + ( + message.clone(), + ClosureReason::HolderForceClosed { message, broadcasted_latest_txn: Some(false) }, + ) + ) + })?; + + commitment_update = self + .context + .get_initial_commitment_signed_v2(&funding, logger) + .map(|commitment_signed| + msgs::CommitmentUpdate { + commitment_signed: vec![commitment_signed], + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, + } + ); + if commitment_update.is_none() { + tx_signatures.take(); + } + } if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) { // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index fc13d40a2cc..471cf4062c4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10216,7 +10216,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, } - // TODO(dual_funding): For async signing support we need to hold back `tx_signatures` until the `commitment_signed` is ready. if let Some(msg) = tx_signatures { pending_msg_events.push(MessageSendEvent::SendTxSignatures { node_id: counterparty_node_id, @@ -12909,6 +12908,24 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ pending_msg_events .push(MessageSendEvent::SendFundingSigned { node_id, msg }); } + if let Some(msg) = msgs.funding_commit_sig { + pending_msg_events.push(MessageSendEvent::UpdateHTLCs { + node_id, + channel_id, + updates: CommitmentUpdate { + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, + commitment_signed: vec![msg], + }, + }); + } + if let Some(msg) = msgs.tx_signatures { + pending_msg_events + .push(MessageSendEvent::SendTxSignatures { node_id, msg }); + } if let Some(msg) = msgs.closing_signed { pending_msg_events .push(MessageSendEvent::SendClosingSigned { node_id, msg }); From 98c3cfff8f850b8b00532fc0dd715772928fcee8 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 5 Feb 2026 14:49:38 +0000 Subject: [PATCH 185/242] Switch `SplicePrototype` feature flag to the prod feature bit When we shipped 0.2 we used the feature bit 155 to signal splicing, in line with what eclair was using. However, eclair was actually using that bit to signal splicing on a previous design which is incompatible with the current spec. The result of this was that eclair nodes may attempt to splice using their protocol and we'd fail to deserialize their splice message (resulting in a reconnect, which luckily would clear their splice attempt and return the connection to normal). 
As we really need to get off of their feature bit and there's not much reason to keep using a non-final-spec bit, we simply redefine `SplicePrototype` to bit 63 here. --- lightning-types/src/features.rs | 28 ++++++++-------------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs index 05a504ab8ca..8bb317f1236 100644 --- a/lightning-types/src/features.rs +++ b/lightning-types/src/features.rs @@ -166,7 +166,7 @@ mod sealed { // Byte 6 ZeroConf, // Byte 7 - Trampoline | SimpleClose | SpliceProduction, + Trampoline | SimpleClose | SpliceProduction | SplicePrototype, // Byte 8 - 16 ,,,,,,,,, // Byte 17 @@ -174,7 +174,7 @@ mod sealed { // Byte 18 , // Byte 19 - HtlcHold | SplicePrototype, + HtlcHold, ] ); define_context!( @@ -195,7 +195,7 @@ mod sealed { // Byte 6 ZeroConf | Keysend, // Byte 7 - Trampoline | SimpleClose | SpliceProduction, + Trampoline | SimpleClose | SpliceProduction | SplicePrototype, // Byte 8 - 16 ,,,,,,,,, // Byte 17 @@ -203,7 +203,7 @@ mod sealed { // Byte 18 , // Byte 19 - HtlcHold | SplicePrototype, + HtlcHold, // Byte 20 - 31 ,,,,,,,,,,,, // Byte 32 @@ -722,7 +722,7 @@ mod sealed { requires_htlc_hold ); define_feature!( - 155, // Splice prototype feature bit as listed in https://github.com/lightning/bolts/issues/605#issuecomment-877237519. + 63, // Actually the SpliceProduction feature SplicePrototype, [InitContext, NodeContext], "Feature flags for channel splicing.", @@ -1441,8 +1441,8 @@ mod tests { // - onion_messages // - option_channel_type | option_scid_alias // - option_zeroconf - // - option_simple_close | option_splice - assert_eq!(node_features.flags.len(), 20); + // - option_simple_close + assert_eq!(node_features.flags.len(), 8); assert_eq!(node_features.flags[0], 0b00000001); assert_eq!(node_features.flags[1], 0b01010001); assert_eq!(node_features.flags[2], 0b10001010); @@ -1450,19 +1450,7 @@ mod tests { assert_eq!(node_features.flags[4], 0b10001000); assert_eq!(node_features.flags[5], 0b10100000); assert_eq!(node_features.flags[6], 0b00001000); - assert_eq!(node_features.flags[7], 0b00100000); - assert_eq!(node_features.flags[8], 0b00000000); - assert_eq!(node_features.flags[9], 0b00000000); - assert_eq!(node_features.flags[10], 0b00000000); - assert_eq!(node_features.flags[11], 0b00000000); - assert_eq!(node_features.flags[12], 0b00000000); - assert_eq!(node_features.flags[13], 0b00000000); - assert_eq!(node_features.flags[14], 0b00000000); - assert_eq!(node_features.flags[15], 0b00000000); - assert_eq!(node_features.flags[16], 0b00000000); - assert_eq!(node_features.flags[17], 0b00000000); - assert_eq!(node_features.flags[18], 0b00000000); - assert_eq!(node_features.flags[19], 0b00001000); + assert_eq!(node_features.flags[7], 0b10100000); } // Check that cleared flags are kept blank when converting back: From 5427b0de7e93ce4ccf63c79de756e0da49e33d0b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 5 Feb 2026 14:55:58 +0000 Subject: [PATCH 186/242] Drop `SplicePrototype` feature Now that `SplicePrototype` and `SpliceProduction` share the same feature bit, there's not really any reason to have the `SplicePrototype` feature at all. Instead, we drop it, leaving only a `Splice` feature. 
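As a hedged usage sketch of the merged feature (helper names generated by `define_feature!` as shown in the diff below; assuming the `lightning-types` crate is in scope as `lightning_types`):

```
use lightning_types::features::NodeFeatures;

fn main() {
    let mut features = NodeFeatures::empty();
    // A single set of splicing helpers now, with no prototype/production split.
    features.set_splicing_optional();
    assert!(features.supports_splicing());
    assert!(!features.requires_splicing());
}
```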
--- lightning-types/src/features.rs | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs index 8bb317f1236..22493efc556 100644 --- a/lightning-types/src/features.rs +++ b/lightning-types/src/features.rs @@ -166,7 +166,7 @@ mod sealed { // Byte 6 ZeroConf, // Byte 7 - Trampoline | SimpleClose | SpliceProduction | SplicePrototype, + Trampoline | SimpleClose | Splice, // Byte 8 - 16 ,,,,,,,,, // Byte 17 @@ -195,7 +195,7 @@ mod sealed { // Byte 6 ZeroConf | Keysend, // Byte 7 - Trampoline | SimpleClose | SpliceProduction | SplicePrototype, + Trampoline | SimpleClose | Splice, // Byte 8 - 16 ,,,,,,,,, // Byte 17 @@ -687,14 +687,14 @@ mod sealed { ); define_feature!( 63, - SpliceProduction, + Splice, [InitContext, NodeContext], "Feature flags for channel splicing.", - set_splicing_production_optional, - set_splicing_production_required, - clear_splicing_production, - supports_splicing_production, - requires_splicing_production + set_splicing_optional, + set_splicing_required, + clear_splicing, + supports_splicing, + requires_splicing ); // By default, allocate enough bytes to cover up to Splice. Update this as new features are // added which we expect to appear commonly across contexts. @@ -721,17 +721,6 @@ mod sealed { supports_htlc_hold, requires_htlc_hold ); - define_feature!( - 63, // Actually the SpliceProduction feature - SplicePrototype, - [InitContext, NodeContext], - "Feature flags for channel splicing.", - set_splicing_optional, - set_splicing_required, - clear_splicing, - supports_splicing, - requires_splicing - ); define_feature!( 259, DnsResolver, From b6bd3866fb3824f48b85f7875541f9027c511b44 Mon Sep 17 00:00:00 2001 From: elnosh Date: Thu, 5 Feb 2026 17:48:47 -0500 Subject: [PATCH 187/242] Fix off-by-one for unfunded channel peers When accepting channels manually, it would fail if the number of peers without funded channels was equal to `MAX_UNFUNDED_CHANNEL_PEERS`; however, it should only fail once the number of such peers exceeds `MAX_UNFUNDED_CHANNEL_PEERS`. --- lightning/src/ln/channel_open_tests.rs | 2 +- lightning/src/ln/channelmanager.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel_open_tests.rs b/lightning/src/ln/channel_open_tests.rs index 3a9c266aacd..572aa0c8176 100644 --- a/lightning/src/ln/channel_open_tests.rs +++ b/lightning/src/ln/channel_open_tests.rs @@ -110,7 +110,7 @@ fn test_0conf_limiting() { }; // First, get us up to MAX_UNFUNDED_CHANNEL_PEERS so we can test at the edge - for _ in 0..MAX_UNFUNDED_CHANNEL_PEERS - 1 { + for _ in 0..MAX_UNFUNDED_CHANNEL_PEERS { let random_pk = PublicKey::from_secret_key( &nodes[0].node.secp_ctx, &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap(), diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 532514a3ae9..1e6dae5beb1 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10569,7 +10569,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // If this peer already has some channels, a new channel won't increase our number of peers // with unfunded channels, so as long as we aren't over the maximum number of unfunded // channels per-peer we can accept channels from a peer with existing ones.
- if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS { + if is_only_peer_channel && peers_without_funded_channels > MAX_UNFUNDED_CHANNEL_PEERS { let send_msg_err_event = MessageSendEvent::HandleError { node_id: channel.context().get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { From cad035db3abe3a49efa6f9b20bd984379902f44c Mon Sep 17 00:00:00 2001 From: elnosh Date: Wed, 28 Jan 2026 14:41:37 -0500 Subject: [PATCH 188/242] Remove automatic channel acceptance Removes the `manually_accept_inbound_channels` config option. In an upcoming commit we will default to anchor channels, which requires users to check that they have enough onchain funds to cover fees in case of a force close. Hence, we now always require users to manually accept inbound channels. --- fuzz/src/chanmon_consistency.rs | 48 +++-- fuzz/src/full_stack.rs | 23 ++- lightning-background-processor/src/lib.rs | 19 ++ .../tests/lsps2_integration_tests.rs | 4 - lightning/src/events/mod.rs | 55 ++---- lightning/src/ln/async_payments_tests.rs | 1 - lightning/src/ln/async_signer_tests.rs | 22 ++- lightning/src/ln/chanmon_update_fail_tests.rs | 8 +- lightning/src/ln/channel.rs | 1 - lightning/src/ln/channel_open_tests.rs | 174 +++++++---------- lightning/src/ln/channel_state.rs | 7 +- lightning/src/ln/channelmanager.rs | 177 +++++++----------- lightning/src/ln/functional_test_utils.rs | 39 ++-- lightning/src/ln/functional_tests.rs | 26 +-- lightning/src/ln/htlc_reserve_unit_tests.rs | 4 +- lightning/src/ln/invoice_utils.rs | 4 +- lightning/src/ln/monitor_tests.rs | 15 -- lightning/src/ln/payment_tests.rs | 3 - lightning/src/ln/priv_short_conf_tests.rs | 56 +----- lightning/src/ln/reload_tests.rs | 2 +- lightning/src/ln/reorg_tests.rs | 15 +- lightning/src/ln/shutdown_tests.rs | 25 ++- lightning/src/ln/splicing_tests.rs | 1 - lightning/src/ln/update_fee_tests.rs | 15 +- lightning/src/ln/zero_fee_commitment_tests.rs | 3 - lightning/src/util/config.rs | 42 ++--- 26 files changed, 323 insertions(+), 466 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 5fb07431b17..30b95c2095f 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -712,7 +712,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { config.reject_inbound_splices = false; if anchors { config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - config.manually_accept_inbound_channels = true; } let network = Network::Bitcoin; let best_block_timestamp = genesis_block(network).header.time; @@ -763,7 +762,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { config.reject_inbound_splices = false; if anchors { config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - config.manually_accept_inbound_channels = true; } let mut monitors = new_hash_map(); @@ -874,30 +872,28 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { $dest.handle_open_channel($source.get_our_node_id(), &open_channel); let accept_channel = { - if anchors { - let events = $dest.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let events::Event::OpenChannelRequest { - ref temporary_channel_id, - ref counterparty_node_id, - ..
- } = events[0] - { - let mut random_bytes = [0u8; 16]; - random_bytes - .copy_from_slice(&$dest_keys_manager.get_secure_random_bytes()[..16]); - let user_channel_id = u128::from_be_bytes(random_bytes); - $dest - .accept_inbound_channel( - temporary_channel_id, - counterparty_node_id, - user_channel_id, - None, - ) - .unwrap(); - } else { - panic!("Wrong event type"); - } + let events = $dest.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let events::Event::OpenChannelRequest { + ref temporary_channel_id, + ref counterparty_node_id, + .. + } = events[0] + { + let mut random_bytes = [0u8; 16]; + random_bytes + .copy_from_slice(&$dest_keys_manager.get_secure_random_bytes()[..16]); + let user_channel_id = u128::from_be_bytes(random_bytes); + $dest + .accept_inbound_channel( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + None, + ) + .unwrap(); + } else { + panic!("Wrong event type"); } let events = $dest.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index 600335b5083..3bcc9fcb9df 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -42,15 +42,13 @@ use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen}; use lightning::events::bump_transaction::sync::WalletSourceSync; use lightning::events::Event; use lightning::ln::channel_state::ChannelDetails; -use lightning::ln::channelmanager::{ - ChainParameters, ChannelManager, InterceptId, PaymentId, -}; +use lightning::ln::channelmanager::{ChainParameters, ChannelManager, InterceptId, PaymentId}; use lightning::ln::functional_test_utils::*; use lightning::ln::inbound_payment::ExpandedKey; +use lightning::ln::outbound_payment::{RecipientOnionFields, Retry}; use lightning::ln::peer_handler::{ IgnoringMessageHandler, MessageHandler, PeerManager, SocketDescriptor, }; -use lightning::ln::outbound_payment::{RecipientOnionFields, Retry}; use lightning::ln::script::ShutdownScript; use lightning::ln::types::ChannelId; use lightning::offers::invoice::UnsignedBolt12Invoice; @@ -1122,6 +1120,17 @@ pub fn do_test(mut data: &[u8], logger: &Arc) { Event::SpliceFailed { .. } => { // Splice failed, inputs can be re-spent }, + Event::OpenChannelRequest { + temporary_channel_id, counterparty_node_id, .. 
+ } => {
+ let _ = loss_detector.manager.accept_inbound_channel(
+ &temporary_channel_id,
+ &counterparty_node_id,
+ 0,
+ None,
+ );
+ loss_detector.handler.process_events();
+ },
 _ => {},
 }
 }
@@ -1159,7 +1168,7 @@ fn two_peer_forwarding_seed() -> Vec<u8> {
 // our network key
 ext_from_hex("0100000000000000000000000000000000000000000000000000000000000000", &mut test);
 // config
- ext_from_hex("000000000090000000000000000064000100000000000100ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff000100000000000000", &mut test);
+ ext_from_hex("000000000090000000000000000064000100000000000100ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000000000", &mut test);
 // new outbound connection with id 0
 ext_from_hex("00", &mut test);
@@ -1613,7 +1622,7 @@ fn gossip_exchange_seed() -> Vec<u8> {
 // our network key
 ext_from_hex("0100000000000000000000000000000000000000000000000000000000000000", &mut test);
 // config
- ext_from_hex("000000000090000000000000000064000100000000000100ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff000100000000000000", &mut test);
+ ext_from_hex("000000000090000000000000000064000100000000000100ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000000000", &mut test);
 // new outbound connection with id 0
 ext_from_hex("00", &mut test);
@@ -1695,7 +1704,7 @@ fn splice_seed() -> Vec<u8> {
 // our network key
 ext_from_hex("0100000000000000000000000000000000000000000000000000000000000000", &mut test);
 // config
- ext_from_hex("000000000090000000000000000064000100000000000100ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff000100000000000000", &mut test);
+ ext_from_hex("000000000090000000000000000064000100000000000100ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000000000", &mut test);
 // new outbound connection with id 0
 ext_from_hex("00", &mut test);
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index bb99d65e6b5..659e28114e7 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -2610,6 +2610,25 @@ mod tests {
 $node_b.node.get_our_node_id()
 );
 $node_b.node.handle_open_channel($node_a.node.get_our_node_id(), &msg_a);
+ let events = $node_b.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ Event::OpenChannelRequest {
+ temporary_channel_id, counterparty_node_id, ..
+ } => {
+ $node_b
+ .node
+ .accept_inbound_channel(
+ temporary_channel_id,
+ counterparty_node_id,
+ 42,
+ None,
+ )
+ .unwrap();
+ },
+ _ => panic!("Unexpected event"),
+ };
+
 let msg_b = get_event_msg!(
 $node_b,
 MessageSendEvent::SendAcceptChannel,
diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs
index 312199e19ec..33a6dd697cf 100644
--- a/lightning-liquidity/tests/lsps2_integration_tests.rs
+++ b/lightning-liquidity/tests/lsps2_integration_tests.rs
@@ -1155,7 +1155,6 @@ fn client_trusts_lsp_end_to_end_test() {
 service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8;
 let mut client_node_config = test_default_channel_config();
- client_node_config.manually_accept_inbound_channels = true;
 client_node_config.channel_config.accept_underpaying_htlcs = true;
 let node_chanmgrs = create_node_chanmgrs(
 3,
@@ -1627,7 +1626,6 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() {
 service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8;
 let mut client_node_config = test_default_channel_config();
- client_node_config.manually_accept_inbound_channels = true;
 client_node_config.channel_config.accept_underpaying_htlcs = true;
 let node_chanmgrs = create_node_chanmgrs(
@@ -1817,7 +1815,6 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() {
 service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8;
 let mut client_node_config = test_default_channel_config();
- client_node_config.manually_accept_inbound_channels = true;
 client_node_config.channel_config.accept_underpaying_htlcs = true;
 let node_chanmgrs = create_node_chanmgrs(
@@ -2152,7 +2149,6 @@ fn client_trusts_lsp_partial_fee_does_not_trigger_broadcast() {
 service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8;
 let mut client_node_config = test_default_channel_config();
- client_node_config.manually_accept_inbound_channels = true;
 client_node_config.channel_config.accept_underpaying_htlcs = true;
 let node_chanmgrs = create_node_chanmgrs(
diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index 3d860e9f363..60d934cf199 100644
--- a/lightning/src/events/mod.rs
+++ b/lightning/src/events/mod.rs
@@ -245,9 +245,7 @@ pub struct ClaimedHTLC {
 pub channel_id: ChannelId,
 /// The `user_channel_id` of the channel over which the HTLC was received. This is the value
 /// passed in to [`ChannelManager::create_channel`] for outbound channels, or to
- /// [`ChannelManager::accept_inbound_channel`] for inbound channels if
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be randomized for an inbound channel.
+ /// [`ChannelManager::accept_inbound_channel`] for inbound channels.
 ///
 /// This field will be zero for a payment that was serialized prior to LDK version 0.0.117. (This
 /// should only happen in the case that a payment was claimable prior to LDK version 0.0.117, but
@@ -255,7 +253,6 @@ pub struct ClaimedHTLC {
 ///
 /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
 /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
 pub user_channel_id: u128,
 /// The block height at which this HTLC expires.
 pub cltv_expiry: u32,
@@ -765,14 +762,11 @@ pub enum Event {
 /// The script which should be used in the transaction output.
 output_script: ScriptBuf,
 /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
- /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be randomized for an inbound channel. This may be zero for objects
- /// serialized with LDK versions prior to 0.0.113.
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels.
+ /// This may be zero for objects serialized with LDK versions prior to 0.0.113.
 ///
 /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
 /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
 user_channel_id: u128,
 },
 /// Used to indicate that the counterparty node has provided the signature(s) required to
@@ -1404,13 +1398,10 @@ pub enum Event {
 /// The `channel_id` of the channel that is pending confirmation.
 channel_id: ChannelId,
 /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
- /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be randomized for an inbound channel.
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels.
 ///
 /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
 /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
 user_channel_id: u128,
 /// The `temporary_channel_id` this channel used to be known by during channel establishment.
 ///
@@ -1444,13 +1435,10 @@ pub enum Event {
 /// The `channel_id` of the channel that is ready.
 channel_id: ChannelId,
 /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
- /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be randomized for an inbound channel.
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels.
 ///
 /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
 /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
 user_channel_id: u128,
 /// The `node_id` of the channel counterparty.
 counterparty_node_id: PublicKey,
@@ -1466,11 +1454,10 @@ pub enum Event {
 /// process of closure. This includes previously opened channels, and channels that time out from not being funded.
 ///
 /// Note that this event is only triggered for accepted channels: if the
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true and the channel is
- /// rejected, no `ChannelClosed` event will be sent.
+ /// [`Event::OpenChannelRequest`] was rejected, no `ChannelClosed` event will be sent.
 ///
 /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
+ /// [`Event::OpenChannelRequest`]: Event::OpenChannelRequest
 ///
 /// # Failure Behavior and Persistence
 /// This event will eventually be replayed after failures-to-handle (i.e., the event handler
@@ -1480,15 +1467,12 @@ pub enum Event {
 /// resolving the channel are likely still awaiting confirmation.
 channel_id: ChannelId,
 /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
- /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be randomized for inbound channels.
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels.
 /// This may be zero for inbound channels serialized prior to 0.0.113 and will always be
 /// zero for objects serialized with LDK versions prior to 0.0.102.
 ///
 /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
 /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
 user_channel_id: u128,
 /// The reason the channel was closed.
 reason: ClosureReason,
@@ -1532,13 +1516,10 @@ pub enum Event {
 /// The `channel_id` of the channel that has a pending splice funding transaction.
 channel_id: ChannelId,
 /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
- /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be randomized for an inbound channel.
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels.
 ///
 /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
 /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
 user_channel_id: u128,
 /// The `node_id` of the channel counterparty.
 counterparty_node_id: PublicKey,
@@ -1565,13 +1546,10 @@ pub enum Event {
 /// The `channel_id` of the channel for which the splice failed.
 channel_id: ChannelId,
 /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
- /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be randomized for an inbound channel.
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels.
 ///
 /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
 /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
 user_channel_id: u128,
 /// The `node_id` of the channel counterparty.
 counterparty_node_id: PublicKey,
@@ -1604,14 +1582,12 @@ pub enum Event {
 },
 /// Indicates a request to open a new channel by a peer.
 ///
+ /// This event is triggered for all inbound requests to open a new channel.
 /// To accept the request (and in the case of a dual-funded channel, not contribute funds),
 /// call [`ChannelManager::accept_inbound_channel`].
 /// To reject the request, call [`ChannelManager::force_close_broadcasting_latest_txn`].
 /// Note that a [`ChannelClosed`] event will _not_ be triggered if the channel is rejected.
 ///
- /// The event is only triggered when a new open channel request is received and the
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true.
- ///
 /// # Failure Behavior and Persistence
 /// This event will eventually be replayed after failures-to-handle (i.e., the event handler
 /// returning `Err(ReplayEvent ())`) and won't be persisted across restarts.
@@ -1619,7 +1595,6 @@
 /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
 /// [`ChannelClosed`]: Event::ChannelClosed
 /// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
 OpenChannelRequest {
 /// The temporary channel ID of the channel requested to be opened.
 ///
@@ -1861,11 +1836,11 @@ pub enum Event {
 ///
 /// [`ChannelManager::funding_transaction_signed`]: crate::ln::channelmanager::ChannelManager::funding_transaction_signed
 counterparty_node_id: PublicKey,
- /// The `user_channel_id` value passed in for outbound channels, or for inbound channels if
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be randomized for inbound channels.
+ /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels.
 ///
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
+ /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
+ /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
 user_channel_id: u128,
 /// The unsigned transaction to be signed and passed back to
 /// [`ChannelManager::funding_transaction_signed`].
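With manual acceptance now the only path for inbound channels, every consumer's event handler needs an `Event::OpenChannelRequest` arm. The following is a minimal sketch of such an arm and is not part of the patch itself: `ChanMgr` stands in for the consumer's fully parameterized `ChannelManager` type alias, the funding threshold and strings are illustrative only, and `force_close_broadcasting_latest_txn` is assumed to take its current error-message argument.

    use lightning::events::Event;

    // `ChanMgr` is a stand-in alias for the node's concrete `ChannelManager` type.
    fn handle_event(channel_manager: &ChanMgr, event: Event) {
        match event {
            Event::OpenChannelRequest {
                temporary_channel_id, counterparty_node_id, funding_satoshis, ..
            } => {
                // Illustrative policy: accept anything of at least 100k sats.
                if funding_satoshis >= 100_000 {
                    // 42 is the caller-chosen `user_channel_id`; it is echoed back in
                    // later events such as `ChannelPending` and `ChannelReady`.
                    channel_manager
                        .accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, 42, None)
                        .expect("request should still be pending");
                } else {
                    // Rejecting emits no `ChannelClosed` event, per the docs above.
                    let reason = "inbound channel below minimum size".to_string();
                    let _ = channel_manager.force_close_broadcasting_latest_txn(
                        &temporary_channel_id,
                        &counterparty_node_id,
                        reason,
                    );
                }
            },
            _ => {},
        }
    }
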
diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs
index 528cec44c00..8a991b1d98d 100644
--- a/lightning/src/ln/async_payments_tests.rs
+++ b/lightning/src/ln/async_payments_tests.rs
@@ -3057,7 +3057,6 @@ fn intercepted_hold_htlc() {
 let chanmon_cfgs = create_chanmon_cfgs(4);
 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
 let (sender_cfg, mut recipient_cfg) = (often_offline_node_cfg(), often_offline_node_cfg());
- recipient_cfg.manually_accept_inbound_channels = true;
 recipient_cfg.channel_handshake_limits.force_announced_channel_preference = false;
 
 let mut lsp_cfg = test_default_channel_config();
diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs
index 53187c14168..adaf976c7d9 100644
--- a/lightning/src/ln/async_signer_tests.rs
+++ b/lightning/src/ln/async_signer_tests.rs
@@ -37,12 +37,9 @@ fn test_open_channel() {
 fn do_test_open_channel(zero_conf: bool) {
 // Simulate acquiring the commitment point for `open_channel` and `accept_channel` asynchronously.
- let mut manually_accept_config = test_default_channel_config();
- manually_accept_config.manually_accept_inbound_channels = zero_conf;
-
 let chanmon_cfgs = create_chanmon_cfgs(2);
 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_config)]);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 let node_a_id = nodes[0].node.get_our_node_id();
 let node_b_id = nodes[1].node.get_our_node_id();
@@ -89,8 +86,15 @@ fn do_test_open_channel(zero_conf: bool) {
 ev => panic!("Expected OpenChannelRequest, not {:?}", ev),
 }
 } else {
- let msgs = nodes[1].node.get_and_clear_pending_msg_events();
- assert!(msgs.is_empty(), "Expected no message events; got {:?}", msgs);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1, "Expected one event, got {}", events.len());
+ match &events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => nodes[1]
+ .node
+ .accept_inbound_channel(temporary_channel_id, &node_a_id, 0, None)
+ .unwrap(),
+ ev => panic!("Expected OpenChannelRequest, not {:?}", ev),
+ }
 }
 
 let channel_id_1 = {
@@ -131,7 +135,7 @@ fn do_test_funding_created(signer_ops: Vec<SignerOp>) {
 // nodes[0] --- open_channel --> nodes[1]
 let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan_msg);
 
 // nodes[0] <-- accept_channel --- nodes[1]
 nodes[0].node.handle_accept_channel(
@@ -208,7 +212,7 @@ fn do_test_funding_signed(signer_ops: Vec<SignerOp>) {
 // nodes[0] --- open_channel --> nodes[1]
 let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan_msg);
 
 // nodes[0] <-- accept_channel --- nodes[1]
 nodes[0].node.handle_accept_channel(
@@ -365,7 +369,6 @@ fn test_funding_signed_0conf() {
 fn do_test_funding_signed_0conf(signer_ops: Vec<SignerOp>) {
 // Simulate acquiring the signature for `funding_signed` asynchronously for a zero-conf channel.
 let mut manually_accept_config = test_default_channel_config();
- manually_accept_config.manually_accept_inbound_channels = true;
 
 let chanmon_cfgs = create_chanmon_cfgs(2);
 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -999,7 +1002,6 @@ fn do_test_async_holder_signatures(keyed_anchors: bool, p2a_anchor: bool, remote
 let mut config = test_default_channel_config();
 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors;
 config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor;
- config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor;
 
 let chanmon_cfgs = create_chanmon_cfgs(2);
 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 3fa2073d5ba..6cf7bcecebc 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -2129,10 +2129,8 @@ fn do_during_funding_monitor_fail(
 let node_b_id = nodes[1].node.get_our_node_id();
 
 nodes[0].node.create_channel(node_b_id, 100000, 10001, 43, None, None).unwrap();
- nodes[1].node.handle_open_channel(
- node_a_id,
- &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id),
- );
+ let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_msg);
 nodes[0].node.handle_accept_channel(
 node_b_id,
 &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id),
@@ -3218,7 +3216,6 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
 let new_chain_monitor;
 
 let mut chan_config = test_default_channel_config();
- chan_config.manually_accept_inbound_channels = true;
 chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
 
 let node_chanmgrs =
@@ -3328,7 +3325,6 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo
 let new_chain_monitor;
 
 let mut chan_config = test_default_channel_config();
- chan_config.manually_accept_inbound_channels = true;
 chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
 
 let node_chanmgrs =
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 042b388e1a1..be86d329efc 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -17516,7 +17516,6 @@ mod tests {
 // Node id for alice and bob doesn't matter to our test vectors.
 let bob_node_id = crate::util::test_utils::pubkey(2);
 let mut config = UserConfig::default();
- config.manually_accept_inbound_channels = true;
 config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true;
 
 let mut chan = OutboundV1Channel::<&Keys>::new(
diff --git a/lightning/src/ln/channel_open_tests.rs b/lightning/src/ln/channel_open_tests.rs
index 572aa0c8176..7c2a51ea360 100644
--- a/lightning/src/ln/channel_open_tests.rs
+++ b/lightning/src/ln/channel_open_tests.rs
@@ -62,7 +62,7 @@ fn test_outbound_chans_unlimited() {
 let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b);
 for _ in 0..MAX_UNFUNDED_CHANS_PER_PEER {
- nodes[1].node.handle_open_channel(node_a, &open_channel_msg);
+ handle_and_accept_open_channel(&nodes[1], node_a, &open_channel_msg);
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a);
 open_channel_msg.common_fields.temporary_channel_id =
 ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
@@ -90,13 +90,10 @@ fn test_outbound_chans_unlimited() {
 #[test]
 fn test_0conf_limiting() {
- // Tests that we properly limit inbound channels when we have the manual-channel-acceptance
- // flag set and (sometimes) accept channels as 0conf.
+ // Tests that we properly limit inbound channels while (sometimes) accepting them as 0conf.
 let chanmon_cfgs = create_chanmon_cfgs(2);
 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let mut settings = test_default_channel_config();
- settings.manually_accept_inbound_channels = true;
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
 // Note that create_network connects the nodes together for us
@@ -117,17 +114,7 @@ fn test_0conf_limiting() {
 );
 nodes[1].node.peer_connected(random_pk, init_msg, true).unwrap();
 
- nodes[1].node.handle_open_channel(random_pk, &open_channel_msg);
- let events = nodes[1].node.get_and_clear_pending_events();
- match events[0] {
- Event::OpenChannelRequest { temporary_channel_id, .. } => {
- nodes[1]
- .node
- .accept_inbound_channel(&temporary_channel_id, &random_pk, 23, None)
- .unwrap();
- },
- _ => panic!("Unexpected event"),
- }
+ handle_and_accept_open_channel(&nodes[1], random_pk, &open_channel_msg);
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
 open_channel_msg.common_fields.temporary_channel_id =
 ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
@@ -191,7 +178,7 @@ fn test_inbound_anchors_manual_acceptance() {
 }
 
 #[test]
-fn test_inbound_anchors_manual_acceptance_overridden() {
+fn test_inbound_anchors_config_overridden() {
 let overrides = ChannelConfigOverrides {
 handshake_overrides: Some(ChannelHandshakeConfigUpdate {
 max_inbound_htlc_value_in_flight_percent_of_channel: Some(5),
@@ -226,15 +213,12 @@ fn test_inbound_zero_fee_commitments_manual_acceptance() {
 fn do_test_manual_inbound_accept_with_override(
 start_cfg: UserConfig, config_overrides: Option<ChannelConfigOverrides>,
 ) -> AcceptChannel {
- let mut mannual_accept_cfg = start_cfg.clone();
- mannual_accept_cfg.manually_accept_inbound_channels = true;
-
 let chanmon_cfgs = create_chanmon_cfgs(3);
 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 let node_chanmgrs = create_node_chanmgrs(
 3,
 &node_cfgs,
- &[Some(start_cfg.clone()), Some(start_cfg.clone()), Some(mannual_accept_cfg.clone())],
+ &[Some(start_cfg.clone()), Some(start_cfg.clone()), Some(start_cfg)],
 );
 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
@@ -243,22 +227,6 @@ fn do_test_manual_inbound_accept_with_override(
 nodes[0].node.create_channel(node_b, 100_000, 0, 42, None, None).unwrap();
 let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b);
 
- nodes[1].node.handle_open_channel(node_a, &open_channel_msg);
- assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
- let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
- match &msg_events[0] {
- MessageSendEvent::HandleError { node_id, action } => {
- assert_eq!(*node_id, node_a);
- match action {
- ErrorAction::SendErrorMessage { msg } => {
- assert_eq!(msg.data, "No channels with anchor outputs accepted".to_owned())
- },
- _ => panic!("Unexpected error action"),
- }
- },
- _ => panic!("Unexpected event"),
- }
-
 nodes[2].node.handle_open_channel(node_a, &open_channel_msg);
 let events = nodes[2].node.get_and_clear_pending_events();
 match events[0] {
@@ -281,7 +249,6 @@ fn test_anchors_zero_fee_htlc_tx_downgrade() {
 let mut receiver_cfg = test_default_channel_config();
 receiver_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
- receiver_cfg.manually_accept_inbound_channels = true;
 
 let start_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
 let end_type = ChannelTypeFeatures::only_static_remote_key();
@@ -303,7 +270,6 @@ fn test_scid_privacy_downgrade() {
 receiver_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true;
 receiver_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
 receiver_cfg.channel_handshake_config.negotiate_scid_privacy = true;
- receiver_cfg.manually_accept_inbound_channels = true;
 
 let mut start_type = ChannelTypeFeatures::anchors_zero_fee_commitments();
 start_type.set_scid_privacy_required();
@@ -328,7 +294,6 @@ fn test_zero_fee_commitments_downgrade() {
 let mut receiver_cfg = test_default_channel_config();
 receiver_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true;
 receiver_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
- receiver_cfg.manually_accept_inbound_channels = true;
 
 let start_type = ChannelTypeFeatures::anchors_zero_fee_commitments();
 let downgrade_types = vec![
@@ -348,7 +313,6 @@ fn test_zero_fee_commitments_downgrade_to_static_remote() {
 let mut receiver_cfg = test_default_channel_config();
 receiver_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true;
- receiver_cfg.manually_accept_inbound_channels = true;
 
 let start_type = ChannelTypeFeatures::anchors_zero_fee_commitments();
 let end_type = ChannelTypeFeatures::only_static_remote_key();
@@ -409,7 +373,6 @@ fn test_no_channel_downgrade() {
 let initiator_cfg = test_default_channel_config();
 let mut receiver_cfg = test_default_channel_config();
 receiver_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
- receiver_cfg.manually_accept_inbound_channels = true;
 
 let chanmon_cfgs = create_chanmon_cfgs(2);
 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -469,7 +432,7 @@ fn test_channel_resumption_fail_post_funding() {
 nodes[0].node.create_channel(node_b_id, 1_000_000, 0, 42, None, None).unwrap();
 let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(node_a_id, &open_chan);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan);
 let accept_chan = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 nodes[0].node.handle_accept_channel(node_b_id, &accept_chan);
@@ -529,6 +492,22 @@ pub fn test_insane_channel_opens() {
 |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
 let open_channel_mutated = message_mutator(open_channel_message.clone());
 nodes[1].node.handle_open_channel(node_a_id, &open_channel_mutated);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest {
+ temporary_channel_id, counterparty_node_id, ..
+ } => match nodes[1].node.accept_inbound_channel(
+ &temporary_channel_id,
+ &counterparty_node_id,
+ 42,
+ None,
+ ) {
+ Err(_) => {},
+ _ => panic!(),
+ },
+ _ => panic!("Unexpected event"),
+ }
+
 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 assert_eq!(msg_events.len(), 1);
 let expected_regex = regex::Regex::new(expected_error_str).unwrap();
@@ -625,7 +604,6 @@ pub fn test_insane_channel_opens() {
 #[xtest(feature = "_externalize_tests")]
 fn test_insane_zero_fee_channel_open() {
 let mut cfg = UserConfig::default();
- cfg.manually_accept_inbound_channels = true;
 cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true;
 
 let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -746,7 +724,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
 if steps & 0x0f == 1 {
 return;
 }
- nodes[1].node.handle_open_channel(node_a_id, &open_channel);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel);
 let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 
 if steps & 0x0f == 2 {
@@ -925,17 +903,24 @@ pub fn bolt2_open_channel_sane_dust_limit() {
 node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
 nodes[1].node.handle_open_channel(node_a_id, &node0_to_1_send_open_channel);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- let err_msg = match events[0] {
- MessageSendEvent::HandleError {
- action: ErrorAction::SendErrorMessage { ref msg }, ..
- } => msg.clone(),
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => match nodes
+ [1]
+ .node
+ .accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, 42, None)
+ {
+ Err(APIError::ChannelUnavailable { err }) => assert_eq!(
+ err,
+ "dust_limit_satoshis (547) is greater than the implementation limit (546)"
+ ),
+ _ => panic!(),
+ },
 _ => panic!("Unexpected event"),
- };
- assert_eq!(
- err_msg.data,
- "dust_limit_satoshis (547) is greater than the implementation limit (546)"
- );
+ }
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ assert!(matches!(events[0], MessageSendEvent::HandleError { .. }));
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -1022,7 +1007,7 @@ pub fn test_user_configurable_csv_delay() {
 // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Chanel::accept_channel()
 nodes[0].node.create_channel(node_b_id, 1000000, 1000000, 42, None, None).unwrap();
 let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(node_a_id, &open_channel);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel);
 let mut accept_channel =
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
@@ -1078,24 +1063,19 @@ pub fn test_user_configurable_csv_delay() {
 }
 
 #[xtest(feature = "_externalize_tests")]
-pub fn test_manually_accept_inbound_channel_request() {
- let mut manually_accept_conf = UserConfig::default();
- manually_accept_conf.manually_accept_inbound_channels = true;
- manually_accept_conf.channel_handshake_config.minimum_depth = 1;
+pub fn test_accept_inbound_channel_config_override() {
+ let mut conf = UserConfig::default();
+ conf.channel_handshake_config.minimum_depth = 1;
 let chanmon_cfgs = create_chanmon_cfgs(2);
 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs =
- create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(conf.clone())]);
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
 let node_a_id = nodes[0].node.get_our_node_id();
 let node_b_id = nodes[1].node.get_our_node_id();
 
- nodes[0]
- .node
- .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf))
- .unwrap();
+ nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, Some(conf)).unwrap();
 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
 
 nodes[1].node.handle_open_channel(node_a_id, &res);
@@ -1203,28 +1183,19 @@ pub fn test_manually_accept_inbound_channel_request() {
 }
 
 #[xtest(feature = "_externalize_tests")]
-pub fn test_manually_reject_inbound_channel_request() {
- let mut manually_accept_conf = UserConfig::default();
- manually_accept_conf.manually_accept_inbound_channels = true;
+pub fn test_reject_inbound_channel_request() {
 let chanmon_cfgs = create_chanmon_cfgs(2);
 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs =
- create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
 let node_a_id = nodes[0].node.get_our_node_id();
 let node_b_id = nodes[1].node.get_our_node_id();
 
- nodes[0]
- .node
- .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf))
- .unwrap();
+ nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap();
 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
 
 nodes[1].node.handle_open_channel(node_a_id, &res);
-
- // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
- // rejecting the inbound channel request.
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
 let err = "Channel force-closed".to_string();
 let events = nodes[1].node.get_and_clear_pending_events();
@@ -1254,29 +1225,19 @@ pub fn test_manually_reject_inbound_channel_request() {
 
 #[xtest(feature = "_externalize_tests")]
 pub fn test_can_not_accept_inbound_channel_twice() {
- let mut manually_accept_conf = UserConfig::default();
- manually_accept_conf.manually_accept_inbound_channels = true;
 let chanmon_cfgs = create_chanmon_cfgs(2);
 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs =
- create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
 let node_a_id = nodes[0].node.get_our_node_id();
 let node_b_id = nodes[1].node.get_our_node_id();
 
- nodes[0]
- .node
- .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf))
- .unwrap();
+ nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap();
 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
 
 nodes[1].node.handle_open_channel(node_a_id, &res);
-
- // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
- // accepting the inbound channel request.
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
 let events = nodes[1].node.get_and_clear_pending_events();
 match events[0] {
 Event::OpenChannelRequest { temporary_channel_id, .. } => {
@@ -1359,7 +1320,7 @@ pub fn test_duplicate_temporary_channel_id_from_different_peers() {
 // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
 // `temporary_channel_id` as they are from different peers.
- nodes[0].node.handle_open_channel(node_b_id, &open_chan_msg_chan_1_0);
+ handle_and_accept_open_channel(&nodes[0], node_b_id, &open_chan_msg_chan_1_0);
 {
 let events = nodes[0].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
@@ -1375,7 +1336,7 @@ pub fn test_duplicate_temporary_channel_id_from_different_peers() {
 }
 }
 
- nodes[0].node.handle_open_channel(node_c_id, &open_chan_msg_chan_2_0);
+ handle_and_accept_open_channel(&nodes[0], node_c_id, &open_chan_msg_chan_2_0);
 {
 let events = nodes[0].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
@@ -1416,7 +1377,8 @@ pub fn test_duplicate_funding_err_in_funding() {
 let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, node_b_id);
 let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
 open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
- nodes[1].node.handle_open_channel(node_c_id, &open_chan_msg);
+ handle_and_accept_open_channel(&nodes[1], node_c_id, &open_chan_msg);
+
 let mut accept_chan_msg =
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_c_id);
 accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
@@ -1461,7 +1423,7 @@ pub fn test_duplicate_chan_id() {
 // Create an initial channel
 nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap();
 let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan_msg);
 nodes[0].node.handle_accept_channel(
 node_b_id,
 &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id),
@@ -1548,7 +1510,7 @@ pub fn test_duplicate_chan_id() {
 // Now try to create a second channel which has a duplicate funding output.
 nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap();
 let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(node_a_id, &open_chan_2_msg);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan_2_msg);
 nodes[0].node.handle_accept_channel(
 node_b_id,
 &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id),
@@ -1652,10 +1614,8 @@ pub fn test_invalid_funding_tx() {
 let node_b_id = nodes[1].node.get_our_node_id();
 
 nodes[0].node.create_channel(node_b_id, 100_000, 10_000, 42, None, None).unwrap();
- nodes[1].node.handle_open_channel(
- node_a_id,
- &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id),
- );
+ let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_msg);
 nodes[0].node.handle_accept_channel(
 node_b_id,
 &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id),
@@ -1769,10 +1729,9 @@ pub fn test_coinbase_funding_tx() {
 nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap();
 let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel);
 
- nodes[1].node.handle_open_channel(node_a_id, &open_channel);
 let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
-
 nodes[0].node.handle_accept_channel(node_b_id, &accept_channel);
 
 // Create the coinbase funding transaction.
@@ -1831,7 +1790,8 @@ pub fn test_non_final_funding_tx() {
 nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap();
 let open_channel_message =
 get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(node_a_id, &open_channel_message);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_message);
+
 let accept_channel_message =
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message);
@@ -1890,7 +1850,7 @@ pub fn test_non_final_funding_tx_within_headroom() {
 nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap();
 let open_channel_message =
 get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(node_a_id, &open_channel_message);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_message);
 let accept_channel_message =
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message);
@@ -2364,7 +2324,6 @@ pub fn test_accept_inbound_channel_errors_queued() {
 let mut config0 = test_default_channel_config();
 let mut config1 = config0.clone();
 config1.channel_handshake_limits.their_to_self_delay = 1000;
- config1.manually_accept_inbound_channels = true;
 config0.channel_handshake_config.our_to_self_delay = 2000;
 
 let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -2410,8 +2369,8 @@ pub fn test_manual_funding_abandon() {
 assert!(nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).is_ok());
 let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel);
 
- nodes[1].node.handle_open_channel(node_a_id, &open_channel);
 let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 nodes[0].node.handle_accept_channel(node_b_id, &accept_channel);
 
@@ -2459,10 +2418,9 @@ pub fn test_funding_signed_event() {
 assert!(nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).is_ok());
 let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel);
 
- nodes[1].node.handle_open_channel(node_a_id, &open_channel);
 let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
-
 nodes[0].node.handle_accept_channel(node_b_id, &accept_channel);
 
 let (temp_channel_id, tx, funding_outpoint) =
 create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42);
diff --git a/lightning/src/ln/channel_state.rs b/lightning/src/ln/channel_state.rs
index 86e53ba3262..9f6a4b0c6a6 100644
--- a/lightning/src/ln/channel_state.rs
+++ b/lightning/src/ln/channel_state.rs
@@ -366,14 +366,11 @@ pub struct ChannelDetails {
 /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
 pub unspendable_punishment_reserve: Option<u64>,
 /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
- /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
- /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
- /// `user_channel_id` will be randomized for an inbound channel. This may be zero for objects
- /// serialized with LDK versions prior to 0.0.113.
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels.
+ /// This may be zero for objects serialized with LDK versions prior to 0.0.113.
 ///
 /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
 /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
- /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
 pub user_channel_id: u128,
 /// The currently negotiated fee rate denominated in satoshi per 1000 weight units,
 /// which is applied to commitment and HTLC transactions.
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 1e6dae5beb1..f38245b885a 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1582,10 +1582,10 @@ pub(super) struct PeerState {
 pub(super) channel_by_id: HashMap<ChannelId, Channel<SP>>,
 /// `temporary_channel_id` -> `InboundChannelRequest`.
 ///
- /// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
- /// the peer is the counterparty. If the channel is accepted, then the entry in this table is
- /// removed, and an InboundV1Channel is created and placed in the `inbound_v1_channel_by_id` table. If
- /// the channel is rejected, then the entry is simply removed.
+ /// Holds all unaccepted inbound channels where the peer is the counterparty.
+ /// If the channel is accepted, then the entry in this table is removed and a Channel is
+ /// created and placed in the `channel_by_id` table. If the channel is rejected, then
+ /// the entry is simply removed.
 pub(super) inbound_channel_request_by_id: HashMap<ChannelId, InboundChannelRequest>,
 /// The latest `InitFeatures` we heard from the peer.
 latest_features: InitFeatures,
@@ -2092,9 +2092,8 @@ impl<
 ///
 /// ## Accepting Channels
 ///
-/// Inbound channels are initiated by peers and are automatically accepted unless [`ChannelManager`]
-/// has [`UserConfig::manually_accept_inbound_channels`] set. In that case, the channel may be
-/// either accepted or rejected when handling [`Event::OpenChannelRequest`].
+/// Inbound channels are initiated by peers and must be manually accepted or rejected when
+/// handling [`Event::OpenChannelRequest`].
 ///
 /// ```
 /// # use bitcoin::secp256k1::PublicKey;
@@ -10382,7 +10381,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 )
 }
 
- /// Accepts a request to open a channel after a [`events::Event::OpenChannelRequest`], treating
+ /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`], treating
 /// it as confirmed immediately.
 ///
 /// The `user_channel_id` parameter will be provided back in
@@ -10672,8 +10671,9 @@
 num_unfunded_channels + peer.inbound_channel_request_by_id.len()
 }
 
- #[rustfmt::skip]
- fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: OpenChannelMessageRef<'_>) -> Result<(), MsgHandleErrInternal> {
+ fn internal_open_channel(
+ &self, counterparty_node_id: &PublicKey, msg: OpenChannelMessageRef<'_>,
+ ) -> Result<(), MsgHandleErrInternal> {
 let common_fields = match msg {
 OpenChannelMessageRef::V1(msg) => &msg.common_fields,
 OpenChannelMessageRef::V2(msg) => &msg.common_fields,
@@ -10684,49 +10684,37 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
 // likely to be lost on restart!
 if common_fields.chain_hash != self.chain_hash {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
- common_fields.temporary_channel_id));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "Unknown genesis block hash".to_owned(),
+ common_fields.temporary_channel_id,
+ ));
 }
 if !self.config.read().unwrap().accept_inbound_channels {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
- common_fields.temporary_channel_id));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "No inbound channels accepted".to_owned(),
+ common_fields.temporary_channel_id,
+ ));
 }
 
- // Get the number of peers with channels, but without funded ones. We don't care too much
- // about peers that never open a channel, so we filter by peers that have at least one
- // channel, and then limit the number of those with unfunded channels.
- let channeled_peers_without_funding =
- self.peers_without_funded_channels(|node| node.total_channel_count() > 0);
-
 let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex = per_peer_state.get(counterparty_node_id)
- .ok_or_else(|| {
- debug_assert!(false);
- MsgHandleErrInternal::send_err_msg_no_close(
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(
 format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
 common_fields.temporary_channel_id)
- })?;
+ })?;
 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 let peer_state = &mut *peer_state_lock;
 
- // If this peer already has some channels, a new channel won't increase our number of peers
- // with unfunded channels, so as long as we aren't over the maximum number of unfunded
- // channels per-peer we can accept channels from a peer with existing ones.
- if peer_state.total_channel_count() == 0 &&
- channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
- !self.config.read().unwrap().manually_accept_inbound_channels
- {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(
- "Have too many peers with unfunded channels, not accepting new ones".to_owned(),
- common_fields.temporary_channel_id));
- }
-
 let best_block_height = self.best_block.read().unwrap().height;
- if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
+ if Self::unfunded_channel_count(peer_state, best_block_height)
+ >= MAX_UNFUNDED_CHANS_PER_PEER
+ {
 return Err(MsgHandleErrInternal::send_err_msg_no_close(
 format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
- common_fields.temporary_channel_id));
+ common_fields.temporary_channel_id,
+ ));
 }
 
 let channel_id = common_fields.temporary_channel_id;
@@ -10734,20 +10722,20 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 if channel_exists {
 return Err(MsgHandleErrInternal::send_err_msg_no_close(
 "temporary_channel_id collision for the same peer!".to_owned(),
- common_fields.temporary_channel_id));
+ common_fields.temporary_channel_id,
+ ));
 }
 
- // We can get the channel type at this point already as we'll need it immediately in both the
- // manual and the automatic acceptance cases.
- let channel_type = channel::channel_type_from_open_channel(
- common_fields, &self.channel_type_features()
- ).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, common_fields.temporary_channel_id))?;
+ let channel_type =
+ channel::channel_type_from_open_channel(common_fields, &self.channel_type_features())
+ .map_err(|e| {
+ MsgHandleErrInternal::from_chan_no_close(e, common_fields.temporary_channel_id)
+ })?;
 
- // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
- if self.config.read().unwrap().manually_accept_inbound_channels {
- let mut pending_events = self.pending_events.lock().unwrap();
- let is_announced = (common_fields.channel_flags & 1) == 1;
- pending_events.push_back((events::Event::OpenChannelRequest {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ let is_announced = (common_fields.channel_flags & 1) == 1;
+ pending_events.push_back((
+ events::Event::OpenChannelRequest {
 temporary_channel_id: common_fields.temporary_channel_id,
 counterparty_node_id: *counterparty_node_id,
 funding_satoshis: common_fields.funding_satoshis,
@@ -10758,67 +10746,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 channel_type,
 is_announced,
 params: common_fields.channel_parameters(),
- }, None));
- peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
+ },
+ None,
+ ));
+ peer_state.inbound_channel_request_by_id.insert(
+ channel_id,
+ InboundChannelRequest {
 open_channel_msg: match msg {
 OpenChannelMessageRef::V1(msg) => OpenChannelMessage::V1(msg.clone()),
 OpenChannelMessageRef::V2(msg) => OpenChannelMessage::V2(msg.clone()),
 },
 ticks_remaining: UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS,
- });
- return Ok(());
- }
-
- // Otherwise create the channel right now.
- let mut random_bytes = [0u8; 16];
- random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
- let user_channel_id = u128::from_be_bytes(random_bytes);
-
- if channel_type.requires_zero_conf() {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), common_fields.temporary_channel_id));
- }
- if channel_type.requires_anchors_zero_fee_htlc_tx() || channel_type.requires_anchor_zero_fee_commitments() {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), common_fields.temporary_channel_id));
- }
-
- let (mut channel, message_send_event) = match msg {
- OpenChannelMessageRef::V1(msg) => {
- let mut channel = InboundV1Channel::new(
- &self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
- &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
- &self.config.read().unwrap(), best_block_height, &self.logger, /*is_0conf=*/false
- ).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
- let logger = WithChannelContext::from(&self.logger, &channel.context, None);
- let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
- MessageSendEvent::SendAcceptChannel {
- node_id: *counterparty_node_id,
- msg,
- }
- });
- (Channel::from(channel), message_send_event)
- },
- OpenChannelMessageRef::V2(msg) => {
- let channel = PendingV2Channel::new_inbound(
- &self.fee_estimator, &self.entropy_source, &self.signer_provider,
- self.get_our_node_id(), *counterparty_node_id, &self.channel_type_features(),
- &peer_state.latest_features, msg, user_channel_id,
- &self.config.read().unwrap(), best_block_height, &self.logger,
- ).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
- let message_send_event = MessageSendEvent::SendAcceptChannelV2 {
- node_id: *counterparty_node_id,
- msg: channel.accept_inbound_dual_funded_channel(),
- };
- (Channel::from(channel), Some(message_send_event))
- },
- };
-
- let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
- channel.context_mut().set_outbound_scid_alias(outbound_scid_alias);
-
- if let Some(message_send_event) = message_send_event {
- peer_state.pending_msg_events.push(message_send_event);
- }
- peer_state.channel_by_id.insert(channel.context().channel_id(), channel);
+ );
 
 Ok(())
 }
@@ -20168,7 +20108,7 @@ mod tests {
 let mut funding_tx = None;
 for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
- nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
+ handle_and_accept_open_channel(&nodes[1], nodes[0].node.get_our_node_id(), &open_channel_msg);
 let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 if idx == 0 {
@@ -20242,11 +20182,22 @@ mod tests {
 // open channels.
 assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
 for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
- nodes[1].node.handle_open_channel(peer_pks[i], &open_channel_msg);
+ handle_and_accept_open_channel(&nodes[1], peer_pks[i], &open_channel_msg);
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
 open_channel_msg.common_fields.temporary_channel_id =
 ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 }
 nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ assert!(nodes[1]
+ .node
+ .accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23, None,)
+ .is_err())
+ },
+ _ => panic!("Unexpected event"),
+ }
+
 assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
 open_channel_msg.common_fields.temporary_channel_id);
 
@@ -20264,7 +20215,7 @@ mod tests {
 // Further, because the first channel was funded, we can open another channel with
 // last_random_pk.
- nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
+ handle_and_accept_open_channel(&nodes[1], last_random_pk, &open_channel_msg);
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
 }
 
@@ -20625,6 +20576,16 @@ pub mod bench {
 }, false).unwrap();
 node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None, None).unwrap();
 node_b.handle_open_channel(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
+ let events = node_b.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
+ node_b
+ .accept_inbound_channel(temporary_channel_id, counterparty_node_id, 42, None)
+ .unwrap();
+ },
+ _ => panic!("Unexpected event"),
+ };
 node_a.handle_accept_channel(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
 
 let tx;
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index e8965752331..165c08f9332 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -30,7 +30,7 @@ use crate::ln::channelmanager::{
 RAACommitmentOrder, MIN_CLTV_EXPIRY_DELTA,
 };
 use crate::ln::funding::FundingTxInput;
-use crate::ln::msgs;
+use crate::ln::msgs::{self, OpenChannel};
 use crate::ln::msgs::{
 BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, RoutingMessageHandler,
 };
@@ -1552,7 +1552,6 @@ pub fn sign_funding_transaction<'a, 'b, 'c>(
 tx
 }
 
-// Receiver must have been initialized with manually_accept_inbound_channels set to true.
 pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(
 initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option<UserConfig>,
@@ -1600,7 +1599,6 @@ pub fn exchange_open_accept_zero_conf_chan<'a, 'b, 'c, 'd>(
 accept_channel.common_fields.temporary_channel_id
 }
 
-// Receiver must have been initialized with manually_accept_inbound_channels set to true.
 pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>(
 initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option<UserConfig>,
 channel_value_sat: u64, push_msat: u64,
@@ -1698,18 +1696,8 @@ pub fn exchange_open_accept_chan<'a, 'b, 'c>(
 .user_channel_id,
 42
 );
- node_b.node.handle_open_channel(node_a_id, &open_channel_msg);
- if node_b.node.get_current_config().manually_accept_inbound_channels {
- let events = node_b.node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match &events[0] {
- Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => node_b
- .node
- .accept_inbound_channel(temporary_channel_id, counterparty_node_id, 42, None)
- .unwrap(),
- _ => panic!("Unexpected event"),
- };
- }
+ handle_and_accept_open_channel(&node_b, node_a_id, &open_channel_msg);
+
 let accept_channel_msg = get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a_id);
 assert_eq!(accept_channel_msg.common_fields.temporary_channel_id, create_chan_id);
 node_a.node.handle_accept_channel(node_b_id, &accept_channel_msg);
@@ -1990,7 +1978,8 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(
 .create_channel(node_b_id, channel_value, push_msat, 42, None, Some(no_announce_cfg))
 .unwrap();
 let open_channel = get_event_msg!(nodes[a], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[b].node.handle_open_channel(node_a_id, &open_channel);
+ handle_and_accept_open_channel(&nodes[b], node_a_id, &open_channel);
+
 let accept_channel = get_event_msg!(nodes[b], MessageSendEvent::SendAcceptChannel, node_a_id);
 nodes[a].node.handle_accept_channel(node_b_id, &accept_channel);
@@ -2058,6 +2047,20 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(
 (as_channel_ready, tx)
 }
 
+pub fn handle_and_accept_open_channel(node: &Node, counterparty_id: PublicKey, msg: &OpenChannel) {
+ node.node.handle_open_channel(counterparty_id, msg);
+ let events = node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
+ node.node
+ .accept_inbound_channel(temporary_channel_id, counterparty_node_id, 42, None)
+ .unwrap();
+ },
+ _ => panic!("Unexpected event"),
+ };
+}
+
 pub fn update_nodes_with_chan_announce<'a, 'b, 'c, 'd>(
 nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, ann: &msgs::ChannelAnnouncement,
 upd_1: &msgs::ChannelUpdate, upd_2: &msgs::ChannelUpdate,
@@ -4551,7 +4554,6 @@ pub fn test_default_channel_config() -> UserConfig {
 pub fn test_default_anchors_channel_config() -> UserConfig {
 let mut config = test_default_channel_config();
 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
- config.manually_accept_inbound_channels = true;
 config
 }
@@ -5559,7 +5561,8 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>(
 .unwrap();
 let open_channel_msg =
 get_event_msg!(funding_node, MessageSendEvent::SendOpenChannel, other_node_id);
- other_node.node.handle_open_channel(funding_node_id, &open_channel_msg);
+ handle_and_accept_open_channel(other_node, funding_node_id, &open_channel_msg);
+
 let accept_channel_msg =
 get_event_msg!(other_node, MessageSendEvent::SendAcceptChannel, funding_node_id);
 funding_node.node.handle_accept_channel(other_node_id, &accept_channel_msg);
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 8e854b31150..990f1d530bf 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -1164,7 +1164,6 @@ pub fn do_test_multiple_package_conflicts(p2a_anchor: bool) {
 // transaction.
 user_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
 user_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor;
- user_cfg.manually_accept_inbound_channels = true;
 
 let configs = [Some(user_cfg.clone()), Some(user_cfg.clone()), Some(user_cfg)];
 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs);
@@ -2506,7 +2505,7 @@ pub fn test_peer_disconnected_before_funding_broadcasted() {
 let expected_temporary_channel_id =
 nodes[0].node.create_channel(node_b_id, 1_000_000, 500_000_000, 42, None, None).unwrap();
 let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(node_a_id, &open_channel);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel);
 let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 nodes[0].node.handle_accept_channel(node_b_id, &accept_channel);
@@ -6365,11 +6364,11 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() {
 assert_eq!(
 node_txn[1].input[0].previous_output,
- revoked_htlc_txn[1].input[0].previous_output
+ revoked_htlc_txn[0].input[0].previous_output
 );
 assert_eq!(
 node_txn[1].input[1].previous_output,
- revoked_htlc_txn[0].input[0].previous_output
+ revoked_htlc_txn[1].input[0].previous_output
 );
 
 // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
@@ -6787,7 +6786,7 @@ pub fn test_override_0msat_htlc_minimum() {
 let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
 assert_eq!(res.common_fields.htlc_minimum_msat, 1);
 
- nodes[1].node.handle_open_channel(node_a_id, &res);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &res);
 let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 assert_eq!(res.common_fields.htlc_minimum_msat, 1);
 }
@@ -7566,7 +7565,7 @@ pub fn test_pre_lockin_no_chan_closed_update() {
 // Create an initial channel
 nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap();
 let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan_msg);
 let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 nodes[0].node.handle_accept_channel(node_b_id, &accept_chan_msg);
@@ -7885,9 +7884,8 @@ pub fn test_peer_funding_sidechannel() {
 let node_b_id = nodes[1].node.get_our_node_id();
 
 let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
- let temp_chan_id_ca = exchange_open_accept_chan(&nodes[1], &nodes[0], 1_000_000, 0);
-
 let (_, tx, funding_output) = create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42);
+ let temp_chan_id_ba = exchange_open_accept_chan(&nodes[1], &nodes[0], 1_000_000, 0);
 
 let cs_funding_events = nodes[1].node.get_and_clear_pending_events();
 assert_eq!(cs_funding_events.len(), 1);
@@ -7899,7 +7897,7 @@ pub fn test_peer_funding_sidechannel() {
 let output_idx = funding_output.index;
 nodes[1]
 .node
- .funding_transaction_generated_unchecked(temp_chan_id_ca, node_a_id, tx.clone(), output_idx)
+ .funding_transaction_generated_unchecked(temp_chan_id_ba, node_a_id, tx.clone(), output_idx)
 .unwrap();
 
 let funding_created_msg =
 get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, node_a_id);
@@ -8603,7 +8601,7 @@ fn do_test_max_dust_htlc_exposure(
 if on_holder_tx {
 open_channel.common_fields.dust_limit_satoshis = 546;
 }
- nodes[1].node.handle_open_channel(node_a_id, &open_channel);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel);
 let mut accept_channel =
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 nodes[0].node.handle_accept_channel(node_b_id, &accept_channel);
@@ -9158,7 +9156,6 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures)
 if features == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() {
 default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
 // in addition to the one above, this setting is also needed to create an anchor channel
- default_config.manually_accept_inbound_channels = true;
 }
 
 // Set node 1's max dust htlc exposure to 1msat below `expected_dust_exposure_msat`
@@ -9566,7 +9563,7 @@ pub fn test_remove_expired_outbound_unfunded_channels() {
 nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap();
 let open_channel_message =
 get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(node_a_id, &open_channel_message);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_message);
 let accept_channel_message =
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message);
@@ -9630,7 +9627,7 @@ pub fn test_remove_expired_inbound_unfunded_channels() {
 nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap();
 let open_channel_message =
 get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(node_a_id, &open_channel_message);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_message);
 let accept_channel_message =
 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message);
@@ -9689,9 +9686,6 @@ fn do_test_manual_broadcast_skips_commitment_until_funding(
 let chanmon_cfgs = create_chanmon_cfgs(2);
 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 let mut chan_config = test_default_channel_config();
- if zero_conf_open {
- chan_config.manually_accept_inbound_channels = true;
- }
 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]);
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs
index 4c4fbada7dd..e719f5efa74 100644
--- a/lightning/src/ln/htlc_reserve_unit_tests.rs
+++ b/lightning/src/ln/htlc_reserve_unit_tests.rs
@@ -59,7 +59,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
 open_channel_message.channel_reserve_satoshis = 0;
 open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
 }
- nodes[1].node.handle_open_channel(node_a_id, &open_channel_message);
+ handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_message);
 
 // Extract the channel accept message from node1 to node0
 let mut accept_channel_message =
@@ -785,7 +785,6 @@ fn test_fee_spike_violation_fails_htlc() {
 fn test_zero_fee_commitments_no_fee_spike_buffer() {
 let mut cfg = test_default_channel_config();
 cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true;
- cfg.manually_accept_inbound_channels = true;
 
 do_test_fee_spike_buffer(Some(cfg), false)
 }
@@ -2144,7 +2143,6 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) {
 
 let mut default_config = test_default_channel_config();
 default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
- default_config.manually_accept_inbound_channels = true;
 
 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 let node_chanmgrs =
diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs
index 195eb73595a..3eaa369c3f1 100644
--- a/lightning/src/ln/invoice_utils.rs
+++ b/lightning/src/ln/invoice_utils.rs
@@ -1052,7 +1052,7 @@ mod test {
 .create_channel(node_a_id, 1_000_000, 500_000_000, 42, None, Some(private_chan_cfg))
 .unwrap();
 let open_channel = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, node_a_id);
- nodes[0].node.handle_open_channel(node_c_id, &open_channel);
+ handle_and_accept_open_channel(&nodes[0], node_c_id, &open_channel);
 let accept_channel = get_event_msg!(nodes[0], MessageSendEvent::SendAcceptChannel, node_c_id);
 nodes[2].node.handle_accept_channel(node_a_id, &accept_channel);
@@ -1583,7 +1583,7 @@ mod test {
 .create_channel(node_d_id, 1_000_000, 500_000_000, 42, None, Some(private_chan_cfg))
 .unwrap();
 let open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_d_id);
- nodes[3].node.handle_open_channel(nodes[1].node.get_our_node_id(), &open_channel);
+ handle_and_accept_open_channel(&nodes[3], node_b_id, &open_channel);
 let accept_channel = get_event_msg!(nodes[3], MessageSendEvent::SendAcceptChannel, node_b_id);
 nodes[1].node.handle_accept_channel(nodes[3].node.get_our_node_id(), &accept_channel);
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index c3266ae317f..8074f71be8e 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -319,10 +319,8 @@ fn do_chanmon_claim_value_coop_close(keyed_anchors: bool, p2a_anchor: bool) {
 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 let mut user_config = test_default_channel_config();
user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - user_config.manually_accept_inbound_channels = true; user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -473,7 +471,6 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -872,7 +869,6 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1383,7 +1379,6 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1687,7 +1682,6 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1977,7 +1971,6 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -2254,7 +2247,6 @@ fn do_test_claimable_balance_correct_while_payment_pending(outbound_payment: boo let mut 
user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(user_config.clone()), Some(user_config.clone()), Some(user_config)]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -2402,7 +2394,6 @@ fn do_test_monitor_rebroadcast_pending_claims(keyed_anchors: bool, p2a_anchor: b let mut config = test_default_channel_config(); config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -2528,7 +2519,6 @@ fn do_test_yield_anchors_events(have_htlcs: bool, p2a_anchor: bool) { anchors_config.channel_handshake_config.announce_for_forwarding = true; anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; anchors_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - anchors_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -2730,7 +2720,6 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { let mut anchors_config = test_default_channel_config(); anchors_config.channel_handshake_config.announce_for_forwarding = true; - anchors_config.manually_accept_inbound_channels = true; anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; anchors_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]); @@ -3037,7 +3026,6 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c let chain_monitor; let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - user_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config.clone())]); let node_deserialized; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -3128,7 +3116,6 @@ fn do_test_monitor_claims_with_random_signatures(keyed_anchors: bool, p2a_anchor let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -3419,7 +3406,6 @@ fn do_test_lost_preimage_monitor_events(on_counterparty_tx: bool, p2a_anchor: bo // Here we test that losing `MonitorEvent`s that contain HTLC resolution preimages does not // cause us to lose funds or miss a `PaymentSent` event. 
let mut cfg = test_default_channel_config(); - cfg.manually_accept_inbound_channels = true; cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; let cfgs = [Some(cfg.clone()), Some(cfg.clone()), Some(cfg.clone())]; @@ -3601,7 +3587,6 @@ fn do_test_lost_timeout_monitor_events(confirm_tx: CommitmentType, dust_htlcs: b // Here we test that losing `MonitorEvent`s that contain HTLC resolution via timeouts does not // cause us to lose a `PaymentFailed` event. let mut cfg = test_default_channel_config(); - cfg.manually_accept_inbound_channels = true; cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; let cfgs = [Some(cfg.clone()), Some(cfg.clone()), Some(cfg.clone())]; diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index f73c55fd6c6..e24ad48a7d9 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -1020,7 +1020,6 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut manually_accept_config = test_default_channel_config(); - manually_accept_config.manually_accept_inbound_channels = true; let persist_1; let chain_monitor_1; @@ -2210,7 +2209,6 @@ fn do_test_intercepted_payment(test: InterceptTest) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut zero_conf_chan_config = test_default_channel_config(); - zero_conf_chan_config.manually_accept_inbound_channels = true; let mut intercept_forwards_config = test_default_channel_config(); intercept_forwards_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; @@ -4959,7 +4957,6 @@ fn test_htlc_forward_considers_anchor_outputs_value() { // balance to dip below the reserve when considering the value of anchor outputs. 
let mut config = test_default_channel_config(); config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - config.manually_accept_inbound_channels = true; config.channel_config.forwarding_fee_base_msat = 0; config.channel_config.forwarding_fee_proportional_millionths = 0; diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 2035af15046..9d30d749aa2 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -448,7 +448,7 @@ fn test_scid_privacy_negotiation() { .as_ref() .unwrap() .supports_scid_privacy()); - nodes[1].node.handle_open_channel(node_a_id, &second_open_channel); + handle_and_accept_open_channel(&nodes[1], node_a_id, &second_open_channel); nodes[0].node.handle_accept_channel( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), @@ -501,7 +501,7 @@ fn test_inbound_scid_privacy() { assert!(open_channel.common_fields.channel_type.as_ref().unwrap().requires_scid_privacy()); - nodes[2].node.handle_open_channel(node_b_id, &open_channel); + handle_and_accept_open_channel(&nodes[2], node_b_id, &open_channel); let accept_channel = get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, node_b_id); nodes[1].node.handle_accept_channel(node_c_id, &accept_channel); @@ -781,7 +781,6 @@ fn test_simple_0conf_channel() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -800,7 +799,6 @@ fn test_0conf_channel_with_async_monitor() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(chan_config.clone()), None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -997,7 +995,6 @@ fn test_0conf_close_no_early_chan_update() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1024,7 +1021,6 @@ fn test_public_0conf_channel() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1086,7 +1082,6 @@ fn test_0conf_channel_reorg() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, Some(chan_config.clone())]); @@ -1314,7 +1309,7 @@ fn test_zero_conf_accept_reject() { let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key(); channel_type_features.set_zero_conf_required(); - // 1. 
Check we reject zero conf channels by default + // Check we can accept zero conf channels via the right method let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -1322,6 +1317,7 @@ fn test_zero_conf_accept_reject() { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); + // 1. First try the non-0conf method to manually accept nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); @@ -1330,41 +1326,6 @@ fn test_zero_conf_accept_reject() { nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); - let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); - match msg_events[0] { - MessageSendEvent::HandleError { - action: ErrorAction::SendErrorMessage { ref msg, .. }, - .. - } => { - assert_eq!(msg.data, "No zero confirmation channels accepted".to_owned()); - }, - _ => panic!(), - } - - // 2. Check we can manually accept zero conf channels via the right method - let mut manually_accept_conf = UserConfig::default(); - manually_accept_conf.manually_accept_inbound_channels = true; - - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - // 2.1 First try the non-0conf method to manually accept - nodes[0] - .node - .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf.clone())) - .unwrap(); - let mut open_channel_msg = - get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - - open_channel_msg.common_fields.channel_type = Some(channel_type_features.clone()); - - nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); - // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in the `msg_events`. assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1392,11 +1353,8 @@ fn test_zero_conf_accept_reject() { _ => panic!(), } - // 2.2 Try again with the 0conf method to manually accept - nodes[0] - .node - .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)) - .unwrap(); + // 2. 
Try again with the 0conf method to manually accept + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); @@ -1439,7 +1397,6 @@ fn test_connect_before_funding() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut manually_accept_conf = test_default_channel_config(); - manually_accept_conf.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1492,7 +1449,6 @@ fn test_0conf_ann_sigs_racing_conf() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index a8206dfe850..7c66d753590 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -254,7 +254,7 @@ fn test_manager_serialize_deserialize_events() { let node_a = nodes.remove(0); let node_b = nodes.remove(0); node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None, None).unwrap(); - node_b.node.handle_open_channel(node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())); + handle_and_accept_open_channel(&node_b, node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())); node_a.node.handle_accept_channel(node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())); let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, &node_b.node.get_our_node_id(), channel_value, 42); diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index b56caf96008..4c1bdd72f2e 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -316,6 +316,14 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ check_added_monitors(&nodes[0], 1); } + let expected_err = "Funding transaction was un-confirmed, originally locked at 6 confs."; + if reload_node && !reorg_after_reload { + handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed, originally locked at 6 confs."); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) }; + check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000); + } + if reload_node { // Since we currently have a background event pending, it's good to test that we survive a // serialization roundtrip. 
Further, this tests the somewhat awkward edge-case of dropping @@ -386,7 +394,6 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ assert_eq!(txn.len(), 1); } - let expected_err = "Funding transaction was un-confirmed, originally locked at 6 confs."; if reorg_after_reload || !reload_node { handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed, originally locked at 6 confs."); check_added_monitors(&nodes[1], 1); @@ -404,7 +411,11 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &Init { features: nodes[1].node.init_features(), networks: None, remote_network_address: None }, true).unwrap(); + nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); } + create_announced_chan_between_nodes(&nodes, 0, 1); send_payment(&nodes[0], &[&nodes[1]], 8000000); } @@ -828,7 +839,6 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(keyed_anchors: bool, p2a let mut config = test_default_channel_config(); config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let persister; let new_chain_monitor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config.clone())]); @@ -985,7 +995,6 @@ fn do_test_split_htlc_expiry_tracking(use_third_htlc: bool, reorg_out: bool, p2a // This test relies on being able to consolidate HTLC claims into a single transaction, which // requires anchors: let mut config = test_default_channel_config(); - config.manually_accept_inbound_channels = true; config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 982dc788f60..74ffe3cbfd3 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -992,6 +992,17 @@ fn test_unsupported_anysegwit_upfront_shutdown_script() { let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); open_channel.common_fields.shutdown_scriptpubkey = Some(anysegwit_shutdown_script.clone()); nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. 
} => { + assert!(nodes[1] + .node + .accept_inbound_channel(temporary_channel_id, counterparty_node_id, 42, None,) + .is_err()); + }, + _ => panic!("Unexpected event"), + }; let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1020,7 +1031,8 @@ fn test_unsupported_anysegwit_upfront_shutdown_script() { // Check script when handling an accept_channel message nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_channel); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel); + let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); accept_channel.common_fields.shutdown_scriptpubkey = Some(anysegwit_shutdown_script.clone()); @@ -1058,6 +1070,17 @@ fn test_invalid_upfront_shutdown_script() { open_channel.common_fields.shutdown_scriptpubkey = Some(Builder::new().push_int(0).push_slice(&[0, 0]).into_script()); nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => { + assert!(nodes[1] + .node + .accept_inbound_channel(temporary_channel_id, counterparty_node_id, 42, None,) + .is_err()); + }, + _ => panic!("Unexpected event"), + }; let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index cf93c6243c4..618271df972 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -1530,7 +1530,6 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { let (chain_monitor_0a, chain_monitor_0b, chain_monitor_1a, chain_monitor_1b); let mut config = test_default_channel_config(); if use_0conf { - config.manually_accept_inbound_channels = true; config.channel_handshake_limits.trust_own_funding_0conf = true; } let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs index 67a07325ad6..69890fb001c 100644 --- a/lightning/src/ln/update_fee_tests.rs +++ b/lightning/src/ln/update_fee_tests.rs @@ -388,8 +388,6 @@ pub fn do_test_update_fee_that_funder_cannot_afford(channel_type_features: Chann let mut default_config = test_default_channel_config(); if channel_type_features == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() { default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - // this setting is also needed to create an anchor channel - default_config.manually_accept_inbound_channels = true; } let node_chanmgrs = create_node_chanmgrs( @@ -898,6 +896,17 @@ pub fn test_chan_init_feerate_unaffordability() { get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); open_channel_msg.push_msat += 1; nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. 
} => { + assert!(nodes[1] + .node + .accept_inbound_channel(temporary_channel_id, counterparty_node_id, 42, None,) + .is_err()); + }, + _ => panic!("Unexpected event"), + } let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); @@ -1029,7 +1038,6 @@ pub fn do_cannot_afford_on_holding_cell_release( 100; if channel_type_features.supports_anchors_zero_fee_htlc_tx() { default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - default_config.manually_accept_inbound_channels = true; } let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); @@ -1372,7 +1380,6 @@ pub fn test_zero_fee_commitments_no_update_fee() { // they'll disconnect and warn if they receive them. let mut cfg = test_default_channel_config(); cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; - cfg.manually_accept_inbound_channels = true; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); diff --git a/lightning/src/ln/zero_fee_commitment_tests.rs b/lightning/src/ln/zero_fee_commitment_tests.rs index 2503ad81cde..d287b6e3de1 100644 --- a/lightning/src/ln/zero_fee_commitment_tests.rs +++ b/lightning/src/ln/zero_fee_commitment_tests.rs @@ -18,7 +18,6 @@ fn test_p2a_anchor_values_under_trims_and_rounds() { let mut user_cfg = test_default_channel_config(); user_cfg.channel_handshake_config.our_htlc_minimum_msat = 1; user_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; - user_cfg.manually_accept_inbound_channels = true; let configs = [Some(user_cfg.clone()), Some(user_cfg)]; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &configs); @@ -125,7 +124,6 @@ fn test_htlc_claim_chunking() { user_cfg.channel_handshake_config.our_htlc_minimum_msat = 1; user_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; user_cfg.channel_handshake_config.our_max_accepted_htlcs = 114; - user_cfg.manually_accept_inbound_channels = true; let configs = [Some(user_cfg.clone()), Some(user_cfg)]; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &configs); @@ -314,7 +312,6 @@ fn test_anchor_tx_too_big() { user_cfg.channel_handshake_config.our_htlc_minimum_msat = 1; user_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; user_cfg.channel_handshake_config.our_max_accepted_htlcs = 114; - user_cfg.manually_accept_inbound_channels = true; let configs = [Some(user_cfg.clone()), Some(user_cfg)]; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &configs); diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index feb326cfad6..1dec7bdfc5f 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -30,14 +30,14 @@ pub struct ChannelHandshakeConfig { /// both parties have exchanged `splice_locked`. /// /// A lower-bound of `1` is applied, requiring all channels to have a confirmed commitment - /// transaction before operation. If you wish to accept channels with zero confirmations, see - /// [`UserConfig::manually_accept_inbound_channels`] and + /// transaction before operation. If you wish to accept channels with zero confirmations, + /// manually accept them via [`Event::OpenChannelRequest`] using /// [`ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`].
/// /// Default value: `6` /// - /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel /// [`ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel_from_trusted_peer_0conf + /// [`Event::OpenChannelRequest`]: crate::events::Event::OpenChannelRequest pub minimum_depth: u32, /// Set to the number of blocks we require our counterparty to wait to claim their money (ie /// the number of blocks we have to punish our counterparty if they broadcast a revoked @@ -162,15 +162,13 @@ pub struct ChannelHandshakeConfig { /// will be treated as one million instead, although channel negotiations will /// fail in that case.) pub their_channel_reserve_proportional_millionths: u32, - /// If set, we attempt to negotiate the `anchors_zero_fee_htlc_tx`option for all future + /// If set, we attempt to negotiate the `anchors_zero_fee_htlc_tx` option for all future /// channels. This feature requires having a reserve of onchain funds readily available to bump /// transactions in the event of a channel force close to avoid the possibility of losing funds. /// - /// Note that if you wish accept inbound channels with anchor outputs, you must enable - /// [`UserConfig::manually_accept_inbound_channels`] and manually accept them with - /// [`ChannelManager::accept_inbound_channel`]. This is done to give you the chance to check - /// whether your reserve of onchain funds is enough to cover the fees for all existing and new - /// channels featuring anchor outputs in the event of a force close. + /// Upon receiving an [`Event::OpenChannelRequest`] for a channel of this type, you must + /// check whether your reserve of onchain funds is enough to cover the fees for all existing + /// and new channels featuring anchor outputs in the event of a force close. /// /// If this option is set, channels may be created that will not be readable by LDK versions /// prior to 0.0.116, causing [`ChannelManager`]'s read method to return a @@ -185,6 +183,7 @@ pub struct ChannelHandshakeConfig { /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel /// [`DecodeError::InvalidValue`]: crate::ln::msgs::DecodeError::InvalidValue + /// [`Event::OpenChannelRequest`]: crate::events::Event::OpenChannelRequest pub negotiate_anchors_zero_fee_htlc_tx: bool, /// If set, we attempt to negotiate the `zero_fee_commitments` option for all future channels. @@ -198,11 +197,9 @@ pub struct ChannelHandshakeConfig { /// funds readily available to bump transactions in the event of a channel force close to avoid /// the possibility of losing funds. /// - /// Note that if you wish accept inbound channels with anchor outputs, you must enable - /// [`UserConfig::manually_accept_inbound_channels`] and manually accept them with - /// [`ChannelManager::accept_inbound_channel`]. This is done to give you the chance to check - /// whether your reserve of onchain funds is enough to cover the fees for all existing and new - /// channels featuring anchor outputs in the event of a force close. + /// Upon receiving an [`Event::OpenChannelRequest`] for a channel of this type, you must + /// check whether your reserve of onchain funds is enough to cover the fees for all existing + /// and new channels featuring anchor outputs in the event of a force close. 
/// /// If this option is set, channels may be created that will not be readable by LDK versions /// prior to 0.2, causing [`ChannelManager`]'s read method to return a @@ -224,6 +221,7 @@ pub struct ChannelHandshakeConfig { /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel /// [`DecodeError::InvalidValue`]: crate::ln::msgs::DecodeError::InvalidValue + /// [`Event::OpenChannelRequest`]: crate::events::Event::OpenChannelRequest pub negotiate_anchor_zero_fee_commitments: bool, /// The maximum number of HTLCs in-flight from our counterparty towards us at the same time. @@ -998,20 +996,6 @@ pub struct UserConfig { /// /// Default value: `true` pub accept_inbound_channels: bool, - /// If this is set to `true`, the user needs to manually accept inbound requests to open a new - /// channel. - /// - /// When set to `true`, [`Event::OpenChannelRequest`] will be triggered once a request to open a - /// new inbound channel is received through a [`msgs::OpenChannel`] message. In that case, a - /// [`msgs::AcceptChannel`] message will not be sent back to the counterparty node unless the - /// user explicitly chooses to accept the request. - /// - /// Default value: `false` - /// - /// [`Event::OpenChannelRequest`]: crate::events::Event::OpenChannelRequest - /// [`msgs::OpenChannel`]: crate::ln::msgs::OpenChannel - /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - pub manually_accept_inbound_channels: bool, /// Flags consisting of OR'd values from [`HTLCInterceptionFlags`] which describe HTLCs /// forwarded over this node to intercept. Any HTLCs which are intercepted will generate an /// [`Event::HTLCIntercepted`] event which must be handled to forward or fail the HTLC. @@ -1092,7 +1076,6 @@ impl Default for UserConfig { channel_config: ChannelConfig::default(), accept_forwards_to_priv_channels: false, accept_inbound_channels: true, - manually_accept_inbound_channels: false, htlc_interception_flags: 0, manually_handle_bolt12_invoices: false, enable_dual_funded_channels: false, @@ -1115,7 +1098,6 @@ impl Readable for UserConfig { channel_config: Readable::read(reader)?, accept_forwards_to_priv_channels: Readable::read(reader)?, accept_inbound_channels: Readable::read(reader)?, - manually_accept_inbound_channels: Readable::read(reader)?, htlc_interception_flags: Readable::read(reader)?, manually_handle_bolt12_invoices: Readable::read(reader)?, enable_dual_funded_channels: Readable::read(reader)?, From aebcd1f72501f4185c86b63dc46c2970f0158fa6 Mon Sep 17 00:00:00 2001 From: elnosh Date: Wed, 28 Jan 2026 15:15:45 -0500 Subject: [PATCH 189/242] Default to anchor channels Set `negotiate_anchors_zero_fee_htlc_tx` default to true. 
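Aside: the config docs rewritten in the previous commit direct users to accept every inbound channel explicitly via `Event::OpenChannelRequest`. A rough sketch of that flow follows (illustrative only, not part of this series); the `AChannelManager` bound and its `get_cm()` accessor are assumed from the `lightning` crate, while `has_anchor_reserve` and the `42` user channel id are placeholders, and rejection-by-force-close mirrors what this series' changelog suggests.

use lightning::events::Event;
use lightning::ln::channelmanager::AChannelManager;

// Hypothetical handler: accept the inbound channel if our on-chain reserve
// policy (placeholder `has_anchor_reserve`) is satisfied, otherwise decline it.
fn handle_open_channel_request<CM: AChannelManager>(
    cm: &CM, event: Event, has_anchor_reserve: bool,
) {
    if let Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } = event {
        if has_anchor_reserve {
            // Accept with an arbitrary user_channel_id (42) and no config override.
            cm.get_cm()
                .accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, 42, None)
                .expect("channel disappeared before acceptance");
        } else {
            // Decline by closing the pending channel, per the changelog later in
            // this series.
            cm.get_cm()
                .force_close_broadcasting_latest_txn(
                    &temporary_channel_id,
                    &counterparty_node_id,
                    "insufficient anchor reserve".to_owned(),
                )
                .expect("channel disappeared before rejection");
        }
    }
}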
--- fuzz/src/chanmon_consistency.rs | 8 ++++---- lightning-background-processor/src/lib.rs | 4 +++- lightning/src/ln/channel.rs | 6 ++++-- lightning/src/ln/channel_open_tests.rs | 8 +++----- lightning/src/ln/channel_type_tests.rs | 8 ++++++-- lightning/src/ln/channelmanager.rs | 4 ++-- lightning/src/ln/functional_test_utils.rs | 1 + lightning/src/util/config.rs | 4 ++-- ...7-manual-channel-accept-default-anchors.txt | 18 ++++++++++++++++++ 9 files changed, 43 insertions(+), 18 deletions(-) create mode 100644 pending_changelog/4337-manual-channel-accept-default-anchors.txt diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 30b95c2095f..530f90efb3a 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -710,8 +710,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { config.channel_config.forwarding_fee_proportional_millionths = 0; config.channel_handshake_config.announce_for_forwarding = true; config.reject_inbound_splices = false; - if anchors { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + if !anchors { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; } let network = Network::Bitcoin; let best_block_timestamp = genesis_block(network).header.time; @@ -760,8 +760,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { config.channel_config.forwarding_fee_proportional_millionths = 0; config.channel_handshake_config.announce_for_forwarding = true; config.reject_inbound_splices = false; - if anchors { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + if !anchors { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; } let mut monitors = new_hash_map(); diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 659e28114e7..94be01420ad 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -2447,6 +2447,8 @@ mod tests { )); let best_block = BestBlock::from_network(network); let params = ChainParameters { network, best_block }; + let mut config = UserConfig::default(); + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; let manager = Arc::new(ChannelManager::new( Arc::clone(&fee_estimator), Arc::clone(&chain_monitor), @@ -2457,7 +2459,7 @@ mod tests { Arc::clone(&keys_manager), Arc::clone(&keys_manager), Arc::clone(&keys_manager), - UserConfig::default(), + config, params, genesis_block.header.time, )); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index be86d329efc..bcfd6fd9368 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -16026,7 +16026,8 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); - let config = UserConfig::default(); + let mut config = UserConfig::default(); + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); // Create Node B's channel by receiving Node A's open_channel message @@ -16116,7 +16117,8 @@ mod tests { let logger = TestLogger::new(); let node_id = PublicKey::from_secret_key(&secp_ctx, 
&SecretKey::from_slice(&[42; 32]).unwrap()); - let config = UserConfig::default(); + let mut config = UserConfig::default(); + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); let commitment_tx_fee_0_htlcs = commit_tx_fee_sat(chan.context.feerate_per_kw, 0, chan.funding.get_channel_type()) * 1000; diff --git a/lightning/src/ln/channel_open_tests.rs b/lightning/src/ln/channel_open_tests.rs index 7c2a51ea360..f1336a0e654 100644 --- a/lightning/src/ln/channel_open_tests.rs +++ b/lightning/src/ln/channel_open_tests.rs @@ -172,8 +172,7 @@ fn test_0conf_limiting() { #[test] fn test_inbound_anchors_manual_acceptance() { - let mut anchors_cfg = test_default_channel_config(); - anchors_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + let anchors_cfg = test_default_anchors_channel_config(); do_test_manual_inbound_accept_with_override(anchors_cfg, None); } @@ -191,9 +190,7 @@ fn test_inbound_anchors_config_overridden() { update_overrides: None, }; - let mut anchors_cfg = test_default_channel_config(); - anchors_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - + let mut anchors_cfg = test_default_anchors_channel_config(); let accept_message = do_test_manual_inbound_accept_with_override(anchors_cfg, Some(overrides)); assert_eq!(accept_message.common_fields.max_htlc_value_in_flight_msat, 5_000_000); assert_eq!(accept_message.common_fields.htlc_minimum_msat, 1_000); @@ -1066,6 +1063,7 @@ pub fn test_user_configurable_csv_delay() { pub fn test_accept_inbound_channel_config_override() { let mut conf = UserConfig::default(); conf.channel_handshake_config.minimum_depth = 1; + conf.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); diff --git a/lightning/src/ln/channel_type_tests.rs b/lightning/src/ln/channel_type_tests.rs index 13470d50614..2b069a6d314 100644 --- a/lightning/src/ln/channel_type_tests.rs +++ b/lightning/src/ln/channel_type_tests.rs @@ -34,8 +34,10 @@ fn test_option_anchors_zero_fee_initial() { let mut expected_type = ChannelTypeFeatures::only_static_remote_key(); expected_type.set_anchors_zero_fee_htlc_tx_required(); + let mut start_cfg = UserConfig::default(); + start_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; do_test_get_initial_channel_type( - UserConfig::default(), + start_cfg, InitFeatures::empty(), ChannelTypeFeatures::only_static_remote_key(), |cfg: &mut UserConfig| { @@ -225,13 +227,15 @@ fn do_test_supports_channel_type(config: UserConfig, expected_channel_type: Chan let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap()); + let mut non_anchors_config = UserConfig::default(); + non_anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; // Assert that we get `static_remotekey` when no custom config is negotiated. 
let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, - &channelmanager::provided_init_features(&UserConfig::default()), + &channelmanager::provided_init_features(&non_anchors_config), 10000000, 100000, 42, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f38245b885a..a160907931c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2028,7 +2028,7 @@ impl< /// /// ## Opening Channels /// -/// To an open a channel with a peer, call [`create_channel`]. This will initiate the process of +/// To open a channel with a peer, call [`create_channel`]. This will initiate the process of /// opening an outbound channel, which requires self-funding when handling /// [`Event::FundingGenerationReady`]. /// @@ -5344,7 +5344,7 @@ impl< /// using [`ChannelMonitorUpdateStatus::InProgress`]), the payment may be lost on restart. See /// [`ChannelManager::list_recent_payments`] for more information. /// - /// Routes are automatically found using the [`Router] provided on startup. To fix a route for a + /// Routes are automatically found using the [`Router`] provided on startup. To fix a route for a /// particular payment, use [`Self::send_payment_with_route`] or match the [`PaymentId`] passed to /// [`Router::find_route_with_id`]. /// diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 165c08f9332..6ed6f5e39fa 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -4534,6 +4534,7 @@ pub fn create_node_cfgs_with_node_id_message_router<'a>( pub fn test_default_channel_config() -> UserConfig { let mut default_config = UserConfig::default(); + default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; // Set cltv_expiry_delta slightly lower to keep the final CLTV values inside one byte in our // tests so that our script-length checks don't fail (see ACCEPTED_HTLC_SCRIPT_WEIGHT). default_config.channel_config.cltv_expiry_delta = MIN_CLTV_EXPIRY_DELTA; diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index 1dec7bdfc5f..420fad6b1e0 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -178,7 +178,7 @@ pub struct ChannelHandshakeConfig { /// counterparties that do not support the `anchors_zero_fee_htlc_tx` option; we will simply /// fall back to a `static_remote_key` channel. /// - /// Default value: `false` (This value is likely to change to `true` in the future.) 
+ /// Default value: `true` /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel @@ -252,7 +252,7 @@ impl Default for ChannelHandshakeConfig { announce_for_forwarding: false, commit_upfront_shutdown_pubkey: true, their_channel_reserve_proportional_millionths: 10_000, - negotiate_anchors_zero_fee_htlc_tx: false, + negotiate_anchors_zero_fee_htlc_tx: true, negotiate_anchor_zero_fee_commitments: false, our_max_accepted_htlcs: 50, } diff --git a/pending_changelog/4337-manual-channel-accept-default-anchors.txt b/pending_changelog/4337-manual-channel-accept-default-anchors.txt new file mode 100644 index 00000000000..999b2490939 --- /dev/null +++ b/pending_changelog/4337-manual-channel-accept-default-anchors.txt @@ -0,0 +1,18 @@ +# API Updates + + * `ChannelHandshakeConfig::negotiate_anchors_zero_fee_htlc_tx` + now defaults to `true` (previously `false`). This means anchor output channels + will be negotiated by default for all new channels if the counterparty supports + it, requiring users to maintain an on-chain reserve for fee bumping in the + event of force-closes. + + * All inbound channels now require manual acceptance. + `UserConfig::manually_accept_inbound_channels` has been removed, and + `Event::OpenChannelRequest` will now always be generated for inbound channel + requests. Users must handle this event and call either + `ChannelManager::accept_inbound_channel` (or + `accept_inbound_channel_from_trusted_peer_0conf` for zero-conf channels) to + accept the channel, or `ChannelManager::force_close_broadcasting_latest_txn` + to reject it. This ensures users can verify they have sufficient on-chain + funds before accepting channels with anchor outputs. + From 306eea72eb5c6c031a3748534aa45ebcb6cc9cc2 Mon Sep 17 00:00:00 2001 From: elnosh Date: Tue, 3 Feb 2026 21:26:54 -0500 Subject: [PATCH 190/242] Rename test default channel config Now that anchor channels are the default, rename `test_default_anchors_channel_config` to `test_default_channel_config`, and rename the previous default to `test_legacy_channel_config`.
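Aside: a minimal sketch of the default flipped in the previous commit (illustrative only, not part of this series), assuming just `UserConfig` and the `negotiate_anchors_zero_fee_htlc_tx` field shown in the hunks above; a node that cannot maintain an on-chain fee-bumping reserve now has to opt back out explicitly.

use lightning::util::config::UserConfig;

fn main() {
    // With the previous commit applied, anchor-output channels are negotiated
    // by default for all new channels.
    let mut config = UserConfig::default();
    assert!(config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx);

    // A node without an on-chain reserve for fee bumping can restore the old
    // behavior and fall back to `static_remote_key` channels.
    config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false;
}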
--- lightning-persister/src/test_utils.rs | 4 +- lightning/src/chain/chainmonitor.rs | 7 +- lightning/src/chain/channelmonitor.rs | 3 +- lightning/src/ln/chanmon_update_fail_tests.rs | 17 +- lightning/src/ln/channel_open_tests.rs | 18 +- lightning/src/ln/channelmanager.rs | 2 +- lightning/src/ln/functional_test_utils.rs | 6 +- lightning/src/ln/functional_tests.rs | 221 ++++++++++++++---- lightning/src/ln/htlc_reserve_unit_tests.rs | 39 ++-- lightning/src/ln/invoice_utils.rs | 7 +- lightning/src/ln/monitor_tests.rs | 11 +- lightning/src/ln/payment_tests.rs | 44 +++- lightning/src/ln/reload_tests.rs | 19 +- lightning/src/ln/reorg_tests.rs | 18 +- lightning/src/ln/shutdown_tests.rs | 3 +- lightning/src/ln/splicing_tests.rs | 12 +- lightning/src/ln/update_fee_tests.rs | 40 ++-- lightning/src/util/persist.rs | 4 +- 18 files changed, 329 insertions(+), 146 deletions(-) diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index 55208c61491..48b383ad1ea 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -132,7 +132,9 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { ); node_cfgs[0].chain_monitor = chain_mon_0; node_cfgs[1].chain_monitor = chain_mon_1; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 17693f8ca7a..7db1b697c2b 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -1687,7 +1687,12 @@ mod tests { fn test_chainsync_triggers_distributed_monitor_persistence() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index c7dd579967a..3e1138c6470 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -6808,7 +6808,8 @@ mod tests { // updates is handled correctly in such conditions. 
let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let channel = create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 6cf7bcecebc..4475def5427 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3779,7 +3779,12 @@ fn do_test_durable_preimages_on_closed_channel( let chain_mon; let node_b_reload; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), None], + ); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3974,7 +3979,8 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { let chain_mon; let node_b_reload; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(legacy_cfg), None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -4462,7 +4468,9 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { // This tests that behavior. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(3, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg), None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4543,7 +4551,8 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { // This tests that behavior. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); diff --git a/lightning/src/ln/channel_open_tests.rs b/lightning/src/ln/channel_open_tests.rs index f1336a0e654..30276cadadb 100644 --- a/lightning/src/ln/channel_open_tests.rs +++ b/lightning/src/ln/channel_open_tests.rs @@ -172,7 +172,7 @@ fn test_0conf_limiting() { #[test] fn test_inbound_anchors_manual_acceptance() { - let anchors_cfg = test_default_anchors_channel_config(); + let anchors_cfg = test_default_channel_config(); do_test_manual_inbound_accept_with_override(anchors_cfg, None); } @@ -190,7 +190,7 @@ fn test_inbound_anchors_config_overridden() { update_overrides: None, }; - let mut anchors_cfg = test_default_anchors_channel_config(); + let mut anchors_cfg = test_default_channel_config(); let accept_message = do_test_manual_inbound_accept_with_override(anchors_cfg, Some(overrides)); assert_eq!(accept_message.common_fields.max_htlc_value_in_flight_msat, 5_000_000); assert_eq!(accept_message.common_fields.htlc_minimum_msat, 1_000); @@ -306,9 +306,8 @@ fn test_zero_fee_commitments_downgrade_to_static_remote() { // are supported (but not accepted), but not legacy anchors. let mut initiator_cfg = test_default_channel_config(); initiator_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; - initiator_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - let mut receiver_cfg = test_default_channel_config(); + let mut receiver_cfg = test_legacy_channel_config(); receiver_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; let start_type = ChannelTypeFeatures::anchors_zero_fee_commitments(); @@ -367,9 +366,8 @@ fn do_test_channel_type_downgrade( fn test_no_channel_downgrade() { // Tests that the local node will not retry when a `option_static_remote` channel is // rejected by a peer that advertises support for the feature. 
- let initiator_cfg = test_default_channel_config(); + let initiator_cfg = test_legacy_channel_config(); let mut receiver_cfg = test_default_channel_config(); - receiver_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); @@ -459,11 +457,11 @@ fn test_channel_resumption_fail_post_funding() { pub fn test_insane_channel_opens() { // Stand up a network of 2 nodes use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS; - let mut cfg = UserConfig::default(); - cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1; + let mut legacy_cfg = test_legacy_channel_config(); + legacy_cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg.clone())]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(legacy_cfg.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -473,7 +471,7 @@ pub fn test_insane_channel_opens() { // funding satoshis let channel_value_sat = 31337; // same as funding satoshis let channel_reserve_satoshis = - get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg); + get_holder_selected_channel_reserve_satoshis(channel_value_sat, &legacy_cfg); let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000; // Have node0 initiate a channel to node1 with aforementioned parameters diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a160907931c..b6c45856af1 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -20398,7 +20398,7 @@ mod tests { fn test_trigger_lnd_force_close() { let chanmon_cfg = create_chanmon_cfgs(2); let node_cfg = create_node_cfgs(2, &chanmon_cfg); - let user_config = test_default_channel_config(); + let user_config = test_legacy_channel_config(); let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfg, &node_chanmgr); let message = "Channel force-closed".to_owned(); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 6ed6f5e39fa..2560e77a2ac 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -4532,7 +4532,7 @@ pub fn create_node_cfgs_with_node_id_message_router<'a>( ) } -pub fn test_default_channel_config() -> UserConfig { +pub fn test_legacy_channel_config() -> UserConfig { let mut default_config = UserConfig::default(); default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; // Set cltv_expiry_delta slightly lower to keep the final CLTV values inside one byte in our @@ -4552,8 +4552,8 @@ pub fn test_default_channel_config() -> UserConfig { default_config } -pub fn test_default_anchors_channel_config() -> UserConfig { - let mut config = test_default_channel_config(); +pub fn test_default_channel_config() -> UserConfig { + let mut config = test_legacy_channel_config(); config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; config } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 990f1d530bf..1db31a51fd9 100644 --- a/lightning/src/ln/functional_tests.rs +++ 
b/lightning/src/ln/functional_tests.rs @@ -272,7 +272,9 @@ pub fn test_duplicate_htlc_different_direction_onchain() { // in opposite directions, even with the same payment secret. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -399,7 +401,9 @@ pub fn test_duplicate_htlc_different_direction_onchain() { pub fn test_inbound_outbound_capacity_is_not_zero() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); @@ -437,7 +441,12 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac // just before the upstream timeout expires let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); for node in nodes.iter() { @@ -563,7 +572,18 @@ pub fn channel_monitor_network_test() { // tests that ChannelMonitor is able to recover from various states. 
let chanmon_cfgs = create_chanmon_cfgs(5); let node_cfgs = create_node_cfgs(5, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 5, + &node_cfgs, + &[ + Some(legacy_cfg.clone()), + Some(legacy_cfg.clone()), + Some(legacy_cfg.clone()), + Some(legacy_cfg.clone()), + Some(legacy_cfg), + ], + ); let nodes = create_network(5, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -796,11 +816,11 @@ pub fn channel_monitor_network_test() { #[xtest(feature = "_externalize_tests")] pub fn test_justice_tx_htlc_timeout() { // Test justice txn built on revoked HTLC-Timeout tx, against both sides - let mut alice_config = test_default_channel_config(); + let mut alice_config = test_legacy_channel_config(); alice_config.channel_handshake_config.announce_for_forwarding = true; alice_config.channel_handshake_limits.force_announced_channel_preference = false; alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5; - let mut bob_config = test_default_channel_config(); + let mut bob_config = test_legacy_channel_config(); bob_config.channel_handshake_config.announce_for_forwarding = true; bob_config.channel_handshake_limits.force_announced_channel_preference = false; bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3; @@ -884,11 +904,11 @@ pub fn test_justice_tx_htlc_timeout() { #[xtest(feature = "_externalize_tests")] pub fn test_justice_tx_htlc_success() { // Test justice txn built on revoked HTLC-Success tx, against both sides - let mut alice_config = test_default_channel_config(); + let mut alice_config = test_legacy_channel_config(); alice_config.channel_handshake_config.announce_for_forwarding = true; alice_config.channel_handshake_limits.force_announced_channel_preference = false; alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5; - let mut bob_config = test_default_channel_config(); + let mut bob_config = test_legacy_channel_config(); bob_config.channel_handshake_config.announce_for_forwarding = true; bob_config.channel_handshake_limits.force_announced_channel_preference = false; bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3; @@ -961,7 +981,9 @@ pub fn revoked_output_claim() { // transaction is broadcast by its counterparty let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1012,7 +1034,9 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: WatchtowerPersister::new(destination_script1), ]; let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect()); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1075,7 +1099,9 @@ pub fn claim_htlc_outputs() { let mut chanmon_cfgs = create_chanmon_cfgs(2); 
chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1388,7 +1414,12 @@ pub fn test_htlc_on_chain_success() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1635,7 +1666,12 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1886,7 +1922,12 @@ fn do_test_commitment_revoked_fail_backward_exhaustive( // commitment_signed) we will be free to fail/fulfill the HTLC backwards. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -2305,7 +2346,9 @@ pub fn test_htlc_ignore_latest_remote_commitment() { // ignored if we cannot claim them. This originally tickled an invalid unwrap(). 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -2357,7 +2400,12 @@ pub fn test_force_close_fail_back() { // Check which HTLCs are failed-backwards on channel force-closure let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3469,7 +3517,9 @@ pub fn test_claim_sizeable_push_msat() { // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3506,7 +3556,9 @@ pub fn test_claim_on_remote_sizeable_push_msat() { // to_remote output is encumbered by a P2WPKH let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3548,7 +3600,9 @@ pub fn test_claim_on_remote_revoked_sizeable_push_msat() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3581,7 +3635,9 @@ pub fn test_claim_on_remote_revoked_sizeable_push_msat() { pub fn test_static_spendable_outputs_preimage_tx() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3631,7 +3687,9 @@ pub fn test_static_spendable_outputs_preimage_tx() { pub fn test_static_spendable_outputs_timeout_tx() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, 
&node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3742,7 +3800,9 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { let mut chanmon_cfgs = create_chanmon_cfgs(2); chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3821,7 +3881,9 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { let mut chanmon_cfgs = create_chanmon_cfgs(2); chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3908,7 +3970,12 @@ pub fn test_onchain_to_onchain_claim() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4054,7 +4121,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { let node_cfgs = create_node_cfgs(5, &chanmon_cfgs); // When this test was written, the default base fee floated based on the HTLC count. // It is now fixed, so we simply set the fee to the expected value here. - let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); config.channel_config.forwarding_fee_base_msat = 196; let configs = [ @@ -4235,7 +4302,9 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { pub fn test_dynamic_spendable_outputs_local_htlc_success_tx() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4304,7 +4373,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let node_cfgs = create_node_cfgs(6, &chanmon_cfgs); // When this test was written, the default base fee floated based on the HTLC count. // It is now fixed, so we simply set the fee to the expected value here. 
- let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); config.channel_config.forwarding_fee_base_msat = 196; let configs = [ @@ -4722,7 +4791,9 @@ pub fn test_fail_backwards_previous_remote_announce() { pub fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -4815,7 +4886,12 @@ pub fn test_key_derivation_params() { node_cfgs.remove(0); node_cfgs.insert(0, node); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -4932,7 +5008,9 @@ pub fn test_static_output_closing_tx() { fn do_htlc_claim_local_commitment_only(use_dust: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4976,7 +5054,9 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -5015,7 +5095,12 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -5107,7 +5192,9 @@ pub fn htlc_claim_single_commitment_only_b() { pub fn test_fail_holding_cell_htlc_upon_free() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = 
nodes[0].node.get_our_node_id(); @@ -5206,7 +5293,9 @@ pub fn test_fail_holding_cell_htlc_upon_free() { pub fn test_free_and_fail_holding_cell_htlcs() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -5349,7 +5438,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); // Avoid having to include routing fees in calculations - let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); config.channel_config.forwarding_fee_base_msat = 0; config.channel_config.forwarding_fee_proportional_millionths = 0; let node_chanmgrs = create_node_chanmgrs( @@ -5716,7 +5805,9 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { let mut chanmon_cfgs = create_chanmon_cfgs(2); chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -5824,7 +5915,12 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -6130,7 +6226,9 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -6237,7 +6335,9 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { let mut chanmon_cfgs = create_chanmon_cfgs(2); chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -6439,7 +6539,9 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = 
create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let remote_txn = { @@ -7360,7 +7462,9 @@ pub fn test_concurrent_monitor_claim() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -7602,7 +7706,9 @@ pub fn test_htlc_no_detection() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -7669,7 +7775,12 @@ fn do_test_onchain_htlc_settlement_after_close( // 6) Bob claims the offered output on the broadcasted commitment. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -7977,7 +8088,12 @@ pub fn test_error_chans_closed() { // we can test various edge cases around it to ensure we don't regress. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -8076,7 +8192,12 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // aren't broadcasting transactions too early (ie not broadcasting them at all). let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks; @@ -8551,7 +8672,7 @@ fn do_test_max_dust_htlc_exposure( // might be available again for HTLC processing once the dust bandwidth has cleared up. 
let chanmon_cfgs = create_chanmon_cfgs(2); - let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); // We hard-code the feerate values here but they're re-calculated furter down and asserted. // If the values ever change below these constants should simply be updated. @@ -8937,7 +9058,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { } let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); // Set the dust limit to the default value config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(10_000); // Make sure the HTLC limits don't get in the way @@ -9152,7 +9273,7 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) assert_eq!(expected_dust_exposure_msat, 528_492); } - let mut default_config = test_default_channel_config(); + let mut default_config = test_legacy_channel_config(); if features == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() { default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; // in addition to the one above, this setting is also needed to create an anchor channel @@ -9685,7 +9806,7 @@ fn do_test_manual_broadcast_skips_commitment_until_funding( // forced to broadcast using `ChannelMonitor::broadcast_latest_holder_commitment_txn`. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let mut chan_config = test_default_channel_config(); + let mut chan_config = test_legacy_channel_config(); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -9913,7 +10034,7 @@ pub fn test_dust_exposure_holding_cell_assertion() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); // Configure nodes with specific dust limits - let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); // Use a fixed dust exposure limit to make the test simpler const DUST_HTLC_VALUE_MSAT: u64 = 500_000; config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FixedLimitMsat(5_000_000); diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index e719f5efa74..5b2ffca5fd4 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -33,7 +33,9 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { // in normal testing, we test it explicitly here. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -121,7 +123,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); // When this test was written, the default base fee floated based on the HTLC count. // It is now fixed, so we simply set the fee to the expected value here. 
- let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); config.channel_config.forwarding_fee_base_msat = 239; let configs = [Some(config.clone()), Some(config.clone()), Some(config.clone())]; @@ -749,7 +751,9 @@ pub fn holding_cell_htlc_counting() { pub fn test_basic_channel_reserve() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); @@ -778,7 +782,8 @@ pub fn test_basic_channel_reserve() { #[xtest(feature = "_externalize_tests")] fn test_fee_spike_violation_fails_htlc() { - do_test_fee_spike_buffer(None, true) + let cfg = test_legacy_channel_config(); + do_test_fee_spike_buffer(Some(cfg), true) } #[test] @@ -987,7 +992,9 @@ pub fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { // this situation. let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let default_config = UserConfig::default(); @@ -1025,7 +1032,9 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { let mut chanmon_cfgs = create_chanmon_cfgs(2); let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -1104,7 +1113,9 @@ pub fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let default_config = UserConfig::default(); @@ -1153,7 +1164,9 @@ pub fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() { // calculating our counterparty's commitment transaction fee (this was previously broken). 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000); @@ -1565,7 +1578,9 @@ pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -2141,12 +2156,8 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); - let mut default_config = test_default_channel_config(); - default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index 3eaa369c3f1..1503a9a3a63 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -1125,7 +1125,8 @@ mod test { fn test_channels_with_lower_inbound_capacity_than_invoice_amt_hints_filtering() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(legacy_cfg), None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let chan_1_0 = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 0, 100_000, 0); let chan_2_0 = create_unannounced_chan_between_nodes_with_value(&nodes, 2, 0, 1_000_000, 0); @@ -1731,7 +1732,9 @@ mod test { chanmon_cfgs[1].keys_manager.backing = make_dyn_keys_interface(&seed_1); chanmon_cfgs[2].keys_manager.backing = make_dyn_keys_interface(&seed_2); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(4, &node_cfgs, &[Some(legacy_cfg), None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); let chan_0_2 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 8074f71be8e..cdbd6e669f8 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -175,7 +175,7 @@ fn archive_fully_resolved_monitors() { // Test we archive fully resolved 
channel monitors at the right time. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let mut user_config = test_default_channel_config(); + let mut user_config = test_legacy_channel_config(); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1121,7 +1121,8 @@ fn test_no_preimage_inbound_htlc_balances() { // have a preimage. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); @@ -2317,7 +2318,8 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let node_deserialized; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -3830,7 +3832,8 @@ fn test_ladder_preimage_htlc_claims() { // already claimed) resulting in an invalid claim transaction. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_id_0 = nodes[0].node.get_our_node_id(); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index e24ad48a7d9..40fbae5c6ca 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -212,7 +212,7 @@ fn mpp_retry_overpay() { let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); - let mut user_config = test_default_channel_config(); + let mut user_config = test_legacy_channel_config(); user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; let mut limited_1 = user_config.clone(); limited_1.channel_handshake_config.our_htlc_minimum_msat = 35_000_000; @@ -782,7 +782,12 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let node_a_reload; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -1019,7 +1024,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let mut manually_accept_config = test_default_channel_config(); + let mut legacy_cfg = test_legacy_channel_config(); let persist_1; let chain_monitor_1; @@ 
-1028,8 +1033,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let persist_3; let chain_monitor_3; - let node_chanmgrs = - create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(legacy_cfg), None]); let node_a_1; let node_a_2; let node_a_3; @@ -1254,7 +1258,9 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let persister; let chain_monitor; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let node_a_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -2867,7 +2873,9 @@ fn auto_retry_partial_failure() { // Test that we'll retry appropriately on send partial failure and retry partial failure. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3112,7 +3120,9 @@ fn auto_retry_partial_failure() { fn auto_retry_zero_attempts_send_error() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -4108,7 +4118,9 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let (persist_a, persist_b, persist_c); let (chain_monitor_a, chain_monitor_b, chain_monitor_c); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let (node_a_1, node_a_2, node_a_3); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -4249,7 +4261,17 @@ fn do_claim_from_closed_chan(fail_payment: bool) { // CLTVs on the paths to different value resulting in a different claim deadline. 
let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 4, + &node_cfgs, + &[ + Some(legacy_cfg.clone()), + Some(legacy_cfg.clone()), + Some(legacy_cfg.clone()), + Some(legacy_cfg), + ], + ); let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -5139,7 +5161,7 @@ fn test_non_strict_forwarding() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; let configs = [Some(config.clone()), Some(config.clone()), Some(config)]; diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 7c66d753590..45e454973e6 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -368,7 +368,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes_0_deserialized; let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); @@ -523,7 +524,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes_0_deserialized; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -931,7 +933,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let persister; let new_chain_monitor; - let mut intercept_forwards_config = test_default_channel_config(); + let mut intercept_forwards_config = test_legacy_channel_config(); intercept_forwards_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); @@ -1109,7 +1111,8 @@ fn removed_payment_no_manager_persistence() { let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes_1_deserialized; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -1326,7 +1329,8 @@ fn test_reload_partial_funding_batch() { let new_persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)]); let new_channel_manager; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -1461,7 +1465,8 @@ fn test_peer_storage() { let (persister, 
chain_monitor); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let nodes_0_deserialized; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1512,7 +1517,7 @@ fn test_peer_storage() { // TODO: Handle the case where we've completely forgotten about an active channel. reload_node!( nodes[0], - test_default_channel_config(), + test_legacy_channel_config(), &nodes_0_serialized, &[&old_state_monitor[..]], persister, diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 4c1bdd72f2e..1a4dab9b925 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -49,7 +49,8 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { // before they otherwise would and reorg them out, confirming an HTLC-Success tx instead. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(legacy_cfg), None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1); @@ -182,7 +183,8 @@ fn test_counterparty_revoked_reorg() { // still be claim-from-able after the reorg. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); @@ -255,7 +257,8 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let nodes_0_deserialized; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -466,7 +469,8 @@ fn test_set_outpoints_partial_claiming() { // - disconnect tx, see no tx anymore let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); @@ -681,7 +685,8 @@ fn test_htlc_preimage_claim_holder_commitment_after_counterparty_commitment_reor // test that we only claim the currently confirmed commitment. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -756,7 +761,8 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa // confirmed commitment. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 74ffe3cbfd3..d24a4d88d0f 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -1335,7 +1335,8 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { // it manually. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index 618271df972..08754799da6 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -1066,7 +1066,7 @@ fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: // Tests that we're able to enforce HTLCs onchain during the different stages of a splice. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_anchors_channel_config(); + let config = test_default_channel_config(); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1818,7 +1818,7 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { fn disconnect_on_unexpected_interactive_tx_message() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_anchors_channel_config(); + let config = test_default_channel_config(); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1857,7 +1857,7 @@ fn disconnect_on_unexpected_interactive_tx_message() { fn fail_splice_on_interactive_tx_error() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_anchors_channel_config(); + let config = test_default_channel_config(); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1911,7 +1911,7 @@ fn fail_splice_on_interactive_tx_error() { fn fail_splice_on_tx_abort() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_anchors_channel_config(); + let config = test_default_channel_config(); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1965,7 +1965,7 @@ fn fail_splice_on_tx_abort() { fn fail_splice_on_channel_close() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_anchors_channel_config(); + let config = test_default_channel_config(); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -2016,7 +2016,7 @@ fn fail_splice_on_channel_close() { fn fail_quiescent_action_on_channel_close() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_anchors_channel_config(); + let config = test_default_channel_config(); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs index 69890fb001c..24ae8525450 100644 --- a/lightning/src/ln/update_fee_tests.rs +++ b/lightning/src/ln/update_fee_tests.rs @@ -385,16 +385,13 @@ pub fn do_test_update_fee_that_funder_cannot_afford(channel_type_features: Chann let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let mut default_config = test_default_channel_config(); + let mut cfg = test_legacy_channel_config(); if channel_type_features == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() { - default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; } - let node_chanmgrs = create_node_chanmgrs( - 2, - &node_cfgs, - &[Some(default_config.clone()), 
Some(default_config.clone())], - ); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(cfg.clone()), Some(cfg.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -411,8 +408,7 @@ pub fn do_test_update_fee_that_funder_cannot_afford(channel_type_features: Chann ); let channel_id = chan.2; let secp_ctx = Secp256k1::new(); - let bs_channel_reserve_sats = - get_holder_selected_channel_reserve_satoshis(channel_value, &default_config); + let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &cfg); let (anchor_outputs_value_sats, outputs_num_no_htlcs) = if channel_type_features.supports_anchors_zero_fee_htlc_tx() { (ANCHOR_OUTPUT_VALUE_SATOSHI * 2, 4) @@ -546,13 +542,12 @@ pub fn test_update_fee_that_saturates_subs() { // on the commitment transaction that is greater than her balance, we saturate the subtractions, // and force close the channel. - let mut default_config = test_default_channel_config(); + let mut cfg = test_legacy_channel_config(); let secp_ctx = Secp256k1::new(); let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(cfg.clone()), Some(cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -868,7 +863,9 @@ pub fn test_chan_init_feerate_unaffordability() { let mut chanmon_cfgs = create_chanmon_cfgs(2); let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1033,16 +1030,14 @@ pub fn do_cannot_afford_on_holding_cell_release( // update_fee from its holding cell, we do not generate any msg events let chanmon_cfgs = create_chanmon_cfgs(2); - let mut default_config = test_default_channel_config(); - default_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = - 100; + let mut cfg = test_legacy_channel_config(); + cfg.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; if channel_type_features.supports_anchors_zero_fee_htlc_tx() { - default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; } let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(cfg.clone()), Some(cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1219,13 +1214,12 @@ pub fn do_can_afford_given_trimmed_htlcs(inequality_regions: core::cmp::Ordering let chanmon_cfgs = create_chanmon_cfgs(2); - let mut default_config = test_default_channel_config(); - default_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = - 100; + let mut legacy_cfg = test_legacy_channel_config(); + 
legacy_cfg.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 1b750c63cd8..cb4bdeb6a51 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -1657,7 +1657,9 @@ mod tests { ); node_cfgs[0].chain_monitor = chain_mon_0; node_cfgs[1].chain_monitor = chain_mon_1; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); // Check that the persisted channel data is empty before any channels are From 60b5d66e58f5ce7d5ce13a7bb873ad6ff6374a3e Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Thu, 5 Feb 2026 17:12:01 -0800 Subject: [PATCH 191/242] Make get_latest_mon_update_id a helper on TestChainMonitor --- lightning/src/ln/chanmon_update_fail_tests.rs | 71 +++++++++---------- lightning/src/util/test_utils.rs | 5 ++ 2 files changed, 37 insertions(+), 39 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 3fa2073d5ba..8e616927679 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -49,13 +49,6 @@ use crate::prelude::*; use crate::sync::{Arc, Mutex}; use bitcoin::hashes::Hash; -fn get_latest_mon_update_id<'a, 'b, 'c>( - node: &Node<'a, 'b, 'c>, channel_id: ChannelId, -) -> (u64, u64) { - let monitor_id_state = node.chain_monitor.latest_monitor_update_id.lock().unwrap(); - monitor_id_state.get(&channel_id).unwrap().clone() -} - #[test] fn test_monitor_and_persister_update_fail() { // Test that if both updating the `ChannelMonitor` and persisting the updated @@ -212,7 +205,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors(&nodes[0], 0); @@ -404,7 +397,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Now fix monitor updating... 
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors(&nodes[0], 0); @@ -757,7 +750,7 @@ fn test_monitor_update_fail_cs() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors(&nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); @@ -792,7 +785,7 @@ fn test_monitor_update_fail_cs() { } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors(&nodes[0], 0); @@ -868,7 +861,7 @@ fn test_monitor_update_fail_no_rebroadcast() { check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors(&nodes[1], 0); @@ -938,7 +931,7 @@ fn test_monitor_update_raa_while_paused() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors(&nodes[0], 1); - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors(&nodes[0], 0); @@ -1080,7 +1073,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Restore monitor updating, ensuring we immediately get a fail-back update and a // update_add update. 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_2.2); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_2.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); check_added_monitors(&nodes[1], 0); expect_and_process_pending_htlcs_and_htlc_handling_failed( @@ -1354,7 +1347,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(bs_channel_upd.contents.channel_flags & 2, 0); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors(&nodes[1], 0); @@ -1439,7 +1432,7 @@ fn raa_no_response_awaiting_raa_state() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors(&nodes[1], 1); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); // nodes[1] should be AwaitingRAA here! check_added_monitors(&nodes[1], 0); @@ -1568,7 +1561,7 @@ fn claim_while_disconnected_monitor_update_fail() { // Now un-fail the monitor, which will result in B sending its original commitment update, // receiving the commitment update from A, and the resulting commitment dances. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors(&nodes[1], 0); @@ -1697,7 +1690,7 @@ fn monitor_failed_no_reestablish_response() { get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors(&nodes[1], 0); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); @@ -1795,7 +1788,7 @@ fn first_message_on_recv_ordering() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors(&nodes[1], 0); @@ -1894,7 +1887,7 @@ fn test_monitor_update_fail_claim() { // Now restore monitor updating on the 0<->1 channel and claim the funds on B. 
let channel_id = chan_1.2; - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors(&nodes[1], 0); @@ -2023,7 +2016,7 @@ fn test_monitor_update_on_pending_forwards() { check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors(&nodes[1], 0); @@ -2094,7 +2087,7 @@ fn monitor_update_claim_fail_no_response() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors(&nodes[1], 0); @@ -2166,7 +2159,7 @@ fn do_during_funding_monitor_fail( assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors(&nodes[0], 0); expect_channel_pending_event(&nodes[0], &node_b_id); @@ -2221,7 +2214,7 @@ fn do_during_funding_monitor_fail( } chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors(&nodes[1], 0); @@ -2339,7 +2332,7 @@ fn test_path_paused_mpp() { // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], chan_2_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(chan_2_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2_id, latest_update); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -2787,7 +2780,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // If we finish updating the monitor, we should free the holding cell right away (this did // not occur prior to #756). This should result in a new monitor update. 
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (mon_id, _) = get_latest_mon_update_id(&nodes[0], chan_id); + let (mon_id, _) = nodes[0].chain_monitor.get_latest_mon_update_id(chan_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, mon_id); expect_payment_claimed!(nodes[0], payment_hash_0, 100_000); check_added_monitors(&nodes[0], 1); @@ -3039,7 +3032,7 @@ fn test_temporary_error_during_shutdown() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[1].node.handle_closing_signed( node_a_id, @@ -3049,7 +3042,7 @@ fn test_temporary_error_during_shutdown() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[0].node.handle_closing_signed( @@ -3095,7 +3088,7 @@ fn double_temp_error() { // `claim_funds` results in a ChannelMonitorUpdate. nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors(&nodes[1], 1); - let (latest_update_1, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update_1, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, @@ -3104,7 +3097,7 @@ fn double_temp_error() { check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update_2, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update_2, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors(&nodes[1], 0); @@ -3511,7 +3504,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode reconnect_nodes(a_b_reconnect); reconnect_nodes(ReconnectArgs::new(&nodes[2], &nodes[1])); } else if completion_mode == BlockedUpdateComplMode::Async { - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_id_2); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_2); nodes[1] .chain_monitor .chain_monitor @@ -3689,7 +3682,7 @@ fn do_test_inverted_mon_completion_order( // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating // process. - let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab); + let (_, ab_update_id) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_ab); nodes[1] .chain_monitor .chain_monitor @@ -3722,7 +3715,7 @@ fn do_test_inverted_mon_completion_order( // ChannelMonitorUpdate hasn't yet completed. 
reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab); + let (_, ab_update_id) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_ab); nodes[1] .chain_monitor .chain_monitor @@ -3935,7 +3928,7 @@ fn do_test_durable_preimages_on_closed_channel( // Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending // `PaymentForwarded` event will finally be released. - let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab); + let (_, ab_update_id) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_ab); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id_ab, ab_update_id); // If the A<->B channel was closed before we reload, we'll replay the claim against it on @@ -4047,7 +4040,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]); } - let (_, bc_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_bc); + let (_, bc_update_id) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_bc); let mut events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), if close_during_reload { 2 } else { 1 }); expect_payment_forwarded( @@ -4072,7 +4065,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // Once we run event processing the monitor should free, check that it was indeed the B<->C // channel which was updated. check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 }); - let (_, post_ev_bc_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_bc); + let (_, post_ev_bc_update_id) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_bc); assert!(bc_update_id != post_ev_bc_update_id); // Finally, check that there's nothing left to do on B<->C reconnect and the channel operates @@ -4162,7 +4155,7 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { // ...but once we complete the A<->B channel preimage persistence, the B<->C channel // unlocks and we send both peers commitment updates. - let (ab_update_id, _) = get_latest_mon_update_id(&nodes[1], chan_id_ab); + let (ab_update_id, _) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_ab); assert!(nodes[1] .chain_monitor .chain_monitor @@ -5122,7 +5115,7 @@ fn test_mpp_claim_to_holding_cell() { check_added_monitors(&nodes[3], 2); // Complete the B <-> D monitor update, freeing the first fulfill. - let (latest_id, _) = get_latest_mon_update_id(&nodes[3], chan_3_id); + let (latest_id, _) = nodes[3].chain_monitor.get_latest_mon_update_id(chan_3_id); nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(chan_3_id, latest_id).unwrap(); let mut b_claim = get_htlc_update_msgs(&nodes[3], &node_b_id); @@ -5133,7 +5126,7 @@ fn test_mpp_claim_to_holding_cell() { // Finally, complete the C <-> D monitor update. Previously, this unlock failed to be processed // due to the existence of the blocked RAA update above. - let (latest_id, _) = get_latest_mon_update_id(&nodes[3], chan_4_id); + let (latest_id, _) = nodes[3].chain_monitor.get_latest_mon_update_id(chan_4_id); nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(chan_4_id, latest_id).unwrap(); // Once we process monitor events (in this case by checking for the `PaymentClaimed` event, the // RAA monitor update blocked above will be released. 
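The call sites above all reduce to the same two-step pattern: fetch the channel's latest in-flight monitor update id from the test chain monitor, then tell the wrapped `ChainMonitor` that persisting that update finished. A minimal sketch of the pattern using the helper this patch introduces (the free-function wrapper and its name are illustrative, not part of the patch; `Node` and both methods come from LDK's test utilities):

fn complete_latest_update<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, channel_id: ChannelId) {
	// TestChainMonitor tracks a (u64, u64) pair of update-id counters per channel.
	let (latest_update, _) = node.chain_monitor.get_latest_mon_update_id(channel_id);
	// Mark that monitor update as durably persisted, letting the channel resume.
	node.chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update);
}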
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 34f5d5fe36e..d7f23d32e2a 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -581,6 +581,11 @@ impl<'a> TestChainMonitor<'a> { self.added_monitors.lock().unwrap().push((channel_id, monitor)); self.chain_monitor.load_existing_monitor(channel_id, new_monitor) } + + pub fn get_latest_mon_update_id(&self, channel_id: ChannelId) -> (u64, u64) { + let monitor_id_state = self.latest_monitor_update_id.lock().unwrap(); + monitor_id_state.get(&channel_id).unwrap().clone() + } } impl<'a> chain::Watch for TestChainMonitor<'a> { fn watch_channel( From 7e84268505d0c72d16f4499b53bc51a32c85fe06 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 3 Feb 2026 16:09:06 -0800 Subject: [PATCH 192/242] Hold in-flight monitor updates until background event processing We previously assumed background events would eventually be processed prior to another `ChannelManager` write, so we would immediately remove all in-flight monitor updates that completed since the last `ChannelManager` serialization. This isn't always the case, so we now keep them all around until we're ready to handle them, i.e., when `process_background_events` is called. This was discovered while fuzzing `chanmon_consistency_target` on the main branch with some changes that allow it to connect blocks. It was triggered by reloading the `ChannelManager` after a monitor update completion for an outgoing HTLC, calling `ChannelManager::best_block_updated`, and reloading the `ChannelManager` once again. A test is included that provides a minimal reproduction of this case. --- lightning/src/ln/channelmanager.rs | 82 +++++++++++++++++++++--------- lightning/src/ln/reload_tests.rs | 81 +++++++++++++++++++++++++++++ 2 files changed, 140 insertions(+), 23 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 532514a3ae9..3656a03d58d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1277,7 +1277,11 @@ enum BackgroundEvent { /// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have /// them marked pending, thus we need to run any [`MonitorUpdateCompletionAction`] (s) pending /// on a channel. - MonitorUpdatesComplete { counterparty_node_id: PublicKey, channel_id: ChannelId }, + MonitorUpdatesComplete { + counterparty_node_id: PublicKey, + channel_id: ChannelId, + highest_update_id_completed: u64, + }, } /// A pointer to a channel that is unblocked when an event is surfaced @@ -8153,8 +8157,21 @@ impl< BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => { self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update); }, - BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => { - self.channel_monitor_updated(&channel_id, None, &counterparty_node_id); + BackgroundEvent::MonitorUpdatesComplete { + counterparty_node_id, + channel_id, + highest_update_id_completed, + } => { + // Now that we can finally handle the background event, remove all in-flight + // monitor updates for this channel that we know have completed, as they have + // already been persisted to the monitor and can be applied to our internal + // state such that the channel resumes operation if no new updates have been + // made since.
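+ // The highest completed id was captured when the manager was deserialized and + // travels on the event so that only the updates known-complete at that point + // are pruned here; any update queued since then stays in flight.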
+ self.channel_monitor_updated( + &channel_id, + Some(highest_update_id_completed), + &counterparty_node_id, + ); }, } } @@ -18134,39 +18151,58 @@ impl< ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr ) => { { + // When all in-flight updates have completed after we were last serialized, we + // need to remove them. However, we can't guarantee that the next serialization + // will have happened after processing the + // `BackgroundEvent::MonitorUpdatesComplete`, so removing them now could lead to the + // channel never being resumed as the event would not be regenerated after another + // reload. At the same time, we don't want to resume the channel now because there + // may be post-update actions to handle. Therefore, we're forced to keep tracking + // the completed in-flight updates (but only when they have all completed) until we + // are processing the `BackgroundEvent::MonitorUpdatesComplete`. let mut max_in_flight_update_id = 0; - let starting_len = $chan_in_flight_upds.len(); - $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id()); - if $chan_in_flight_upds.len() < starting_len { + let num_updates_completed = $chan_in_flight_upds + .iter() + .filter(|update| { + max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id); + update.update_id <= $monitor.get_latest_update_id() + }) + .count(); + if num_updates_completed > 0 { log_debug!( $logger, "{} ChannelMonitorUpdates completed after ChannelManager was last serialized", - starting_len - $chan_in_flight_upds.len() + num_updates_completed, ); } + let all_updates_completed = num_updates_completed == $chan_in_flight_upds.len(); + let funding_txo = $monitor.get_funding_txo(); - for update in $chan_in_flight_upds.iter() { - log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", - update.update_id, $channel_info_log, &$monitor.channel_id()); - max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id); - pending_background_events.push( - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: $counterparty_node_id, - funding_txo: funding_txo, - channel_id: $monitor.channel_id(), - update: update.clone(), - }); - } - if $chan_in_flight_upds.is_empty() { - // We had some updates to apply, but it turns out they had completed before we - // were serialized, we just weren't notified of that. Thus, we may have to run - // the completion actions for any monitor updates, but otherwise are done. 
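+ // Note the contrast with the removed comment above: completed updates are now + // only dropped from the in-flight set in the partial-completion branch below; + // when every update completed we keep the set intact and let the + // `BackgroundEvent::MonitorUpdatesComplete` handler prune it.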
+ if all_updates_completed { + log_debug!($logger, "All monitor updates completed since the ChannelManager was last serialized"); pending_background_events.push( BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id: $counterparty_node_id, channel_id: $monitor.channel_id(), + highest_update_id_completed: max_in_flight_update_id, }); } else { + $chan_in_flight_upds.retain(|update| { + let replay = update.update_id > $monitor.get_latest_update_id(); + if replay { + log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", + update.update_id, $channel_info_log, &$monitor.channel_id()); + pending_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: $counterparty_node_id, + funding_txo: funding_txo, + channel_id: $monitor.channel_id(), + update: update.clone(), + } + ); + } + replay + }); $peer_state.closed_channel_monitor_update_ids.entry($monitor.channel_id()) .and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v)) .or_insert(max_in_flight_update_id); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index a8206dfe850..c0432051a62 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1566,3 +1566,84 @@ fn test_peer_storage() { assert!(res.is_err()); } +#[test] +fn test_hold_completed_inflight_monitor_updates_upon_manager_reload() { + // Test that if a `ChannelMonitorUpdate` completes after the `ChannelManager` is serialized, + // but before it is deserialized, we hold any completed in-flight updates until background event + // processing. Previously, we would remove completed monitor updates from + // `in_flight_monitor_updates` during deserialization, relying on + // [`ChannelManager::process_background_events`] to eventually be called before the + // `ChannelManager` is serialized again such that the channel is resumed and further updates can + // be made. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let (persister_a, persister_b); + let (chain_monitor_a, chain_monitor_b); + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes_0_deserialized_a; + let nodes_0_deserialized_b; + + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; + + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); + + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + + // Send a payment that will be pending due to an async monitor update. + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); + let payment_id = PaymentId(payment_hash.0); + let onion = RecipientOnionFields::secret_only(payment_secret); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap(); + check_added_monitors(&nodes[0], 1); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Serialize the ChannelManager while the monitor update is still in-flight. + let node_0_serialized = nodes[0].node.encode(); + + // Now complete the monitor update by calling force_channel_monitor_updated. + // This updates the monitor's state, but the ChannelManager still thinks it's pending. 
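+ // (This recreates the race described in the commit message: the manager was + // serialized above, so the completion below is invisible to that snapshot.)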
+ let (_, latest_update_id) = nodes[0].chain_monitor.get_latest_mon_update_id(chan_id); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update_id); + let monitor_serialized_updated = get_monitor!(nodes[0], chan_id).encode(); + + // Reload the node with the updated monitor. Upon deserialization, the ChannelManager will + // detect that the monitor update completed (monitor's update_id >= the in-flight update_id) + // and queue a `BackgroundEvent::MonitorUpdatesComplete`. + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + reload_node!( + nodes[0], + test_default_channel_config(), + &node_0_serialized, + &[&monitor_serialized_updated[..]], + persister_a, + chain_monitor_a, + nodes_0_deserialized_a + ); + + // If we serialize again, even though we haven't processed any background events yet, we should + // still see the `BackgroundEvent::MonitorUpdatesComplete` be regenerated on startup. + let node_0_serialized = nodes[0].node.encode(); + reload_node!( + nodes[0], + test_default_channel_config(), + &node_0_serialized, + &[&monitor_serialized_updated[..]], + persister_b, + chain_monitor_b, + nodes_0_deserialized_b + ); + + // Reconnect the nodes. We should finally see the `update_add_htlc` go out, as the reconnection + // should first process `BackgroundEvent::MonitorUpdatesComplete`, allowing the channel to be + // resumed. + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.pending_htlc_adds = (0, 1); + reconnect_nodes(reconnect_args); +} + From f128b8504d1724008eab10d37ad9f619657d1a24 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Thu, 5 Feb 2026 08:49:22 -0800 Subject: [PATCH 193/242] Rustfmt ChannelManager::process_background_events --- lightning/src/ln/channelmanager.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 3656a03d58d..1c7a99300f2 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -8140,9 +8140,11 @@ impl< /// Free the background events, generally called from [`PersistenceNotifierGuard`] constructors. /// /// Expects the caller to have a total_consistency_lock read lock. - #[rustfmt::skip] fn process_background_events(&self) -> NotifyOption { - debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread); + debug_assert_ne!( + self.total_consistency_lock.held_by_thread(), + LockHeldState::NotHeldByThread + ); self.background_events_processed_since_startup.store(true, Ordering::Release); @@ -8154,8 +8156,18 @@ impl< for event in background_events.drain(..) 
{ match event { - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => { - self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update); + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo, + channel_id, + update, + } => { + self.apply_post_close_monitor_update( + counterparty_node_id, + channel_id, + funding_txo, + update, + ); }, BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, From 924f77f3dd0bbcf2c7bab9d499bf092addba2be0 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 6 Feb 2026 09:37:11 +0100 Subject: [PATCH 194/242] Let Claude run per-crate/feature tests Previously, we instructed Claude to always run `./ci/ci-tests.sh`, which doesn't seem to work well in practice, as he seems to also employ some kind of timeout, potentially leading to him getting stuck. Here we update `CLAUDE.md` accordingly, having him only run `cargo test`. --- CLAUDE.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/CLAUDE.md b/CLAUDE.md index 611322c12fe..f87bc665bd4 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -10,7 +10,11 @@ See [README.md](README.md) for the workspace layout and [ARCH.md](ARCH.md) for s ## Development Rules -- Always ensure tests pass before committing. To this end, you should run the test suite via `./ci/ci-tests.sh`. +- Always ensure tests pass before committing. To this end, you should run + `cargo +1.75.0 test` for all affected crates and/or features. Upon completion + of the full task you might prompt the user whether they want you to run the + full CI tests via `./ci/ci-tests.sh`. Note however that this script will run + for a very long time, so please don't time out when you do run it. - Run `cargo +1.75.0 fmt --all` after every code change - Never add new dependencies unless explicitly requested - Please always disclose the use of any AI tools in commit messages and PR descriptions using a `Co-Authored-By:` line. From 0a6b6d4c52c79019b0c2f9042625ff2845633426 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 30 Jan 2026 15:57:20 +0100 Subject: [PATCH 195/242] fuzz: expand chanmon_consistency to 6 channels (3 per peer pair) This expands the channel monitor consistency fuzz test from 2 channels to 6 channels (3 between A-B and 3 between B-C), enabling future MPP payment testing. Changes: - Extract `connect_peers!` macro from `make_channel!` to avoid duplicate peer connections - Create channel arrays `chan_ab_ids[3]` and `chan_bc_ids[3]` - Store SCIDs in `chan_ab_scids[3]` and `chan_bc_scids[3]` - Use funding transaction versions 1-6 to avoid txid collisions under fuzz hashing (which XORs all bytes to a single byte, causing versions 0-5 to collide between A-B and B-C channel pairs) - Update `test_return!` assertions to expect 3/6/3 channels Co-Authored-By: Claude Opus 4.5 --- fuzz/src/chanmon_consistency.rs | 236 +++++++++++++++++++++++--------- 1 file changed, 174 insertions(+), 62 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 202488d9777..eac993f8e92 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -848,8 +848,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }}; } - macro_rules! 
connect_peers { + ($source: expr, $dest: expr) => {{ let init_dest = Init { features: $dest.init_features(), networks: None, @@ -862,7 +862,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { remote_network_address: None, }; $dest.peer_connected($source.get_our_node_id(), &init_src, false).unwrap(); - + }}; + } + macro_rules! make_channel { + ($source: expr, $dest: expr, $source_monitor: expr, $dest_monitor: expr, $dest_keys_manager: expr, $chan_id: expr) => {{ $source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None, None).unwrap(); let open_channel = { let events = $source.get_and_clear_pending_msg_events(); @@ -1080,8 +1083,26 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut nodes = [node_a, node_b, node_c]; - let chan_1_id = make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 0); - let chan_2_id = make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 1); + // Connect peers first, then create channels + connect_peers!(nodes[0], nodes[1]); + connect_peers!(nodes[1], nodes[2]); + + // Create 3 channels between A-B and 3 channels between B-C (6 total). + // + // Use version numbers 1-6 to avoid txid collisions under fuzz hashing. + // Fuzz mode uses XOR-based hashing (all bytes XOR to one byte), and + // versions 0-5 cause collisions between A-B and B-C channel pairs + // (e.g., A-B with Version(1) collides with B-C with Version(3)). + let chan_ab_ids = [ + make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 1), + make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 2), + make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 3), + ]; + let chan_bc_ids = [ + make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 4), + make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 5), + make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 6), + ]; // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions // during normal operation in `test_return`. 
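The version-number comment above deserves a concrete illustration. In the fuzz build the real transaction hash is replaced by a reduction that XORs every serialized byte down to a single byte, so two funding transactions collide whenever their byte-level differences cancel under XOR. A self-contained sketch of the mechanic (`xor_fold` here is a stand-in assumption for the fuzz hasher, not its actual implementation):

fn xor_fold(bytes: &[u8]) -> u8 {
	// Reduce the whole serialization to one byte, as the fuzz "hash" does.
	bytes.iter().fold(0u8, |acc, &b| acc ^ b)
}

fn main() {
	// The inputs differ in two positions (1 vs 3 and 0x02 vs 0x00), but the XOR
	// deltas cancel (1 ^ 3 == 0x02 ^ 0x00), mirroring how an A-B funding tx with
	// Version(1) collided with a B-C funding tx with Version(3) before this patch.
	let tx_a = [1u8, 0, 0, 0, 0xaa, 0x02];
	let tx_b = [3u8, 0, 0, 0, 0xaa, 0x00];
	assert_eq!(xor_fold(&tx_a), xor_fold(&tx_b));
}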
@@ -1093,15 +1114,34 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { lock_fundings!(nodes); - let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap(); - let chan_a_id = nodes[0].list_usable_channels()[0].channel_id; - let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap(); - let chan_b_id = nodes[2].list_usable_channels()[0].channel_id; + // Get SCIDs for all A-B channels (from node A's perspective) + let node_a_chans: Vec<_> = nodes[0].list_usable_channels(); + let chan_ab_scids: [u64; 3] = [ + node_a_chans[0].short_channel_id.unwrap(), + node_a_chans[1].short_channel_id.unwrap(), + node_a_chans[2].short_channel_id.unwrap(), + ]; + let chan_ab_chan_ids: [ChannelId; 3] = + [node_a_chans[0].channel_id, node_a_chans[1].channel_id, node_a_chans[2].channel_id]; + // Get SCIDs for all B-C channels (from node C's perspective) + let node_c_chans: Vec<_> = nodes[2].list_usable_channels(); + let chan_bc_scids: [u64; 3] = [ + node_c_chans[0].short_channel_id.unwrap(), + node_c_chans[1].short_channel_id.unwrap(), + node_c_chans[2].short_channel_id.unwrap(), + ]; + let chan_bc_chan_ids: [ChannelId; 3] = + [node_c_chans[0].channel_id, node_c_chans[1].channel_id, node_c_chans[2].channel_id]; + // Keep old names for backward compatibility in existing code + let chan_a = chan_ab_scids[0]; + let chan_a_id = chan_ab_chan_ids[0]; + let chan_b = chan_bc_scids[0]; + let chan_b_id = chan_bc_chan_ids[0]; let mut p_ctr: u64 = 0; - let mut chan_a_disconnected = false; - let mut chan_b_disconnected = false; + let mut peers_ab_disconnected = false; + let mut peers_bc_disconnected = false; let mut ab_events = Vec::new(); let mut ba_events = Vec::new(); let mut bc_events = Vec::new(); @@ -1116,9 +1156,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { macro_rules! test_return { () => {{ - assert_eq!(nodes[0].list_channels().len(), 1); - assert_eq!(nodes[1].list_channels().len(), 2); - assert_eq!(nodes[2].list_channels().len(), 1); + assert_eq!(nodes[0].list_channels().len(), 3); + assert_eq!(nodes[1].list_channels().len(), 6); + assert_eq!(nodes[2].list_channels().len(), 3); // At no point should we have broadcasted any transactions after the initial channel // opens. 
@@ -1711,29 +1751,45 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { *mon_style[2].borrow_mut() = ChannelMonitorUpdateStatus::Completed; }, - 0x08 => complete_all_monitor_updates(&monitor_a, &chan_1_id), - 0x09 => complete_all_monitor_updates(&monitor_b, &chan_1_id), - 0x0a => complete_all_monitor_updates(&monitor_b, &chan_2_id), - 0x0b => complete_all_monitor_updates(&monitor_c, &chan_2_id), + 0x08 => { + for id in &chan_ab_ids { + complete_all_monitor_updates(&monitor_a, id); + } + }, + 0x09 => { + for id in &chan_ab_ids { + complete_all_monitor_updates(&monitor_b, id); + } + }, + 0x0a => { + for id in &chan_bc_ids { + complete_all_monitor_updates(&monitor_b, id); + } + }, + 0x0b => { + for id in &chan_bc_ids { + complete_all_monitor_updates(&monitor_c, id); + } + }, 0x0c => { - if !chan_a_disconnected { + if !peers_ab_disconnected { nodes[0].peer_disconnected(nodes[1].get_our_node_id()); nodes[1].peer_disconnected(nodes[0].get_our_node_id()); - chan_a_disconnected = true; + peers_ab_disconnected = true; drain_msg_events_on_disconnect!(0); } }, 0x0d => { - if !chan_b_disconnected { + if !peers_bc_disconnected { nodes[1].peer_disconnected(nodes[2].get_our_node_id()); nodes[2].peer_disconnected(nodes[1].get_our_node_id()); - chan_b_disconnected = true; + peers_bc_disconnected = true; drain_msg_events_on_disconnect!(2); } }, 0x0e => { - if chan_a_disconnected { + if peers_ab_disconnected { let init_1 = Init { features: nodes[1].init_features(), networks: None, @@ -1746,11 +1802,11 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { remote_network_address: None, }; nodes[1].peer_connected(nodes[0].get_our_node_id(), &init_0, false).unwrap(); - chan_a_disconnected = false; + peers_ab_disconnected = false; } }, 0x0f => { - if chan_b_disconnected { + if peers_bc_disconnected { let init_2 = Init { features: nodes[2].init_features(), networks: None, @@ -1763,7 +1819,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { remote_network_address: None, }; nodes[2].peer_connected(nodes[1].get_our_node_id(), &init_1, false).unwrap(); - chan_b_disconnected = false; + peers_bc_disconnected = false; } }, @@ -2099,9 +2155,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0xb0 | 0xb1 | 0xb2 => { // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. - if !chan_a_disconnected { + if !peers_ab_disconnected { nodes[1].peer_disconnected(nodes[0].get_our_node_id()); - chan_a_disconnected = true; + peers_ab_disconnected = true; push_excess_b_events!( nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0) @@ -2117,16 +2173,16 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0xb3..=0xbb => { // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. 
- if !chan_a_disconnected { + if !peers_ab_disconnected { nodes[0].peer_disconnected(nodes[1].get_our_node_id()); - chan_a_disconnected = true; + peers_ab_disconnected = true; nodes[0].get_and_clear_pending_msg_events(); ab_events.clear(); ba_events.clear(); } - if !chan_b_disconnected { + if !peers_bc_disconnected { nodes[2].peer_disconnected(nodes[1].get_our_node_id()); - chan_b_disconnected = true; + peers_bc_disconnected = true; nodes[2].get_and_clear_pending_msg_events(); bc_events.clear(); cb_events.clear(); @@ -2139,9 +2195,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0xbc | 0xbd | 0xbe => { // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. - if !chan_b_disconnected { + if !peers_bc_disconnected { nodes[1].peer_disconnected(nodes[2].get_our_node_id()); - chan_b_disconnected = true; + peers_bc_disconnected = true; push_excess_b_events!( nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2) @@ -2155,28 +2211,76 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { monitor_c = new_monitor_c; }, - 0xf0 => complete_monitor_update(&monitor_a, &chan_1_id, &complete_first), - 0xf1 => complete_monitor_update(&monitor_a, &chan_1_id, &complete_second), - 0xf2 => complete_monitor_update(&monitor_a, &chan_1_id, &Vec::pop), + 0xf0 => { + for id in &chan_ab_ids { + complete_monitor_update(&monitor_a, id, &complete_first); + } + }, + 0xf1 => { + for id in &chan_ab_ids { + complete_monitor_update(&monitor_a, id, &complete_second); + } + }, + 0xf2 => { + for id in &chan_ab_ids { + complete_monitor_update(&monitor_a, id, &Vec::pop); + } + }, - 0xf4 => complete_monitor_update(&monitor_b, &chan_1_id, &complete_first), - 0xf5 => complete_monitor_update(&monitor_b, &chan_1_id, &complete_second), - 0xf6 => complete_monitor_update(&monitor_b, &chan_1_id, &Vec::pop), + 0xf4 => { + for id in &chan_ab_ids { + complete_monitor_update(&monitor_b, id, &complete_first); + } + }, + 0xf5 => { + for id in &chan_ab_ids { + complete_monitor_update(&monitor_b, id, &complete_second); + } + }, + 0xf6 => { + for id in &chan_ab_ids { + complete_monitor_update(&monitor_b, id, &Vec::pop); + } + }, - 0xf8 => complete_monitor_update(&monitor_b, &chan_2_id, &complete_first), - 0xf9 => complete_monitor_update(&monitor_b, &chan_2_id, &complete_second), - 0xfa => complete_monitor_update(&monitor_b, &chan_2_id, &Vec::pop), + 0xf8 => { + for id in &chan_bc_ids { + complete_monitor_update(&monitor_b, id, &complete_first); + } + }, + 0xf9 => { + for id in &chan_bc_ids { + complete_monitor_update(&monitor_b, id, &complete_second); + } + }, + 0xfa => { + for id in &chan_bc_ids { + complete_monitor_update(&monitor_b, id, &Vec::pop); + } + }, - 0xfc => complete_monitor_update(&monitor_c, &chan_2_id, &complete_first), - 0xfd => complete_monitor_update(&monitor_c, &chan_2_id, &complete_second), - 0xfe => complete_monitor_update(&monitor_c, &chan_2_id, &Vec::pop), + 0xfc => { + for id in &chan_bc_ids { + complete_monitor_update(&monitor_c, id, &complete_first); + } + }, + 0xfd => { + for id in &chan_bc_ids { + complete_monitor_update(&monitor_c, id, &complete_second); + } + }, + 0xfe => { + for id in &chan_bc_ids { + complete_monitor_update(&monitor_c, id, &Vec::pop); + } + }, 0xff => { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. 
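// (The settling below proceeds in stages: reconnect any disconnected peers, // then repeatedly flush monitor updates and message events until a full pass // makes no progress, and finally require a substantial payment to succeed on // every channel.)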
// First, make sure peers are all connected to each other - if chan_a_disconnected { + if peers_ab_disconnected { let init_1 = Init { features: nodes[1].init_features(), networks: None, @@ -2189,9 +2293,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { remote_network_address: None, }; nodes[1].peer_connected(nodes[0].get_our_node_id(), &init_0, false).unwrap(); - chan_a_disconnected = false; + peers_ab_disconnected = false; } - if chan_b_disconnected { + if peers_bc_disconnected { let init_2 = Init { features: nodes[2].init_features(), networks: None, @@ -2204,7 +2308,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { remote_network_address: None, }; nodes[2].peer_connected(nodes[1].get_our_node_id(), &init_1, false).unwrap(); - chan_b_disconnected = false; + peers_bc_disconnected = false; } macro_rules! process_all_events { @@ -2215,10 +2319,14 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { panic!("It may take may iterations to settle the state, but it should not take forever"); } // Next, make sure no monitor updates are pending - complete_all_monitor_updates(&monitor_a, &chan_1_id); - complete_all_monitor_updates(&monitor_b, &chan_1_id); - complete_all_monitor_updates(&monitor_b, &chan_2_id); - complete_all_monitor_updates(&monitor_c, &chan_2_id); + for id in &chan_ab_ids { + complete_all_monitor_updates(&monitor_a, id); + complete_all_monitor_updates(&monitor_b, id); + } + for id in &chan_bc_ids { + complete_all_monitor_updates(&monitor_b, id); + complete_all_monitor_updates(&monitor_c, id); + } // Then, make sure any current forwards make their way to their destination if process_msg_events!(0, false, ProcessMessages::AllMessages) { last_pass_no_updates = false; @@ -2263,14 +2371,18 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { process_all_events!(); // Finally, make sure that at least one end of each channel can make a substantial payment - assert!( - send(0, 1, chan_a, 10_000_000, &mut p_ctr) - || send(1, 0, chan_a, 10_000_000, &mut p_ctr) - ); - assert!( - send(1, 2, chan_b, 10_000_000, &mut p_ctr) - || send(2, 1, chan_b, 10_000_000, &mut p_ctr) - ); + for &scid in &chan_ab_scids { + assert!( + send(0, 1, scid, 10_000_000, &mut p_ctr) + || send(1, 0, scid, 10_000_000, &mut p_ctr) + ); + } + for &scid in &chan_bc_scids { + assert!( + send(1, 2, scid, 10_000_000, &mut p_ctr) + || send(2, 1, scid, 10_000_000, &mut p_ctr) + ); + } last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire); last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire); From 61caa03acb81f965915c8eca6c6837e145f0534b Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 6 Feb 2026 10:33:25 +0100 Subject: [PATCH 196/242] fuzz: add MPP payment support to chanmon_consistency Add multi-path payment (MPP) fuzzing commands that split payments across multiple channels: - send_mpp_payment: direct MPP from source to dest using multiple channels - send_mpp_hop_payment: MPP via intermediate node with multiple channels on either or both hops New fuzz commands: - 0x70: direct MPP 0->1 (uses all 3 A-B channels) - 0x71: MPP 0->1->2, multi channels on first hop (A-B) - 0x72: MPP 0->1->2, multi channels on both hops (A-B and B-C) - 0x73: MPP 0->1->2, multi channels on second hop (B-C) - 0x74: single-channel MPP 0->1 (all parts over one channel) Co-Authored-By: Claude Opus 4.5 --- fuzz/src/chanmon_consistency.rs | 178 ++++++++++++++++++++++++++++++++ 1 file changed, 178 insertions(+) diff --git 
a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index eac993f8e92..95e7506d9d4 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -658,6 +658,125 @@ fn send_hop_payment( } } +/// Send an MPP payment directly from source to dest using multiple channels. +#[inline] +fn send_mpp_payment( + source: &ChanMan, dest: &ChanMan, dest_scids: &[u64], amt: u64, payment_secret: PaymentSecret, + payment_hash: PaymentHash, payment_id: PaymentId, +) -> bool { + let num_paths = dest_scids.len(); + if num_paths == 0 { + return false; + } + + let amt_per_path = amt / num_paths as u64; + let mut paths = Vec::with_capacity(num_paths); + + for (i, &dest_scid) in dest_scids.iter().enumerate() { + let path_amt = if i == num_paths - 1 { + amt - amt_per_path * (num_paths as u64 - 1) + } else { + amt_per_path + }; + + paths.push(Path { + hops: vec![RouteHop { + pubkey: dest.get_our_node_id(), + node_features: dest.node_features(), + short_channel_id: dest_scid, + channel_features: dest.channel_features(), + fee_msat: path_amt, + cltv_expiry_delta: 200, + maybe_announced_channel: true, + }], + blinded_tail: None, + }); + } + + let route_params = RouteParameters::from_payment_params_and_value( + PaymentParameters::from_node_id(dest.get_our_node_id(), TEST_FINAL_CLTV), + amt, + ); + let route = Route { paths, route_params: Some(route_params) }; + let onion = RecipientOnionFields::secret_only(payment_secret); + let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); + match res { + Err(_) => false, + Ok(()) => check_payment_send_events(source, payment_id), + } +} + +/// Send an MPP payment from source to dest via middle node. +/// Supports multiple channels on either or both hops. +#[inline] +fn send_mpp_hop_payment( + source: &ChanMan, middle: &ChanMan, middle_scids: &[u64], dest: &ChanMan, dest_scids: &[u64], + amt: u64, payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, +) -> bool { + // Create paths by pairing middle_scids with dest_scids + let num_paths = middle_scids.len().max(dest_scids.len()); + if num_paths == 0 { + return false; + } + + let first_hop_fee = 50_000; + let amt_per_path = amt / num_paths as u64; + let fee_per_path = first_hop_fee / num_paths as u64; + let mut paths = Vec::with_capacity(num_paths); + + for i in 0..num_paths { + let middle_scid = middle_scids[i % middle_scids.len()]; + let dest_scid = dest_scids[i % dest_scids.len()]; + + let path_amt = if i == num_paths - 1 { + amt - amt_per_path * (num_paths as u64 - 1) + } else { + amt_per_path + }; + let path_fee = if i == num_paths - 1 { + first_hop_fee - fee_per_path * (num_paths as u64 - 1) + } else { + fee_per_path + }; + + paths.push(Path { + hops: vec![ + RouteHop { + pubkey: middle.get_our_node_id(), + node_features: middle.node_features(), + short_channel_id: middle_scid, + channel_features: middle.channel_features(), + fee_msat: path_fee, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: dest.get_our_node_id(), + node_features: dest.node_features(), + short_channel_id: dest_scid, + channel_features: dest.channel_features(), + fee_msat: path_amt, + cltv_expiry_delta: 200, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }); + } + + let route_params = RouteParameters::from_payment_params_and_value( + PaymentParameters::from_node_id(dest.get_our_node_id(), TEST_FINAL_CLTV), + amt, + ); + let route = Route { paths, route_params: Some(route_params) }; + let onion = 
RecipientOnionFields::secret_only(payment_secret); + let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); + match res { + Err(_) => false, + Ok(()) => check_payment_send_events(source, payment_id), + } +} + #[inline] pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let out = SearchingOutput::new(underlying_out); @@ -1726,6 +1845,53 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }; + // Direct MPP payment (no hop) + let send_mpp_direct = |source_idx: usize, + dest_idx: usize, + dest_scids: &[u64], + amt: u64, + payment_ctr: &mut u64| { + let source = &nodes[source_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); + let succeeded = send_mpp_payment(source, dest, dest_scids, amt, secret, hash, id); + if succeeded { + pending_payments.borrow_mut()[source_idx].push(id); + } + }; + + // MPP payment via hop - splits payment across multiple channels on either or both hops + let send_mpp_hop = |source_idx: usize, + middle_idx: usize, + middle_scids: &[u64], + dest_idx: usize, + dest_scids: &[u64], + amt: u64, + payment_ctr: &mut u64| { + let source = &nodes[source_idx]; + let middle = &nodes[middle_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); + let succeeded = send_mpp_hop_payment( + source, + middle, + middle_scids, + dest, + dest_scids, + amt, + secret, + hash, + id, + ); + if succeeded { + pending_payments.borrow_mut()[source_idx].push(id); + } + }; + let v = get_slice!(1)[0]; out.locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes()); match v { @@ -1910,6 +2076,18 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0x6c => send_hop_noret(0, 1, chan_a, 2, chan_b, 1, &mut p_ctr), 0x6d => send_hop_noret(2, 1, chan_b, 0, chan_a, 1, &mut p_ctr), + // MPP payments + // 0x70: direct MPP from 0 to 1 (multi A-B channels) + 0x70 => send_mpp_direct(0, 1, &chan_ab_scids, 1_000_000, &mut p_ctr), + // 0x71: MPP 0->1->2, multi channels on first hop (A-B) + 0x71 => send_mpp_hop(0, 1, &chan_ab_scids, 2, &[chan_b], 1_000_000, &mut p_ctr), + // 0x72: MPP 0->1->2, multi channels on both hops (A-B and B-C) + 0x72 => send_mpp_hop(0, 1, &chan_ab_scids, 2, &chan_bc_scids, 1_000_000, &mut p_ctr), + // 0x73: MPP 0->1->2, multi channels on second hop (B-C) + 0x73 => send_mpp_hop(0, 1, &[chan_a], 2, &chan_bc_scids, 1_000_000, &mut p_ctr), + // 0x74: direct MPP from 0 to 1, multi parts over single channel + 0x74 => send_mpp_direct(0, 1, &[chan_a, chan_a, chan_a], 1_000_000, &mut p_ctr), + 0x80 => { let mut max_feerate = last_htlc_clear_fee_a; if !anchors { From 0a6b6d4c52c79019b0c2f9042625ff2845633426 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 6 Feb 2026 10:40:04 +0100 Subject: [PATCH 197/242] fuzz: assert no stuck payments in chanmon_consistency After settling all state at 0xff, verify that pending_payments is empty for all nodes. If payments remain stuck indefinitely, that indicates a bug in payment resolution. 
Co-Authored-By: Claude Opus 4.5 --- fuzz/src/chanmon_consistency.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 95e7506d9d4..c8aaf124e42 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -2548,6 +2548,16 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { process_all_events!(); + // Verify no payments are stuck - all should have resolved + for (idx, pending) in pending_payments.borrow().iter().enumerate() { + assert!( + pending.is_empty(), + "Node {} has {} stuck pending payments after settling all state", + idx, + pending.len() + ); + } + // Finally, make sure that at least one end of each channel can make a substantial payment for &scid in &chan_ab_scids { assert!( From 830e1b35b6d60ca9b79d83be560198f4174e4972 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 6 Feb 2026 08:51:40 +0100 Subject: [PATCH 198/242] Add `counterparty_node_id` to `Event::SpendableOutputs` This adds an optional `counterparty_node_id` field to the `SpendableOutputs` event, providing users with information about which channel counterparty the spendable outputs belong to. The field uses TLV type 3 (odd) for backwards compatibility, meaning older versions will safely ignore it during deserialization. When reading events serialized by older LDK versions, this field will be `None`. Co-Authored-By: HAL 9000 Signed-off-by: Elias Rohrer --- lightning-background-processor/src/lib.rs | 2 +- lightning-tests/src/upgrade_downgrade_tests.rs | 4 +++- lightning/src/chain/channelmonitor.rs | 1 + lightning/src/events/mod.rs | 15 +++++++++++++-- lightning/src/ln/functional_tests.rs | 2 +- lightning/src/ln/monitor_tests.rs | 4 ++-- lightning/src/ln/reorg_tests.rs | 4 ++-- 7 files changed, 23 insertions(+), 9 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index bb99d65e6b5..d34ef7de2f9 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -3110,7 +3110,7 @@ mod tests { let event = receiver.recv_timeout(EVENT_DEADLINE).expect("Events not handled within deadline"); match event { - Event::SpendableOutputs { outputs, channel_id } => { + Event::SpendableOutputs { outputs, channel_id, counterparty_node_id: _ } => { nodes[0] .sweeper .track_spendable_outputs(outputs, channel_id, false, Some(153)) diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index 8df670321be..14b0a5c5822 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -308,7 +308,9 @@ fn test_0_1_legacy_remote_key_derivation() { connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let mut spendable_event = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(spendable_event.len(), 1); - if let Event::SpendableOutputs { outputs, channel_id: ev_id } = spendable_event.pop().unwrap() { + if let Event::SpendableOutputs { outputs, channel_id: ev_id, counterparty_node_id: _ } = + spendable_event.pop().unwrap() + { assert_eq!(ev_id.unwrap().0, channel_id); assert_eq!(outputs.len(), 1); let spk = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(); diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index a537ff55874..b109466ab6f 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -5619,6 
+5619,7 @@ impl ChannelMonitorImpl { self.pending_events.push(Event::SpendableOutputs { outputs: vec![descriptor], channel_id: Some(self.channel_id()), + counterparty_node_id: Some(self.counterparty_node_id), }); self.spendable_txids_confirmed.push(entry.txid); }, diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 3d860e9f363..a5ac08be23b 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1318,6 +1318,10 @@ pub enum Event { /// /// This will always be `Some` for events generated by LDK versions 0.0.117 and above. channel_id: Option, + /// The `node_id` of the channel counterparty. + /// + /// This will always be `Some` for events generated by LDK versions 0.3 and above. + counterparty_node_id: Option, }, /// This event is generated when a payment has been successfully forwarded through us and a /// forwarding fee earned. @@ -2012,11 +2016,12 @@ impl Writeable for Event { }); }, // 4u8 used to be `PendingHTLCsForwardable` - &Event::SpendableOutputs { ref outputs, channel_id } => { + &Event::SpendableOutputs { ref outputs, channel_id, counterparty_node_id } => { 5u8.write(writer)?; write_tlv_fields!(writer, { (0, WithoutLength(outputs), required), (1, channel_id, option), + (3, counterparty_node_id, option), }); }, &Event::HTLCIntercepted { @@ -2521,11 +2526,17 @@ impl MaybeReadable for Event { let mut f = || { let mut outputs = WithoutLength(Vec::new()); let mut channel_id: Option = None; + let mut counterparty_node_id: Option = None; read_tlv_fields!(reader, { (0, outputs, required), (1, channel_id, option), + (3, counterparty_node_id, option), }); - Ok(Some(Event::SpendableOutputs { outputs: outputs.0, channel_id })) + Ok(Some(Event::SpendableOutputs { + outputs: outputs.0, + channel_id, + counterparty_node_id, + })) }; f() }, diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 8e854b31150..a7a062add11 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -3430,7 +3430,7 @@ macro_rules! check_spendable_outputs { let secp_ctx = Secp256k1::new(); for event in events.drain(..) { match event { - Event::SpendableOutputs { mut outputs, channel_id: _ } => { + Event::SpendableOutputs { mut outputs, channel_id: _, counterparty_node_id: _ } => { for outp in outputs.drain(..) { let script = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 097266cf83f..aebd83ae75d 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -2209,7 +2209,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho let spendable_output_events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(spendable_output_events.len(), 2); for event in spendable_output_events { - if let Event::SpendableOutputs { outputs, channel_id: _ } = event { + if let Event::SpendableOutputs { outputs, channel_id: _, counterparty_node_id: _ } = event { assert_eq!(outputs.len(), 1); let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs( &[&outputs[0]], Vec::new(), ScriptBuf::new_op_return(&[]), 253, None, &Secp256k1::new(), @@ -2992,7 +2992,7 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { // - 1 static to_remote output. 
assert_eq!(spendable_output_events.len(), 4); for event in spendable_output_events { - if let Event::SpendableOutputs { outputs, channel_id } = event { + if let Event::SpendableOutputs { outputs, channel_id, counterparty_node_id: _ } = event { assert_eq!(outputs.len(), 1); assert!([chan_b.2, chan_a.2].contains(&channel_id.unwrap())); let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs( diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index b56caf96008..25ad031bb35 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -622,7 +622,7 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) { let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(node_a_spendable.len(), 1); - if let Event::SpendableOutputs { outputs, channel_id } = node_a_spendable.pop().unwrap() { + if let Event::SpendableOutputs { outputs, channel_id, counterparty_node_id: _ } = node_a_spendable.pop().unwrap() { assert_eq!(outputs.len(), 1); assert_eq!(channel_id, Some(chan_id)); let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(), @@ -643,7 +643,7 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) { let mut node_b_spendable = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(node_b_spendable.len(), 1); - if let Event::SpendableOutputs { outputs, channel_id } = node_b_spendable.pop().unwrap() { + if let Event::SpendableOutputs { outputs, channel_id, counterparty_node_id: _ } = node_b_spendable.pop().unwrap() { assert_eq!(outputs.len(), 1); assert_eq!(channel_id, Some(chan_id)); let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(), From da9ddd2fafe5c5e9576e7291ec87e460ab2fe091 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 6 Feb 2026 08:56:49 +0100 Subject: [PATCH 199/242] Add `counterparty_node_id` to `TrackedSpendableOutput` This adds an optional `counterparty_node_id` field to `TrackedSpendableOutput` and updates the `track_spendable_outputs` method signatures on both `OutputSweeper` and `OutputSweeperSync` to accept this new parameter. The field uses TLV type 3 (odd) for backwards compatibility. When reading outputs tracked with LDK 0.2 and prior, this field will be `None`. 
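The backwards-compatibility claim rests on the BOLT-style "it's okay to be odd" TLV rule. The hand-rolled sketch below (not LDK's actual `impl_writeable_tlv_based!` machinery) shows why an old reader that only knows types 0, 2, and 4 tolerates the new odd type 3 but must reject an unknown even type:

// A bare-bones in-memory TLV record; real LDK streams are serialized bytes.
struct TlvRecord {
    typ: u64,
    value: Vec<u8>,
}

// An "old" reader that predates counterparty_node_id and only knows
// descriptor (0), channel_id (2), and status (4).
fn read_tracked_output(records: &[TlvRecord]) -> Result<(), &'static str> {
    for rec in records {
        match rec.typ {
            0 | 2 | 4 => {
                // Known field: parse rec.value here.
                let _ = &rec.value;
            },
            // Unknown odd types are optional by convention: skip them.
            t if t % 2 == 1 => continue,
            // Unknown even types are required: the read must fail.
            _ => return Err("unknown even TLV type"),
        }
    }
    Ok(())
}

fn main() {
    // An old reader sees the new type-3 field and simply ignores it...
    let new_data = [TlvRecord { typ: 3, value: vec![2; 33] }];
    assert!(read_tracked_output(&new_data).is_ok());
    // ...whereas an unknown even type would have failed the read.
    let bad_data = [TlvRecord { typ: 6, value: Vec::new() }];
    assert!(read_tracked_output(&bad_data).is_err());
}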
Co-Authored-By: HAL 9000 Signed-off-by: Elias Rohrer --- lightning-background-processor/src/lib.rs | 10 ++++++++-- lightning/src/util/sweep.rs | 18 +++++++++++++++--- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index d34ef7de2f9..dce803e6dea 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -3110,10 +3110,16 @@ mod tests { let event = receiver.recv_timeout(EVENT_DEADLINE).expect("Events not handled within deadline"); match event { - Event::SpendableOutputs { outputs, channel_id, counterparty_node_id: _ } => { + Event::SpendableOutputs { outputs, channel_id, counterparty_node_id } => { nodes[0] .sweeper - .track_spendable_outputs(outputs, channel_id, false, Some(153)) + .track_spendable_outputs( + outputs, + channel_id, + counterparty_node_id, + false, + Some(153), + ) .unwrap(); }, _ => panic!("Unexpected event: {:?}", event), diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 6815b395d91..b0e3b0bd880 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -32,7 +32,7 @@ use crate::{log_debug, log_error}; use bitcoin::block::Header; use bitcoin::locktime::absolute::LockTime; -use bitcoin::secp256k1::Secp256k1; +use bitcoin::secp256k1::{PublicKey, Secp256k1}; use bitcoin::{BlockHash, ScriptBuf, Transaction, Txid}; use core::future::Future; @@ -55,6 +55,13 @@ pub struct TrackedSpendableOutput { /// /// Will be `None` if no `channel_id` was given to [`OutputSweeper::track_spendable_outputs`] pub channel_id: Option, + /// The `node_id` of the channel counterparty. + /// + /// Will be `None` if no `counterparty_node_id` was given to + /// [`OutputSweeper::track_spendable_outputs`]. + /// + /// This will be `None` for outputs tracked with LDK 0.2 and prior. + pub counterparty_node_id: Option, /// The current status of the output spend. 
pub status: OutputSpendStatus, } @@ -93,6 +100,7 @@ impl TrackedSpendableOutput { impl_writeable_tlv_based!(TrackedSpendableOutput, { (0, descriptor, required), (2, channel_id, option), + (3, counterparty_node_id, option), (4, status, required), }); @@ -413,7 +421,8 @@ where /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs pub async fn track_spendable_outputs( &self, output_descriptors: Vec, channel_id: Option, - exclude_static_outputs: bool, delay_until_height: Option, + counterparty_node_id: Option, exclude_static_outputs: bool, + delay_until_height: Option, ) -> Result<(), ()> { let mut relevant_descriptors = output_descriptors .into_iter() @@ -432,6 +441,7 @@ where let output_info = TrackedSpendableOutput { descriptor, channel_id, + counterparty_node_id, status: OutputSpendStatus::PendingInitialBroadcast { delayed_until_height: delay_until_height, }, @@ -1010,11 +1020,13 @@ where /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs pub fn track_spendable_outputs( &self, output_descriptors: Vec, channel_id: Option, - exclude_static_outputs: bool, delay_until_height: Option, + counterparty_node_id: Option, exclude_static_outputs: bool, + delay_until_height: Option, ) -> Result<(), ()> { let mut fut = pin!(self.sweeper.track_spendable_outputs( output_descriptors, channel_id, + counterparty_node_id, exclude_static_outputs, delay_until_height, )); From 56f12c8b2efdb0ed57988d672bd3ebfd762e2ab7 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 6 Feb 2026 09:37:28 +0100 Subject: [PATCH 200/242] Add `counterparty_node_id` to `TransactionType` variants This adds `counterparty_node_id` tracking to all `TransactionType` variants, enabling downstream users to identify the channel counterparty associated with each broadcast transaction. For single-channel variants (`CooperativeClose`, `UnilateralClose`, `AnchorBump`, `Claim`, `Splice`), the counterparty is stored directly. For multi-channel variants (`Funding`, `Sweep`), the type now uses `Vec<(PublicKey, ChannelId)>` to pair each channel with its counterparty. The `OnchainTxHandler` now stores `counterparty_node_id` and provides a `set_counterparty_node_id` method for initialization during deserialization of older data. Co-Authored-By: HAL 9000 Signed-off-by: Elias Rohrer --- lightning-liquidity/src/lsps2/service.rs | 13 +++---- lightning/src/chain/chaininterface.rs | 19 +++++++--- lightning/src/chain/channelmonitor.rs | 6 ++-- lightning/src/chain/onchaintx.rs | 38 ++++++++++++++------ lightning/src/events/bump_transaction/mod.rs | 22 ++++++++---- lightning/src/ln/channel.rs | 14 +++++--- lightning/src/ln/channelmanager.rs | 28 ++++++++++----- lightning/src/ln/splicing_tests.rs | 20 +++++++---- lightning/src/util/sweep.rs | 20 ++++++----- 9 files changed, 124 insertions(+), 56 deletions(-) diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index 4c688d39eef..1909e871596 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -2020,21 +2020,22 @@ where // close could then confirm the commitment and trigger unintended on‑chain handling. // To avoid this, we check ChannelManager’s view (`is_channel_ready`) before broadcasting. 
if let Some(ch_id) = jit_channel.get_channel_id() { - let is_channel_ready = self + let channel_details = self .channel_manager .get_cm() .list_channels() .into_iter() - .any(|cd| cd.channel_id == ch_id && cd.is_channel_ready); + .find(|cd| cd.channel_id == ch_id && cd.is_channel_ready); - if !is_channel_ready { - return; - } + let counterparty_node_id = match channel_details { + Some(cd) => cd.counterparty.node_id, + None => return, + }; if let Some(funding_tx) = jit_channel.get_funding_tx() { self.tx_broadcaster.broadcast_transactions(&[( funding_tx, - TransactionType::Funding { channel_ids: vec![ch_id] }, + TransactionType::Funding { channels: vec![(counterparty_node_id, ch_id)] }, )]); } } diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 758fd1a74e2..806e947c153 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -18,6 +18,7 @@ use core::{cmp, ops::Deref}; use crate::ln::types::ChannelId; use crate::prelude::*; +use bitcoin::secp256k1::PublicKey; use bitcoin::transaction::Transaction; /// Represents the class of transaction being broadcast. @@ -33,10 +34,10 @@ pub enum TransactionType { /// /// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated Funding { - /// The IDs of the channels being funded. + /// The counterparty node IDs and channel IDs of the channels being funded. /// /// A single funding transaction may establish multiple channels when using batch funding. - channel_ids: Vec, + channels: Vec<(PublicKey, ChannelId)>, }, /// A transaction cooperatively closing a channel. /// @@ -45,6 +46,8 @@ pub enum TransactionType { /// /// [`ChannelManager::close_channel`]: crate::ln::channelmanager::ChannelManager::close_channel CooperativeClose { + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, /// The ID of the channel being closed. channel_id: ChannelId, }, @@ -56,6 +59,8 @@ pub enum TransactionType { /// /// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn UnilateralClose { + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, /// The ID of the channel being force-closed. channel_id: ChannelId, }, @@ -66,6 +71,8 @@ pub enum TransactionType { /// /// [`BumpTransactionEvent`]: crate::events::bump_transaction::BumpTransactionEvent AnchorBump { + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, /// The ID of the channel whose closing transaction is being fee-bumped. channel_id: ChannelId, }, @@ -81,6 +88,8 @@ pub enum TransactionType { /// [`ChannelMonitor`]: crate::chain::ChannelMonitor /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs Claim { + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, /// The ID of the channel from which outputs are being claimed. channel_id: ChannelId, }, @@ -90,10 +99,10 @@ pub enum TransactionType { /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper /// [`SpendableOutputDescriptor`]: crate::sign::SpendableOutputDescriptor Sweep { - /// The IDs of the channels from which outputs are being swept, if known. + /// The counterparty node IDs and channel IDs from which outputs are being swept, if known. /// /// A single sweep transaction may aggregate outputs from multiple channels. 
- channel_ids: Vec, + channels: Vec<(PublicKey, ChannelId)>, }, /// A splice transaction modifying an existing channel's funding. /// @@ -101,6 +110,8 @@ pub enum TransactionType { /// /// [`ChannelManager::splice_channel`]: crate::ln::channelmanager::ChannelManager::splice_channel Splice { + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, /// The ID of the channel being spliced. channel_id: ChannelId, }, diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index b109466ab6f..6205fa895a7 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1879,8 +1879,8 @@ impl ChannelMonitor { initial_holder_commitment_tx.trust().commitment_number(); let onchain_tx_handler = OnchainTxHandler::new( - channel_id, channel_parameters.channel_value_satoshis, channel_keys_id, - destination_script.into(), keys, channel_parameters.clone(), + channel_id, counterparty_node_id, channel_parameters.channel_value_satoshis, + channel_keys_id, destination_script.into(), keys, channel_parameters.clone(), initial_holder_commitment_tx.clone(), secp_ctx, ); @@ -6644,6 +6644,8 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP }; let dummy_node_id = PublicKey::from_slice(&[2; 33]).unwrap(); + onchain_tx_handler + .set_counterparty_node_id(counterparty_node_id.unwrap_or(dummy_node_id)); let monitor = ChannelMonitor::from_impl(ChannelMonitorImpl { funding: FundingScope { channel_parameters, diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 8de99eb8601..3eb6d64f3a2 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -18,7 +18,7 @@ use bitcoin::hashes::Hash; use bitcoin::locktime::absolute::LockTime; use bitcoin::script::{Script, ScriptBuf}; use bitcoin::secp256k1; -use bitcoin::secp256k1::{ecdsa::Signature, Secp256k1}; +use bitcoin::secp256k1::{ecdsa::Signature, PublicKey, Secp256k1}; use bitcoin::transaction::OutPoint as BitcoinOutPoint; use bitcoin::transaction::Transaction; @@ -224,6 +224,7 @@ pub(crate) enum FeerateStrategy { #[derive(Clone)] pub struct OnchainTxHandler { channel_id: ChannelId, + counterparty_node_id: PublicKey, channel_value_satoshis: u64, // Deprecated as of 0.2. channel_keys_id: [u8; 32], // Deprecated as of 0.2. destination_script: ScriptBuf, // Deprecated as of 0.2. @@ -287,6 +288,7 @@ impl PartialEq for OnchainTxHandler bool { // `signer`, `secp_ctx`, and `pending_claim_events` are excluded on purpose. self.channel_id == other.channel_id && + self.counterparty_node_id == other.counterparty_node_id && self.channel_value_satoshis == other.channel_value_satoshis && self.channel_keys_id == other.channel_keys_id && self.destination_script == other.destination_script && @@ -358,6 +360,14 @@ impl OnchainTxHandler { pub(crate) fn set_channel_id(&mut self, channel_id: ChannelId) { self.channel_id = channel_id; } + + // `ChannelMonitor`s already track the `counterparty_node_id`, however, due to the + // deserialization order there we can't make use of `ReadableArgs` to hand it into + // `OnchainTxHandler`'s deserialization logic directly. Instead we opt to initialize it with a + // dummy key and override it after reading the respective field via this method. 
+ pub(crate) fn set_counterparty_node_id(&mut self, counterparty_node_id: PublicKey) { + self.counterparty_node_id = counterparty_node_id; + } } impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP, u64, [u8; 32])> @@ -433,17 +443,20 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP read_tlv_fields!(reader, {}); - // `ChannelMonitor`s already track the `channel_id`, however, due to the derserialization - // order there we can't make use of `ReadableArgs` to hand it in directly. Instead we opt - // to initialize it with 0s and override it after reading the respective field via - // `OnchainTxHandler::set_channel_id`. + // `ChannelMonitor`s already track the `channel_id` and `counterparty_node_id`, however, due + // to the deserialization order there we can't make use of `ReadableArgs` to hand them in + // directly. Instead we opt to initialize them with dummy values and override them after + // reading the respective fields via `OnchainTxHandler::set_channel_id` and + // `OnchainTxHandler::set_counterparty_node_id`. let channel_id = ChannelId([0u8; 32]); + let counterparty_node_id = PublicKey::from_slice(&[2; 33]).unwrap(); let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); Ok(OnchainTxHandler { channel_id, + counterparty_node_id, channel_value_satoshis, channel_keys_id, destination_script, @@ -463,13 +476,14 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP impl OnchainTxHandler { pub(crate) fn new( - channel_id: ChannelId, channel_value_satoshis: u64, channel_keys_id: [u8; 32], - destination_script: ScriptBuf, signer: ChannelSigner, + channel_id: ChannelId, counterparty_node_id: PublicKey, channel_value_satoshis: u64, + channel_keys_id: [u8; 32], destination_script: ScriptBuf, signer: ChannelSigner, channel_parameters: ChannelTransactionParameters, holder_commitment: HolderCommitmentTransaction, secp_ctx: Secp256k1, ) -> Self { OnchainTxHandler { channel_id, + counterparty_node_id, channel_value_satoshis, channel_keys_id, destination_script, @@ -533,7 +547,7 @@ impl OnchainTxHandler { if tx.is_fully_signed() { let log_start = if feerate_was_bumped { "Broadcasting RBF-bumped" } else { "Rebroadcasting" }; log_info!(logger, "{} onchain {}", log_start, log_tx!(tx.0)); - broadcaster.broadcast_transactions(&[(&tx.0, TransactionType::Claim { channel_id: self.channel_id })]); + broadcaster.broadcast_transactions(&[(&tx.0, TransactionType::Claim { counterparty_node_id: self.counterparty_node_id, channel_id: self.channel_id })]); } else { log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", tx.0.compute_txid()); } @@ -875,7 +889,7 @@ impl OnchainTxHandler { OnchainClaim::Tx(tx) => { if tx.is_fully_signed() { log_info!(logger, "Broadcasting onchain {}", log_tx!(tx.0)); - broadcaster.broadcast_transactions(&[(&tx.0, TransactionType::Claim { channel_id: self.channel_id })]); + broadcaster.broadcast_transactions(&[(&tx.0, TransactionType::Claim { counterparty_node_id: self.counterparty_node_id, channel_id: self.channel_id })]); } else { log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", tx.0.compute_txid()); } @@ -1093,7 +1107,7 @@ impl OnchainTxHandler { OnchainClaim::Tx(bump_tx) => { if bump_tx.is_fully_signed() { log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx.0)); - broadcaster.broadcast_transactions(&[(&bump_tx.0, TransactionType::Claim { channel_id: self.channel_id })]); + 
broadcaster.broadcast_transactions(&[(&bump_tx.0, TransactionType::Claim { counterparty_node_id: self.counterparty_node_id, channel_id: self.channel_id })]); } else { log_info!(logger, "Waiting for signature of RBF-bumped unsigned onchain transaction {}", bump_tx.0.compute_txid()); @@ -1190,7 +1204,7 @@ impl OnchainTxHandler { OnchainClaim::Tx(bump_tx) => { if bump_tx.is_fully_signed() { log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx.0)); - broadcaster.broadcast_transactions(&[(&bump_tx.0, TransactionType::Claim { channel_id: self.channel_id })]); + broadcaster.broadcast_transactions(&[(&bump_tx.0, TransactionType::Claim { counterparty_node_id: self.counterparty_node_id, channel_id: self.channel_id })]); } else { log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", bump_tx.0.compute_txid()); } @@ -1368,8 +1382,10 @@ mod tests { } let holder_commit = HolderCommitmentTransaction::dummy(1000000, funding_outpoint, nondust_htlcs); let destination_script = ScriptBuf::new(); + let counterparty_node_id = PublicKey::from_slice(&[2; 33]).unwrap(); let mut tx_handler = OnchainTxHandler::new( ChannelId::from_bytes([0; 32]), + counterparty_node_id, 1000000, [0; 32], destination_script.clone(), diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 15ea2775a71..ff034176385 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -774,7 +774,7 @@ where /// transaction spending an anchor output of the commitment transaction to bump its fee and /// broadcasts them to the network as a package. async fn handle_channel_close( - &self, channel_id: ChannelId, claim_id: ClaimId, + &self, channel_id: ChannelId, counterparty_node_id: PublicKey, claim_id: ClaimId, package_target_feerate_sat_per_1000_weight: u32, commitment_tx: &Transaction, commitment_tx_fee_sat: u64, anchor_descriptor: &AnchorDescriptor, ) -> Result<(), ()> { @@ -799,7 +799,7 @@ where package_target_feerate_sat_per_1000_weight); self.broadcaster.broadcast_transactions(&[( &commitment_tx, - TransactionType::UnilateralClose { channel_id }, + TransactionType::UnilateralClose { counterparty_node_id, channel_id }, )]); return Ok(()); } @@ -968,8 +968,11 @@ where commitment_tx.compute_txid() ); self.broadcaster.broadcast_transactions(&[ - (&commitment_tx, TransactionType::UnilateralClose { channel_id }), - (&anchor_tx, TransactionType::AnchorBump { channel_id }), + ( + &commitment_tx, + TransactionType::UnilateralClose { counterparty_node_id, channel_id }, + ), + (&anchor_tx, TransactionType::AnchorBump { counterparty_node_id, channel_id }), ]); return Ok(()); } @@ -978,8 +981,9 @@ where /// Handles a [`BumpTransactionEvent::HTLCResolution`] event variant by producing a /// fully-signed, fee-bumped HTLC transaction that is broadcast to the network. 
async fn handle_htlc_resolution( - &self, channel_id: ChannelId, claim_id: ClaimId, target_feerate_sat_per_1000_weight: u32, - htlc_descriptors: &[HTLCDescriptor], tx_lock_time: LockTime, + &self, channel_id: ChannelId, counterparty_node_id: PublicKey, claim_id: ClaimId, + target_feerate_sat_per_1000_weight: u32, htlc_descriptors: &[HTLCDescriptor], + tx_lock_time: LockTime, ) -> Result<(), ()> { let channel_type = &htlc_descriptors[0] .channel_derivation_parameters @@ -1205,7 +1209,7 @@ where log_info!(self.logger, "Broadcasting {}", log_tx!(htlc_tx)); self.broadcaster.broadcast_transactions(&[( &htlc_tx, - TransactionType::UnilateralClose { channel_id }, + TransactionType::UnilateralClose { counterparty_node_id, channel_id }, )]); } @@ -1217,6 +1221,7 @@ where match event { BumpTransactionEvent::ChannelClose { channel_id, + counterparty_node_id, claim_id, package_target_feerate_sat_per_1000_weight, commitment_tx, @@ -1232,6 +1237,7 @@ where ); self.handle_channel_close( *channel_id, + *counterparty_node_id, *claim_id, *package_target_feerate_sat_per_1000_weight, commitment_tx, @@ -1249,6 +1255,7 @@ where }, BumpTransactionEvent::HTLCResolution { channel_id, + counterparty_node_id, claim_id, target_feerate_sat_per_1000_weight, htlc_descriptors, @@ -1263,6 +1270,7 @@ where ); self.handle_htlc_resolution( *channel_id, + *counterparty_node_id, *claim_id, *target_feerate_sat_per_1000_weight, htlc_descriptors, diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 2762ab63fc1..eb227a50855 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2110,6 +2110,7 @@ where }; let channel_id = context.channel_id; + let counterparty_node_id = context.counterparty_node_id; let signing_session = if let Some(signing_session) = context.interactive_tx_signing_session.as_mut() @@ -2223,9 +2224,9 @@ where let funding_tx = funding_tx.map(|tx| { let tx_type = if splice_negotiated.is_some() { - TransactionType::Splice { channel_id } + TransactionType::Splice { counterparty_node_id, channel_id } } else { - TransactionType::Funding { channel_ids: vec![channel_id] } + TransactionType::Funding { channels: vec![(counterparty_node_id, channel_id)] } }; (tx, tx_type) }); @@ -9168,9 +9169,14 @@ where let funding_tx = funding_tx.map(|tx| { let tx_type = if splice_negotiated.is_some() { - TransactionType::Splice { channel_id: self.context.channel_id } + TransactionType::Splice { + counterparty_node_id: self.context.counterparty_node_id, + channel_id: self.context.channel_id, + } } else { - TransactionType::Funding { channel_ids: vec![self.context.channel_id] } + TransactionType::Funding { + channels: vec![(self.context.counterparty_node_id, self.context.channel_id)], + } }; (tx, tx_type) }); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 665a79a9610..8b4a25c2c04 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6572,7 +6572,10 @@ impl< funding_tx.compute_txid() ); let tx_type = transaction_type.unwrap_or_else(|| TransactionType::Funding { - channel_ids: vec![channel.context().channel_id()], + channels: vec![( + channel.context().get_counterparty_node_id(), + channel.context().channel_id(), + )], }); self.tx_broadcaster.broadcast_transactions(&[(funding_tx, tx_type)]); { @@ -9583,7 +9586,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten(); let per_peer_state = self.per_peer_state.read().unwrap(); let mut batch_funding_tx = None; - let mut batch_channel_ids = Vec::new(); + let mut batch_channels = Vec::new(); for (channel_id, counterparty_node_id, _) in removed_batch_state { if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state = peer_state_mutex.lock().unwrap(); @@ -9594,7 +9597,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ funded_chan.context.unbroadcasted_funding(&funded_chan.funding) }); funded_chan.set_batch_ready(); - batch_channel_ids.push(channel_id); + batch_channels.push((counterparty_node_id, channel_id)); let mut pending_events = self.pending_events.lock().unwrap(); emit_channel_pending_event!(pending_events, funded_chan); @@ -9605,7 +9608,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ log_info!(self.logger, "Broadcasting batch funding tx {}", tx.compute_txid()); self.tx_broadcaster.broadcast_transactions(&[( &tx, - TransactionType::Funding { channel_ids: batch_channel_ids }, + TransactionType::Funding { channels: batch_channels }, )]); } } @@ -10273,7 +10276,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ log_info!(logger, "Broadcasting funding transaction with txid {}", tx.compute_txid()); self.tx_broadcaster.broadcast_transactions(&[( &tx, - TransactionType::Funding { channel_ids: vec![channel.context.channel_id()] }, + TransactionType::Funding { channels: vec![(counterparty_node_id, channel.context.channel_id())] }, )]); } } @@ -11715,7 +11718,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ log_info!(logger, "Broadcasting {}", log_tx!(broadcast_tx)); self.tx_broadcaster.broadcast_transactions(&[( &broadcast_tx, - TransactionType::CooperativeClose { channel_id: msg.channel_id }, + TransactionType::CooperativeClose { + counterparty_node_id: *counterparty_node_id, + channel_id: msg.channel_id, + }, )]); let _ = self.handle_error(err, *counterparty_node_id); } @@ -12956,7 +12962,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ log_info!(logger, "Broadcasting closing tx {}", log_tx!(broadcast_tx)); self.tx_broadcaster.broadcast_transactions(&[( &broadcast_tx, - TransactionType::CooperativeClose { channel_id }, + TransactionType::CooperativeClose { + counterparty_node_id: node_id, + channel_id, + }, )]); } } else { @@ -13087,7 +13096,10 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ log_info!(logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transactions(&[( &tx, - TransactionType::CooperativeClose { channel_id }, + TransactionType::CooperativeClose { + counterparty_node_id: *cp_id, + channel_id, + }, )]); false } else { diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index 9c7c8b55eac..1ad7fa193bb 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -334,14 +334,22 @@ pub fn sign_interactive_funding_tx<'a, 'b, 'c, 'd>( let tx = { let mut initiator_txn = initiator.tx_broadcaster.txn_broadcast_with_types(); assert_eq!(initiator_txn.len(), 1); - let acceptor_txn = acceptor.tx_broadcaster.txn_broadcast_with_types(); - assert_eq!(initiator_txn, acceptor_txn); - let (tx, tx_type) = initiator_txn.remove(0); - // Verify transaction type is Splice + let mut acceptor_txn = acceptor.tx_broadcaster.txn_broadcast_with_types(); + assert_eq!(acceptor_txn.len(), 1); + // Compare transactions only (not types, as counterparty_node_id differs per perspective) + assert_eq!(initiator_txn[0].0, acceptor_txn[0].0); + let (tx, initiator_tx_type) = initiator_txn.remove(0); + let (_, acceptor_tx_type) = acceptor_txn.remove(0); + // Verify transaction types are Splice for both nodes assert!( - matches!(tx_type, TransactionType::Splice { .. }), + matches!(initiator_tx_type, TransactionType::Splice { .. }), "Expected TransactionType::Splice, got {:?}", - tx_type + initiator_tx_type + ); + assert!( + matches!(acceptor_tx_type, TransactionType::Splice { .. }), + "Expected TransactionType::Splice, got {:?}", + acceptor_tx_type ); tx }; diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index b0e3b0bd880..b70eb274085 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -539,7 +539,7 @@ where // Sweep the outputs. let spending_tx_and_chan_id = self .update_state( - |sweeper_state| -> Result<(Option<(Transaction, Vec)>, bool), ()> { + |sweeper_state| -> Result<(Option<(Transaction, Vec<(PublicKey, ChannelId)>)>, bool), ()> { let cur_height = sweeper_state.best_block.height; let cur_hash = sweeper_state.best_block.block_hash; @@ -581,16 +581,20 @@ where .outputs .iter_mut() .filter(|o| filter_fn(&**o, cur_height)); - let mut channel_ids = Vec::new(); + let mut channels = Vec::new(); for output_info in respend_outputs { if let Some(filter) = self.chain_data_source.as_ref() { let watched_output = output_info.to_watched_output(cur_hash); filter.register_output(watched_output); } - if let Some(channel_id) = output_info.channel_id { - if !channel_ids.contains(&channel_id) { - channel_ids.push(channel_id); + if let (Some(counterparty_node_id), Some(channel_id)) = + (output_info.counterparty_node_id, output_info.channel_id) + { + if !channels.iter().any(|(cp, ch)| { + *cp == counterparty_node_id && *ch == channel_id + }) { + channels.push((counterparty_node_id, channel_id)); } } @@ -598,7 +602,7 @@ where sweeper_state.dirty = true; } - Ok((Some((spending_tx, channel_ids)), false)) + Ok((Some((spending_tx, channels)), false)) } else { Ok((None, false)) } @@ -607,9 +611,9 @@ where .await?; // Persistence completely successfully. If we have a spending transaction, we broadcast it. 
- if let Some((spending_tx, channel_ids)) = spending_tx_and_chan_id { + if let Some((spending_tx, channels)) = spending_tx_and_chan_id { self.broadcaster - .broadcast_transactions(&[(&spending_tx, TransactionType::Sweep { channel_ids })]); + .broadcast_transactions(&[(&spending_tx, TransactionType::Sweep { channels })]); } Ok(()) From 1d90fce7614e85a5a805332d9f0cee5a6b66a754 Mon Sep 17 00:00:00 2001 From: okekefrancis112 Date: Fri, 6 Feb 2026 17:53:54 +0100 Subject: [PATCH 201/242] fix: test_payment_path_scoring failing --- lightning/src/ln/channelmanager.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 665a79a9610..b9301f5f7c2 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -14204,6 +14204,7 @@ impl< pub fn push_pending_event(&self, event: events::Event) { let mut events = self.pending_events.lock().unwrap(); events.push_back((event, None)); + self.event_persist_notifier.notify(); } #[cfg(test)] From a36c85a92d41ad32b67761d2ff8270b6948362fa Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 5 Jan 2026 16:20:21 -0500 Subject: [PATCH 202/242] Trivial: document some fields on MonitorRestoreUpdates --- lightning/src/ln/channel.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 2762ab63fc1..8e694303942 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1137,11 +1137,14 @@ pub enum UpdateFulfillCommitFetch { /// The return value of `monitor_updating_restored` pub(super) struct MonitorRestoreUpdates { pub raa: Option, + /// A `CommitmentUpdate` to be sent to our channel peer. pub commitment_update: Option, pub commitment_order: RAACommitmentOrder, pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, + /// Inbound update_adds that are now irrevocably committed to this channel and are ready for the + /// onion to be processed in order to forward or receive the HTLC. pub pending_update_adds: Vec, pub funding_broadcastable: Option, pub channel_ready: Option, From 56b9f417d47297613dfb2fccd87e2fcc43a17f7a Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 16 Jan 2026 18:03:40 -0500 Subject: [PATCH 203/242] De-dup decode_htlcs from monitor only if channel is closed

We recently added support for reconstructing ChannelManager::decode_update_add_htlcs on startup, using data present in the Channels. However, we failed to prune HTLCs from this rebuilt map when a given inbound HTLC had already been forwarded to the outbound edge but was still sitting in that edge's holding cell (this bug could have caused us to double-forward HTLCs; fortunately, it never shipped).

As part of fixing this bug, we clean up the overall pruning approach:
1. If the Channel is open, it is the source of truth for which HTLCs are outbound+pending (including those pending in the holding cell).
2. If the Channel is closed, the corresponding ChannelMonitor is the source of truth for which HTLCs are outbound+pending.

Previously, we would only consider the monitor's pending HTLCs, which ignored holding cell HTLCs.
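In miniature, the pruning rule adopted here looks roughly like the following; all types are simplified stand-ins for LDK's real `Channel`/`ChannelMonitor` state, and `already_forwarded` is a hypothetical helper, not an actual API:

// Identifies an inbound HTLC by its previous-hop data (simplified).
#[derive(Clone, Copy, PartialEq, Eq)]
struct PrevHop {
    htlc_id: u64,
}

// The outbound edge's state, split the way the commit text describes it.
struct OpenChannel {
    committed_outbound: Vec<PrevHop>,
    holding_cell_outbound: Vec<PrevHop>,
}

struct Monitor {
    outbound: Vec<PrevHop>,
}

fn already_forwarded(channel: Option<&OpenChannel>, monitor: &Monitor, htlc: PrevHop) -> bool {
    match channel {
        // Open channel: it is the source of truth, and the check must
        // include HTLCs still sitting in the holding cell.
        Some(chan) => chan
            .committed_outbound
            .iter()
            .chain(chan.holding_cell_outbound.iter())
            .any(|h| *h == htlc),
        // Closed channel: fall back to the ChannelMonitor's view.
        None => monitor.outbound.iter().any(|h| *h == htlc),
    }
}

fn main() {
    let chan = OpenChannel {
        committed_outbound: Vec::new(),
        holding_cell_outbound: vec![PrevHop { htlc_id: 7 }],
    };
    let monitor = Monitor { outbound: Vec::new() };
    // The old logic only consulted the monitor and would have re-added
    // HTLC 7 to decode_update_add_htlcs, double-forwarding it.
    assert!(already_forwarded(Some(&chan), &monitor, PrevHop { htlc_id: 7 }));
}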
Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channel.rs | 39 +++++++- lightning/src/ln/channelmanager.rs | 31 +++++- lightning/src/ln/functional_test_utils.rs | 20 +++- lightning/src/ln/reload_tests.rs | 111 ++++++++++++++++++++++ 4 files changed, 194 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 8e694303942..3678ccba9a8 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,8 +50,8 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCSource, - OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, + self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCPreviousHopData, + HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; @@ -7852,6 +7852,41 @@ where .collect() } + /// Useful when reconstructing the set of pending HTLC forwards when deserializing the + /// `ChannelManager`. We don't want to cache an HTLC as needing to be forwarded if it's already + /// present in the outbound edge, or else we'll double-forward. + pub(super) fn outbound_htlc_forwards(&self) -> impl Iterator + '_ { + let holding_cell_outbounds = + self.context.holding_cell_htlc_updates.iter().filter_map(|htlc| match htlc { + HTLCUpdateAwaitingACK::AddHTLC { source, .. } => match source { + HTLCSource::PreviousHopData(prev_hop_data) => Some(prev_hop_data.clone()), + _ => None, + }, + _ => None, + }); + let committed_outbounds = + self.context.pending_outbound_htlcs.iter().filter_map(|htlc| match &htlc.source { + HTLCSource::PreviousHopData(prev_hop_data) => Some(prev_hop_data.clone()), + _ => None, + }); + holding_cell_outbounds.chain(committed_outbounds) + } + + #[cfg(test)] + pub(super) fn test_holding_cell_outbound_htlc_forwards_count(&self) -> usize { + self.context + .holding_cell_htlc_updates + .iter() + .filter_map(|htlc| match htlc { + HTLCUpdateAwaitingACK::AddHTLC { source, .. } => match source { + HTLCSource::PreviousHopData(prev_hop_data) => Some(prev_hop_data.clone()), + _ => None, + }, + _ => None, + }) + .count() + } + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed( diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 13197ea44ec..9e522820610 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10134,6 +10134,16 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } + #[cfg(test)] + pub(crate) fn test_holding_cell_outbound_htlc_forwards_count( + &self, cp_id: PublicKey, chan_id: ChannelId, + ) -> usize { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); + chan.test_holding_cell_outbound_htlc_forwards_count() + } + /// Completes channel resumption after locks have been released. 
/// /// Processes the [`PostMonitorUpdateChanResume`] returned by @@ -18600,6 +18610,20 @@ impl< let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); + if reconstruct_manager_from_monitors && !is_channel_closed { + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + for prev_hop in funded_chan.outbound_htlc_forwards() { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop, + "HTLC already forwarded to the outbound edge", + &args.logger, + ); + } + } + } + } } for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() @@ -18613,6 +18637,10 @@ impl< info.prev_funding_outpoint == prev_hop_data.outpoint && info.prev_htlc_id == prev_hop_data.htlc_id }; + if !is_channel_closed { + continue; + } + // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, @@ -18626,9 +18654,6 @@ impl< ); } - if !is_channel_closed || reconstruct_manager_from_monitors { - continue; - } // The ChannelMonitor is now responsible for this HTLC's // failure/success and will let us know what its outcome is. If we // still have an entry for this HTLC in `forward_htlcs_legacy`, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 218779123f6..6800078fd6f 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1270,6 +1270,13 @@ pub fn check_added_monitors>(node: & } } +pub fn get_latest_mon_update_id<'a, 'b, 'c>( + node: &Node<'a, 'b, 'c>, channel_id: ChannelId, +) -> (u64, u64) { + let monitor_id_state = node.chain_monitor.latest_monitor_update_id.lock().unwrap(); + monitor_id_state.get(&channel_id).unwrap().clone() +} + fn claimed_htlc_matches_path<'a, 'b, 'c>( origin_node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], htlc: &ClaimedHTLC, ) -> bool { @@ -5172,6 +5179,9 @@ pub struct ReconnectArgs<'a, 'b, 'c, 'd> { pub pending_cell_htlc_claims: (usize, usize), pub pending_cell_htlc_fails: (usize, usize), pub pending_raa: (bool, bool), + /// If true, don't assert that pending messages are empty after the commitment dance completes. + /// Useful when holding cell HTLCs will be released and need to be handled by the caller. 
+ pub allow_post_commitment_dance_msgs: (bool, bool), } impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { @@ -5194,6 +5204,7 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { pending_cell_htlc_claims: (0, 0), pending_cell_htlc_fails: (0, 0), pending_raa: (false, false), + allow_post_commitment_dance_msgs: (false, false), } } } @@ -5219,6 +5230,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { pending_raa, pending_responding_commitment_signed, pending_responding_commitment_signed_dup_monitor, + allow_post_commitment_dance_msgs, } = args; connect_nodes(node_a, node_b); let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b); @@ -5402,11 +5414,13 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); - assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors( &node_b, if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 }, ); + if !allow_post_commitment_dance_msgs.0 { + assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); + } } } else { assert!(chan_msgs.2.is_none()); @@ -5516,11 +5530,13 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_a.node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); - assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors( &node_a, if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 }, ); + if !allow_post_commitment_dance_msgs.1 { + assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); + } } } else { assert!(chan_msgs.2.is_none()); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index c0432051a62..e6061cc3106 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1319,6 +1319,117 @@ fn test_manager_persisted_post_outbound_edge_forward() { expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } +#[test] +fn test_manager_persisted_post_outbound_edge_holding_cell() { + // Test that we will not double-forward an HTLC after restart if it is already in the outbound + // edge's holding cell, which was previously broken. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); + + // Lock in the HTLC from node_a <> node_b. 
+ let amt_msat = 1000; + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + // Send a 2nd HTLC node_c -> node_b, to force the first HTLC into the holding cell. + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[1], amt_msat); + nodes[2].node.send_payment_with_route(route_2, payment_hash_2, RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + let send_event = + SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &send_event.commitment_msg); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Add the HTLC to the outbound edge, node_b <> node_c. Force the outbound HTLC into the b<>c + // holding cell. + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 0); + assert_eq!( + nodes[1].node.test_holding_cell_outbound_htlc_forwards_count(nodes[2].node.get_our_node_id(), chan_id_2), + 1 + ); + + // Disconnect peers and reload the forwarding node_b. + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + + let node_b_encoded = nodes[1].node.encode(); + let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); + reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_id_2); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id_2, latest_update); + + reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0])); + + // Reconnect b<>c. Node_b has pending RAA + commitment_signed from the incomplete c->b + // commitment dance, plus an HTLC in the holding cell that will be released after the dance. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_args.pending_raa = (false, true); + reconnect_args.pending_responding_commitment_signed = (false, true); + // Node_c needs a monitor update to catch up after processing node_b's reestablish. + reconnect_args.expect_renegotiated_funding_locked_monitor_update = (false, true); + // The holding cell HTLC will be released after the commitment dance - handle it below. 
+ reconnect_args.allow_post_commitment_dance_msgs = (false, true); + reconnect_nodes(reconnect_args); + + // The holding cell HTLC was released during the reconnect. Complete its commitment dance. + let holding_cell_htlc_msgs = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(holding_cell_htlc_msgs.len(), 1); + match &holding_cell_htlc_msgs[0] { + MessageSendEvent::UpdateHTLCs { node_id, updates, .. } => { + assert_eq!(*node_id, nodes[2].node.get_our_node_id()); + assert_eq!(updates.update_add_htlcs.len(), 1); + nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false); + } + _ => panic!("Unexpected message: {:?}", holding_cell_htlc_msgs[0]), + } + + // Ensure node_b won't double-forward the outbound HTLC (this was previously broken). + nodes[1].node.process_pending_htlc_forwards(); + let msgs = nodes[1].node.get_and_clear_pending_msg_events(); + assert!(msgs.is_empty(), "Expected 0 messages, got {:?}", msgs); + + // The a->b->c HTLC is now committed on node_c. The c->b HTLC is committed on node_b. + // Both payments should now be claimable. + expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); + expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, amt_msat, None, nodes[1].node.get_our_node_id()); + + // Claim the a->b->c payment on node_c. + let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage)); + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); + + // Claim the c->b payment on node_b. + nodes[1].node.claim_funds(payment_preimage_2); + expect_payment_claimed!(nodes[1], payment_hash_2, amt_msat); + check_added_monitors(&nodes[1], 1); + let mut update = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); + nodes[2].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), update.update_fulfill_htlcs.remove(0)); + do_commitment_signed_dance(&nodes[2], &nodes[1], &update.commitment_signed, false, false); + expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); +} + #[test] fn test_reload_partial_funding_batch() { let chanmon_cfgs = create_chanmon_cfgs(3); From 7304cc9bebbd27bc7a766944d683bb3e02392dcb Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 2 Feb 2026 16:23:51 -0500 Subject: [PATCH 204/242] Simplify channel_closed check on manager read This cleanup falls out of the changes made in the previous commit. Separated out here for reviewability. 
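Reduced to a toy example (none of this is LDK code), the cleanup hoists a per-arm `if !is_channel_closed { continue; }` guard into a single `if is_channel_closed` gate around the loop, which is what produces the large re-indentation diff below:

fn per_arm_guard(is_channel_closed: bool, items: &[u32]) -> u32 {
    let mut total = 0;
    for &item in items {
        match item {
            0..=9 => {
                // The same guard repeated in every arm...
                if !is_channel_closed {
                    continue;
                }
                total += item;
            },
            _ => {
                if !is_channel_closed {
                    continue;
                }
                total += item * 2;
            },
        }
    }
    total
}

fn hoisted_guard(is_channel_closed: bool, items: &[u32]) -> u32 {
    let mut total = 0;
    // ...becomes one gate around the whole loop.
    if is_channel_closed {
        for &item in items {
            match item {
                0..=9 => total += item,
                _ => total += item * 2,
            }
        }
    }
    total
}

fn main() {
    // The refactor is behavior-preserving for both flag values.
    for flag in [true, false] {
        assert_eq!(per_arm_guard(flag, &[1, 2, 30]), hoisted_guard(flag, &[1, 2, 30]));
    }
}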
--- lightning/src/ln/channelmanager.rs | 248 ++++++++++++++--------------- 1 file changed, 124 insertions(+), 124 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9e522820610..a0fb1369cf2 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18626,47 +18626,49 @@ impl< } } - for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() - { - let logger = - WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); - let htlc_id = SentHTLCId::from_source(&htlc_source); - match htlc_source { - HTLCSource::PreviousHopData(prev_hop_data) => { - let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint - && info.prev_htlc_id == prev_hop_data.htlc_id - }; - if !is_channel_closed { - continue; - } + if is_channel_closed { + for (htlc_source, (htlc, preimage_opt)) in + monitor.get_all_current_outbound_htlcs() + { + let logger = WithChannelMonitor::from( + &args.logger, + monitor, + Some(htlc.payment_hash), + ); + let htlc_id = SentHTLCId::from_source(&htlc_source); + match htlc_source { + HTLCSource::PreviousHopData(prev_hop_data) => { + let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { + info.prev_funding_outpoint == prev_hop_data.outpoint + && info.prev_htlc_id == prev_hop_data.htlc_id + }; + + // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed + // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from + // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, + // we'll double-forward. + if reconstruct_manager_from_monitors { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC already forwarded to the outbound edge", + &&logger, + ); + } - // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed - // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from - // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, - // we'll double-forward. - if reconstruct_manager_from_monitors { + // The ChannelMonitor is now responsible for this HTLC's + // failure/success and will let us know what its outcome is. If we + // still have an entry for this HTLC in `forward_htlcs_legacy`, + // `pending_intercepted_htlcs_legacy`, or + // `decode_update_add_htlcs_legacy`, we were apparently not persisted + // after the monitor was when forwarding the payment. dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, + &mut decode_update_add_htlcs_legacy, &prev_hop_data, - "HTLC already forwarded to the outbound edge", + "HTLC was forwarded to the closed channel", &&logger, ); - } - - // The ChannelMonitor is now responsible for this HTLC's - // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs_legacy`, - // `pending_intercepted_htlcs_legacy`, or - // `decode_update_add_htlcs_legacy`, we were apparently not persisted - // after the monitor was when forwarding the payment. 
- dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs_legacy, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &&logger, - ); - forward_htlcs_legacy.retain(|_, forwards| { + forward_htlcs_legacy.retain(|_, forwards| { forwards.retain(|forward| { if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { if pending_forward_matches_htlc(&htlc_info) { @@ -18678,7 +18680,7 @@ impl< }); !forwards.is_empty() }); - pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { + pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", &htlc.payment_hash, &monitor.channel_id()); @@ -18690,113 +18692,111 @@ impl< false } else { true } }); - }, - HTLCSource::OutboundRoute { - payment_id, - session_priv, - path, - bolt12_invoice, - .. - } => { - if !is_channel_closed { - continue; - } - if let Some(preimage) = preimage_opt { - let pending_events = Mutex::new(pending_events_read); - let update = PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id, - }; - let mut compl_action = Some( + }, + HTLCSource::OutboundRoute { + payment_id, + session_priv, + path, + bolt12_invoice, + .. + } => { + if let Some(preimage) = preimage_opt { + let pending_events = Mutex::new(pending_events_read); + let update = PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id, + }; + let mut compl_action = Some( EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) ); - pending_outbounds.claim_htlc( - payment_id, - preimage, - bolt12_invoice, - session_priv, - path, - true, - &mut compl_action, - &pending_events, - &logger, - ); - // If the completion action was not consumed, then there was no - // payment to claim, and we need to tell the `ChannelMonitor` - // we don't need to hear about the HTLC again, at least as long - // as the PaymentSent event isn't still sitting around in our - // event queue. - let have_action = if compl_action.is_some() { - let pending_events = pending_events.lock().unwrap(); - pending_events.iter().any(|(_, act)| *act == compl_action) - } else { - false - }; - if !have_action && compl_action.is_some() { - let mut peer_state = per_peer_state + pending_outbounds.claim_htlc( + payment_id, + preimage, + bolt12_invoice, + session_priv, + path, + true, + &mut compl_action, + &pending_events, + &logger, + ); + // If the completion action was not consumed, then there was no + // payment to claim, and we need to tell the `ChannelMonitor` + // we don't need to hear about the HTLC again, at least as long + // as the PaymentSent event isn't still sitting around in our + // event queue. 
+ let have_action = if compl_action.is_some() { + let pending_events = pending_events.lock().unwrap(); + pending_events.iter().any(|(_, act)| *act == compl_action) + } else { + false + }; + if !have_action && compl_action.is_some() { + let mut peer_state = per_peer_state .get(&counterparty_node_id) .map(|state| state.lock().unwrap()) .expect( "Channels originating a preimage must have peer state", ); - let update_id = peer_state + let update_id = peer_state .closed_channel_monitor_update_ids .get_mut(channel_id) .expect( "Channels originating a preimage must have a monitor", ); - // Note that for channels closed pre-0.1, the latest - // update_id is `u64::MAX`. - *update_id = update_id.saturating_add(1); - - pending_background_events.push( - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: monitor - .get_counterparty_node_id(), - funding_txo: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - update: ChannelMonitorUpdate { - update_id: *update_id, - channel_id: Some(monitor.channel_id()), - updates: vec![ + // Note that for channels closed pre-0.1, the latest + // update_id is `u64::MAX`. + *update_id = update_id.saturating_add(1); + + pending_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: monitor + .get_counterparty_node_id(), + funding_txo: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + update: ChannelMonitorUpdate { + update_id: *update_id, + channel_id: Some(monitor.channel_id()), + updates: vec![ ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id, }, ], + }, }, - }, - ); + ); + } + pending_events_read = pending_events.into_inner().unwrap(); } - pending_events_read = pending_events.into_inner().unwrap(); - } - }, + }, + } } - } - for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { - let logger = - WithChannelMonitor::from(&args.logger, monitor, Some(payment_hash)); - log_info!( - logger, - "Failing HTLC with payment hash {} as it was resolved on-chain.", - payment_hash - ); - let completion_action = Some(PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id: SentHTLCId::from_source(&htlc_source), - }); + for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { + let logger = + WithChannelMonitor::from(&args.logger, monitor, Some(payment_hash)); + log_info!( + logger, + "Failing HTLC with payment hash {} as it was resolved on-chain.", + payment_hash + ); + let completion_action = Some(PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id: SentHTLCId::from_source(&htlc_source), + }); - failed_htlcs.push(( - htlc_source, - payment_hash, - monitor.get_counterparty_node_id(), - monitor.channel_id(), - LocalHTLCFailureReason::OnChainTimeout, - completion_action, - )); + failed_htlcs.push(( + htlc_source, + payment_hash, + monitor.get_counterparty_node_id(), + monitor.channel_id(), + LocalHTLCFailureReason::OnChainTimeout, + completion_action, + )); + } } // Whether the downstream channel was closed or not, try to re-apply any payment From 37375ca386298c86417d3c7ff750fa64a9a920d3 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 28 Jan 2026 19:03:34 -0500 Subject: [PATCH 205/242] Don't double-forward inbounds resolved in holding cell We recently 
added support for reconstructing ChannelManager::decode_update_add_htlcs
on startup, using data present in the Channels. However, we failed to
prune HTLCs from this rebuilt map if a given HTLC was already
forwarded+removed from the outbound edge and resolved in the inbound
edge's holding cell.

Here we fix this bug, which would have caused us to double-forward
inbound HTLC forwards. Fortunately, it was never shipped in a release.

Co-Authored-By: Claude Opus 4.5
---
 lightning/src/ln/channel.rs        | 18 ++++++-
 lightning/src/ln/channelmanager.rs |  2 +-
 lightning/src/ln/reload_tests.rs   | 81 ++++++++++++++++++++++++++++++
 3 files changed, 99 insertions(+), 2 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 3678ccba9a8..622227b2210 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -7839,12 +7839,28 @@ where
 	}

 	/// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`.
-	pub(super) fn get_inbound_committed_update_adds(&self) -> Vec<msgs::UpdateAddHTLC> {
+	pub(super) fn inbound_committed_unresolved_htlcs(&self) -> Vec<msgs::UpdateAddHTLC> {
+		// We don't want to return an HTLC as needing processing if it already has a resolution that's
+		// pending in the holding cell.
+		let htlc_resolution_in_holding_cell = |id: u64| -> bool {
+			self.context.holding_cell_htlc_updates.iter().any(|holding_cell_htlc| {
+				match holding_cell_htlc {
+					HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => *htlc_id == id,
+					HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => *htlc_id == id,
+					HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => *htlc_id == id,
+					HTLCUpdateAwaitingACK::AddHTLC { .. } => false,
+				}
+			})
+		};
+
 		self.context
 			.pending_inbound_htlcs
 			.iter()
 			.filter_map(|htlc| match htlc.state {
 				InboundHTLCState::Committed { ref update_add_htlc_opt } => {
+					if htlc_resolution_in_holding_cell(htlc.htlc_id) {
+						return None;
+					}
 					update_add_htlc_opt.clone()
 				},
 				_ => None,
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index a0fb1369cf2..2665bf11740 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -18558,7 +18558,7 @@ impl<
 			if let Some(chan) = peer_state.channel_by_id.get(channel_id) {
 				if let Some(funded_chan) = chan.as_funded() {
 					let inbound_committed_update_adds =
-						funded_chan.get_inbound_committed_update_adds();
+						funded_chan.inbound_committed_unresolved_htlcs();
 					if !inbound_committed_update_adds.is_empty() {
 						// Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized
 						// `Channel`, as part of removing the requirement to regularly persist the
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index e6061cc3106..826fdbf9b0c 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -1758,3 +1758,84 @@ fn test_hold_completed_inflight_monitor_updates_upon_manager_reload() {

 	reconnect_nodes(reconnect_args);
 }
+#[test]
+fn outbound_removed_holding_cell_resolved_no_double_forward() {
+	// Test that if a forwarding node has an HTLC that is fully removed on the outbound edge,
+	// with the inbound edge resolution sitting in the holding cell, and we reload the node in
+	// this state, the node will not double-forward the HTLC.
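+	//
+	// Setup: nodes[0] pays nodes[2] through nodes[1], and nodes[2] claims. The
+	// nodes[0]<->nodes[1] link is disconnected before nodes[1] processes the fulfill, so the
+	// inbound resolution sits in the chan_0_1 holding cell while the outbound HTLC on
+	// chan_1_2 is fully removed. nodes[1] is then reloaded in exactly that state.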
+
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let persister;
+	let new_chain_monitor;
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes_1_deserialized;
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let node_0_id = nodes[0].node.get_our_node_id();
+	let node_1_id = nodes[1].node.get_our_node_id();
+	let node_2_id = nodes[2].node.get_our_node_id();
+
+	let chan_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+	let chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+
+	let chan_id_0_1 = chan_0_1.2;
+	let chan_id_1_2 = chan_1_2.2;
+
+	// Send a payment from nodes[0] to nodes[2] via nodes[1].
+	let (route, payment_hash, payment_preimage, payment_secret) =
+		get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
+	send_along_route_with_secret(
+		&nodes[0], route, &[&[&nodes[1], &nodes[2]]], 1_000_000, payment_hash, payment_secret,
+	);
+
+	// Claim the payment on nodes[2].
+	nodes[2].node.claim_funds(payment_preimage);
+	check_added_monitors(&nodes[2], 1);
+	expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+	// Disconnect nodes[0] from nodes[1] BEFORE processing the fulfill.
+	// This forces the inbound fulfill resolution to go into nodes[1]'s holding cell for the
+	// inbound channel.
+	nodes[0].node.peer_disconnected(node_1_id);
+	nodes[1].node.peer_disconnected(node_0_id);
+
+	// Process the fulfill from nodes[2] to nodes[1].
+	let updates_2_1 = get_htlc_update_msgs(&nodes[2], &node_1_id);
+	nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates_2_1.update_fulfill_htlcs[0].clone());
+	check_added_monitors(&nodes[1], 1);
+	do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, false, false);
+	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
+
+	// At this point:
+	// - The outbound HTLC nodes[1]->nodes[2] is resolved and removed
+	// - The inbound HTLC nodes[0]->nodes[1] is still in a Committed state, with the fulfill
+	//   resolution in nodes[1]'s chan_0_1 holding cell
+	let node_1_serialized = nodes[1].node.encode();
+	let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode();
+	let mon_1_2_serialized = get_monitor!(nodes[1], chan_id_1_2).encode();
+
+	// Reload nodes[1].
+	// During deserialization, we previously would not have noticed that the nodes[0]<>nodes[1]
+	// HTLC had a resolution pending in the holding cell, and reconstructed the ChannelManager's
+	// pending HTLC state indicating that the HTLC still needed to be forwarded to the outbound
+	// edge.
+	reload_node!(
+		nodes[1],
+		node_1_serialized,
+		&[&mon_0_1_serialized, &mon_1_2_serialized],
+		persister,
+		new_chain_monitor,
+		nodes_1_deserialized
+	);
+
+	// Check that nodes[1] doesn't double-forward the HTLC.
+	nodes[1].node.process_pending_htlc_forwards();
+
+	// Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell.
+	let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]);
+	reconnect_args.pending_cell_htlc_claims = (0, 1);
+	reconnect_nodes(reconnect_args);
+
+	// nodes[0] should now have received the fulfill and generate PaymentSent.
+	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+}

From a5c8bceefbd3765506f6d50590d91083221952ed Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Wed, 21 Jan 2026 15:52:42 -0500
Subject: [PATCH 206/242] Mark legacy pre-0.3 inbound htlcs on persist

In 0.3+, we are taking steps to remove the requirement of regularly
persisting the ChannelManager and instead rebuild the set of HTLC
forwards (and the manager generally) from Channel{Monitor} data.

We previously merged support for reconstructing the
ChannelManager::decode_update_add_htlcs map from channel data, using a
new HTLC onion field that will be present for inbound HTLCs received on
0.3+ only.

However, we now want to add support for pruning this field once it's no
longer needed, so it isn't rewritten on every manager persist. At the
same time, a future LDK version will need to detect whether the field
was ever present to begin with, to prevent upgrading with legacy HTLCs
present.

We accomplish both by converting the plain update_add option that was
previously serialized to an enum that can indicate whether the HTLC is
from 0.2- versus 0.3+-with-onion-pruned (a variant for the latter is
added in the next commit). Actual pruning of the new update_add field is
added in the next commit.
---
 lightning/src/ln/channel.rs | 98 +++++++++++++++++++++++++------------
 lightning/src/util/ser.rs   | 20 +++++---
 2 files changed, 80 insertions(+), 38 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 622227b2210..8a4ef19fe37 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -85,6 +85,7 @@ use crate::util::errors::APIError;
 use crate::util::logger::{Logger, Record, WithContext};
 use crate::util::scid_utils::{block_from_scid, scid_from_parts};
 use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, Writeable, Writer};
+use crate::{impl_readable_for_vec, impl_writeable_for_vec};

 use alloc::collections::{btree_map, BTreeMap};

@@ -216,7 +217,7 @@ enum InboundHTLCState {
 	/// Used to rebuild `ChannelManager` HTLC state on restart. Previously the manager would track
 	/// and persist all HTLC forwards and receives itself, but newer LDK versions avoid relying on
 	/// its persistence and instead reconstruct state based on `Channel` and `ChannelMonitor` data.
-	update_add_htlc_opt: Option<msgs::UpdateAddHTLC>,
+	update_add_htlc: InboundUpdateAdd,
 },
 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
@@ -307,6 +308,31 @@ impl InboundHTLCState {
 	}
 }

+/// A field of `InboundHTLCState::Committed` containing the HTLC's `update_add_htlc` message. If
+/// the HTLC is a forward and gets irrevocably committed to the outbound edge, we convert to
+/// `InboundUpdateAdd::Forwarded`, thus pruning the onion and not persisting it on every
+/// `ChannelManager` persist.
+///
+/// Useful for reconstructing the pending HTLC set on startup.
+#[derive(Debug)]
+enum InboundUpdateAdd {
+	/// The inbound committed HTLC's update_add_htlc message.
+	WithOnion { update_add_htlc: msgs::UpdateAddHTLC },
+	/// This HTLC was received pre-LDK 0.3, before we started persisting the onion for inbound
+	/// committed HTLCs.
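+	/// Kept (rather than dropped entirely) so that a future LDK version can detect that a
+	/// legacy HTLC was present and require it to be resolved before upgrading.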
+ Legacy, +} + +impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, + (0, WithOnion) => { + (0, update_add_htlc, required), + }, + (2, Legacy) => {}, +); + +impl_writeable_for_vec!(&InboundUpdateAdd); +impl_readable_for_vec!(InboundUpdateAdd); + #[cfg_attr(test, derive(Debug))] struct InboundHTLCOutput { htlc_id: u64, @@ -7856,12 +7882,14 @@ where self.context .pending_inbound_htlcs .iter() - .filter_map(|htlc| match htlc.state { - InboundHTLCState::Committed { ref update_add_htlc_opt } => { + .filter_map(|htlc| match &htlc.state { + InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, + } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { return None; } - update_add_htlc_opt.clone() + Some(update_add_htlc.clone()) }, _ => None, }) @@ -8863,7 +8891,8 @@ where false }; if swap { - let mut state = InboundHTLCState::Committed { update_add_htlc_opt: None }; + let mut state = + InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }; mem::swap(&mut state, &mut htlc.state); if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state { @@ -8904,9 +8933,8 @@ where to_forward_infos.push((forward_info, htlc.htlc_id)); htlc.state = InboundHTLCState::Committed { // HTLCs will only be in state `InboundHTLCResolution::Resolved` if they were - // received on an old pre-0.0.123 version of LDK. In this case, the HTLC is - // required to be resolved prior to upgrading to 0.1+ per CHANGELOG.md. - update_add_htlc_opt: None, + // received on LDK 0.1-. + update_add_htlc: InboundUpdateAdd::Legacy, }; }, } @@ -8915,7 +8943,9 @@ where log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); pending_update_adds.push(update_add_htlc.clone()); htlc.state = InboundHTLCState::Committed { - update_add_htlc_opt: Some(update_add_htlc), + update_add_htlc: InboundUpdateAdd::WithOnion { + update_add_htlc, + }, }; }, } @@ -14602,7 +14632,7 @@ impl Writeable for FundedChannel { } } let mut removed_htlc_attribution_data: Vec<&Option> = Vec::new(); - let mut inbound_committed_update_adds: Vec<&Option> = Vec::new(); + let mut inbound_committed_update_adds: Vec<&InboundUpdateAdd> = Vec::new(); (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { @@ -14622,9 +14652,9 @@ impl Writeable for FundedChannel { 2u8.write(writer)?; htlc_resolution.write(writer)?; }, - &InboundHTLCState::Committed { ref update_add_htlc_opt } => { + &InboundHTLCState::Committed { ref update_add_htlc } => { 3u8.write(writer)?; - inbound_committed_update_adds.push(update_add_htlc_opt); + inbound_committed_update_adds.push(update_add_htlc); }, &InboundHTLCState::LocalRemoved(ref removal_reason) => { 4u8.write(writer)?; @@ -15093,7 +15123,7 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> }; InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) }, - 3 => InboundHTLCState::Committed { update_add_htlc_opt: None }, + 3 => InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }, 4 => { let reason = match ::read(reader)? 
{ 0 => InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { @@ -15399,7 +15429,7 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> let mut pending_outbound_held_htlc_flags_opt: Option>> = None; let mut holding_cell_held_htlc_flags_opt: Option>> = None; - let mut inbound_committed_update_adds_opt: Option>> = None; + let mut inbound_committed_update_adds_opt: Option> = None; let mut holding_cell_accountable: Option> = None; let mut pending_outbound_accountable: Option> = None; @@ -15583,8 +15613,8 @@ impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> if let Some(update_adds) = inbound_committed_update_adds_opt { let mut iter = update_adds.into_iter(); for htlc in pending_inbound_htlcs.iter_mut() { - if let InboundHTLCState::Committed { ref mut update_add_htlc_opt } = htlc.state { - *update_add_htlc_opt = iter.next().ok_or(DecodeError::InvalidValue)?; + if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { + *update_add_htlc = iter.next().ok_or(DecodeError::InvalidValue)?; } } if iter.next().is_some() { @@ -15952,8 +15982,8 @@ mod tests { use crate::ln::chan_utils::{self, commit_tx_fee_sat, ChannelTransactionParameters}; use crate::ln::channel::{ AwaitingChannelReadyFlags, ChannelState, FundedChannel, HTLCCandidate, HTLCInitiator, - HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundV1Channel, - OutboundHTLCOutput, OutboundHTLCState, OutboundV1Channel, + HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundUpdateAdd, + InboundV1Channel, OutboundHTLCOutput, OutboundHTLCState, OutboundV1Channel, }; use crate::ln::channel::{ MAX_FUNDING_SATOSHIS_NO_WUMBO, MIN_THEIR_CHAN_RESERVE_SATOSHIS, @@ -15996,6 +16026,10 @@ mod tests { use bitcoin::{ScriptBuf, WPubkeyHash, WitnessProgram, WitnessVersion}; use std::cmp; + fn dummy_inbound_update_add() -> InboundUpdateAdd { + InboundUpdateAdd::Legacy + } + #[test] #[rustfmt::skip] fn test_channel_state_order() { @@ -16198,7 +16232,7 @@ mod tests { amount_msat: htlc_amount_msat, payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()), cltv_expiry: 300000000, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput { @@ -17047,7 +17081,7 @@ mod tests { amount_msat: 1000000, cltv_expiry: 500, payment_hash: PaymentHash::from(payment_preimage_0), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); let payment_preimage_1 = @@ -17057,7 +17091,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); let payment_preimage_2 = @@ -17099,7 +17133,7 @@ mod tests { amount_msat: 4000000, cltv_expiry: 504, payment_hash: PaymentHash::from(payment_preimage_4), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); // commitment tx with all five HTLCs untrimmed (minimum feerate) @@ -17488,7 +17522,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { 
update_add_htlc: dummy_inbound_update_add() }, }); chan.context.pending_outbound_htlcs.clear(); @@ -17741,7 +17775,7 @@ mod tests { amount_msat: 5000000, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, })); chan.context.pending_outbound_htlcs.extend( @@ -17805,7 +17839,7 @@ mod tests { amount_msat, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -17872,7 +17906,7 @@ mod tests { amount_msat: 100000, cltv_expiry: 920125, payment_hash: htlc_0_in_hash, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); let htlc_1_in_preimage = @@ -17890,7 +17924,7 @@ mod tests { amount_msat: 49900000, cltv_expiry: 920125, payment_hash: htlc_1_in_hash, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); chan.context.pending_outbound_htlcs.extend( @@ -17943,7 +17977,7 @@ mod tests { amount_msat: 30000, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -17985,7 +18019,7 @@ mod tests { amount_msat: 29525, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -18023,7 +18057,7 @@ mod tests { amount_msat: 29525, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -18061,7 +18095,7 @@ mod tests { amount_msat: 29753, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -18114,7 +18148,7 @@ mod tests { amount_msat, cltv_expiry, payment_hash, - state: InboundHTLCState::Committed { update_add_htlc_opt: None }, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }), ); diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index f821aa5afc0..6579c0353a3 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -979,13 +979,15 @@ where } } -// Vectors +/// Write number of items in a vec followed by each element, without writing a length-prefix for +/// each element. +#[macro_export] macro_rules! impl_writeable_for_vec { ($ty: ty $(, $name: ident)*) => { impl<$($name : Writeable),*> Writeable for Vec<$ty> { #[inline] fn write(&self, w: &mut W) -> Result<(), io::Error> { - CollectionLength(self.len() as u64).write(w)?; + $crate::util::ser::CollectionLength(self.len() as u64).write(w)?; for elem in self.iter() { elem.write(w)?; } @@ -994,15 +996,21 @@ macro_rules! impl_writeable_for_vec { } } } +/// Read the number of items in a vec followed by each element, without reading a length prefix for +/// each element. 
+/// +/// Each element is read with `MaybeReadable`, meaning if an element cannot be read then it is +/// skipped without returning `DecodeError::InvalidValue`. +#[macro_export] macro_rules! impl_readable_for_vec { ($ty: ty $(, $name: ident)*) => { impl<$($name : Readable),*> Readable for Vec<$ty> { #[inline] - fn read(r: &mut R) -> Result { - let len: CollectionLength = Readable::read(r)?; - let mut ret = Vec::with_capacity(cmp::min(len.0 as usize, MAX_BUF_SIZE / core::mem::size_of::<$ty>())); + fn read(r: &mut R) -> Result { + let len: $crate::util::ser::CollectionLength = Readable::read(r)?; + let mut ret = Vec::with_capacity(cmp::min(len.0 as usize, $crate::util::ser::MAX_BUF_SIZE / core::mem::size_of::<$ty>())); for _ in 0..len.0 { - if let Some(val) = MaybeReadable::read(r)? { + if let Some(val) = $crate::util::ser::MaybeReadable::read(r)? { ret.push(val); } } From 3cc64f042fd13e16f37c5e2f665f40f2680bb430 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 5 Jan 2026 16:28:00 -0500 Subject: [PATCH 207/242] Prune inbound HTLC onions once forwarded We store inbound committed HTLCs' onions in Channels for use in reconstructing the pending HTLC set on ChannelManager read. If an HTLC has been forwarded to the outbound edge, we no longer need to persist the inbound edge's onion and can prune it here. --- lightning/src/ln/channel.rs | 48 ++++++++++++++++++++++++++++-- lightning/src/ln/channelmanager.rs | 44 +++++++++++++++++++++++++++ lightning/src/ln/reload_tests.rs | 20 +++++++++++++ 3 files changed, 110 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 8a4ef19fe37..8a2bc30af91 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -318,6 +318,18 @@ impl InboundHTLCState { enum InboundUpdateAdd { /// The inbound committed HTLC's update_add_htlc message. WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, + /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing + /// its onion to be pruned and no longer persisted. + Forwarded { + /// Useful if we need to fail or claim this HTLC backwards after restart, if it's missing in the + /// outbound edge. + hop_data: HTLCPreviousHopData, + /// Useful if we need to claim this HTLC backwards after a restart and it's missing in the + /// outbound edge, to generate an accurate [`Event::PaymentForwarded`]. + /// + /// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded + outbound_amt_msat: u64, + }, /// This HTLC was received pre-LDK 0.3, before we started persisting the onion for inbound /// committed HTLCs. Legacy, @@ -328,6 +340,10 @@ impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, (0, update_add_htlc, required), }, (2, Legacy) => {}, + (4, Forwarded) => { + (0, hop_data, required), + (2, outbound_amt_msat, required), + }, ); impl_writeable_for_vec!(&InboundUpdateAdd); @@ -1177,6 +1193,10 @@ pub(super) struct MonitorRestoreUpdates { pub channel_ready_order: ChannelReadyOrder, pub announcement_sigs: Option, pub tx_signatures: Option, + /// The sources of outbound HTLCs that were forwarded and irrevocably committed on this channel + /// (the outbound edge), along with their outbound amounts. Useful to store in the inbound HTLC + /// to ensure it gets resolved. 
+ pub committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, } /// The return value of `signer_maybe_unblocked` @@ -7931,6 +7951,22 @@ where .count() } + /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to + /// persist its onion. + pub(super) fn prune_inbound_htlc_onion( + &mut self, htlc_id: u64, hop_data: HTLCPreviousHopData, outbound_amt_msat: u64, + ) { + for htlc in self.context.pending_inbound_htlcs.iter_mut() { + if htlc.htlc_id == htlc_id { + if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { + *update_add_htlc = InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }; + return; + } + } + } + debug_assert!(false, "If we go to prune an inbound HTLC it should be present") + } + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed( @@ -9532,6 +9568,14 @@ where mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills); let mut pending_update_adds = Vec::new(); mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds); + let committed_outbound_htlc_sources = self.context.pending_outbound_htlcs.iter().filter_map(|htlc| { + if let &OutboundHTLCState::LocalAnnounced(_) = &htlc.state { + if let HTLCSource::PreviousHopData(prev_hop_data) = &htlc.source { + return Some((prev_hop_data.clone(), htlc.amount_msat)) + } + } + None + }).collect(); if self.context.channel_state.is_peer_disconnected() { self.context.monitor_pending_revoke_and_ack = false; @@ -9540,7 +9584,7 @@ where raa: None, commitment_update: None, commitment_order: RAACommitmentOrder::RevokeAndACKFirst, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, - channel_ready_order, + channel_ready_order, committed_outbound_htlc_sources }; } @@ -9571,7 +9615,7 @@ where MonitorRestoreUpdates { raa, commitment_update, commitment_order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures, - channel_ready_order, + channel_ready_order, committed_outbound_htlc_sources } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 2665bf11740..e50a9b8a2c3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1408,6 +1408,7 @@ enum PostMonitorUpdateChanResume { decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, }, } @@ -9586,6 +9587,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, ) { // If the channel belongs to a batch funding transaction, the progress of the batch // should be updated as we have received funding_signed and persisted the monitor. @@ -9656,6 +9658,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ }; self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); } + self.prune_persisted_inbound_htlc_onions(committed_outbound_htlc_sources); } fn handle_monitor_update_completion_actions< @@ -10130,6 +10133,33 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ decode_update_add_htlcs, finalized_claimed_htlcs: updates.finalized_claimed_htlcs, failed_htlcs: updates.failed_htlcs, + committed_outbound_htlc_sources: updates.committed_outbound_htlc_sources, + } + } + } + + /// We store inbound committed HTLCs' onions in `Channel`s for use in reconstructing the pending + /// HTLC set on `ChannelManager` read. If an HTLC has been irrevocably forwarded to the outbound + /// edge, we no longer need to persist the inbound edge's onion and can prune it here. + fn prune_persisted_inbound_htlc_onions( + &self, committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, + ) { + let per_peer_state = self.per_peer_state.read().unwrap(); + for (source, outbound_amt_msat) in committed_outbound_htlc_sources { + let counterparty_node_id = match source.counterparty_node_id.as_ref() { + Some(id) => id, + None => continue, + }; + let mut peer_state = + match per_peer_state.get(counterparty_node_id).map(|state| state.lock().unwrap()) { + Some(peer_state) => peer_state, + None => continue, + }; + + if let Some(chan) = + peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut()) + { + chan.prune_inbound_htlc_onion(source.htlc_id, source, outbound_amt_msat); } } } @@ -10144,6 +10174,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ chan.test_holding_cell_outbound_htlc_forwards_count() } + #[cfg(test)] + /// Useful to check that we prune inbound HTLC onions once they are irrevocably forwarded to the + /// outbound edge, see [`Self::prune_persisted_inbound_htlc_onions`]. + pub(crate) fn test_get_inbound_committed_htlcs_with_onion( + &self, cp_id: PublicKey, chan_id: ChannelId, + ) -> usize { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); + chan.inbound_committed_unresolved_htlcs().len() + } + /// Completes channel resumption after locks have been released. /// /// Processes the [`PostMonitorUpdateChanResume`] returned by @@ -10169,6 +10211,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ decode_update_add_htlcs, finalized_claimed_htlcs, failed_htlcs, + committed_outbound_htlc_sources, } => { self.post_monitor_update_unlock( channel_id, @@ -10179,6 +10222,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ decode_update_add_htlcs, finalized_claimed_htlcs, failed_htlcs, + committed_outbound_htlc_sources, ); }, } diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 826fdbf9b0c..360ffe2dc1f 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1211,6 +1211,13 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + // While an inbound HTLC is committed in a channel but not yet forwarded, we store its onion in + // the `Channel` in case we need to remember it on restart. Once it's irrevocably forwarded to the + // outbound edge, we can prune it on the inbound edge. + assert_eq!( + nodes[1].node.test_get_inbound_committed_htlcs_with_onion(nodes[0].node.get_our_node_id(), chan_id_1), + 1 + ); // Decode the HTLC onion but don't forward it to the next hop, such that the HTLC ends up in // `ChannelManager::forward_htlcs` or `ChannelManager::pending_intercepted_htlcs`. @@ -1232,6 +1239,13 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { args_b_c.send_announcement_sigs = (true, true); reconnect_nodes(args_b_c); + // Before an inbound HTLC is irrevocably forwarded, its onion should still be persisted within the + // inbound edge channel. + assert_eq!( + nodes[1].node.test_get_inbound_committed_htlcs_with_onion(nodes[0].node.get_our_node_id(), chan_id_1), + 1 + ); + // Forward the HTLC and ensure we can claim it post-reload. nodes[1].node.process_pending_htlc_forwards(); @@ -1254,6 +1268,12 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false); expect_and_process_pending_htlcs(&nodes[2], false); + // After an inbound HTLC is irrevocably forwarded, its onion should be pruned within the inbound + // edge channel. + assert_eq!( + nodes[1].node.test_get_inbound_committed_htlcs_with_onion(nodes[0].node.get_our_node_id(), chan_id_1), + 0 + ); expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; From d435e5bb46bf281ab5ec0d1d3be8ba809a743588 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 2 Feb 2026 17:16:04 -0500 Subject: [PATCH 208/242] Deterministic reconstruct_manager option in tests We recently merged (test-only, for now) support for the ChannelManager reconstructing its set of pending HTLCs from Channel{Monitor} data, rather than using its own persisted maps. But because we want test coverage of both the new reconstruction codepaths as well as the old persisted map codepaths, in tests we would decide between those two sets of codepaths randomly. We now want to add some tests that require using the new codepaths, so here we add an option to explicitly set whether to reconstruct or not rather than choosing randomly. 
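
With the new field (which is `#[cfg(test)]`-only for now), a test that needs
the reconstruction codepaths can opt in explicitly rather than relying on the
environment. Roughly (a sketch; the full `ChannelManagerReadArgs::new`
argument list is elided):

    let mut read_args = ChannelManagerReadArgs::new(/* ..., */ channel_monitors);
    // `Some(true)` forces reconstruction from Channel{Monitor} data, `Some(false)`
    // forces the legacy persisted maps, and `None` keeps the old behavior of
    // honoring LDK_TEST_REBUILD_MGR_FROM_MONITORS or else choosing randomly.
    read_args.reconstruct_manager_from_monitors = Some(true);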
--- lightning/src/ln/channelmanager.rs | 55 ++++++++++++++--------- lightning/src/ln/functional_test_utils.rs | 8 +++- lightning/src/ln/reload_tests.rs | 2 + 3 files changed, 43 insertions(+), 22 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e50a9b8a2c3..569bb371459 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -17819,6 +17819,15 @@ pub struct ChannelManagerReadArgs< /// /// This is not exported to bindings users because we have no HashMap bindings pub channel_monitors: HashMap>, + + /// Whether the `ChannelManager` should attempt to reconstruct its set of pending HTLCs from + /// `Channel{Monitor}` data rather than its own persisted maps, which is planned to become + /// the default behavior in upcoming versions. + /// + /// If `None`, whether we reconstruct or use the legacy maps will be decided randomly during + /// `ChannelManager::from_channel_manager_data`. + #[cfg(test)] + pub reconstruct_manager_from_monitors: Option, } impl< @@ -17856,6 +17865,8 @@ impl< channel_monitors: hash_map_from_iter( channel_monitors.drain(..).map(|monitor| (monitor.channel_id(), monitor)), ), + #[cfg(test)] + reconstruct_manager_from_monitors: None, } } } @@ -18553,26 +18564,30 @@ impl< #[cfg(not(test))] let reconstruct_manager_from_monitors = false; #[cfg(test)] - let reconstruct_manager_from_monitors = { - use core::hash::{BuildHasher, Hasher}; - - match std::env::var("LDK_TEST_REBUILD_MGR_FROM_MONITORS") { - Ok(val) => match val.as_str() { - "1" => true, - "0" => false, - _ => panic!("LDK_TEST_REBUILD_MGR_FROM_MONITORS must be 0 or 1, got: {}", val), - }, - Err(_) => { - let rand_val = - std::collections::hash_map::RandomState::new().build_hasher().finish(); - if rand_val % 2 == 0 { - true - } else { - false - } - }, - } - }; + let reconstruct_manager_from_monitors = + args.reconstruct_manager_from_monitors.unwrap_or_else(|| { + use core::hash::{BuildHasher, Hasher}; + + match std::env::var("LDK_TEST_REBUILD_MGR_FROM_MONITORS") { + Ok(val) => match val.as_str() { + "1" => true, + "0" => false, + _ => panic!( + "LDK_TEST_REBUILD_MGR_FROM_MONITORS must be 0 or 1, got: {}", + val + ), + }, + Err(_) => { + let rand_val = + std::collections::hash_map::RandomState::new().build_hasher().finish(); + if rand_val % 2 == 0 { + true + } else { + false + } + }, + } + }); // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we // should ensure we try them again on the inbound edge. 
We put them here and do so after we diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 6800078fd6f..25b54087e93 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -911,6 +911,8 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { tx_broadcaster: &broadcaster, logger: &self.logger, channel_monitors, + #[cfg(test)] + reconstruct_manager_from_monitors: None, }, ) .unwrap(); @@ -1309,7 +1311,7 @@ fn check_claimed_htlcs_match_route<'a, 'b, 'c>( pub fn _reload_node<'a, 'b, 'c>( node: &'a Node<'a, 'b, 'c>, config: UserConfig, chanman_encoded: &[u8], - monitors_encoded: &[&[u8]], + monitors_encoded: &[&[u8]], _reconstruct_manager_from_monitors: Option, ) -> TestChannelManager<'b, 'c> { let mut monitors_read = Vec::with_capacity(monitors_encoded.len()); for encoded in monitors_encoded { @@ -1343,6 +1345,8 @@ pub fn _reload_node<'a, 'b, 'c>( tx_broadcaster: node.tx_broadcaster, logger: node.logger, channel_monitors, + #[cfg(test)] + reconstruct_manager_from_monitors: _reconstruct_manager_from_monitors, }, ) .unwrap() @@ -1378,7 +1382,7 @@ macro_rules! reload_node { $node.chain_monitor = &$new_chain_monitor; $new_channelmanager = - _reload_node(&$node, $new_config, &chanman_encoded, $monitors_encoded); + _reload_node(&$node, $new_config, &chanman_encoded, $monitors_encoded, None); $node.node = &$new_channelmanager; $node.onion_messenger.set_offers_handler(&$new_channelmanager); $node.onion_messenger.set_async_payments_handler(&$new_channelmanager); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 360ffe2dc1f..cac187174cf 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -438,6 +438,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), + reconstruct_manager_from_monitors: None, }) { } else { panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return"); }; @@ -456,6 +457,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), + reconstruct_manager_from_monitors: None, }).unwrap(); nodes_0_deserialized = nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); From 30dbf40e6fa26c44a1660dd42f5c9148171d856e Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 3 Feb 2026 12:37:45 -0500 Subject: [PATCH 209/242] Trivially refactor reload_node macro Cleans it up a bit in preparation for adding a new variant in the next commit. --- lightning/src/ln/functional_test_utils.rs | 38 +++++++++++++++++++---- 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 25b54087e93..07f11ed72e8 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1366,8 +1366,10 @@ pub fn _reload_node<'a, 'b, 'c>( } #[macro_export] -macro_rules! reload_node { - ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { +macro_rules! 
_reload_node_inner { + ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: + ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr + ) => { let chanman_encoded = $chanman_encoded; $persister = $crate::util::test_utils::TestPersister::new(); @@ -1381,22 +1383,46 @@ macro_rules! reload_node { ); $node.chain_monitor = &$new_chain_monitor; - $new_channelmanager = - _reload_node(&$node, $new_config, &chanman_encoded, $monitors_encoded, None); + $new_channelmanager = _reload_node( + &$node, + $new_config, + &chanman_encoded, + $monitors_encoded, + $reconstruct_pending_htlcs, + ); $node.node = &$new_channelmanager; $node.onion_messenger.set_offers_handler(&$new_channelmanager); $node.onion_messenger.set_async_payments_handler(&$new_channelmanager); }; +} + +#[macro_export] +macro_rules! reload_node { + // Reload the node using the node's current config ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { let config = $node.node.get_current_config(); - reload_node!( + _reload_node_inner!( $node, config, $chanman_encoded, $monitors_encoded, $persister, $new_chain_monitor, - $new_channelmanager + $new_channelmanager, + None + ); + }; + // Reload the node with the new provided config + ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { + _reload_node_inner!( + $node, + $new_config, + $chanman_encoded, + $monitors_encoded, + $persister, + $new_chain_monitor, + $new_channelmanager, + None ); }; } From 979d5648764da2a71b1f67c10ec329efa80ec5e7 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 23 Jan 2026 15:50:10 -0500 Subject: [PATCH 210/242] Check pruned HTLCs were resolved on startup In a recent commit, we added support for pruning an inbound HTLC's persisted onion once the HTLC has been irrevocably forwarded to the outbound edge. Here, we add a check on startup that those inbound HTLCs were actually handled. Specifically, we check that the inbound HTLC is either (a) currently present in the outbound edge or (b) was removed via claim. If neither of those are true, we infer that the HTLC was removed from the outbound edge via fail and fail the inbound HTLC backwards. --- lightning/src/ln/channel.rs | 34 ++-- lightning/src/ln/channelmanager.rs | 150 +++++++++++++-- lightning/src/ln/functional_test_utils.rs | 17 ++ lightning/src/ln/reload_tests.rs | 222 ++++++++++++++++++++++ 4 files changed, 396 insertions(+), 27 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 8a2bc30af91..e783f483063 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -314,8 +314,8 @@ impl InboundHTLCState { /// `ChannelManager` persist. /// /// Useful for reconstructing the pending HTLC set on startup. -#[derive(Debug)] -enum InboundUpdateAdd { +#[derive(Debug, Clone)] +pub(super) enum InboundUpdateAdd { /// The inbound committed HTLC's update_add_htlc message. WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing @@ -7885,7 +7885,9 @@ where } /// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. 
- pub(super) fn inbound_committed_unresolved_htlcs(&self) -> Vec { + pub(super) fn inbound_committed_unresolved_htlcs( + &self, + ) -> Vec<(PaymentHash, InboundUpdateAdd)> { // We don't want to return an HTLC as needing processing if it already has a resolution that's // pending in the holding cell. let htlc_resolution_in_holding_cell = |id: u64| -> bool { @@ -7903,13 +7905,11 @@ where .pending_inbound_htlcs .iter() .filter_map(|htlc| match &htlc.state { - InboundHTLCState::Committed { - update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, - } => { + InboundHTLCState::Committed { update_add_htlc } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { return None; } - Some(update_add_htlc.clone()) + Some((htlc.payment_hash, update_add_htlc.clone())) }, _ => None, }) @@ -7919,18 +7919,24 @@ where /// Useful when reconstructing the set of pending HTLC forwards when deserializing the /// `ChannelManager`. We don't want to cache an HTLC as needing to be forwarded if it's already /// present in the outbound edge, or else we'll double-forward. - pub(super) fn outbound_htlc_forwards(&self) -> impl Iterator + '_ { + pub(super) fn outbound_htlc_forwards( + &self, + ) -> impl Iterator + '_ { let holding_cell_outbounds = self.context.holding_cell_htlc_updates.iter().filter_map(|htlc| match htlc { - HTLCUpdateAwaitingACK::AddHTLC { source, .. } => match source { - HTLCSource::PreviousHopData(prev_hop_data) => Some(prev_hop_data.clone()), + HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => match source { + HTLCSource::PreviousHopData(prev_hop_data) => { + Some((*payment_hash, prev_hop_data.clone())) + }, _ => None, }, _ => None, }); let committed_outbounds = self.context.pending_outbound_htlcs.iter().filter_map(|htlc| match &htlc.source { - HTLCSource::PreviousHopData(prev_hop_data) => Some(prev_hop_data.clone()), + HTLCSource::PreviousHopData(prev_hop_data) => { + Some((htlc.payment_hash, prev_hop_data.clone())) + }, _ => None, }); holding_cell_outbounds.chain(committed_outbounds) @@ -7967,6 +7973,12 @@ where debug_assert!(false, "If we go to prune an inbound HTLC it should be present") } + /// Useful for testing crash scenarios where the holding cell is not persisted. + #[cfg(test)] + pub(super) fn test_clear_holding_cell(&mut self) { + self.context.holding_cell_htlc_updates.clear() + } + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed( diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 569bb371459..f42d2947153 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -59,9 +59,9 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; use crate::ln::channel::QuiescentAction; use crate::ln::channel::{ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, - FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel, - ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch, - WithChannelContext, + FundedChannel, FundingTxSigned, InboundUpdateAdd, InboundV1Channel, OutboundV1Channel, + PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, + UpdateFulfillCommitFetch, WithChannelContext, }; use crate::ln::channel_state::ChannelDetails; use crate::ln::funding::SpliceContribution; @@ -10183,7 +10183,20 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); - chan.inbound_committed_unresolved_htlcs().len() + chan.inbound_committed_unresolved_htlcs() + .iter() + .filter(|(_, htlc)| matches!(htlc, InboundUpdateAdd::WithOnion { .. })) + .count() + } + + #[cfg(test)] + /// Useful for testing crash scenarios where the holding cell of a channel is not persisted. + pub(crate) fn test_clear_channel_holding_cell(&self, cp_id: PublicKey, chan_id: ChannelId) { + let per_peer_state = self.per_peer_state.read().unwrap(); + let mut peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = + peer_state.channel_by_id.get_mut(&chan_id).and_then(|c| c.as_funded_mut()).unwrap(); + chan.test_clear_holding_cell(); } /// Completes channel resumption after locks have been released. @@ -18293,7 +18306,7 @@ impl< } // Post-deserialization processing - let mut decode_update_add_htlcs = new_hash_map(); + let mut decode_update_add_htlcs: HashMap> = new_hash_map(); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); } @@ -18594,6 +18607,30 @@ impl< // have a fully-constructed `ChannelManager` at the end. let mut pending_claims_to_replay = Vec::new(); + // If we find an inbound HTLC that claims to already be forwarded to the outbound edge, we + // store an identifier for it here and verify that it is either (a) present in the outbound + // edge or (b) removed from the outbound edge via claim. If it's in neither of these states, we + // infer that it was removed from the outbound edge via fail, and fail it backwards to ensure + // that it is handled. + let mut already_forwarded_htlcs: HashMap< + (ChannelId, PaymentHash), + Vec<(HTLCPreviousHopData, u64)>, + > = new_hash_map(); + let prune_forwarded_htlc = |already_forwarded_htlcs: &mut HashMap< + (ChannelId, PaymentHash), + Vec<(HTLCPreviousHopData, u64)>, + >, + prev_hop: &HTLCPreviousHopData, + payment_hash: &PaymentHash| { + if let hash_map::Entry::Occupied(mut entry) = + already_forwarded_htlcs.entry((prev_hop.channel_id, *payment_hash)) + { + entry.get_mut().retain(|(htlc, _)| prev_hop.htlc_id != htlc.htlc_id); + if entry.get().is_empty() { + entry.remove(); + } + } + }; { // If we're tracking pending payments, ensure we haven't lost any by looking at the // ChannelMonitor data for any channels for which we do not have authorative state @@ -18616,16 +18653,33 @@ impl< if reconstruct_manager_from_monitors { if let Some(chan) = peer_state.channel_by_id.get(channel_id) { if let Some(funded_chan) = chan.as_funded() { + let scid_alias = funded_chan.context.outbound_scid_alias(); let inbound_committed_update_adds = funded_chan.inbound_committed_unresolved_htlcs(); - if !inbound_committed_update_adds.is_empty() { - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel`, as part of removing the requirement to regularly persist the - // `ChannelManager`. 
- decode_update_add_htlcs.insert( - funded_chan.context.outbound_scid_alias(), - inbound_committed_update_adds, - ); + for (payment_hash, htlc) in inbound_committed_update_adds { + match htlc { + InboundUpdateAdd::WithOnion { update_add_htlc } => { + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel` as part of removing the requirement to regularly persist the + // `ChannelManager`. + decode_update_add_htlcs + .entry(scid_alias) + .or_insert_with(Vec::new) + .push(update_add_htlc); + }, + InboundUpdateAdd::Forwarded { + hop_data, + outbound_amt_msat, + } => { + already_forwarded_htlcs + .entry((hop_data.channel_id, payment_hash)) + .or_insert_with(Vec::new) + .push((hop_data, outbound_amt_msat)); + }, + InboundUpdateAdd::Legacy => { + return Err(DecodeError::InvalidValue) + }, + } } } } @@ -18672,13 +18726,19 @@ impl< if reconstruct_manager_from_monitors && !is_channel_closed { if let Some(chan) = peer_state.channel_by_id.get(channel_id) { if let Some(funded_chan) = chan.as_funded() { - for prev_hop in funded_chan.outbound_htlc_forwards() { + for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() + { dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs, &prev_hop, "HTLC already forwarded to the outbound edge", &args.logger, ); + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop, + &payment_hash, + ); } } } @@ -18713,6 +18773,11 @@ impl< "HTLC already forwarded to the outbound edge", &&logger, ); + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop_data, + &htlc.payment_hash, + ); } // The ChannelMonitor is now responsible for this HTLC's @@ -19160,7 +19225,7 @@ impl< if reconstruct_manager_from_monitors { // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. - for (src, _, _, _, _, _) in failed_htlcs.iter() { + for (src, payment_hash, _, _, _, _) in failed_htlcs.iter() { if let HTLCSource::PreviousHopData(prev_hop_data) = src { dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs, @@ -19168,6 +19233,7 @@ impl< "HTLC was failed backwards during manager read", &args.logger, ); + prune_forwarded_htlc(&mut already_forwarded_htlcs, prev_hop_data, payment_hash); } } @@ -19313,9 +19379,46 @@ impl< }; let mut processed_claims: HashSet> = new_hash_set(); - for (_, monitor) in args.channel_monitors.iter() { + for (channel_id, monitor) in args.channel_monitors.iter() { for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() { + // If we have unresolved inbound committed HTLCs that were already forwarded to the + // outbound edge and removed via claim, we need to make sure to claim them backwards via + // adding them to `pending_claims_to_replay`. 
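+		// Any entry still left in `already_forwarded_htlcs` after this (i.e. in neither the
+		// outbound channel nor its monitor's stored preimages) is inferred to have been failed
+		// on the outbound edge, and is failed backwards once the manager is rebuilt below.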
+ if let Some(forwarded_htlcs) = + already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) + { + for (hop_data, outbound_amt_msat) in forwarded_htlcs { + let new_pending_claim = + !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _)| { + matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == hop_data.htlc_id && hop.channel_id == hop_data.channel_id) + }); + if new_pending_claim { + let counterparty_node_id = monitor.get_counterparty_node_id(); + let is_channel_closed = channel_manager + .per_peer_state + .read() + .unwrap() + .get(&counterparty_node_id) + .map_or(true, |peer_state_mtx| { + !peer_state_mtx + .lock() + .unwrap() + .channel_by_id + .contains_key(channel_id) + }); + pending_claims_to_replay.push(( + HTLCSource::PreviousHopData(hop_data), + payment_preimage, + outbound_amt_msat, + is_channel_closed, + counterparty_node_id, + monitor.get_funding_txo(), + *channel_id, + )); + } + } + } if !payment_claims.is_empty() { for payment_claim in payment_claims { if processed_claims.contains(&payment_claim.mpp_parts) { @@ -19557,6 +19660,21 @@ impl< channel_manager .fail_htlc_backwards_internal(&source, &hash, &reason, receiver, ev_action); } + for ((_, hash), htlcs) in already_forwarded_htlcs.into_iter() { + for (htlc, _) in htlcs { + let channel_id = htlc.channel_id; + let node_id = htlc.counterparty_node_id; + let source = HTLCSource::PreviousHopData(htlc); + let failure_reason = LocalHTLCFailureReason::TemporaryChannelFailure; + let failure_data = channel_manager.get_htlc_inbound_temp_fail_data(failure_reason); + let reason = HTLCFailReason::reason(failure_reason, failure_data); + let receiver = HTLCHandlingFailureType::Forward { node_id, channel_id }; + // The event completion action is only relevant for HTLCs that originate from our node, not + // forwarded HTLCs. + channel_manager + .fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None); + } + } for ( source, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 07f11ed72e8..07f7c0bd8f3 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1425,6 +1425,23 @@ macro_rules! reload_node { None ); }; + // Reload the node and have the `ChannelManager` use new codepaths that reconstruct its set of + // pending HTLCs from `Channel{Monitor}` data. + ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: + ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr + ) => { + let config = $node.node.get_current_config(); + _reload_node_inner!( + $node, + config, + $chanman_encoded, + $monitors_encoded, + $persister, + $new_chain_monitor, + $new_channelmanager, + $reconstruct_pending_htlcs + ); + }; } pub fn create_funding_transaction<'a, 'b, 'c>( diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index cac187174cf..fa0c77bc9d4 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1861,3 +1861,225 @@ fn outbound_removed_holding_cell_resolved_no_double_forward() { // nodes[0] should now have received the fulfill and generate PaymentSent. 
expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } + +#[test] +fn test_reload_node_with_preimage_in_monitor_claims_htlc() { + // Test that if a forwarding node has an HTLC that was irrevocably removed on the outbound edge + // via claim but is still forwarded-and-unresolved in the inbound edge, that HTLC will not be + // failed back on the inbound edge on reload. + // + // For context, the ChannelManager is moving towards reconstructing the pending inbound HTLC set + // from Channel data on startup. If we find an inbound HTLC that is flagged as already-forwarded, + // we then check that the HTLC is either (a) still present in the outbound edge or (b) removed + // from the outbound edge but with a preimage present in the corresponding ChannelMonitor, + // indicating that it was removed from the outbound edge via claim. If neither of those are the + // case, we infer that the HTLC was removed from the outbound edge via failure and fail the HTLC + // backwards. + // + // Here we ensure that inbound HTLCs in case (b) above will not be failed backwards on manager + // reload. + + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2 = chan_1_2.2; + + // Send a payment from nodes[0] to nodes[2] via nodes[1]. + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); + send_along_route_with_secret( + &nodes[0], route, &[&[&nodes[1], &nodes[2]]], 1_000_000, payment_hash, payment_secret, + ); + + // Claim the payment on nodes[2]. + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); + + // Disconnect nodes[0] from nodes[1] BEFORE processing the fulfill. + // This prevents the claim from propagating back, leaving the inbound HTLC in ::Forwarded state. + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + // Process the fulfill from nodes[2] to nodes[1]. + // This stores the preimage in nodes[1]'s monitor for chan_1_2. + let updates_2_1 = get_htlc_update_msgs(&nodes[2], &node_1_id); + nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates_2_1.update_fulfill_htlcs[0].clone()); + check_added_monitors(&nodes[1], 1); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, false, false); + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); + + // Clear the holding cell's claim entry on chan_0_1 before serialization. + // This simulates a crash where the HTLC was fully removed from the outbound edge but is still + // present on the inbound edge without a resolution. 
+ nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + // At this point: + // - The inbound HTLC on nodes[1] (from nodes[0]) is in ::Forwarded state + // - The preimage IS in nodes[1]'s monitor for chan_1_2 + // - The outbound HTLC to nodes[2] is resolved + // + // Serialize nodes[1] state and monitors before reloading. + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_serialized = get_monitor!(nodes[1], chan_id_1_2).encode(); + + // Reload nodes[1]. + // During deserialization, we track inbound HTLCs that purport to already be forwarded on the + // outbound edge. If any are entirely missing from the outbound edge with no preimage available, + // they will be failed backwards. Otherwise, as in this case where a preimage is available, the + // payment should be claimed backwards. + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized, + Some(true) + ); + + // When the claim is reconstructed during reload, a PaymentForwarded event is generated. + // This event has next_user_channel_id as None since the outbound HTLC was already removed. + // Fetching events triggers the pending monitor update (adding preimage) to be applied. + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::PaymentForwarded { total_fee_earned_msat: Some(1000), .. } => {}, + _ => panic!("Expected PaymentForwarded event"), + } + check_added_monitors(&nodes[1], 1); + + // Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_claims = (0, 1); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received the fulfill and generate PaymentSent. + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} + +#[test] +fn test_reload_node_without_preimage_fails_htlc() { + // Test that if a forwarding node has an HTLC that was removed on the outbound edge via failure + // but is still forwarded-and-unresolved in the inbound edge, that HTLC will be correctly + // failed back on reload via the already_forwarded_htlcs mechanism. + // + // For context, the ChannelManager reconstructs the pending inbound HTLC set from Channel data + // on startup. If an inbound HTLC is present but flagged as already-forwarded, we check that + // the HTLC is either (a) still present in the outbound edge or (b) removed from the outbound + // edge but with a preimage present in the corresponding ChannelMonitor, indicating it was + // removed via claim. If neither, we infer the HTLC was removed via failure and fail it back. + // + // Here we test the failure case: no preimage is present, so the HTLC should be failed back. 
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2 = chan_1_2.2; + + // Send a payment from nodes[0] to nodes[2] via nodes[1]. + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); + send_along_route_with_secret( + &nodes[0], route, &[&[&nodes[1], &nodes[2]]], 1_000_000, payment_hash, payment_secret, + ); + + // Disconnect nodes[0] from nodes[1] BEFORE processing the failure. + // This prevents the fail from propagating back, leaving the inbound HTLC in ::Forwarded state. + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + // Fail the payment on nodes[2] and process the failure to nodes[1]. + // This removes the outbound HTLC and queues a fail in the holding cell. + nodes[2].node.fail_htlc_backwards(&payment_hash); + expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash }] + ); + check_added_monitors(&nodes[2], 1); + + let updates_2_1 = get_htlc_update_msgs(&nodes[2], &node_1_id); + nodes[1].node.handle_update_fail_htlc(node_2_id, &updates_2_1.update_fail_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, false, false); + expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_2_id), channel_id: chan_id_1_2 }] + ); + + // Clear the holding cell's fail entry on chan_0_1 before serialization. + // This simulates a crash where the HTLC was fully removed from the outbound edge but is still + // present on the inbound edge without a resolution. Otherwise, we would not be able to exercise + // the desired failure paths due to the holding cell failure resolution being present. + nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + // Now serialize. The state has: + // - Inbound HTLC on chan_0_1 in ::Forwarded state + // - Outbound HTLC on chan_1_2 resolved (not present) + // - No preimage in monitors (it was a failure) + // - No holding cell entry for the fail (we cleared it) + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_serialized = get_monitor!(nodes[1], chan_id_1_2).encode(); + + // Reload nodes[1]. + // The already_forwarded_htlcs mechanism should detect: + // - Inbound HTLC is in ::Forwarded state + // - Outbound HTLC is not present in outbound channel + // - No preimage in monitors + // Therefore it should fail the HTLC backwards. + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized, + Some(true) + ); + + // After reload, nodes[1] should have generated an HTLCHandlingFailed event. 
+ let events = nodes[1].node.get_and_clear_pending_events(); + assert!(!events.is_empty(), "Expected HTLCHandlingFailed event"); + for event in events { + match event { + Event::HTLCHandlingFailed { .. } => {}, + _ => panic!("Unexpected event {:?}", event), + } + } + + // Process the failure so it goes back into chan_0_1's holding cell. + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 0); // No monitor update yet (peer disconnected) + + // Reconnect nodes[1] to nodes[0]. The fail should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_fails = (0, 1); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received the failure and generate PaymentFailed. + expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); +} From 0ca79af5c3871e80f67d418e04468f7028881b99 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 16 Jan 2026 16:53:55 -0500 Subject: [PATCH 211/242] Support deleting legacy forward map persistence in 0.5 In 0.3+, we are taking steps to remove the requirement of regularly persisting the ChannelManager and instead rebuild the set of HTLC forwards (and the manager generally) from Channel{Monitor} data. We previously merged support for reconstructing the ChannelManager::decode_update_add_htlcs map from channel data, using a new HTLC onion field that will be present for inbound HTLCs received on 0.3+ only. The plan is that in upcoming LDK versions, the manager will reconstruct this map and the other forward/claimable/pending HTLC maps will automatically repopulate themselves on the next call to process_pending_htlc_forwards. As such, once we're in a future version that reconstructs the pending HTLC set, we can stop persisting the legacy ChannelManager maps such as forward_htlcs, pending_intercepted_htlcs since they will never be used. For 0.3 to be compatible with this future version, in this commit we detect that the manager was last written on a version of LDK that doesn't persist the legacy maps. In that case, we don't try to read the old forwards map and run the new reconstruction logic only. --- lightning/src/ln/channelmanager.rs | 58 ++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 19 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f42d2947153..808e776fed6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -16631,6 +16631,17 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { const SERIALIZATION_VERSION: u8 = 1; const MIN_SERIALIZATION_VERSION: u8 = 1; +// We plan to start writing this version in 0.5. +// +// LDK 0.5+ will reconstruct the set of pending HTLCs from `Channel{Monitor}` data that started +// being written in 0.3, ignoring legacy `ChannelManager` HTLC maps on read and not writing them. +// LDK 0.5+ will automatically fail to read if the pending HTLC set cannot be reconstructed, i.e. +// if we were last written with pending HTLCs on 0.2- or if the new 0.3+ fields are missing. +// +// If 0.3 or 0.4 reads this manager version, it knows that the legacy maps were not written and +// acts accordingly. 
+const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 5;
+
 impl_writeable_tlv_based!(PhantomRouteHints, {
 	(2, channels, required_vec),
 	(4, phantom_scid, required),
@@ -17382,6 +17393,8 @@ pub(super) struct ChannelManagerData {
 	forward_htlcs_legacy: HashMap<u64, Vec<HTLCForwardInfo>>,
 	pending_intercepted_htlcs_legacy: HashMap<InterceptId, PendingAddHTLCInfo>,
 	decode_update_add_htlcs_legacy: HashMap<u64, Vec<msgs::UpdateAddHTLC>>,
+	// The `ChannelManager` version that was written.
+	version: u8,
 }
 
 /// Arguments for deserializing [`ChannelManagerData`].
@@ -17405,7 +17418,7 @@ impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger>
 	fn read<R: Read>(
 		reader: &mut R, args: ChannelManagerDataReadArgs<'a, ES, NS, SP, L>,
 	) -> Result<Self, DecodeError> {
-		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
+		let version = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
 		let chain_hash: ChainHash = Readable::read(reader)?;
 		let best_block_height: u32 = Readable::read(reader)?;
@@ -17427,21 +17440,26 @@
 			channels.push(channel);
 		}
 
-		let forward_htlcs_count: u64 = Readable::read(reader)?;
-		let mut forward_htlcs_legacy: HashMap<u64, Vec<HTLCForwardInfo>> =
-			hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
-		for _ in 0..forward_htlcs_count {
-			let short_channel_id = Readable::read(reader)?;
-			let pending_forwards_count: u64 = Readable::read(reader)?;
-			let mut pending_forwards = Vec::with_capacity(cmp::min(
-				pending_forwards_count as usize,
-				MAX_ALLOC_SIZE / mem::size_of::<HTLCForwardInfo>(),
-			));
-			for _ in 0..pending_forwards_count {
-				pending_forwards.push(Readable::read(reader)?);
-			}
-			forward_htlcs_legacy.insert(short_channel_id, pending_forwards);
-		}
+		let forward_htlcs_legacy: HashMap<u64, Vec<HTLCForwardInfo>> =
+			if version < RECONSTRUCT_HTLCS_FROM_CHANS_VERSION {
+				let forward_htlcs_count: u64 = Readable::read(reader)?;
+				let mut fwds = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
+				for _ in 0..forward_htlcs_count {
+					let short_channel_id = Readable::read(reader)?;
+					let pending_forwards_count: u64 = Readable::read(reader)?;
+					let mut pending_forwards = Vec::with_capacity(cmp::min(
+						pending_forwards_count as usize,
+						MAX_ALLOC_SIZE / mem::size_of::<HTLCForwardInfo>(),
+					));
+					for _ in 0..pending_forwards_count {
+						pending_forwards.push(Readable::read(reader)?);
+					}
+					fwds.insert(short_channel_id, pending_forwards);
+				}
+				fwds
+			} else {
+				new_hash_map()
+			};
 
 		let claimable_htlcs_count: u64 = Readable::read(reader)?;
 		let mut claimable_htlcs_list =
@@ -17721,6 +17739,7 @@
 			in_flight_monitor_updates: in_flight_monitor_updates.unwrap_or_default(),
 			peer_storage_dir: peer_storage_dir.unwrap_or_default(),
 			async_receive_offer_cache,
+			version,
 		})
 	}
 }
@@ -18023,6 +18042,7 @@ impl<
 		mut in_flight_monitor_updates,
 		peer_storage_dir,
 		async_receive_offer_cache,
+		version: _version,
 	} = data;
 
 	let empty_peer_state = || PeerState {
@@ -18572,10 +18592,10 @@ impl<
 	// persist that state, relying on it being up-to-date on restart. Newer versions are moving
 	// towards reducing this reliance on regular persistence of the `ChannelManager`, and instead
 	// reconstruct HTLC/payment state based on `Channel{Monitor}` data if
-	// `reconstruct_manager_from_monitors` is set below. Currently it is only set in tests, randomly
-	// to ensure the legacy codepaths also have test coverage.
+	// `reconstruct_manager_from_monitors` is set below. Currently we only set it randomly in tests
+	// to ensure the legacy codepaths also have test coverage.
 	#[cfg(not(test))]
-	let reconstruct_manager_from_monitors = false;
+	let reconstruct_manager_from_monitors = _version >= RECONSTRUCT_HTLCS_FROM_CHANS_VERSION;
 	#[cfg(test)]
 	let reconstruct_manager_from_monitors =
 		args.reconstruct_manager_from_monitors.unwrap_or_else(|| {

From 3b75eee99c9af8a515ff3203f4f0df135e229c27 Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Fri, 6 Feb 2026 14:11:41 -0500
Subject: [PATCH 212/242] Fix docs on ChannelMonitor::payment_preimages

---
 lightning/src/chain/channelmonitor.rs | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index a537ff55874..3c94b919d97 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -1256,18 +1256,19 @@ pub(crate) struct ChannelMonitorImpl {
 	// deserialization
 	current_holder_commitment_number: u64,
 
-	/// The set of payment hashes from inbound payments for which we know the preimage. Payment
-	/// preimages that are not included in any unrevoked local commitment transaction or unrevoked
-	/// remote commitment transactions are automatically removed when commitment transactions are
-	/// revoked. Note that this happens one revocation after it theoretically could, leaving
-	/// preimages present here for the previous state even when the channel is "at rest". This is a
-	/// good safety buffer, but also is important as it ensures we retain payment preimages for the
-	/// previous local commitment transaction, which may have been broadcast already when we see
-	/// the revocation (in setups with redundant monitors).
+	/// The set of payment hashes from inbound payments and forwards for which we know the preimage.
+	/// Payment preimages that are not included in any unrevoked local commitment transaction or
+	/// unrevoked remote commitment transactions are automatically removed when commitment
+	/// transactions are revoked. Note that this happens one revocation after it theoretically could,
+	/// leaving preimages present here for the previous state even when the channel is "at rest".
+	/// This is a good safety buffer, but also is important as it ensures we retain payment preimages
+	/// for the previous local commitment transaction, which may have been broadcast already when we
+	/// see the revocation (in setups with redundant monitors).
 	///
 	/// We also store [`PaymentClaimDetails`] here, tracking the payment information(s) for this
 	/// preimage for inbound payments. This allows us to rebuild the inbound payment information on
-	/// startup even if we lost our `ChannelManager`.
+	/// startup even if we lost our `ChannelManager`. For forwarded HTLCs, the list of
+	/// [`PaymentClaimDetails`] is empty.
 	payment_preimages: HashMap<PaymentHash, (PaymentPreimage, Vec<PaymentClaimDetails>)>,
 
 	// Note that `MonitorEvent`s MUST NOT be generated during update processing, only generated

From 1829dc9f3e4aaaf5504d7eb21697ab58d1ba4d79 Mon Sep 17 00:00:00 2001
From: Jeffrey Czyz
Date: Fri, 6 Feb 2026 17:25:43 -0600
Subject: [PATCH 213/242] Fix thread starvation in test_single_channel_multiple_mpp

The busy-wait loop polling for PaymentClaimed events had no yield,
causing it to continuously acquire ChannelManager locks via
get_and_clear_pending_events(). This could starve the claim_funds
thread of lock access, preventing it from ever queuing the event.

Add a yield_now() call matching the pattern used by the other two spin
loops in this test.

Co-Authored-By: Claude Opus 4.6 --- lightning/src/ln/chanmon_update_fail_tests.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 3fa2073d5ba..e492562d54d 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -4715,6 +4715,9 @@ fn test_single_channel_multiple_mpp() { } have_event = true; } + if !have_event { + std::thread::yield_now(); + } } }); From e71ad81ad0c6240015541e492167fddca7264266 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 5 Feb 2026 15:28:50 -0600 Subject: [PATCH 214/242] Add a read closure to the `legacy` TLV variant Update the `legacy` TLV read/write variant signature from `(legacy, $fieldty, $write)` to `(legacy, $fieldty, $read, $write)`, adding a read closure parameter matching the `custom` variant's signature. The read closure is applied in `_check_missing_tlv!` after all TLV fields are read but before `static_value` fields consume legacy values. This preserves backwards compatibility with `static_value` and `default_value` expressions that reference legacy field variables as `Option<$fieldty>` during TLV reading. The read closure signature differs from `custom`: `FnOnce(Option<&$fieldty>) -> Result<(), DecodeError>`. All existing usage sites return `Ok(())` as their read closure (no-op). Co-Authored-By: Claude Opus 4.6 --- lightning/src/chain/package.rs | 2 +- lightning/src/ln/channel_state.rs | 6 ++--- lightning/src/ln/onion_utils.rs | 6 ++--- lightning/src/ln/outbound_payment.rs | 6 ++--- lightning/src/util/ser_macros.rs | 34 ++++++++++++++++------------ 5 files changed, 29 insertions(+), 25 deletions(-) diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 0abe3534341..0ef8855242b 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -183,7 +183,7 @@ impl_writeable_tlv_based!(RevokedOutput, { (12, on_counterparty_tx_csv, required), // Unused since 0.1, this setting causes downgrades to before 0.1 to refuse to // aggregate `RevokedOutput` claims, which is the more conservative stance. - (14, is_counterparty_balance_on_anchors, (legacy, (), |_| Some(()))), + (14, is_counterparty_balance_on_anchors, (legacy, (), |_| Ok(()), |_| Some(()))), (15, channel_parameters, (option: ReadableArgs, None)), // Added in 0.2. 
}); diff --git a/lightning/src/ln/channel_state.rs b/lightning/src/ln/channel_state.rs index 86e53ba3262..eda79e03308 100644 --- a/lightning/src/ln/channel_state.rs +++ b/lightning/src/ln/channel_state.rs @@ -607,9 +607,9 @@ impl_writeable_tlv_based!(ChannelDetails, { (10, channel_value_satoshis, required), (12, unspendable_punishment_reserve, option), // Note that _user_channel_id_low is used below, but rustc warns anyway - (14, _user_channel_id_low, (legacy, u64, + (14, _user_channel_id_low, (legacy, u64, |_| Ok(()), |us: &ChannelDetails| Some(us.user_channel_id as u64))), - (16, _balance_msat, (legacy, u64, |us: &ChannelDetails| Some(us.next_outbound_htlc_limit_msat))), + (16, _balance_msat, (legacy, u64, |_| Ok(()), |us: &ChannelDetails| Some(us.next_outbound_htlc_limit_msat))), (18, outbound_capacity_msat, required), (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat)), (20, inbound_capacity_msat, required), @@ -623,7 +623,7 @@ impl_writeable_tlv_based!(ChannelDetails, { (33, inbound_htlc_minimum_msat, option), (35, inbound_htlc_maximum_msat, option), // Note that _user_channel_id_high is used below, but rustc warns anyway - (37, _user_channel_id_high, (legacy, u64, + (37, _user_channel_id_high, (legacy, u64, |_| Ok(()), |us: &ChannelDetails| Some((us.user_channel_id >> 64) as u64))), (39, feerate_sat_per_1000_weight, option), (41, channel_shutdown_state, option), diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index d48fcb25179..605f27e9666 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -1943,14 +1943,14 @@ impl Readable for HTLCFailReason { impl_writeable_tlv_based_enum!(HTLCFailReasonRepr, (0, LightningError) => { - (0, data, (legacy, Vec, |us| + (0, data, (legacy, Vec, |_| Ok(()), |us| if let &HTLCFailReasonRepr::LightningError { err: msgs::OnionErrorPacket { ref data, .. }, .. } = us { Some(data) } else { None }) ), - (1, attribution_data, (legacy, AttributionData, |us| + (1, attribution_data, (legacy, AttributionData, |_| Ok(()), |us| if let &HTLCFailReasonRepr::LightningError { err: msgs::OnionErrorPacket { ref attribution_data, .. }, .. } = us { attribution_data.as_ref() } else { @@ -1961,7 +1961,7 @@ impl_writeable_tlv_based_enum!(HTLCFailReasonRepr, (_unused, err, (static_value, msgs::OnionErrorPacket { data: data.ok_or(DecodeError::InvalidValue)?, attribution_data })), }, (1, Reason) => { - (0, _failure_code, (legacy, u16, + (0, _failure_code, (legacy, u16, |_| Ok(()), |r: &HTLCFailReasonRepr| match r { HTLCFailReasonRepr::LightningError{ .. } => None, HTLCFailReasonRepr::Reason{ failure_reason, .. } => Some(failure_reason.failure_code()) diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index ea33bb5d263..170e4e13830 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -2731,7 +2731,7 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment, (5, AwaitingInvoice) => { (0, expiration, required), (2, retry_strategy, required), - (4, _max_total_routing_fee_msat, (legacy, u64, + (4, _max_total_routing_fee_msat, (legacy, u64, |_| Ok(()), |us: &PendingOutboundPayment| match us { PendingOutboundPayment::AwaitingInvoice { route_params_config, .. 
} => route_params_config.max_total_routing_fee_msat, _ => None, @@ -2748,7 +2748,7 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment, (7, InvoiceReceived) => { (0, payment_hash, required), (2, retry_strategy, required), - (4, _max_total_routing_fee_msat, (legacy, u64, + (4, _max_total_routing_fee_msat, (legacy, u64, |_| Ok(()), |us: &PendingOutboundPayment| match us { PendingOutboundPayment::InvoiceReceived { route_params_config, .. } => route_params_config.max_total_routing_fee_msat, _ => None, @@ -2779,7 +2779,7 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment, (11, AwaitingOffer) => { (0, expiration, required), (2, retry_strategy, required), - (4, _max_total_routing_fee_msat, (legacy, u64, + (4, _max_total_routing_fee_msat, (legacy, u64, |_| Ok(()), |us: &PendingOutboundPayment| match us { PendingOutboundPayment::AwaitingOffer { route_params_config, .. } => route_params_config.max_total_routing_fee_msat, _ => None, diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index bd2b5d1983a..cc95fe619e8 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -45,7 +45,7 @@ macro_rules! _encode_tlv { field.write($stream)?; } }; - ($stream: expr, $optional_type: expr, $optional_field: expr, (legacy, $fieldty: ty, $write: expr) $(, $self: ident)?) => { { + ($stream: expr, $optional_type: expr, $optional_field: expr, (legacy, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) => { { let value: Option<_> = $write($($self)?); #[cfg(debug_assertions)] { @@ -64,7 +64,7 @@ macro_rules! _encode_tlv { $crate::_encode_tlv!($stream, $optional_type, value, option); } }; ($stream: expr, $optional_type: expr, $optional_field: expr, (custom, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) => { { - $crate::_encode_tlv!($stream, $optional_type, $optional_field, (legacy, $fieldty, $write) $(, $self)?); + $crate::_encode_tlv!($stream, $optional_type, $optional_field, (legacy, $fieldty, $read, $write) $(, $self)?); } }; ($stream: expr, $type: expr, $field: expr, optional_vec $(, $self: ident)?) => { if !$field.is_empty() { @@ -232,11 +232,11 @@ macro_rules! _get_varint_length_prefixed_tlv_length { $len.0 += field_len; } }; - ($len: expr, $optional_type: expr, $optional_field: expr, (legacy, $fieldty: ty, $write: expr) $(, $self: ident)?) => { + ($len: expr, $optional_type: expr, $optional_field: expr, (legacy, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) => { $crate::_get_varint_length_prefixed_tlv_length!($len, $optional_type, $write($($self)?), option); }; ($len: expr, $optional_type: expr, $optional_field: expr, (custom, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) => { - $crate::_get_varint_length_prefixed_tlv_length!($len, $optional_type, $optional_field, (legacy, $fieldty, $write) $(, $self)?); + $crate::_get_varint_length_prefixed_tlv_length!($len, $optional_type, $optional_field, (legacy, $fieldty, $read, $write) $(, $self)?); }; ($len: expr, $type: expr, $field: expr, optional_vec $(, $self: ident)?) => { if !$field.is_empty() { @@ -320,7 +320,7 @@ macro_rules! 
_check_decoded_tlv_order { ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (option, explicit_type: $fieldty: ty)) => {{ // no-op }}; - ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (legacy, $fieldty: ty, $write: expr)) => {{ + ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (legacy, $fieldty: ty, $read: expr, $write: expr)) => {{ // no-op }}; ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (custom, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) => {{ @@ -398,8 +398,10 @@ macro_rules! _check_missing_tlv { ($last_seen_type: expr, $type: expr, $field: ident, (option, explicit_type: $fieldty: ty)) => {{ // no-op }}; - ($last_seen_type: expr, $type: expr, $field: ident, (legacy, $fieldty: ty, $write: expr)) => {{ - // no-op + ($last_seen_type: expr, $type: expr, $field: ident, (legacy, $fieldty: ty, $read: expr, $write: expr)) => {{ + use $crate::ln::msgs::DecodeError; + let read_result: Result<(), DecodeError> = $read($field.as_ref()); + read_result?; }}; ($last_seen_type: expr, $type: expr, $field: ident, (custom, $fieldty: ty, $read: expr, $write: expr)) => {{ // Note that $type may be 0 making the second comparison always false @@ -463,7 +465,7 @@ macro_rules! _decode_tlv { let _field: &Option<$fieldty> = &$field; $crate::_decode_tlv!($outer_reader, $reader, $field, option); }}; - ($outer_reader: expr, $reader: expr, $field: ident, (legacy, $fieldty: ty, $write: expr)) => {{ + ($outer_reader: expr, $reader: expr, $field: ident, (legacy, $fieldty: ty, $read: expr, $write: expr)) => {{ $crate::_decode_tlv!($outer_reader, $reader, $field, (option, explicit_type: $fieldty)); }}; ($outer_reader: expr, $reader: expr, $field: ident, (custom, $fieldty: ty, $read: expr, $write: expr)) => {{ @@ -858,7 +860,7 @@ macro_rules! _init_tlv_based_struct_field { ($field: ident, option) => { $field }; - ($field: ident, (legacy, $fieldty: ty, $write: expr)) => { + ($field: ident, (legacy, $fieldty: ty, $read: expr, $write: expr)) => { $crate::_init_tlv_based_struct_field!($field, option) }; ($field: ident, (custom, $fieldty: ty, $read: expr, $write: expr)) => { @@ -927,7 +929,7 @@ macro_rules! _init_tlv_field_var { ($field: ident, (option, explicit_type: $fieldty: ty)) => { let mut $field: Option<$fieldty> = None; }; - ($field: ident, (legacy, $fieldty: ty, $write: expr)) => { + ($field: ident, (legacy, $fieldty: ty, $read: expr, $write: expr)) => { $crate::_init_tlv_field_var!($field, (option, explicit_type: $fieldty)); }; ($field: ident, (custom, $fieldty: ty, $read: expr, $write: expr)) => { @@ -1012,10 +1014,12 @@ macro_rules! _decode_and_build { /// [`MaybeReadable`], requiring the TLV to be present. /// If `$fieldty` is `optional_vec`, then `$field` is a [`Vec`], which needs to have its individual elements serialized. /// Note that for `optional_vec` no bytes are written if the vec is empty -/// If `$fieldty` is `(legacy, $ty, $write)` then, when writing, the function $write will be +/// If `$fieldty` is `(legacy, $ty, $read, $write)` then, when writing, the function $write will be /// called with the object being serialized and a returned `Option` and is written as a TLV if -/// `Some`. When reading, an optional field of type `$ty` is read (which can be used in later -/// `default_value` or `static_value` fields by referring to the value by name). +/// `Some`. When reading, an optional field of type `$ty` is read, and after all TLV fields are +/// read, the `$read` closure is called with the `Option<&$ty>` value. 
The `$read` closure should
+/// return a `Result<(), DecodeError>`. Legacy field values can be used in later
+/// `default_value` or `static_value` fields by referring to the value by name.
 /// If `$fieldty` is `(custom, $ty, $read, $write)` then, when writing, the same behavior as
 /// `legacy`, above is used. When reading, if a TLV is present, it is read as `$ty` and the
 /// `$read` method is called with `Some(decoded_$ty_object)`. If no TLV is present, the field
@@ -1039,7 +1043,7 @@ macro_rules! _decode_and_build {
 /// 	(1, tlv_default_integer, (default_value, 7)),
 /// 	(2, tlv_optional_integer, option),
 /// 	(3, tlv_vec_type_integer, optional_vec),
-/// 	(4, unwritten_type, (legacy, u32, |us: &LightningMessage| Some(us.tlv_integer))),
+/// 	(4, unwritten_type, (legacy, u32, |_| Ok(()), |us: &LightningMessage| Some(us.tlv_integer))),
 /// 	(_unused, tlv_upgraded_integer, (static_value, unwritten_type.unwrap_or(0) * 2))
 /// });
 /// ```
@@ -1931,7 +1935,7 @@ mod tests {
 		new_field: (u8, u8),
 	}
 	impl_writeable_tlv_based!(ExpandedField, {
-		(0, old_field, (legacy, u8, |us: &ExpandedField| Some(us.new_field.0))),
+		(0, old_field, (legacy, u8, |_| Ok(()), |us: &ExpandedField| Some(us.new_field.0))),
 		(1, new_field, (default_value, (old_field.ok_or(DecodeError::InvalidValue)?, 0))),
 	});

From 366178336302049d4366113565d82409f36a8aa7 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Mon, 9 Feb 2026 13:52:21 +0000
Subject: [PATCH 215/242] Use `DirEntry::file_type` rather than `metadata...`
 in `list`

In the discussions at #3799 it was noted that `DirEntry::file_type`
will often use cached information rather than making a fresh syscall,
fixing the `list` race condition where we lose files while iterating
the directory for some filesystems on some Unix platforms.

For some reason, that fix didn't make it into the merged PR, and we
rather stuck with `DirEntry::metadata()` which *always* does a fresh
syscall and always exhibits the problematic behavior.

Here we simply swap for `DirEntry::file_type` which at least fixes the
issue for "some filesystems (among them: Btrfs, ext2, ext3, and ext4)"
(per `readdir(3)`).

---
 lightning-persister/src/fs_store.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs
index 73c24dc6fc0..3129748afda 100644
--- a/lightning-persister/src/fs_store.rs
+++ b/lightning-persister/src/fs_store.rs
@@ -560,15 +560,15 @@ fn dir_entry_is_key(dir_entry: &fs::DirEntry) -> Result
Date: Mon, 2 Feb 2026 18:04:48 +0000
Subject: [PATCH 216/242] Pass the `addr` field of `tor_connect_outbound` to
 connection setup

When `setup_outbound` was used to setup a connection proxied over Tor,
it previously set the remote address of the peer to the address of the
Tor proxy. This address of the Tor proxy was assigned to the
`PeerDetails::socket_address` for that peer in
`PeerManager::list_peers`, and if it was not a private IPv4 or IPv6
address, it was also reported to the peer in our init message.

This commit refactors `tor_connect_outbound` to pass its own peer
address parameter directly to the connection setup code. This peer
address will now appear in `PeerManager::list_peers` for outbound Tor
connections made using `tor_connect_outbound`, and will be reported to
the peer in our init message if it is not a private IPv4 or IPv6
address.
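
For illustration, the filtering rule described above can be sketched as a small
standalone predicate. This is a sketch only, not LDK's actual code: the helper
name `may_report_to_peer` and the use of plain `std::net` types are assumptions
made here for clarity, and the real check lives in the `lightning` crate's
address handling.

    use std::net::IpAddr;

    /// Hypothetical helper: returns true if an address is safe to report to a
    /// peer in our init message, i.e. it is not a private, loopback, or
    /// link-local IPv4 address, nor a loopback or unique-local IPv6 address.
    fn may_report_to_peer(addr: &std::net::SocketAddr) -> bool {
        match addr.ip() {
            IpAddr::V4(v4) => !(v4.is_private() || v4.is_loopback() || v4.is_link_local()),
            // fc00::/7 (unique local) and loopback are filtered out.
            IpAddr::V6(v6) => !(v6.is_loopback() || (v6.segments()[0] & 0xfe00) == 0xfc00),
        }
    }

    fn main() {
        // The 127.0.0.1 address the tests below read from the TCP stream is
        // private and would never appear in an init message.
        assert!(!may_report_to_peer(&"127.0.0.1:9735".parse().unwrap()));
        // The [0xab; 4]:0xabab override used in the tests below is globally
        // routable and would be reported.
        assert!(may_report_to_peer(&"171.171.171.171:43947".parse().unwrap()));
    }

Under these assumptions, the `addr` passed to `tor_connect_outbound` now flows
into the same reporting path as any other remote address, which is exactly what
the new tests in the diff below exercise.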
---
 lightning-net-tokio/src/lib.rs | 170 ++++++++++++++++++++++++++++++---
 1 file changed, 157 insertions(+), 13 deletions(-)

diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index eec0e424eaa..ee129669410 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -384,6 +384,16 @@ where
 	PM::Target: APeerManager,
 {
 	let remote_addr = get_addr_from_stream(&stream);
+	setup_outbound_internal(peer_manager, their_node_id, stream, remote_addr)
+}
+
+fn setup_outbound_internal(
+	peer_manager: PM, their_node_id: PublicKey, stream: StdTcpStream,
+	remote_addr: Option<SocketAddress>,
+) -> impl std::future::Future
+where
+	PM::Target: APeerManager,
+{
 	let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
 	#[cfg(test)]
 	let last_us = Arc::clone(&us);
@@ -478,8 +488,15 @@ where
 /// Routes [`connect_outbound`] through Tor. Implements stream isolation for each connection
 /// using a stream isolation parameter sourced from [`EntropySource::get_secure_random_bytes`].
 ///
+/// The `addr` parameter will be set to the [`PeerDetails::socket_address`] for that peer in
+/// [`PeerManager::list_peers`], and if it is not a private IPv4 or IPv6 address, it will also be
+/// reported to the peer in our init message.
+///
 /// Returns a future (as the fn is async) that yields another future, see [`connect_outbound`] for
 /// details on this return value.
+///
+/// [`PeerDetails::socket_address`]: lightning::ln::peer_handler::PeerDetails::socket_address
+/// [`PeerManager::list_peers`]: lightning::ln::peer_handler::PeerManager::list_peers
 pub async fn tor_connect_outbound(
 	peer_manager: PM, their_node_id: PublicKey, addr: SocketAddress, tor_proxy_addr: SocketAddr,
 	entropy_source: ES,
 ) where
 	PM::Target: APeerManager,
 {
 	let connect_fut = async {
-		tor_connect(addr, tor_proxy_addr, entropy_source).await.map(|s| s.into_std().unwrap())
+		tor_connect(addr.clone(), tor_proxy_addr, entropy_source)
+			.await
+			.map(|s| s.into_std().unwrap())
 	};
 	if let Ok(Ok(stream)) =
 		time::timeout(Duration::from_secs(TOR_CONNECT_OUTBOUND_TIMEOUT), connect_fut).await
 	{
-		Some(setup_outbound(peer_manager, their_node_id, stream))
+		Some(setup_outbound_internal(peer_manager, their_node_id, stream, Some(addr)))
 	} else {
 		None
 	}
@@ -772,7 +791,7 @@ mod tests {
 	use lightning::ln::types::ChannelId;
 	use lightning::routing::gossip::NodeId;
 	use lightning::types::features::*;
-	use lightning::util::test_utils::TestNodeSigner;
+	use lightning::util::test_utils::{TestLogger, TestNodeSigner};
 
 	use tokio::sync::mpsc;
 
@@ -781,13 +800,6 @@ mod tests {
 	use std::sync::{Arc, Mutex};
 	use std::time::Duration;
 
-	pub struct TestLogger();
-	impl lightning::util::logger::Logger for TestLogger {
-		fn log(&self, record: lightning::util::logger::Record) {
-			println!("{}", record);
-		}
-	}
-
 	struct MsgHandler {
 		expected_pubkey: PublicKey,
 		pubkey_connected: mpsc::Sender<()>,
@@ -981,7 +993,7 @@ mod tests {
 			a_msg_handler,
 			0,
 			&[1; 32],
-			Arc::new(TestLogger()),
+			Arc::new(TestLogger::new()),
 			Arc::new(TestNodeSigner::new(a_key)),
 		));
 
@@ -1005,7 +1017,7 @@ mod tests {
 			b_msg_handler,
 			0,
 			&[2; 32],
-			Arc::new(TestLogger()),
+			Arc::new(TestLogger::new()),
 			Arc::new(TestNodeSigner::new(b_key)),
 		));
 
@@ -1068,7 +1080,7 @@ mod tests {
 			a_msg_handler,
 			0,
 			&[1; 32],
-			Arc::new(TestLogger()),
+			Arc::new(TestLogger::new()),
 			Arc::new(TestNodeSigner::new(a_key)),
 		));
 
@@ -1154,4 +1166,136 @@ mod tests {
 			assert!(tor_connect(addr, tor_proxy_addr, &entropy_source).await.is_err());
 		}
 	}
+
+	async fn
test_remote_address_with_override(b_addr_override: Option) { + let secp_ctx = Secp256k1::new(); + let a_key = SecretKey::from_slice(&[1; 32]).unwrap(); + let b_key = SecretKey::from_slice(&[1; 32]).unwrap(); + let a_pub = PublicKey::from_secret_key(&secp_ctx, &a_key); + let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key); + + let (a_connected_sender, mut a_connected) = mpsc::channel(1); + let (a_disconnected_sender, _a_disconnected) = mpsc::channel(1); + let a_handler = Arc::new(MsgHandler { + expected_pubkey: b_pub, + pubkey_connected: a_connected_sender, + pubkey_disconnected: a_disconnected_sender, + disconnected_flag: AtomicBool::new(false), + msg_events: Mutex::new(Vec::new()), + }); + let a_msg_handler = MessageHandler { + chan_handler: Arc::clone(&a_handler), + route_handler: Arc::clone(&a_handler), + onion_message_handler: Arc::new(IgnoringMessageHandler {}), + custom_message_handler: Arc::new(IgnoringMessageHandler {}), + send_only_message_handler: Arc::new(IgnoringMessageHandler {}), + }; + let a_logger = Arc::new(TestLogger::new()); + let a_manager = Arc::new(PeerManager::new( + a_msg_handler, + 0, + &[1; 32], + Arc::clone(&a_logger), + Arc::new(TestNodeSigner::new(a_key)), + )); + + let (b_connected_sender, mut b_connected) = mpsc::channel(1); + let (b_disconnected_sender, _b_disconnected) = mpsc::channel(1); + let b_handler = Arc::new(MsgHandler { + expected_pubkey: a_pub, + pubkey_connected: b_connected_sender, + pubkey_disconnected: b_disconnected_sender, + disconnected_flag: AtomicBool::new(false), + msg_events: Mutex::new(Vec::new()), + }); + let b_msg_handler = MessageHandler { + chan_handler: Arc::clone(&b_handler), + route_handler: Arc::clone(&b_handler), + onion_message_handler: Arc::new(IgnoringMessageHandler {}), + custom_message_handler: Arc::new(IgnoringMessageHandler {}), + send_only_message_handler: Arc::new(IgnoringMessageHandler {}), + }; + let b_logger = Arc::new(TestLogger::new()); + let b_manager = Arc::new(PeerManager::new( + b_msg_handler, + 0, + &[2; 32], + Arc::clone(&b_logger), + Arc::new(TestNodeSigner::new(b_key)), + )); + + // We bind on localhost, hoping the environment is properly configured with a local + // address. This may not always be the case in containers and the like, so if this test is + // failing for you check that you have a loopback interface and it is configured with + // 127.0.0.1. + let (conn_a, conn_b) = make_tcp_connection(); + + // Given that `make_tcp_connection` binds the peer to 127.0.0.1, + // `get_addr_from_stream` always returns a private address, and will not be reported to the peer + // in the init message. 
+ let b_addr = b_addr_override + .clone() + .unwrap_or_else(|| super::get_addr_from_stream(&conn_a).unwrap()); + let _fut_a = super::setup_outbound_internal( + Arc::clone(&a_manager), + b_pub, + conn_a, + Some(b_addr.clone()), + ); + let _fut_b = super::setup_inbound(Arc::clone(&b_manager), conn_b); + + tokio::time::timeout(Duration::from_secs(10), a_connected.recv()).await.unwrap(); + tokio::time::timeout(Duration::from_secs(1), b_connected.recv()).await.unwrap(); + + // Check `PeerDetails::socket_address` + + let mut peers = a_manager.list_peers(); + assert_eq!(peers.len(), 1); + let peer = peers.pop().unwrap(); + assert_eq!(peer.socket_address, Some(b_addr)); + + // Check the init message sent to the peer + + let mainnet_hash = ChainHash::using_genesis_block(Network::Testnet); + let a_init_msg = Init { + features: InitFeatures::empty(), + networks: Some(vec![mainnet_hash]), + // We set it to the override here because addresses from the stream are private addresses, + // so they are filtered out and not reported to the peer + remote_network_address: b_addr_override, + }; + a_logger.assert_log( + "lightning::ln::peer_handler", + format!("Enqueueing message Init({:?})", a_init_msg), + 1, + ); + } + + #[tokio::test] + async fn test_remote_address() { + // Test that the remote address of the peer passed to `setup_outbound_internal` is set correctly in the + // corresponding `PeerDetails::socket_address` returned from `PeerManager::list_peers`, and if it is + // not a private address, that it is reported to the peer in the init message. + + // This tests a private address read from `get_addr_from_stream` + test_remote_address_with_override(None).await; + // Make sure these are not private IPv4 or IPv6 addresses; we assert they are present in the init message + test_remote_address_with_override(Some(SocketAddress::TcpIpV4 { + addr: [0xab; 4], + port: 0xabab, + })) + .await; + test_remote_address_with_override(Some(SocketAddress::TcpIpV6 { + addr: [0x2a; 16], + port: 0x2a2a, + })) + .await; + let torproject_onion_addr_str = + "2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion:80"; + let torproject_onion_addr: SocketAddress = torproject_onion_addr_str.parse().unwrap(); + test_remote_address_with_override(Some(torproject_onion_addr)).await; + let torproject_addr_str = "torproject.org:80"; + let torproject_addr: SocketAddress = torproject_addr_str.parse().unwrap(); + test_remote_address_with_override(Some(torproject_addr)).await; + } } From bd358f345b2f9012d631a7b452fbd43b1cee9e84 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 10 Feb 2026 00:01:17 +0000 Subject: [PATCH 217/242] Drop `proptest`s in `lightning-liquidity` `proptest`'s transitive dependency tree has always been somewhat large, but one of them (`rusty-fork`'s `tempfile` dependency) just went ahead with a bump of their `rand` dependency, breaking our MSRV yet again. Because we don't actually use `proptest` for anything interesting, the simplest solution is to simply drop it, which we do here. Note that we'll likely transition the LSPS5 URL type to simply use the `bitreq` URL type over the next few days anyway, so there's not much reason to care about its continued test coverage. Further, in writing this commit it was discovered that our tests in `lsps2/utils.rs` were actually broken on the vast majority of inputs, but proptest wasn't testing with any interesting test cases at all, causing it to be missed entirely! 
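
To make the replacement pattern concrete, here is a minimal, self-contained
sketch of the dependency-free randomness trick the new tests use in place of
`proptest`: `RandomState` is randomly keyed per instance, so building a hasher
and calling `finish()` without writing any bytes yields an arbitrary `u64`
using only std. The `rand_u64` helper name is introduced here for illustration;
the tests in the diff below inline the expression directly.

    use std::collections::hash_map::RandomState;
    use std::hash::{BuildHasher, Hasher};

    // Produce an arbitrary u64 using only std: each RandomState carries a
    // random key, and finish() on an empty hasher yields a key-dependent value.
    fn rand_u64() -> u64 {
        RandomState::new().build_hasher().finish()
    }

    fn main() {
        const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000;
        let amount_msat = rand_u64() % MAX_VALUE_MSAT;
        assert!(amount_msat < MAX_VALUE_MSAT);
        println!("sampled amount: {} msat", amount_msat);
    }

Unlike `proptest`, this draws one random sample per test run rather than many
shrunk cases, an acceptable trade-off given the replaced tests were not
exercising interesting inputs anyway.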
--- ci/ci-tests.sh | 3 - lightning-liquidity/Cargo.toml | 1 - lightning-liquidity/src/lsps2/service.rs | 94 +++++++------- lightning-liquidity/src/lsps2/utils.rs | 29 +---- lightning-liquidity/src/lsps5/url_utils.rs | 135 --------------------- 5 files changed, 45 insertions(+), 217 deletions(-) diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 820935f9100..83b2af277f5 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -16,9 +16,6 @@ PIN_RELEASE_DEPS # pin the release dependencies in our main workspace # The backtrace v0.3.75 crate relies on rustc 1.82 [ "$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p backtrace --precise "0.3.74" --quiet -# proptest 1.9.0 requires rustc 1.82.0 -[ "$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p proptest --precise "1.8.0" --quiet - # Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0. [ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --quiet diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml index d83d66f7570..c6cb4ee294b 100644 --- a/lightning-liquidity/Cargo.toml +++ b/lightning-liquidity/Cargo.toml @@ -39,7 +39,6 @@ lightning = { version = "0.3.0", path = "../lightning", default-features = false lightning-invoice = { version = "0.35.0", path = "../lightning-invoice", default-features = false, features = ["serde", "std"] } lightning-persister = { version = "0.3.0", path = "../lightning-persister", default-features = false } -proptest = "1.0.0" tokio = { version = "1.35", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } parking_lot = { version = "0.12", default-features = false } diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index 1909e871596..35942dcd624 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -2351,64 +2351,58 @@ mod tests { use crate::lsps0::ser::LSPSDateTime; - use proptest::prelude::*; - use bitcoin::{absolute::LockTime, transaction::Version}; use core::str::FromStr; const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000; - fn arb_forward_amounts() -> impl Strategy { - (1u64..MAX_VALUE_MSAT, 1u64..MAX_VALUE_MSAT, 1u64..MAX_VALUE_MSAT, 1u64..MAX_VALUE_MSAT) - .prop_map(|(a, b, c, d)| { - (a, b, c, core::cmp::min(d, a.saturating_add(b).saturating_add(c))) - }) - } + #[test] + fn rand_test_calculate_amount_to_forward() { + use std::collections::hash_map::RandomState; + use std::hash::{BuildHasher, Hasher}; + + let total_fee_msat = RandomState::new().build_hasher().finish() % MAX_VALUE_MSAT; + let htlc_count = (RandomState::new().build_hasher().finish() % 10) as u8; + + let mut htlcs = Vec::new(); + let mut total_received_msat = 0; + let mut htlc_values = Vec::new(); + for i in 0..htlc_count { + let expected_outbound_amount_msat = + RandomState::new().build_hasher().finish() % MAX_VALUE_MSAT; + if total_received_msat + expected_outbound_amount_msat > MAX_VALUE_MSAT { + break; + } + total_received_msat += expected_outbound_amount_msat; + htlc_values.push(total_received_msat); + htlcs.push(InterceptedHTLC { + intercept_id: InterceptId([i; 32]), + expected_outbound_amount_msat, + payment_hash: PaymentHash([i; 32]), + }); + } - proptest! 
{ - #[test] - fn proptest_calculate_amount_to_forward((o_0, o_1, o_2, total_fee_msat) in arb_forward_amounts()) { - let htlcs = vec![ - InterceptedHTLC { - intercept_id: InterceptId([0; 32]), - expected_outbound_amount_msat: o_0, - payment_hash: PaymentHash([0; 32]), - }, - InterceptedHTLC { - intercept_id: InterceptId([1; 32]), - expected_outbound_amount_msat: o_1, - payment_hash: PaymentHash([0; 32]), - }, - InterceptedHTLC { - intercept_id: InterceptId([2; 32]), - expected_outbound_amount_msat: o_2, - payment_hash: PaymentHash([0; 32]), - }, - ]; + if total_fee_msat > total_received_msat { + return; + } - let result = calculate_amount_to_forward_per_htlc(&htlcs, total_fee_msat); - let total_received_msat = o_0 + o_1 + o_2; + let result = calculate_amount_to_forward_per_htlc(&htlcs, total_fee_msat); - if total_received_msat < total_fee_msat { - assert_eq!(result.len(), 0); - } else { - assert_ne!(result.len(), 0); - assert_eq!(result[0].0, htlcs[0].intercept_id); - assert_eq!(result[1].0, htlcs[1].intercept_id); - assert_eq!(result[2].0, htlcs[2].intercept_id); - assert!(result[0].1 <= o_0); - assert!(result[1].1 <= o_1); - assert!(result[2].1 <= o_2); - - let result_sum = result.iter().map(|(_, f)| f).sum::(); - assert_eq!(total_received_msat - result_sum, total_fee_msat); - let five_pct = result_sum as f32 * 0.05; - let fair_share_0 = (o_0 as f32 / total_received_msat as f32) * result_sum as f32; - assert!(result[0].1 as f32 <= fair_share_0 + five_pct); - let fair_share_1 = (o_1 as f32 / total_received_msat as f32) * result_sum as f32; - assert!(result[1].1 as f32 <= fair_share_1 + five_pct); - let fair_share_2 = (o_2 as f32 / total_received_msat as f32) * result_sum as f32; - assert!(result[2].1 as f32 <= fair_share_2 + five_pct); + if total_received_msat < total_fee_msat { + assert_eq!(result.len(), 0); + } else { + assert_eq!(result.len(), htlcs.len()); + let result_sum = result.iter().map(|(_, f)| f).sum::(); + assert_eq!(total_received_msat - result_sum, total_fee_msat); + let five_pct = result_sum as f32 * 0.05; + + for ((htlc, htlc_value), res) in htlcs.iter().zip(htlc_values).zip(result.iter()) { + assert_eq!(res.0, htlc.intercept_id); + assert!(res.1 <= htlc_value); + + let fair_share = + (htlc_value as f32 / total_received_msat as f32) * result_sum as f32; + assert!(res.1 as f32 <= fair_share + five_pct); } } } diff --git a/lightning-liquidity/src/lsps2/utils.rs b/lightning-liquidity/src/lsps2/utils.rs index 9f75a869a0e..998b1d2964d 100644 --- a/lightning-liquidity/src/lsps2/utils.rs +++ b/lightning-liquidity/src/lsps2/utils.rs @@ -60,33 +60,6 @@ pub fn compute_opening_fee( ) -> Option { payment_size_msat .checked_mul(opening_fee_proportional) - .and_then(|f| f.checked_add(999999)) - .and_then(|f| f.checked_div(1000000)) + .map(|f| f.div_ceil(1_000_000)) .map(|f| core::cmp::max(f, opening_fee_min_fee_msat)) } - -#[cfg(test)] -mod tests { - use super::*; - use proptest::prelude::*; - - const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000; - - fn arb_opening_fee_params() -> impl Strategy { - (0u64..MAX_VALUE_MSAT, 0u64..MAX_VALUE_MSAT, 0u64..MAX_VALUE_MSAT) - } - - proptest! 
{ - #[test] - fn test_compute_opening_fee((payment_size_msat, opening_fee_min_fee_msat, opening_fee_proportional) in arb_opening_fee_params()) { - if let Some(res) = compute_opening_fee(payment_size_msat, opening_fee_min_fee_msat, opening_fee_proportional) { - assert!(res >= opening_fee_min_fee_msat); - assert_eq!(res as f32, (payment_size_msat as f32 * opening_fee_proportional as f32)); - } else { - // Check we actually overflowed. - let max_value = u64::MAX as u128; - assert!((payment_size_msat as u128 * opening_fee_proportional as u128) > max_value); - } - } - } -} diff --git a/lightning-liquidity/src/lsps5/url_utils.rs b/lightning-liquidity/src/lsps5/url_utils.rs index c9d5f9e79c7..2d49c10ff08 100644 --- a/lightning-liquidity/src/lsps5/url_utils.rs +++ b/lightning-liquidity/src/lsps5/url_utils.rs @@ -102,138 +102,3 @@ impl Readable for LSPSUrl { Ok(Self(Readable::read(reader)?)) } } - -#[cfg(test)] -mod tests { - use super::*; - use crate::alloc::string::ToString; - use alloc::vec::Vec; - use proptest::prelude::*; - - #[test] - fn test_extremely_long_url() { - let url_str = format!("https://{}/path", "a".repeat(1000)).to_string(); - let url_chars = url_str.chars().count(); - let result = LSPSUrl::parse(url_str); - - assert!(result.is_ok()); - let url = result.unwrap(); - assert_eq!(url.0 .0.chars().count(), url_chars); - } - - #[test] - fn test_parse_http_url() { - let url_str = "http://example.com/path".to_string(); - let url = LSPSUrl::parse(url_str).unwrap_err(); - assert_eq!(url, LSPS5ProtocolError::UnsupportedProtocol); - } - - #[test] - fn valid_lsps_url() { - let test_vec: Vec<&'static str> = vec![ - "https://www.example.org/push?l=1234567890abcopqrstuv&c=best", - "https://www.example.com/path", - "https://example.org", - "https://example.com:8080/path", - "https://api.example.com/v1/resources", - "https://example.com/page#section1", - "https://example.com/search?q=test#results", - "https://user:pass@example.com/", - "https://192.168.1.1/admin", - "https://example.com://path", - "https://example.com/path%20with%20spaces", - "https://example_example.com/path?query=with&spaces=true", - ]; - for url_str in test_vec { - let url = LSPSUrl::parse(url_str.to_string()); - assert!(url.is_ok(), "Failed to parse URL: {}", url_str); - } - } - - #[test] - fn invalid_lsps_url() { - let test_vec = vec![ - "ftp://ftp.example.org/pub/files/document.pdf", - "sftp://user:password@sftp.example.com:22/uploads/", - "ssh://username@host.com:2222", - "lightning://03a.example.com/invoice?amount=10000", - "ftp://user@ftp.example.com/files/", - "https://例子.测试/path", - "a123+-.://example.com", - "a123+-.://example.com", - "https:\\\\example.com\\path", - "https:///whatever", - "https://example.com/path with spaces", - ]; - for url_str in test_vec { - let url = LSPSUrl::parse(url_str.to_string()); - assert!(url.is_err(), "Expected error for URL: {}", url_str); - } - } - - #[test] - fn parsing_errors() { - let test_vec = vec![ - "example.com/path", - "https://bad domain.com/", - "https://example.com\0/path", - "https://", - "ht@ps://example.com", - "http!://example.com", - "1https://example.com", - "https://://example.com", - "https://example.com:port/path", - "https://:8080/path", - "https:", - "://", - "https://example.com\0/path", - ]; - for url_str in test_vec { - let url = LSPSUrl::parse(url_str.to_string()); - assert!(url.is_err(), "Expected error for URL: {}", url_str); - } - } - - fn host_strategy() -> impl Strategy { - prop_oneof![ - proptest::string::string_regex( - 
"[a-z0-9]+(?:-[a-z0-9]+)*(?:\\.[a-z0-9]+(?:-[a-z0-9]+)*)*" - ) - .unwrap(), - (0u8..=255u8, 0u8..=255u8, 0u8..=255u8, 0u8..=255u8) - .prop_map(|(a, b, c, d)| format!("{}.{}.{}.{}", a, b, c, d)) - ] - } - - proptest! { - #[test] - fn proptest_parse_round_trip( - host in host_strategy(), - port in proptest::option::of(0u16..=65535u16), - path in proptest::option::of(proptest::string::string_regex("[a-zA-Z0-9._%&=:@/-]{0,20}").unwrap()), - query in proptest::option::of(proptest::string::string_regex("[a-zA-Z0-9._%&=:@/-]{0,20}").unwrap()), - fragment in proptest::option::of(proptest::string::string_regex("[a-zA-Z0-9._%&=:@/-]{0,20}").unwrap()) - ) { - let mut url = format!("https://{}", host); - if let Some(p) = port { - url.push_str(&format!(":{}", p)); - } - if let Some(pth) = &path { - url.push('/'); - url.push_str(pth); - } - if let Some(q) = &query { - url.push('?'); - url.push_str(q); - } - if let Some(f) = &fragment { - url.push('#'); - url.push_str(f); - } - - let parsed = LSPSUrl::parse(url.clone()).expect("should parse"); - prop_assert_eq!(parsed.url(), url.as_str()); - prop_assert_eq!(parsed.url_length(), url.chars().count()); - } - } -} From 4deb2f7d266de3f41b58479418fb0b26e3d6a025 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 10 Feb 2026 00:41:09 +0000 Subject: [PATCH 218/242] Bump `lightning-types` crate version to fix semver tests 5427b0de7e93ce4ccf63c79de756e0da49e33d0b changed the `lightning-types` API but we forgot to bump the crate version to make semver tests pass. --- lightning-dns-resolver/Cargo.toml | 2 +- lightning-invoice/Cargo.toml | 2 +- lightning-liquidity/Cargo.toml | 2 +- lightning-types/Cargo.toml | 2 +- lightning/Cargo.toml | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lightning-dns-resolver/Cargo.toml b/lightning-dns-resolver/Cargo.toml index 44caf273ff2..5299b2f4676 100644 --- a/lightning-dns-resolver/Cargo.toml +++ b/lightning-dns-resolver/Cargo.toml @@ -11,7 +11,7 @@ rust-version = "1.75" [dependencies] lightning = { version = "0.3.0", path = "../lightning", default-features = false } -lightning-types = { version = "0.3.0", path = "../lightning-types", default-features = false } +lightning-types = { version = "0.4.0", path = "../lightning-types", default-features = false } dnssec-prover = { version = "0.6", default-features = false, features = [ "std", "tokio" ] } tokio = { version = "1.0", default-features = false, features = ["rt"] } diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml index deee8ff330a..2b5d570f43f 100644 --- a/lightning-invoice/Cargo.toml +++ b/lightning-invoice/Cargo.toml @@ -20,7 +20,7 @@ std = [] [dependencies] bech32 = { version = "0.11.0", default-features = false } -lightning-types = { version = "0.3.0", path = "../lightning-types", default-features = false } +lightning-types = { version = "0.4.0", path = "../lightning-types", default-features = false } serde = { version = "1.0", optional = true, default-features = false, features = ["alloc"] } bitcoin = { version = "0.32.4", default-features = false, features = ["secp-recovery"] } diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml index c6cb4ee294b..61f41c15d38 100644 --- a/lightning-liquidity/Cargo.toml +++ b/lightning-liquidity/Cargo.toml @@ -23,7 +23,7 @@ _test_utils = [] [dependencies] lightning = { version = "0.3.0", path = "../lightning", default-features = false } -lightning-types = { version = "0.3.0", path = "../lightning-types", default-features = false } +lightning-types = { version = 
"0.4.0", path = "../lightning-types", default-features = false } lightning-invoice = { version = "0.35.0", path = "../lightning-invoice", default-features = false, features = ["serde"] } lightning-macros = { version = "0.2", path = "../lightning-macros" } diff --git a/lightning-types/Cargo.toml b/lightning-types/Cargo.toml index 89bd919836f..eddd3d27fb0 100644 --- a/lightning-types/Cargo.toml +++ b/lightning-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-types" -version = "0.3.0+git" +version = "0.4.0+git" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index dbcf9f1bed2..fd6c5052359 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -34,7 +34,7 @@ grind_signatures = [] default = ["std", "grind_signatures"] [dependencies] -lightning-types = { version = "0.3.0", path = "../lightning-types", default-features = false } +lightning-types = { version = "0.4.0", path = "../lightning-types", default-features = false } lightning-invoice = { version = "0.35.0", path = "../lightning-invoice", default-features = false } lightning-macros = { version = "0.2", path = "../lightning-macros" } @@ -53,7 +53,7 @@ inventory = { version = "0.3", optional = true } [dev-dependencies] regex = "1.5.6" -lightning-types = { version = "0.3.0", path = "../lightning-types", features = ["_test_utils"] } +lightning-types = { version = "0.4.0", path = "../lightning-types", features = ["_test_utils"] } lightning-macros = { path = "../lightning-macros" } parking_lot = { version = "0.12", default-features = false } From db2a7eb716babf3645bf89f10e9bfd70a0b67eb5 Mon Sep 17 00:00:00 2001 From: elnosh Date: Mon, 9 Feb 2026 20:22:01 -0500 Subject: [PATCH 219/242] Update changelog and remove manually_accept references --- lightning/src/ln/async_signer_tests.rs | 4 +--- lightning/src/ln/priv_short_conf_tests.rs | 4 +--- .../3137-accept-dual-funding-without-contributing.txt | 5 ++--- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 7d28a137d0a..04bca524925 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -372,11 +372,9 @@ fn test_funding_signed_0conf() { fn do_test_funding_signed_0conf(signer_ops: Vec) { // Simulate acquiring the signature for `funding_signed` asynchronously for a zero-conf channel. 
- let mut manually_accept_config = test_default_channel_config(); - let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 9d30d749aa2..a5ccac780f9 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -1396,9 +1396,7 @@ fn test_connect_before_funding() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let mut manually_accept_conf = test_default_channel_config(); - - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); diff --git a/pending_changelog/3137-accept-dual-funding-without-contributing.txt b/pending_changelog/3137-accept-dual-funding-without-contributing.txt index 9ea8de24e54..5e1d0de2d86 100644 --- a/pending_changelog/3137-accept-dual-funding-without-contributing.txt +++ b/pending_changelog/3137-accept-dual-funding-without-contributing.txt @@ -7,9 +7,8 @@ differentiate between an inbound request for a dual-funded (V2) or non-dual-funded (V1) channel to be opened, with value being either of the enum variants `InboundChannelFunds::DualFunded` and `InboundChannelFunds::PushMsat(u64)` corresponding to V2 and V1 channel open requests respectively. - * If `manually_accept_inbound_channels` is false, then V2 channels will be accepted automatically; the - same behaviour as V1 channels. Otherwise, `ChannelManager::accept_inbound_channel()` can also be used - to manually accept an inbound V2 channel. + * Similar to V1 channels, `ChannelManager::accept_inbound_channel()` can also be used + to accept an inbound V2 channel. * 0conf dual-funded channels are not supported. * RBF of dual-funded channel funding transactions is not supported. From 8edfc91522e19df5bb3c7ce0002541c510621582 Mon Sep 17 00:00:00 2001 From: elnosh Date: Mon, 9 Feb 2026 20:30:47 -0500 Subject: [PATCH 220/242] Use handle_and_accept_open_channel in async_signer test --- lightning/src/ln/async_signer_tests.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 04bca524925..b81279c10ac 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -70,9 +70,9 @@ fn do_test_open_channel(zero_conf: bool) { // Handle an inbound channel simulating an async signer. 
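// Aside (illustrative sketch, not part of the patch): the changelog entry
// above and the `handle_and_accept_open_channel` helper used below both boil
// down to the same event-driven accept flow. With manual acceptance enabled,
// an inbound V1 or V2 open surfaces as an Event::OpenChannelRequest and is
// completed with accept_inbound_channel(); `0` is the user_channel_id and
// `None` means "no config override". `node` is an assumed test-node binding.
for event in node.node.get_and_clear_pending_events() {
	if let Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } = event {
		node.node
			.accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, 0, None)
			.unwrap();
	}
}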
nodes[1].disable_next_channel_signer_op(SignerOp::GetPerCommitmentPoint); - nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); if zero_conf { + nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1, "Expected one event, got {}", events.len()); match &events[0] { @@ -90,15 +90,7 @@ fn do_test_open_channel(zero_conf: bool) { ev => panic!("Expected OpenChannelRequest, not {:?}", ev), } } else { - let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1, "Expected one event, got {}", events.len()); - match &events[0] { - Event::OpenChannelRequest { temporary_channel_id, .. } => nodes[1] - .node - .accept_inbound_channel(temporary_channel_id, &node_a_id, 0, None) - .unwrap(), - ev => panic!("Expected OpenChannelRequest, not {:?}", ev), - } + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan_msg); } let channel_id_1 = { From 4e32d105ed3ad0b0a8dc49fb39e7da585ec877af Mon Sep 17 00:00:00 2001 From: elnosh Date: Mon, 9 Feb 2026 20:38:22 -0500 Subject: [PATCH 221/242] Remove explicit usage of test_default_channel_config test_default_channel_config is the default now so it does not need to be set explicitly in some of the tests. Removes unnecessary extra None config. --- lightning/src/ln/reorg_tests.rs | 4 ++-- lightning/src/ln/splicing_tests.rs | 18 ++++++------------ 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index dac92cddc97..b39e8d31a75 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -686,7 +686,7 @@ fn test_htlc_preimage_claim_holder_commitment_after_counterparty_commitment_reor let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let legacy_cfg = test_legacy_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None, None]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -762,7 +762,7 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let legacy_cfg = test_legacy_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None, None]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index ace1783327d..4846f7137cc 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -1081,8 +1081,7 @@ fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: // Tests that we're able to enforce HTLCs onchain during the different stages of a splice. 
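// Aside (sketch, not part of the patch): the cleanup in this commit relies on
// a `None` entry meaning "use the default config", so after this change the
// two spellings below build identical managers in the test harness.
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
// ...is now the same as the old, more verbose form:
let config = test_default_channel_config();
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]);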
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_id_0 = nodes[0].node.get_our_node_id(); @@ -1833,8 +1832,7 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { fn disconnect_on_unexpected_interactive_tx_message() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let initiator = &nodes[0]; @@ -1872,8 +1870,7 @@ fn disconnect_on_unexpected_interactive_tx_message() { fn fail_splice_on_interactive_tx_error() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let initiator = &nodes[0]; @@ -1926,8 +1923,7 @@ fn fail_splice_on_interactive_tx_error() { fn fail_splice_on_tx_abort() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let initiator = &nodes[0]; @@ -1980,8 +1976,7 @@ fn fail_splice_on_tx_abort() { fn fail_splice_on_channel_close() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let initiator = &nodes[0]; @@ -2031,8 +2026,7 @@ fn fail_splice_on_channel_close() { fn fail_quiescent_action_on_channel_close() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let initiator = &nodes[0]; From eb31aeb1b8c8ac7093678abe4441be111d7cf558 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 6 Feb 2026 15:12:45 -0500 Subject: [PATCH 222/242] Trivial: use full path in test macros Useful when using these macros in lightning-tests/upgrade_downgrade_tests --- lightning/src/ln/functional_test_utils.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index a5461154a02..d5a29785a94 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ 
b/lightning/src/ln/functional_test_utils.rs @@ -1383,7 +1383,7 @@ macro_rules! _reload_node_inner { ); $node.chain_monitor = &$new_chain_monitor; - $new_channelmanager = _reload_node( + $new_channelmanager = $crate::ln::functional_test_utils::_reload_node( &$node, $new_config, &chanman_encoded, @@ -1401,7 +1401,7 @@ macro_rules! reload_node { // Reload the node using the node's current config ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { let config = $node.node.get_current_config(); - _reload_node_inner!( + $crate::_reload_node_inner!( $node, config, $chanman_encoded, @@ -1414,7 +1414,7 @@ macro_rules! reload_node { }; // Reload the node with the new provided config ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { - _reload_node_inner!( + $crate::_reload_node_inner!( $node, $new_config, $chanman_encoded, @@ -1431,7 +1431,7 @@ macro_rules! reload_node { ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr ) => { let config = $node.node.get_current_config(); - _reload_node_inner!( + $crate::_reload_node_inner!( $node, config, $chanman_encoded, @@ -2971,7 +2971,7 @@ pub fn check_payment_claimable( #[cfg(any(test, ldk_bench, feature = "_test_utils"))] macro_rules! expect_payment_claimable { ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => { - expect_payment_claimable!( + $crate::expect_payment_claimable!( $node, $expected_payment_hash, $expected_payment_secret, From 07b3deff29e4e3d6eeb164780170e9d9247352b5 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 9 Feb 2026 14:16:12 -0500 Subject: [PATCH 223/242] Split method to reconstruct pending HTLCs into two In the next commit, we want to dedup fields between the InboundUpdateAdd::Forwarded's HTLCPreviousHopData and the outer InboundHTLCOutput/Channel structs, since many fields are duplicated in both places at the moment. As part of doing this cleanly, we first refactor the method that retrieves these InboundUpdateAdds for reconstructing the set of pending HTLCs during ChannelManager deconstruction. Co-Authored-By: Claude Opus 4.5 --- lightning/src/ln/channel.rs | 57 +++++++++++++++++++++--------- lightning/src/ln/channelmanager.rs | 57 +++++++++++++----------------- 2 files changed, 64 insertions(+), 50 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3236ebdefed..88d2e32e764 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -315,7 +315,7 @@ impl InboundHTLCState { /// /// Useful for reconstructing the pending HTLC set on startup. #[derive(Debug, Clone)] -pub(super) enum InboundUpdateAdd { +enum InboundUpdateAdd { /// The inbound committed HTLC's update_add_htlc message. WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing @@ -7885,10 +7885,35 @@ where Ok(()) } - /// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. - pub(super) fn inbound_committed_unresolved_htlcs( + /// Returns true if any committed inbound HTLCs were received pre-LDK 0.3 and cannot be used + /// during `ChannelManager` deserialization to reconstruct the set of pending HTLCs. 
+	pub(super) fn has_legacy_inbound_htlcs(&self) -> bool {
+		self.context.pending_inbound_htlcs.iter().any(|htlc| {
+			matches!(
+				&htlc.state,
+				InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }
+			)
+		})
+	}
+
+	/// Returns committed inbound HTLCs whose onion has not yet been decoded and processed. Useful
+	/// for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`.
+	pub(super) fn inbound_htlcs_pending_decode(
+		&self,
+	) -> impl Iterator<Item = msgs::UpdateAddHTLC> + '_ {
+		self.context.pending_inbound_htlcs.iter().filter_map(|htlc| match &htlc.state {
+			InboundHTLCState::Committed {
+				update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc },
+			} => Some(update_add_htlc.clone()),
+			_ => None,
+		})
+	}
+
+	/// Returns committed inbound HTLCs that have been forwarded but not yet fully resolved. Useful
+	/// when reconstructing the set of pending HTLCs when deserializing the `ChannelManager`.
+	pub(super) fn inbound_forwarded_htlcs(
 		&self,
-	) -> Vec<(PaymentHash, InboundUpdateAdd)> {
+	) -> impl Iterator<Item = (PaymentHash, HTLCPreviousHopData, u64)> + '_ {
 		// We don't want to return an HTLC as needing processing if it already has a resolution that's
 		// pending in the holding cell.
 		let htlc_resolution_in_holding_cell = |id: u64| -> bool {
@@ -7902,19 +7927,17 @@ where
 			})
 		};
 
-		self.context
-			.pending_inbound_htlcs
-			.iter()
-			.filter_map(|htlc| match &htlc.state {
-				InboundHTLCState::Committed { update_add_htlc } => {
-					if htlc_resolution_in_holding_cell(htlc.htlc_id) {
-						return None;
-					}
-					Some((htlc.payment_hash, update_add_htlc.clone()))
-				},
-				_ => None,
-			})
-			.collect()
+		self.context.pending_inbound_htlcs.iter().filter_map(move |htlc| match &htlc.state {
+			InboundHTLCState::Committed {
+				update_add_htlc: InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat },
+			} => {
+				if htlc_resolution_in_holding_cell(htlc.htlc_id) {
+					return None;
+				}
+				Some((htlc.payment_hash, hop_data.clone(), *outbound_amt_msat))
+			},
+			_ => None,
+		})
 	}

 	/// Useful when reconstructing the set of pending HTLC forwards when deserializing the

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index e840d705b8e..bdc0155054f 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -59,9 +59,9 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight;
 use crate::ln::channel::QuiescentAction;
 use crate::ln::channel::{
 	self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult,
-	FundedChannel, FundingTxSigned, InboundUpdateAdd, InboundV1Channel, OutboundV1Channel,
-	PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse,
-	UpdateFulfillCommitFetch, WithChannelContext,
+	FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel,
+	ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch,
+	WithChannelContext,
 };
 use crate::ln::channel_state::ChannelDetails;
 use crate::ln::funding::SpliceContribution;
@@ -10185,10 +10185,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap();
 		let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap();
-		chan.inbound_committed_unresolved_htlcs()
-			.iter()
-			.filter(|(_, htlc)| matches!(htlc, InboundUpdateAdd::WithOnion { ..
})) - .count() + chan.inbound_htlcs_pending_decode().count() } #[cfg(test)] @@ -18626,33 +18623,27 @@ impl< if reconstruct_manager_from_monitors { if let Some(chan) = peer_state.channel_by_id.get(channel_id) { if let Some(funded_chan) = chan.as_funded() { + // Legacy HTLCs are from pre-LDK 0.3 and cannot be reconstructed. + if funded_chan.has_legacy_inbound_htlcs() { + return Err(DecodeError::InvalidValue); + } + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel` as part of removing the requirement to regularly persist the + // `ChannelManager`. let scid_alias = funded_chan.context.outbound_scid_alias(); - let inbound_committed_update_adds = - funded_chan.inbound_committed_unresolved_htlcs(); - for (payment_hash, htlc) in inbound_committed_update_adds { - match htlc { - InboundUpdateAdd::WithOnion { update_add_htlc } => { - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel` as part of removing the requirement to regularly persist the - // `ChannelManager`. - decode_update_add_htlcs - .entry(scid_alias) - .or_insert_with(Vec::new) - .push(update_add_htlc); - }, - InboundUpdateAdd::Forwarded { - hop_data, - outbound_amt_msat, - } => { - already_forwarded_htlcs - .entry((hop_data.channel_id, payment_hash)) - .or_insert_with(Vec::new) - .push((hop_data, outbound_amt_msat)); - }, - InboundUpdateAdd::Legacy => { - return Err(DecodeError::InvalidValue) - }, - } + for update_add_htlc in funded_chan.inbound_htlcs_pending_decode() { + decode_update_add_htlcs + .entry(scid_alias) + .or_insert_with(Vec::new) + .push(update_add_htlc); + } + for (payment_hash, hop_data, outbound_amt_msat) in + funded_chan.inbound_forwarded_htlcs() + { + already_forwarded_htlcs + .entry((hop_data.channel_id, payment_hash)) + .or_insert_with(Vec::new) + .push((hop_data, outbound_amt_msat)); } } } From d3e9cd018dfeab5e4c0884eee73988c3ecc1fb1e Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 9 Feb 2026 14:29:11 -0500 Subject: [PATCH 224/242] Dedup data in InboundUpdateAdd::Forwarded::hop_data Previously, the InboundUpdateAdd::Forwarded enum variant contained an HTLCPreviousHopData, which had a lot of fields that were redundant with the outer InboundHTLCOutput/Channel structs. Here we dedup those fields, which is important because the pending InboundUpdateAdds are persisted whenever the ChannelManager is persisted. 
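A simplified sketch of the dedup pattern this commit applies, using abbreviated stand-in types rather than the real LDK structs: channel-scoped fields live once on the channel, and full per-HTLC records are merged back together on read, so only genuinely per-HTLC data gets persisted.

// Abbreviated stand-ins; the point is the shape of the dedup, not the fields.
struct ChannelScoped {
	channel_id: [u8; 32],
	counterparty_node_id: [u8; 33],
}
struct ForwardedHtlc {
	htlc_id: u64,
	incoming_packet_shared_secret: [u8; 32],
	outbound_amt_msat: u64,
}
struct PrevHopData {
	channel_id: [u8; 32],
	counterparty_node_id: [u8; 33],
	htlc_id: u64,
	incoming_packet_shared_secret: [u8; 32],
}

// Rebuild the full per-HTLC records on demand instead of persisting them.
fn rebuild(chan: &ChannelScoped, htlcs: &[ForwardedHtlc]) -> Vec<(PrevHopData, u64)> {
	htlcs
		.iter()
		.map(|h| {
			let hop = PrevHopData {
				channel_id: chan.channel_id,
				counterparty_node_id: chan.counterparty_node_id,
				htlc_id: h.htlc_id,
				incoming_packet_shared_secret: h.incoming_packet_shared_secret,
			};
			(hop, h.outbound_amt_msat)
		})
		.collect()
}

The diff below applies the same idea to `InboundUpdateAdd::Forwarded`, rebuilding `HTLCPreviousHopData` inside `inbound_forwarded_htlcs`.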
--- lightning/src/ln/channel.rs | 69 ++++++++++++++++++++++++------ lightning/src/ln/channelmanager.rs | 2 +- 2 files changed, 56 insertions(+), 15 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 88d2e32e764..b12061bf118 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,10 +50,10 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCPreviousHopData, - HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, - RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, - MIN_CLTV_EXPIRY_DELTA, + self, BlindedFailure, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, + HTLCPreviousHopData, HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, + PendingHTLCStatus, RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, + MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::interactivetxs::{ @@ -320,12 +320,16 @@ enum InboundUpdateAdd { WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing /// its onion to be pruned and no longer persisted. + /// + /// Contains data that is useful if we need to fail or claim this HTLC backwards after a restart + /// and it's missing in the outbound edge. Forwarded { - /// Useful if we need to fail or claim this HTLC backwards after restart, if it's missing in the - /// outbound edge. - hop_data: HTLCPreviousHopData, - /// Useful if we need to claim this HTLC backwards after a restart and it's missing in the - /// outbound edge, to generate an accurate [`Event::PaymentForwarded`]. + incoming_packet_shared_secret: [u8; 32], + phantom_shared_secret: Option<[u8; 32]>, + trampoline_shared_secret: Option<[u8; 32]>, + blinded_failure: Option, + /// Useful for generating an accurate [`Event::PaymentForwarded`], if we need to claim this + /// HTLC post-restart. 
/// /// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded outbound_amt_msat: u64, @@ -341,8 +345,11 @@ impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, }, (2, Legacy) => {}, (4, Forwarded) => { - (0, hop_data, required), + (0, incoming_packet_shared_secret, required), (2, outbound_amt_msat, required), + (4, phantom_shared_secret, option), + (6, trampoline_shared_secret, option), + (8, blinded_failure, option), }, ); @@ -7927,14 +7934,42 @@ where }) }; + let prev_outbound_scid_alias = self.context.outbound_scid_alias(); + let user_channel_id = self.context.get_user_id(); + let channel_id = self.context.channel_id(); + let outpoint = self.funding_outpoint(); + let counterparty_node_id = self.context.get_counterparty_node_id(); + self.context.pending_inbound_htlcs.iter().filter_map(move |htlc| match &htlc.state { InboundHTLCState::Committed { - update_add_htlc: InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }, + update_add_htlc: + InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret, + phantom_shared_secret, + trampoline_shared_secret, + blinded_failure, + outbound_amt_msat, + }, } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { return None; } - Some((htlc.payment_hash, hop_data.clone(), *outbound_amt_msat)) + // The reconstructed `HTLCPreviousHopData` is used to fail or claim the HTLC backwards + // post-restart, if it is missing in the outbound edge. + let hop_data = HTLCPreviousHopData { + prev_outbound_scid_alias, + user_channel_id: Some(user_channel_id), + htlc_id: htlc.htlc_id, + incoming_packet_shared_secret: *incoming_packet_shared_secret, + phantom_shared_secret: *phantom_shared_secret, + trampoline_shared_secret: *trampoline_shared_secret, + blinded_failure: *blinded_failure, + channel_id, + outpoint, + counterparty_node_id: Some(counterparty_node_id), + cltv_expiry: Some(htlc.cltv_expiry), + }; + Some((htlc.payment_hash, hop_data, *outbound_amt_msat)) }, _ => None, }) @@ -7984,12 +8019,18 @@ where /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to /// persist its onion. pub(super) fn prune_inbound_htlc_onion( - &mut self, htlc_id: u64, hop_data: HTLCPreviousHopData, outbound_amt_msat: u64, + &mut self, htlc_id: u64, hop_data: &HTLCPreviousHopData, outbound_amt_msat: u64, ) { for htlc in self.context.pending_inbound_htlcs.iter_mut() { if htlc.htlc_id == htlc_id { if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { - *update_add_htlc = InboundUpdateAdd::Forwarded { hop_data, outbound_amt_msat }; + *update_add_htlc = InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret: hop_data.incoming_packet_shared_secret, + phantom_shared_secret: hop_data.phantom_shared_secret, + trampoline_shared_secret: hop_data.trampoline_shared_secret, + blinded_failure: hop_data.blinded_failure, + outbound_amt_msat, + }; return; } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bdc0155054f..68eeb7c4e15 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10161,7 +10161,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/
 			if let Some(chan) =
 				peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut())
 			{
-				chan.prune_inbound_htlc_onion(source.htlc_id, source, outbound_amt_msat);
+				chan.prune_inbound_htlc_onion(source.htlc_id, &source, outbound_amt_msat);
 			}
 		}
 	}

From 1685661365f3a4c6c8df6011ab2970322c5335d7 Mon Sep 17 00:00:00 2001
From: Joost Jager
Date: Wed, 11 Feb 2026 14:35:41 +0100
Subject: [PATCH 225/242] Restrict CI build matrix to Linux+MSRV for PRs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Only run the full build matrix (Linux/Windows/macOS × stable/beta/MSRV)
on pushes to main. PR and non-main push builds now only run Linux with
the MSRV toolchain (1.75.0), which is the most important gate for
catching issues.

Co-Authored-By: Claude Opus 4.6
---
 .github/workflows/build.yml | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 6ae6d83ddd3..c0593d43def 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -30,8 +30,14 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        platform: [ self-hosted, windows-latest, macos-latest ]
-        toolchain: [ stable, beta, 1.75.0 ] # 1.75.0 is the MSRV for all crates
+        platform: >-
+          ${{ github.event_name == 'push' && github.ref == 'refs/heads/main'
+          && fromJSON('["self-hosted","windows-latest","macos-latest"]')
+          || fromJSON('["self-hosted"]') }}
+        toolchain: >-
+          ${{ github.event_name == 'push' && github.ref == 'refs/heads/main'
+          && fromJSON('["stable","beta","1.75.0"]')
+          || fromJSON('["1.75.0"]') }}
         exclude:
           - platform: windows-latest
             toolchain: 1.75.0

From 13a83d562221460bb9798a075dd2bd05342a9c36 Mon Sep 17 00:00:00 2001
From: Wilmer Paulino
Date: Fri, 6 Feb 2026 10:11:01 -0800
Subject: [PATCH 226/242] Refactor missing peer/channel error from
 ChannelManager message handlers

We have the same error being returned from several `ChannelManager`
message handlers, so we DRY it up. Doing so also lets us get rid of the
inlined `format!` call, which for some reason prevents `rustfmt` from
formatting code around it.
---
 lightning/src/ln/channelmanager.rs | 343 ++++++++++++++++-------------
 lightning/src/ln/payment_tests.rs  |  18 +-
 lightning/src/ln/reload_tests.rs   |  14 +-
 lightning/src/ln/shutdown_tests.rs |   6 +-
 4 files changed, 212 insertions(+), 169 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index e840d705b8e..a069b01f532 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -939,6 +939,7 @@ struct MsgHandleErrInternal {
 	shutdown_finish: Option<(ShutdownResult, Option<(msgs::ChannelUpdate, NodeId, NodeId)>)>,
 	tx_abort: Option<msgs::TxAbort>,
 }
+
 impl MsgHandleErrInternal {
 	fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self {
 		Self {
@@ -954,6 +955,20 @@ impl MsgHandleErrInternal {
 		}
 	}
 
+	fn no_such_peer(counterparty_node_id: &PublicKey, channel_id: ChannelId) -> Self {
+		let err =
+			format!("No such peer for the passed counterparty_node_id {counterparty_node_id}");
+		Self::send_err_msg_no_close(err, channel_id)
+	}
+
+	fn no_such_channel_for_peer(counterparty_node_id: &PublicKey, channel_id: ChannelId) -> Self {
+		let err = format!(
+			"Got a message for a channel from the wrong node!
No such channel_id {} for the passed counterparty_node_id {}", + channel_id, counterparty_node_id + ); + Self::send_err_msg_no_close(err, channel_id) + } + fn from_no_close(err: msgs::LightningError) -> Self { Self { err, closes_channel: false, shutdown_finish: None, tx_abort: None } } @@ -10812,9 +10827,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - common_fields.temporary_channel_id) + MsgHandleErrInternal::no_such_peer( + counterparty_node_id, + common_fields.temporary_channel_id, + ) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -10884,7 +10900,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.common_fields.temporary_channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.common_fields.temporary_channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -10905,7 +10921,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.common_fields.temporary_channel_id)) } }; let mut pending_events = self.pending_events.lock().unwrap(); @@ -10925,49 +10941,59 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let best_block = *self.best_block.read().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.temporary_channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.temporary_channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - let (mut chan, funding_msg_opt, monitor) = - match peer_state.channel_by_id.remove(&msg.temporary_channel_id) - .map(Channel::into_unfunded_inbound_v1) - { - Some(Ok(inbound_chan)) => { - let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None); - match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) { - Ok(res) => res, - Err((inbound_chan, err)) => { - // We've already removed this inbound channel from the map in `PeerState` - // above so at this point we just need to clean up any lingering entries - // concerning this channel as it is safe to do so. 
- debug_assert!(matches!(err, ChannelError::Close(_))); - let mut chan = Channel::from(inbound_chan); - return Err(self.locked_handle_force_close( + let (mut chan, funding_msg_opt, monitor) = match peer_state + .channel_by_id + .remove(&msg.temporary_channel_id) + .map(Channel::into_unfunded_inbound_v1) + { + Some(Ok(inbound_chan)) => { + let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None); + match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) + { + Ok(res) => res, + Err((inbound_chan, err)) => { + // We've already removed this inbound channel from the map in `PeerState` + // above so at this point we just need to clean up any lingering entries + // concerning this channel as it is safe to do so. + debug_assert!(matches!(err, ChannelError::Close(_))); + let mut chan = Channel::from(inbound_chan); + return Err(self + .locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, &mut chan, - ).1); - }, - } - }, - Some(Err(mut chan)) => { - let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id); - let err = ChannelError::close(err_msg); - return Err(self.locked_handle_force_close( + ) + .1); + }, + } + }, + Some(Err(mut chan)) => { + let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id); + let err = ChannelError::close(err_msg); + return Err(self + .locked_handle_force_close( &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, &mut chan, - ).1); - }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) - }; + ) + .1); + }, + None => { + return Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.temporary_channel_id, + )) + }, + }; let funded_channel_id = chan.context.channel_id(); @@ -11114,7 +11140,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(&counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), ChannelId([0; 32])) + MsgHandleErrInternal::no_such_peer(&counterparty_node_id, ChannelId([0; 32])) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -11152,7 +11178,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -11209,10 +11235,7 @@ This indicates a bug inside LDK. 
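// Aside (condensed from this same diff, not new behavior): the two
// constructors introduced above replace the error construction that every
// handler previously inlined.
//
// Before -- repeated at each call site, and the embedded format!() string
// kept rustfmt from reflowing the expression around it:
//   MsgHandleErrInternal::send_err_msg_no_close(
//       format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
//       msg.channel_id,
//   )
//
// After -- one shared constructor per error shape:
//   MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id)
//   MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)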
Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - channel_id, - ) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11228,26 +11251,27 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Err((error, splice_funding_failed)) => { if let Some(splice_funding_failed) = splice_funding_failed { let pending_events = &mut self.pending_events.lock().unwrap(); - pending_events.push_back((events::Event::SpliceFailed { - channel_id, - counterparty_node_id: *counterparty_node_id, - user_channel_id: channel.context().get_user_id(), - abandoned_funding_txo: splice_funding_failed.funding_txo, - channel_type: splice_funding_failed.channel_type.clone(), - contributed_inputs: splice_funding_failed.contributed_inputs, - contributed_outputs: splice_funding_failed.contributed_outputs, - }, None)); + pending_events.push_back(( + events::Event::SpliceFailed { + channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: channel.context().get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type.clone(), + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, + None, + )); } Err(MsgHandleErrInternal::from_chan_no_close(error, channel_id)) }, } }, - hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::send_err_msg_no_close(format!( - "Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", - counterparty_node_id), channel_id) - ) - } + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + channel_id, + )), } } @@ -11289,9 +11313,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(&counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - msg.channel_id) + MsgHandleErrInternal::no_such_peer(&counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11386,7 +11408,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + Err(MsgHandleErrInternal::no_such_channel_for_peer(&counterparty_node_id, msg.channel_id)) } } } @@ -11398,9 +11420,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11467,7 +11487,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Ok(()) }, hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } } } @@ -11479,9 +11499,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11519,7 +11537,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Ok(persist) }, hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } } } @@ -11532,7 +11550,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11583,7 +11601,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } } } @@ -11596,13 +11614,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!( - "Can't find a peer matching the passed counterparty node_id {}", - counterparty_node_id - ), - msg.channel_id, - ) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11681,7 +11693,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, } } else { - return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); + return Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )); } } for htlc_source in dropped_htlcs.drain(..) { @@ -11703,13 +11718,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!( - "Can't find a peer matching the passed counterparty node_id {}", - counterparty_node_id - ), - msg.channel_id, - ) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let logger; let tx_err: Option<(_, Result)> = { @@ -11724,10 +11733,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ try_channel_entry!(self, peer_state, res, chan_entry); debug_assert_eq!(tx_shutdown_result.is_some(), chan.is_shutdown()); if let Some(msg) = closing_signed { - peer_state.pending_msg_events.push(MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); + peer_state.pending_msg_events.push( + MessageSendEvent::SendClosingSigned { + node_id: counterparty_node_id.clone(), + msg, + }, + ); } if let Some((tx, close_res)) = tx_shutdown_result { // We're done with this channel, we've got a signed closing transaction and @@ -11735,18 +11746,34 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // also implies there are no pending HTLCs left on the channel, so we can // fully delete it from tracking (the channel monitor is still around to // watch for old state broadcasts)! - let err = self.locked_handle_funded_coop_close(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, close_res, chan); + let err = self.locked_handle_funded_coop_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + close_res, + chan, + ); chan_entry.remove(); Some((tx, Err(err))) } else { None } } else { - return try_channel_entry!(self, peer_state, Err(ChannelError::close( - "Got a closing_signed message for an unfunded channel!".into())), chan_entry); + return try_channel_entry!( + self, + peer_state, + Err(ChannelError::close( + "Got a closing_signed message for an unfunded channel!".into() + )), + chan_entry + ); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => { + return Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )) + }, } }; mem::drop(per_peer_state); @@ -11796,7 +11823,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11809,7 +11836,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "Got an update_add_htlc message for an unfunded channel!".into())), chan_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } Ok(()) } @@ -11823,28 +11850,32 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!( - "Can't find a peer matching the passed counterparty node_id {}", - counterparty_node_id - ), - msg.channel_id, - ) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { - let res = try_channel_entry!(self, peer_state, chan.update_fulfill_htlc(&msg), chan_entry); + let res = try_channel_entry!( + self, + peer_state, + chan.update_fulfill_htlc(&msg), + chan_entry + ); if let HTLCSource::PreviousHopData(prev_hop) = &res.0 { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); + let logger = + WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Holding the next revoke_and_ack until the preimage is durably persisted in the inbound edge's ChannelMonitor", ); - peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id) + peer_state + .actions_blocking_raa_monitor_updates + .entry(msg.channel_id) .or_insert_with(Vec::new) - .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop)); + .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data( + &prev_hop, + )); } // Note that we do not need to push an `actions_blocking_raa_monitor_updates` // entry here, even though we *do* need to block the next RAA monitor update. @@ -11852,15 +11883,30 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // `ReleaseRAAChannelMonitorUpdate` action to the event generated when the // outbound HTLC is claimed. This is guaranteed to all complete before we // process the RAA as messages are processed from single peers serially. 
- funding_txo = chan.funding.get_funding_txo().expect("We won't accept a fulfill until funded"); + funding_txo = chan + .funding + .get_funding_txo() + .expect("We won't accept a fulfill until funded"); next_user_channel_id = chan.context.get_user_id(); res } else { - return try_channel_entry!(self, peer_state, Err(ChannelError::close( - "Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_entry); + return try_channel_entry!( + self, + peer_state, + Err(ChannelError::close( + "Got an update_fulfill_htlc message for an unfunded channel!" + .into() + )), + chan_entry + ); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => { + return Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )) + }, } }; self.claim_funds_internal( @@ -11888,7 +11934,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11901,7 +11947,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "Got an update_fail_htlc message for an unfunded channel!".into())), chan_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } Ok(()) } @@ -11914,7 +11960,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11932,7 +11978,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } Ok(()) }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } } @@ -11943,7 +11989,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11998,7 +12044,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } Ok(()) }, - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } } @@ -12008,7 +12054,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -12040,7 +12086,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } Ok(()) }, - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), channel_id)) + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, channel_id)) } } @@ -12150,7 +12196,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let mut peer_state_lock = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) }).map(|mtx| mtx.lock().unwrap())?; let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -12186,7 +12232,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "Got a revoke_and_ack message for an unfunded channel!".into())), chan_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } }; self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id); @@ -12203,7 +12249,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -12217,7 +12263,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "Got an update_fee message for an unfunded channel!".into())), chan_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } Ok(()) } @@ -12227,9 +12273,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - msg.channel_id + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id ) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -12275,9 +12319,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ return try_channel_entry!(self, peer_state, err, chan_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close( - format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), - msg.channel_id + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id )) } } @@ -12288,7 +12330,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -12318,7 +12360,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "Got an announcement_signatures message for an unfunded channel!".into())), chan_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } Ok(()) } @@ -12387,9 +12429,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - msg.channel_id + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id ) })?; let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None); @@ -12478,9 +12518,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ my_current_funding_locked: None, }, }); - return Err(MsgHandleErrInternal::send_err_msg_no_close( - format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", - counterparty_node_id), msg.channel_id) + return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id) ) } } @@ -12506,7 +12544,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -12516,10 +12554,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Look for the channel match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!( - "Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}, channel_id {}", - counterparty_node_id, msg.channel_id, - ), msg.channel_id)), + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)), hash_map::Entry::Occupied(mut chan_entry) => { if self.config.read().unwrap().reject_inbound_splices { let err = ChannelError::WarnAndDisconnect( @@ -12553,17 +12588,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; // Look for the channel match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!( - "Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", - counterparty_node_id - ), msg.channel_id)), + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)), hash_map::Entry::Occupied(mut chan_entry) => { if let Some(ref mut funded_channel) = chan_entry.get_mut().as_funded_mut() { let splice_ack_res = funded_channel.splice_ack( @@ -12588,13 +12620,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!( - "Can't find a peer matching the passed counterparty node_id {}", - counterparty_node_id - ), - msg.channel_id, - ) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -12602,11 +12628,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Look for the channel match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Vacant(_) => { - let err = format!( - "Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", + return Err(MsgHandleErrInternal::no_such_channel_for_peer( counterparty_node_id, - ); - return Err(MsgHandleErrInternal::send_err_msg_no_close(err, msg.channel_id)); + msg.channel_id, + )); }, hash_map::Entry::Occupied(mut chan_entry) => { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 6e47e21ca8b..32a93d20936 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -895,8 +895,13 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { } => { assert_eq!(node_id, node_b_id); nodes[1].node.handle_error(node_a_id, msg); - check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", - &node_b_id)) }, &[node_a_id], 100000); + let peer_msg = format!( + "Got a message for a channel from the wrong node! No such channel_id {} for the passed counterparty_node_id {}", + chan_id, node_b_id + ); + let reason = + ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg) }; + check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); check_added_monitors(&nodes[1], 1); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); nodes[1].tx_broadcaster.clear(); @@ -1101,11 +1106,12 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { } => { assert_eq!(node_id, node_b_id); nodes[1].node.handle_error(node_a_id, msg); - let msg = format!( - "Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", - &node_b_id + let peer_msg = format!( + "Got a message for a channel from the wrong node! No such channel_id {} for the passed counterparty_node_id {}", + chan_id, node_b_id ); - let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(msg) }; + let reason = + ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg) }; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); check_added_monitors(&nodes[1], 1); bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index c7e7175602d..919ed969161 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -691,7 +691,11 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, if let MessageSendEvent::HandleError { ref action, .. 
} = nodes[0].node.get_and_clear_pending_msg_events()[1] {
 		match action {
 			&ErrorAction::SendErrorMessage { ref msg } => {
-				assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()));
+				let peer_msg = format!(
+					"Got a message for a channel from the wrong node! No such channel_id {} for the passed counterparty_node_id {}",
+					chan.2, nodes[1].node.get_our_node_id()
+				);
+				assert_eq!(msg.data, peer_msg);
 				err_msgs_0.push(msg.clone());
 			},
 			_ => panic!("Unexpected event!"),
@@ -703,8 +707,12 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool,
 	nodes[1].node.handle_error(nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
 	assert!(nodes[1].node.list_usable_channels().is_empty());
 	check_added_monitors(&nodes[1], 1);
-	check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) }
-		, &[nodes[0].node.get_our_node_id()], 1000000);
+	let peer_msg = format!(
+		"Got a message for a channel from the wrong node! No such channel_id {} for the passed counterparty_node_id {}",
+		chan.2, nodes[1].node.get_our_node_id()
+	);
+	let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg) };
+	check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 1000000);
 	check_closed_broadcast!(nodes[1], false);
 }
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index 50c8f72f9be..870f00ee9df 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -836,7 +836,11 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
 	// closing_signed so we do it ourselves
 	check_closed_broadcast!(nodes[1], false);
 	check_added_monitors(&nodes[1], 1);
-	let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &node_b_id)) };
+	let peer_msg = format!(
+		"Got a message for a channel from the wrong node! No such channel_id {} for the passed counterparty_node_id {}",
+		chan_1.2, node_b_id
+	);
+	let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg) };
 	check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000);
 }

From b967390934c871b13c4af3713908e7530f499aae Mon Sep 17 00:00:00 2001
From: Wilmer Paulino
Date: Fri, 6 Feb 2026 13:50:43 -0800
Subject: [PATCH 227/242] Refactor missing peer/channel APIError from
 ChannelManager methods

We have the same error being returned from several `ChannelManager` API
methods, so we DRY it up. Doing so also lets us get rid of the inlined
`format!` call, which for some reason prevents `rustfmt` from formatting
code around it.
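To make the shape of the refactor concrete, here is a minimal,
self-contained Rust sketch of the pattern; the `ApiError` enum and the
string-keyed peer map are simplified stand-ins for LDK's actual `APIError`
and `per_peer_state` (the real constructors this patch adds appear in
`lightning/src/util/errors.rs` in the diff below):

    use std::collections::HashMap;

    #[derive(Debug)]
    enum ApiError {
        ChannelUnavailable { err: String },
    }

    impl ApiError {
        // A named constructor keeps the message text in one place and keeps
        // call sites short enough for rustfmt to format normally.
        fn no_such_peer(counterparty_node_id: &str) -> Self {
            Self::ChannelUnavailable {
                err: format!("No such peer for the passed counterparty_node_id {counterparty_node_id}"),
            }
        }
    }

    // Each call site shrinks from a multi-line inlined `format!` down to a
    // single `ok_or_else` expression.
    fn peer_state<'a>(
        per_peer_state: &'a HashMap<String, u64>, counterparty_node_id: &str,
    ) -> Result<&'a u64, ApiError> {
        per_peer_state
            .get(counterparty_node_id)
            .ok_or_else(|| ApiError::no_such_peer(counterparty_node_id))
    }

    fn main() {
        let per_peer_state = HashMap::new();
        assert!(peer_state(&per_peer_state, "02deadbeef").is_err());
    }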
--- lightning/src/ln/channelmanager.rs | 143 +++++++++++------------------ lightning/src/ln/payment_tests.rs | 2 +- lightning/src/util/errors.rs | 25 +++++ 3 files changed, 80 insertions(+), 90 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a069b01f532..efd5026ff25 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3844,8 +3844,9 @@ impl< { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") })?; + let peer_state_mutex = per_peer_state + .get(counterparty_node_id) + .ok_or_else(|| APIError::no_such_peer(counterparty_node_id))?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -3909,12 +3910,7 @@ impl< } }, hash_map::Entry::Vacant(_) => { - return Err(APIError::ChannelUnavailable { - err: format!( - "Channel with id {} not found for the passed counterparty node_id {}", - chan_id, counterparty_node_id, - ), - }); + return Err(APIError::no_such_channel_for_peer(chan_id, counterparty_node_id)); }, } } @@ -4209,11 +4205,7 @@ impl< ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = - per_peer_state.get(peer_node_id).ok_or_else(|| APIError::ChannelUnavailable { - err: format!( - "Can't find a peer matching the passed counterparty node_id {peer_node_id}", - ), - })?; + per_peer_state.get(peer_node_id).ok_or_else(|| APIError::no_such_peer(peer_node_id))?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None); @@ -4257,11 +4249,7 @@ impl< // events anyway. 
Ok(()) } else { - Err(APIError::ChannelUnavailable { - err: format!( - "Channel with id {channel_id} not found for the passed counterparty node_id {peer_node_id}", - ), - }) + Err(APIError::no_such_channel_for_peer(channel_id, peer_node_id)) } } @@ -4605,11 +4593,10 @@ impl< ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = match per_peer_state.get(counterparty_node_id).ok_or_else(|| { - APIError::ChannelUnavailable { - err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - } - }) { + let peer_state_mutex = match per_peer_state + .get(counterparty_node_id) + .ok_or_else(|| APIError::no_such_peer(counterparty_node_id)) + { Ok(p) => p, Err(e) => return Err(e), }; @@ -4654,12 +4641,9 @@ impl< }) } }, - hash_map::Entry::Vacant(_) => Err(APIError::ChannelUnavailable { - err: format!( - "Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id, - ), - }), + hash_map::Entry::Vacant(_) => { + Err(APIError::no_such_channel_for_peer(channel_id, counterparty_node_id)) + }, } } @@ -4685,11 +4669,10 @@ impl< ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = match per_peer_state.get(counterparty_node_id).ok_or_else(|| { - APIError::ChannelUnavailable { - err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - } - }) { + let peer_state_mutex = match per_peer_state + .get(counterparty_node_id) + .ok_or_else(|| APIError::no_such_peer(counterparty_node_id)) + { Ok(p) => p, Err(e) => return Err(e), }; @@ -4742,12 +4725,9 @@ impl< }) } }, - hash_map::Entry::Vacant(_) => Err(APIError::ChannelUnavailable { - err: format!( - "Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id, - ), - }), + hash_map::Entry::Vacant(_) => { + Err(APIError::no_such_channel_for_peer(channel_id, counterparty_node_id)) + }, } } @@ -5965,12 +5945,12 @@ impl< /// which checks the correctness of the funding transaction given the associated channel. 
#[rustfmt::skip] fn funding_transaction_generated_intern) -> Result>( - &self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, is_batch_funding: bool, - mut find_funding_output: FundingOutput, is_manual_broadcast: bool, - ) -> Result<(), APIError> { + &self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, is_batch_funding: bool, + mut find_funding_output: FundingOutput, is_manual_broadcast: bool, + ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(&counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") })?; + .ok_or_else(|| APIError::no_such_peer(&counterparty_node_id))?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -6410,9 +6390,7 @@ impl< let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if peer_state_mutex_opt.is_none() { - funding_tx_signed_result = Err(APIError::ChannelUnavailable { - err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") - }); + funding_tx_signed_result = Err(APIError::no_such_peer(counterparty_node_id)); return NotifyOption::SkipPersistNoEvents; } @@ -6550,12 +6528,8 @@ impl< } }, hash_map::Entry::Vacant(_) => { - funding_tx_signed_result = Err(APIError::ChannelUnavailable { - err: format!( - "Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id - ), - }); + funding_tx_signed_result = + Err(APIError::no_such_channel_for_peer(channel_id, counterparty_node_id)); return NotifyOption::SkipPersistNoEvents; }, } @@ -6638,15 +6612,16 @@ impl< let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") })?; + .ok_or_else(|| APIError::no_such_peer(counterparty_node_id))?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for channel_id in channel_ids { if !peer_state.has_channel(channel_id) { - return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id), - }); + return Err(APIError::no_such_channel_for_peer( + channel_id, + counterparty_node_id, + )); }; } for channel_id in channel_ids { @@ -6741,12 +6716,9 @@ impl< let outbound_scid_alias = { let peer_state_lock = self.per_peer_state.read().unwrap(); - let peer_state_mutex = - peer_state_lock.get(&next_node_id).ok_or_else(|| APIError::ChannelUnavailable { - err: format!( - "Can't find a peer matching the passed counterparty node_id {next_node_id}" - ), - })?; + let peer_state_mutex = peer_state_lock + .get(&next_node_id) + .ok_or_else(|| APIError::no_such_peer(&next_node_id))?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.get(next_hop_channel_id) { @@ -6779,11 +6751,10 @@ impl< logger, "Channel not found when attempting to forward intercepted HTLC" ); - return Err(APIError::ChannelUnavailable { - 
err: format!( - "Channel with id {next_hop_channel_id} not found for the passed counterparty node_id {next_node_id}" - ), - }); + return Err(APIError::no_such_channel_for_peer( + next_hop_channel_id, + &next_node_id, + )); }, } }; @@ -10565,11 +10536,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { log_error!(logger, "Can't find peer matching the passed counterparty node_id"); - - let err_str = format!( - "Can't find a peer matching the passed counterparty node_id {counterparty_node_id}" - ); - APIError::ChannelUnavailable { err: err_str } + APIError::no_such_peer(counterparty_node_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -13236,9 +13203,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if peer_state_mutex_opt.is_none() { - result = Err(APIError::ChannelUnavailable { - err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") - }); + result = Err(APIError::no_such_peer(counterparty_node_id)); return notify; } @@ -13272,10 +13237,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, hash_map::Entry::Vacant(_) => { - result = Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id), - }); + result = Err(APIError::no_such_channel_for_peer( + channel_id, + counterparty_node_id, + )); }, } @@ -13293,9 +13258,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let initiator = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { - err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") - })?; + .ok_or_else(|| APIError::no_such_peer(counterparty_node_id))?; let mut peer_state = peer_state_mutex.lock().unwrap(); match peer_state.channel_by_id.entry(*channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { @@ -13307,10 +13270,12 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ }) } }, - hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id), - }), + hash_map::Entry::Vacant(_) => { + return Err(APIError::no_such_channel_for_peer( + channel_id, + counterparty_node_id, + )) + }, } }; self.check_free_holding_cells(); @@ -20340,13 +20305,13 @@ mod tests { #[rustfmt::skip] fn check_unkown_peer_error(res_err: Result, expected_public_key: PublicKey) { - let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key); + let expected_message = format!("No such peer for the passed counterparty_node_id {}", expected_public_key); check_api_error_message(expected_message, res_err) } #[rustfmt::skip] fn check_channel_unavailable_error(res_err: Result, expected_channel_id: ChannelId, peer_node_id: PublicKey) { - let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id); + let expected_message = format!("No such channel_id {} for the passed counterparty_node_id {}", expected_channel_id, peer_node_id); check_api_error_message(expected_message, res_err) } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 32a93d20936..0eace2eab08 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -2312,7 +2312,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt); let err = format!( - "Channel with id {} not found for the passed counterparty node_id {}", + "No such channel_id {} for the passed counterparty_node_id {}", chan_id, node_c_id, ); assert_eq!(unknown_chan_id_err, Err(APIError::ChannelUnavailable { err })); diff --git a/lightning/src/util/errors.rs b/lightning/src/util/errors.rs index eaaf0130ca2..cd72d60327f 100644 --- a/lightning/src/util/errors.rs +++ b/lightning/src/util/errors.rs @@ -9,7 +9,10 @@ //! Error types live here. 
+use bitcoin::secp256k1::PublicKey; + use crate::ln::script::ShutdownScript; +use crate::ln::types::ChannelId; #[allow(unused_imports)] use crate::prelude::*; @@ -90,6 +93,28 @@ impl fmt::Debug for APIError { } } +impl APIError { + pub(crate) fn no_such_peer(counterparty_node_id: &PublicKey) -> Self { + Self::ChannelUnavailable { + err: format!( + "No such peer for the passed counterparty_node_id {}", + counterparty_node_id + ), + } + } + + pub(crate) fn no_such_channel_for_peer( + channel_id: &ChannelId, counterparty_node_id: &PublicKey, + ) -> Self { + Self::ChannelUnavailable { + err: format!( + "No such channel_id {} for the passed counterparty_node_id {}", + channel_id, counterparty_node_id + ), + } + } +} + impl_writeable_tlv_based_enum_upgradable!(APIError, (0, APIMisuseError) => { (0, err, required), }, (2, FeeRateTooHigh) => { From 4d35de573cac7ac4fe610687b6010cd4284346f4 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 10 Feb 2026 14:08:59 -0800 Subject: [PATCH 228/242] Rustfmt ChannelManager::internal_tx_complete --- lightning/src/ln/channelmanager.rs | 60 +++++++++++++++++------------- 1 file changed, 34 insertions(+), 26 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index efd5026ff25..9a62d775e2e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -11274,14 +11274,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }) } - #[rustfmt::skip] - fn internal_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) -> Result { + fn internal_tx_complete( + &self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete, + ) -> Result { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(&counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(&counterparty_node_id, msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(&counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(&counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -11291,8 +11291,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Ok(tx_complete_result) => { let mut persist = NotifyOption::SkipPersistNoEvents; - if let Some(interactive_tx_msg_send) = tx_complete_result.interactive_tx_msg_send { - let msg_send_event = interactive_tx_msg_send.into_msg_send_event(counterparty_node_id); + if let Some(interactive_tx_msg_send) = + tx_complete_result.interactive_tx_msg_send + { + let msg_send_event = + interactive_tx_msg_send.into_msg_send_event(counterparty_node_id); peer_state.pending_msg_events.push(msg_send_event); persist = NotifyOption::SkipPersistHandleEvents; }; @@ -11307,7 +11310,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, None, )); - // // We have a successful signing session that we need to persist. persist = NotifyOption::DoPersist; } @@ -11345,10 +11347,12 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ }); } if let Some(tx_signatures) = tx_signatures { - peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures { - node_id: counterparty_node_id, - msg: tx_signatures, - }); + peer_state.pending_msg_events.push( + MessageSendEvent::SendTxSignatures { + node_id: counterparty_node_id, + msg: tx_signatures, + }, + ); } // We have a successful signing session that we need to persist. @@ -11360,23 +11364,27 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Err((error, splice_funding_failed)) => { if let Some(splice_funding_failed) = splice_funding_failed { let pending_events = &mut self.pending_events.lock().unwrap(); - pending_events.push_back((events::Event::SpliceFailed { - channel_id: msg.channel_id, - counterparty_node_id, - user_channel_id: chan.context().get_user_id(), - abandoned_funding_txo: splice_funding_failed.funding_txo, - channel_type: splice_funding_failed.channel_type.clone(), - contributed_inputs: splice_funding_failed.contributed_inputs, - contributed_outputs: splice_funding_failed.contributed_outputs, - }, None)); + pending_events.push_back(( + events::Event::SpliceFailed { + channel_id: msg.channel_id, + counterparty_node_id, + user_channel_id: chan.context().get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type.clone(), + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, + None, + )); } Err(MsgHandleErrInternal::from_chan_no_close(error, msg.channel_id)) }, } }, - hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::no_such_channel_for_peer(&counterparty_node_id, msg.channel_id)) - } + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( + &counterparty_node_id, + msg.channel_id, + )), } } From 146f29a9a9edcac5c1a1cd6b52c28a300d7aced7 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 10 Feb 2026 14:09:33 -0800 Subject: [PATCH 229/242] Rustfmt ChannelManager::internal_tx_signatures --- lightning/src/ln/channelmanager.rs | 52 ++++++++++++++++++------------ 1 file changed, 31 insertions(+), 21 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9a62d775e2e..dfdbfe4c07d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -11388,15 +11388,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } - #[rustfmt::skip] - fn internal_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures) - -> Result<(), MsgHandleErrInternal> { + fn internal_tx_signatures( + &self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures, + ) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -11424,19 +11423,28 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ debug_assert!(counterparty_initial_commitment_signed_result.is_none()); if let Some(tx_signatures) = tx_signatures { - peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures { - node_id: *counterparty_node_id, - msg: tx_signatures, - }); + peer_state.pending_msg_events.push( + MessageSendEvent::SendTxSignatures { + node_id: *counterparty_node_id, + msg: tx_signatures, + }, + ); } if let Some(splice_locked) = splice_locked { - peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceLocked { - node_id: *counterparty_node_id, - msg: splice_locked, - }); + peer_state.pending_msg_events.push( + MessageSendEvent::SendSpliceLocked { + node_id: *counterparty_node_id, + msg: splice_locked, + }, + ); } if let Some((ref funding_tx, ref tx_type)) = funding_tx { - self.broadcast_interactive_funding(chan, funding_tx, Some(tx_type.clone()), &self.logger); + self.broadcast_interactive_funding( + chan, + funding_tx, + Some(tx_type.clone()), + &self.logger, + ); } if let Some(splice_negotiated) = splice_negotiated { self.pending_events.lock().unwrap().push_back(( @@ -11446,7 +11454,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ user_channel_id: chan.context.get_user_id(), new_funding_txo: splice_negotiated.funding_txo, channel_type: splice_negotiated.channel_type, - new_funding_redeem_script: splice_negotiated.funding_redeem_script, + new_funding_redeem_script: splice_negotiated + .funding_redeem_script, }, None, )); @@ -11461,9 +11470,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } Ok(()) }, - hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) - } + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )), } } From 0eadc17ebea02f483e2fb36d9e3392928fd39487 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 10 Feb 2026 14:10:07 -0800 Subject: [PATCH 230/242] Rustfmt ChannelManager::internal_tx_abort --- lightning/src/ln/channelmanager.rs | 46 ++++++++++++++++-------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index dfdbfe4c07d..3a3003eca9d 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -11477,21 +11477,21 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } } - #[rustfmt::skip] - fn internal_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort) - -> Result { + fn internal_tx_abort( + &self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort, + ) -> Result { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { let res = chan_entry.get_mut().tx_abort(msg, &self.logger); - let (tx_abort, splice_failed) = try_channel_entry!(self, peer_state, res, chan_entry); + let (tx_abort, splice_failed) = + try_channel_entry!(self, peer_state, res, chan_entry); let persist = if tx_abort.is_some() || splice_failed.is_some() { NotifyOption::DoPersist @@ -11508,22 +11508,26 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(splice_funding_failed) = splice_failed { let pending_events = &mut self.pending_events.lock().unwrap(); - pending_events.push_back((events::Event::SpliceFailed { - channel_id: msg.channel_id, - counterparty_node_id: *counterparty_node_id, - user_channel_id: chan_entry.get().context().get_user_id(), - abandoned_funding_txo: splice_funding_failed.funding_txo, - channel_type: splice_funding_failed.channel_type, - contributed_inputs: splice_funding_failed.contributed_inputs, - contributed_outputs: splice_funding_failed.contributed_outputs, - }, None)); + pending_events.push_back(( + events::Event::SpliceFailed { + channel_id: msg.channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: chan_entry.get().context().get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type, + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, + None, + )); } Ok(persist) }, - hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) - } + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )), } } From e1d0566dd2d8f163986e4ee0fa971a8986b06112 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 10 Feb 2026 14:10:23 -0800 Subject: [PATCH 231/242] Rustfmt ChannelManager::internal_splice_ack --- lightning/src/ln/channelmanager.rs | 40 ++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 3a3003eca9d..89056ef11e3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -12571,33 +12571,47 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } /// Handle incoming splice request ack, transition channel to splice-pending (unless some check fails). 
- #[rustfmt::skip] - fn internal_splice_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck) -> Result<(), MsgHandleErrInternal> { + fn internal_splice_ack( + &self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck, + ) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; // Look for the channel match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)), + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )), hash_map::Entry::Occupied(mut chan_entry) => { if let Some(ref mut funded_channel) = chan_entry.get_mut().as_funded_mut() { let splice_ack_res = funded_channel.splice_ack( - msg, &self.signer_provider, &self.entropy_source, - &self.get_our_node_id(), &self.logger + msg, + &self.signer_provider, + &self.entropy_source, + &self.get_our_node_id(), + &self.logger, ); - let tx_msg_opt = try_channel_entry!(self, peer_state, splice_ack_res, chan_entry); + let tx_msg_opt = + try_channel_entry!(self, peer_state, splice_ack_res, chan_entry); if let Some(tx_msg) = tx_msg_opt { - peer_state.pending_msg_events.push(tx_msg.into_msg_send_event(counterparty_node_id.clone())); + peer_state + .pending_msg_events + .push(tx_msg.into_msg_send_event(counterparty_node_id.clone())); } Ok(()) } else { - try_channel_entry!(self, peer_state, Err(ChannelError::close("Channel is not funded, cannot be spliced".into())), chan_entry) + try_channel_entry!( + self, + peer_state, + Err(ChannelError::close("Channel is not funded, cannot be spliced".into())), + chan_entry + ) } }, } From 775261921261ff91b94781edfce10e08a0b0d2be Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 10 Feb 2026 14:12:12 -0800 Subject: [PATCH 232/242] Rustfmt ChannelManager::internal_splice_init --- lightning/src/ln/channelmanager.rs | 38 ++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 89056ef11e3..eb526c41f19 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -12527,14 +12527,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } /// Handle incoming splice request, transition channel to splice-pending (unless some check fails). 
- #[rustfmt::skip] - fn internal_splice_init(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceInit) -> Result<(), MsgHandleErrInternal> { + fn internal_splice_init( + &self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceInit, + ) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -12543,19 +12543,28 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Look for the channel match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)), + hash_map::Entry::Vacant(_) => { + return Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )) + }, hash_map::Entry::Occupied(mut chan_entry) => { if self.config.read().unwrap().reject_inbound_splices { let err = ChannelError::WarnAndDisconnect( - "Inbound channel splices are currently not allowed".to_owned() + "Inbound channel splices are currently not allowed".to_owned(), ); return Err(MsgHandleErrInternal::from_chan_no_close(err, msg.channel_id)); } if let Some(ref mut funded_channel) = chan_entry.get_mut().as_funded_mut() { let init_res = funded_channel.splice_init( - msg, our_funding_contribution, &self.signer_provider, &self.entropy_source, - &self.get_our_node_id(), &self.logger + msg, + our_funding_contribution, + &self.signer_provider, + &self.entropy_source, + &self.get_our_node_id(), + &self.logger, ); let splice_ack_msg = try_channel_entry!(self, peer_state, init_res, chan_entry); peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceAck { @@ -12564,7 +12573,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }); Ok(()) } else { - try_channel_entry!(self, peer_state, Err(ChannelError::close("Channel is not funded, cannot be spliced".into())), chan_entry) + try_channel_entry!( + self, + peer_state, + Err(ChannelError::close("Channel is not funded, cannot be spliced".into())), + chan_entry + ) } }, } From fb6d61aa50226a1eb55b66a1d0e22e5abde85aaf Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 10 Feb 2026 14:15:24 -0800 Subject: [PATCH 233/242] Rustfmt ChannelManager::internal_commitment_signed --- lightning/src/ln/channelmanager.rs | 38 ++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index eb526c41f19..a21456f0fd5 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -11971,15 +11971,15 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/
 		}
 	}
 
-	#[rustfmt::skip]
-	fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
+	fn internal_commitment_signed(
+		&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned,
+	) -> Result<(), MsgHandleErrInternal> {
 		let best_block = *self.best_block.read().unwrap();
 		let per_peer_state = self.per_peer_state.read().unwrap();
-		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-			.ok_or_else(|| {
-				debug_assert!(false);
-				MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id)
-			})?;
+		let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
+			debug_assert!(false);
+			MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id)
+		})?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
@@ -11988,12 +11988,22 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
 				let funding_txo = chan.funding().get_funding_txo();
 				let (monitor_opt, monitor_update_opt) = try_channel_entry!(
-					self, peer_state, chan.commitment_signed(msg, best_block, &self.signer_provider, &self.fee_estimator, &&logger),
-					chan_entry);
+					self,
+					peer_state,
+					chan.commitment_signed(
+						msg,
+						best_block,
+						&self.signer_provider,
+						&self.fee_estimator,
+						&&logger
+					),
+					chan_entry
+				);
 
 				if let Some(chan) = chan.as_funded_mut() {
 					if let Some(monitor) = monitor_opt {
-						let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor);
+						let monitor_res =
+							self.chain_monitor.watch_channel(monitor.channel_id(), monitor);
 						if let Ok(persist_state) = monitor_res {
 							if let Some(data) = self.handle_initial_monitor(
 								&mut peer_state.in_flight_monitor_updates,
@@ -12008,7 +12018,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 								self.handle_post_monitor_update_chan_resume(data);
 							}
 						} else {
-							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
+							let logger =
+								WithChannelContext::from(&self.logger, &chan.context, None);
 							log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated");
 							let msg = "Channel ID was a duplicate";
 							let reason = ClosureReason::ProcessingError { err: msg.to_owned() };
@@ -12033,7 +12044,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				}
 				Ok(())
 			},
-			hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id))
+			hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer(
+				counterparty_node_id,
+				msg.channel_id,
+			)),
 		}
 	}
 
From 4bb33b2d45f80cfb5a5b922fd20d646288296dd4 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Fri, 16 Jan 2026 09:51:29 +0100
Subject: [PATCH 234/242] Drop `ChannelHandshakeLimits::max_funding_satoshis`

Previously, LDK would by default limit channels to pre-Wumbo sizes and
leave it to the user to bump `ChannelHandshakeLimits::max_funding_satoshis`.
This was mostly for historical reasons, aimed at limiting risk when
Lightning and LDK were not as mature as they are today.

By now, however, we expect ~all users to eventually want to bump this
limit, and having them pick an arbitrary value (or picking a default
ourselves) is kinda odd.
Users that still want to limit risk have ample other means to do so, for
example manually rejecting inbound channels via the manual-acceptance flow
(via `Event::OpenChannelRequest`) or, soon, limiting risk on a per-HTLC
basis via general-purpose HTLC interception.

Furthermore, it turns out that our current implementation is wrong, as we
always announce `Wumbo`/`option_supports_large_channels` support via the
`IN` feature in `ChannelManager` defaults, irrespective of what limit is
configured. This has us announcing support for Wumbo channels, only to
then reject inbound requests if a counterparty actually dares to open one.

To address this, we simply propose to drop the `max_funding_satoshis`
field and corresponding checks entirely, and do what we've announced to
the network for a long time: enable Wumbo by default.
---
 fuzz/src/full_stack.rs                 |  6 +++---
 lightning/src/ln/channel.rs            | 21 +--------------------
 lightning/src/ln/channel_open_tests.rs | 16 +---------------
 lightning/src/util/config.rs           |  8 --------
 4 files changed, 5 insertions(+), 46 deletions(-)

diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 39588bcdc50..f7f912cfd48 100644
--- a/fuzz/src/full_stack.rs
+++ b/fuzz/src/full_stack.rs
@@ -1170,7 +1170,7 @@ fn two_peer_forwarding_seed() -> Vec<u8> {
 	// our network key
 	ext_from_hex("0100000000000000000000000000000000000000000000000000000000000000", &mut test);
 	// config
-	ext_from_hex("000000000090000000000000000064000100000000000100ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000000000", &mut test);
+	ext_from_hex("000000000090000000000000000064000100000000000100ffff00000000000000ffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000000000", &mut test);
 	// new outbound connection with id 0
 	ext_from_hex("00", &mut test);
@@ -1624,7 +1624,7 @@ fn gossip_exchange_seed() -> Vec<u8> {
 	// our network key
 	ext_from_hex("0100000000000000000000000000000000000000000000000000000000000000", &mut test);
 	// config
-	ext_from_hex("000000000090000000000000000064000100000000000100ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000000000", &mut test);
+	ext_from_hex("000000000090000000000000000064000100000000000100ffff00000000000000ffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000000000", &mut test);
 	// new outbound connection with id 0
 	ext_from_hex("00", &mut test);
@@ -1706,7 +1706,7 @@ fn splice_seed() -> Vec<u8> {
 	// our network key
 	ext_from_hex("0100000000000000000000000000000000000000000000000000000000000000", &mut test);
 	// config
-	ext_from_hex("000000000090000000000000000064000100000000000100ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000000000", &mut test);
+	ext_from_hex("000000000090000000000000000064000100000000000100ffff00000000000000ffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000000000", &mut test);
 	// new outbound connection with id 0
 	ext_from_hex("00", &mut test);
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index
3236ebdefed..7e6ee7f2c35 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -3563,13 +3563,6 @@ impl ChannelContext { return Err(ChannelError::close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT))); } - // Check sanity of message fields: - if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis { - return Err(ChannelError::close(format!( - "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}", - config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis, - open_channel_fields.funding_satoshis, our_funding_satoshis))); - } if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { return Err(ChannelError::close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis))); } @@ -16046,10 +16039,7 @@ mod tests { AwaitingChannelReadyFlags, ChannelState, FundedChannel, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundUpdateAdd, InboundV1Channel, OutboundHTLCOutput, OutboundHTLCState, OutboundV1Channel, - }; - use crate::ln::channel::{ - MAX_FUNDING_SATOSHIS_NO_WUMBO, MIN_THEIR_CHAN_RESERVE_SATOSHIS, - TOTAL_BITCOIN_SUPPLY_SATOSHIS, + MIN_THEIR_CHAN_RESERVE_SATOSHIS, }; use crate::ln::channel_keys::{RevocationBasepoint, RevocationKey}; use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; @@ -16106,15 +16096,6 @@ mod tests { assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete); } - #[test] - fn test_max_funding_satoshis_no_wumbo() { - assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000); - assert!( - MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS, - "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence" - ); - } - #[cfg(ldk_test_vectors)] struct Keys { signer: crate::sign::InMemorySigner, diff --git a/lightning/src/ln/channel_open_tests.rs b/lightning/src/ln/channel_open_tests.rs index 059639330f8..08cabc053c5 100644 --- a/lightning/src/ln/channel_open_tests.rs +++ b/lightning/src/ln/channel_open_tests.rs @@ -457,8 +457,7 @@ fn test_channel_resumption_fail_post_funding() { pub fn test_insane_channel_opens() { // Stand up a network of 2 nodes use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS; - let mut legacy_cfg = test_legacy_channel_config(); - legacy_cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1; + let legacy_cfg = test_legacy_channel_config(); let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(legacy_cfg.clone())]); @@ -524,19 +523,6 @@ pub fn test_insane_channel_opens() { use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT; - // Test all mutations that would make the channel open message insane - insane_open_helper( - format!( - "Per our config, funding must be at most {}. It was {}", - TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, - TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2 - ) - .as_str(), - |mut msg| { - msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; - msg - }, - ); insane_open_helper( format!( "Funding must be smaller than the total bitcoin supply. 
It was {}", diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index 420fad6b1e0..dd55d5c2130 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -10,7 +10,6 @@ //! Various user-configurable channel limits and settings which ChannelManager //! applies for you. -use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO; use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT}; #[cfg(fuzzing)] @@ -300,11 +299,6 @@ pub struct ChannelHandshakeLimits { /// Default value: `1000` /// (Minimum of [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]) pub min_funding_satoshis: u64, - /// Maximum allowed satoshis when a channel is funded. This is supplied by the sender and so - /// only applies to inbound channels. - /// - /// Default value: `2^24 - 1` - pub max_funding_satoshis: u64, /// The remote node sets a limit on the minimum size of HTLCs we can send to them. This allows /// you to limit the maximum minimum-size they can require. /// @@ -374,7 +368,6 @@ impl Default for ChannelHandshakeLimits { fn default() -> Self { ChannelHandshakeLimits { min_funding_satoshis: 1000, - max_funding_satoshis: MAX_FUNDING_SATOSHIS_NO_WUMBO, max_htlc_minimum_msat: u64::MAX, min_max_htlc_value_in_flight_msat: 0, max_channel_reserve_satoshis: u64::MAX, @@ -395,7 +388,6 @@ impl Readable for ChannelHandshakeLimits { fn read(reader: &mut R) -> Result { Ok(Self { min_funding_satoshis: Readable::read(reader)?, - max_funding_satoshis: Readable::read(reader)?, max_htlc_minimum_msat: Readable::read(reader)?, min_max_htlc_value_in_flight_msat: Readable::read(reader)?, max_channel_reserve_satoshis: Readable::read(reader)?, From 3246010beba323077c90f7e703258f4d5bfc0b44 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 12 Feb 2026 15:15:51 -0500 Subject: [PATCH 235/242] Trivial: ChannelManager::read var rename prefactor Makes an upcoming commit cleaner: when we add a next_hop variable we want to distinguish it from the previous hop. 
--- lightning/src/ln/channelmanager.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 68eeb7c4e15..cc95424dbbd 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18637,13 +18637,13 @@ impl< .or_insert_with(Vec::new) .push(update_add_htlc); } - for (payment_hash, hop_data, outbound_amt_msat) in + for (payment_hash, prev_hop, outbound_amt_msat) in funded_chan.inbound_forwarded_htlcs() { already_forwarded_htlcs - .entry((hop_data.channel_id, payment_hash)) + .entry((prev_hop.channel_id, payment_hash)) .or_insert_with(Vec::new) - .push((hop_data, outbound_amt_msat)); + .push((prev_hop, outbound_amt_msat)); } } } @@ -19352,14 +19352,14 @@ impl< if let Some(forwarded_htlcs) = already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) { - for (hop_data, outbound_amt_msat) in forwarded_htlcs { + for (prev_hop, outbound_amt_msat) in forwarded_htlcs { let new_pending_claim = !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _)| { - matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == hop_data.htlc_id && hop.channel_id == hop_data.channel_id) + matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) }); if new_pending_claim { let counterparty_node_id = monitor.get_counterparty_node_id(); - let is_channel_closed = channel_manager + let is_downstream_closed = channel_manager .per_peer_state .read() .unwrap() @@ -19372,10 +19372,10 @@ impl< .contains_key(channel_id) }); pending_claims_to_replay.push(( - HTLCSource::PreviousHopData(hop_data), + HTLCSource::PreviousHopData(prev_hop), payment_preimage, outbound_amt_msat, - is_channel_closed, + is_downstream_closed, counterparty_node_id, monitor.get_funding_txo(), *channel_id, From 70ae54fb21b1440c6532aa95fb1d60ede9df96da Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 12 Feb 2026 15:20:31 -0500 Subject: [PATCH 236/242] Trivial: user_channel_id in pending_claims_to_replay Adds support for passing user_channel_id into the pending_claims_to_replay vec, which is used by the ChannelManager on startup. For now user_channel_id is always set to None, but in upcoming commits we will set it to Some when the downstream channel is still open (this is currently a bug). Separated out here for reviewability. 
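To make the staging concrete, a hedged sketch of the pattern this commit applies: widen the replay entry by an `Option` that is uniformly `None` for now, so the later behavioral change becomes a minimal diff. Types here are illustrative stand-ins, not LDK's real `pending_claims_to_replay` entry (which also carries the HTLC source, preimage, funding outpoint, and more).

    type ChannelId = [u8; 32];

    // Before this commit the entry ended at the amount; it now also carries
    // the downstream user_channel_id, initially always None.
    type ClaimToReplay = (ChannelId, u64 /* amt_msat */, Option<u128> /* user_channel_id */);

    fn queue_claim(claims: &mut Vec<ClaimToReplay>, channel_id: ChannelId, amt_msat: u64) {
        // Placeholder for now; a follow-up commit supplies Some(id) when the
        // downstream channel is still open.
        claims.push((channel_id, amt_msat, None));
    }

    fn main() {
        let mut claims = Vec::new();
        queue_claim(&mut claims, [0u8; 32], 1_000);
        assert_eq!(claims[0].2, None);
    }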
--- lightning/src/ln/channelmanager.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index cc95424dbbd..ac2af352e34 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18988,7 +18988,7 @@ impl< Some((htlc_source, payment_preimage, htlc.amount_msat, is_channel_closed, monitor.get_counterparty_node_id(), - monitor.get_funding_txo(), monitor.channel_id())) + monitor.get_funding_txo(), monitor.channel_id(), None)) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage @@ -19354,7 +19354,7 @@ impl< { for (prev_hop, outbound_amt_msat) in forwarded_htlcs { let new_pending_claim = - !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _)| { + !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _, _)| { matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) }); if new_pending_claim { @@ -19379,6 +19379,7 @@ impl< counterparty_node_id, monitor.get_funding_txo(), *channel_id, + None, )); } } @@ -19648,6 +19649,7 @@ impl< downstream_node_id, downstream_funding, downstream_channel_id, + downstream_user_channel_id, ) in pending_claims_to_replay { // We use `downstream_closed` in place of `from_onchain` here just as a guess - we @@ -19663,7 +19665,7 @@ impl< downstream_node_id, downstream_funding, downstream_channel_id, - None, + downstream_user_channel_id, None, None, ); From b3b59e6dfa51b7e2f0fe62575c48693ffeb12332 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 10 Feb 2026 15:18:58 -0500 Subject: [PATCH 237/242] Persist outbound channel info in inbound HTLCs We need these fields to generate a correct PaymentForwarded event if we need to claim this inbound HTLC backwards after restart and it's already been claimed and removed on the outbound edge. --- lightning/src/ln/channel.rs | 53 +++++++++++++++++++++--------- lightning/src/ln/channelmanager.rs | 42 ++++++++++++++++++----- 2 files changed, 72 insertions(+), 23 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index b12061bf118..37a0661de76 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -308,6 +308,32 @@ impl InboundHTLCState { } } +/// Information about the outbound hop for a forwarded HTLC. Useful for generating an accurate +/// [`Event::PaymentForwarded`] if we need to claim this HTLC post-restart. +/// +/// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded +#[derive(Debug, Copy, Clone)] +pub(super) struct OutboundHop { + /// The amount forwarded outbound. + pub(super) amt_msat: u64, + /// The outbound channel this HTLC was forwarded over. + pub(super) channel_id: ChannelId, + /// The next-hop recipient of this HTLC. + pub(super) node_id: PublicKey, + /// The outbound channel's funding outpoint. + pub(super) funding_txo: OutPoint, + /// The outbound channel's user channel ID. + pub(super) user_channel_id: u128, +} + +impl_writeable_tlv_based!(OutboundHop, { + (0, amt_msat, required), + (2, channel_id, required), + (4, node_id, required), + (6, funding_txo, required), + (8, user_channel_id, required), +}); + /// A field of `InboundHTLCState::Committed` containing the HTLC's `update_add_htlc` message. 
If /// the HTLC is a forward and gets irrevocably committed to the outbound edge, we convert to /// `InboundUpdateAdd::Forwarded`, thus pruning the onion and not persisting it on every @@ -328,11 +354,7 @@ enum InboundUpdateAdd { phantom_shared_secret: Option<[u8; 32]>, trampoline_shared_secret: Option<[u8; 32]>, blinded_failure: Option, - /// Useful for generating an accurate [`Event::PaymentForwarded`], if we need to claim this - /// HTLC post-restart. - /// - /// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded - outbound_amt_msat: u64, + outbound_hop: OutboundHop, }, /// This HTLC was received pre-LDK 0.3, before we started persisting the onion for inbound /// committed HTLCs. @@ -346,7 +368,7 @@ impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, (2, Legacy) => {}, (4, Forwarded) => { (0, incoming_packet_shared_secret, required), - (2, outbound_amt_msat, required), + (2, outbound_hop, required), (4, phantom_shared_secret, option), (6, trampoline_shared_secret, option), (8, blinded_failure, option), @@ -7948,7 +7970,7 @@ where phantom_shared_secret, trampoline_shared_secret, blinded_failure, - outbound_amt_msat, + outbound_hop: OutboundHop { amt_msat, .. }, }, } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { @@ -7956,7 +7978,7 @@ where } // The reconstructed `HTLCPreviousHopData` is used to fail or claim the HTLC backwards // post-restart, if it is missing in the outbound edge. - let hop_data = HTLCPreviousHopData { + let prev_hop_data = HTLCPreviousHopData { prev_outbound_scid_alias, user_channel_id: Some(user_channel_id), htlc_id: htlc.htlc_id, @@ -7969,7 +7991,7 @@ where counterparty_node_id: Some(counterparty_node_id), cltv_expiry: Some(htlc.cltv_expiry), }; - Some((htlc.payment_hash, hop_data, *outbound_amt_msat)) + Some((htlc.payment_hash, prev_hop_data, *amt_msat)) }, _ => None, }) @@ -8019,17 +8041,18 @@ where /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to /// persist its onion. 
pub(super) fn prune_inbound_htlc_onion( - &mut self, htlc_id: u64, hop_data: &HTLCPreviousHopData, outbound_amt_msat: u64, + &mut self, htlc_id: u64, prev_hop_data: &HTLCPreviousHopData, + outbound_hop_data: OutboundHop, ) { for htlc in self.context.pending_inbound_htlcs.iter_mut() { if htlc.htlc_id == htlc_id { if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { *update_add_htlc = InboundUpdateAdd::Forwarded { - incoming_packet_shared_secret: hop_data.incoming_packet_shared_secret, - phantom_shared_secret: hop_data.phantom_shared_secret, - trampoline_shared_secret: hop_data.trampoline_shared_secret, - blinded_failure: hop_data.blinded_failure, - outbound_amt_msat, + incoming_packet_shared_secret: prev_hop_data.incoming_packet_shared_secret, + phantom_shared_secret: prev_hop_data.phantom_shared_secret, + trampoline_shared_secret: prev_hop_data.trampoline_shared_secret, + blinded_failure: prev_hop_data.blinded_failure, + outbound_hop: outbound_hop_data, }; return; } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ac2af352e34..b7b39698bb4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -59,9 +59,9 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; use crate::ln::channel::QuiescentAction; use crate::ln::channel::{ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, - FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel, - ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch, - WithChannelContext, + FundedChannel, FundingTxSigned, InboundV1Channel, OutboundHop, OutboundV1Channel, + PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, + UpdateFulfillCommitFetch, WithChannelContext, }; use crate::ln::channel_state::ChannelDetails; use crate::ln::funding::SpliceContribution; @@ -1402,6 +1402,8 @@ enum PostMonitorUpdateChanResume { Unblocked { channel_id: ChannelId, counterparty_node_id: PublicKey, + funding_txo: OutPoint, + user_channel_id: u128, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, htlc_forwards: Option, @@ -9582,8 +9584,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// Handles actions which need to complete after a [`ChannelMonitorUpdate`] has been applied /// which can happen after the per-peer state lock has been dropped. fn post_monitor_update_unlock( - &self, channel_id: ChannelId, counterparty_node_id: PublicKey, - unbroadcasted_batch_funding_txid: Option, + &self, channel_id: ChannelId, counterparty_node_id: PublicKey, funding_txo: OutPoint, + user_channel_id: u128, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, htlc_forwards: Option, decode_update_add_htlcs: Option<(u64, Vec)>, @@ -9660,7 +9662,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); } - self.prune_persisted_inbound_htlc_onions(committed_outbound_htlc_sources); + self.prune_persisted_inbound_htlc_onions( + channel_id, + counterparty_node_id, + funding_txo, + user_channel_id, + committed_outbound_htlc_sources, + ); } fn handle_monitor_update_completion_actions< @@ -10129,6 +10137,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ PostMonitorUpdateChanResume::Unblocked { channel_id: chan_id, counterparty_node_id, + funding_txo: chan.funding_outpoint(), + user_channel_id: chan.context.get_user_id(), unbroadcasted_batch_funding_txid, update_actions, htlc_forwards, @@ -10144,7 +10154,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// HTLC set on `ChannelManager` read. If an HTLC has been irrevocably forwarded to the outbound /// edge, we no longer need to persist the inbound edge's onion and can prune it here. fn prune_persisted_inbound_htlc_onions( - &self, committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, + &self, outbound_channel_id: ChannelId, outbound_node_id: PublicKey, + outbound_funding_txo: OutPoint, outbound_user_channel_id: u128, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, ) { let per_peer_state = self.per_peer_state.read().unwrap(); for (source, outbound_amt_msat) in committed_outbound_htlc_sources { @@ -10161,7 +10173,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(chan) = peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut()) { - chan.prune_inbound_htlc_onion(source.htlc_id, &source, outbound_amt_msat); + chan.prune_inbound_htlc_onion( + source.htlc_id, + &source, + OutboundHop { + amt_msat: outbound_amt_msat, + channel_id: outbound_channel_id, + node_id: outbound_node_id, + funding_txo: outbound_funding_txo, + user_channel_id: outbound_user_channel_id, + }, + ); } } } @@ -10217,6 +10239,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ PostMonitorUpdateChanResume::Unblocked { channel_id, counterparty_node_id, + funding_txo, + user_channel_id, unbroadcasted_batch_funding_txid, update_actions, htlc_forwards, @@ -10228,6 +10252,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.post_monitor_update_unlock( channel_id, counterparty_node_id, + funding_txo, + user_channel_id, unbroadcasted_batch_funding_txid, update_actions, htlc_forwards, From 48010cbadf6723b679c90e840de0de36d8415265 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 10 Feb 2026 16:33:14 -0500 Subject: [PATCH 238/242] Fix PaymentForwarded fields on restart claim Previously, we were spuriously using the upstream channel's info when we should've been using the downstream channel's. --- lightning/src/ln/channel.rs | 6 +++--- lightning/src/ln/channelmanager.rs | 25 ++++++++++++------------- lightning/src/ln/reload_tests.rs | 8 +------- 3 files changed, 16 insertions(+), 23 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 37a0661de76..4a0d1175b8e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7942,7 +7942,7 @@ where /// when reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. pub(super) fn inbound_forwarded_htlcs( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator + '_ { // We don't want to return an HTLC as needing processing if it already has a resolution that's // pending in the holding cell. let htlc_resolution_in_holding_cell = |id: u64| -> bool { @@ -7970,7 +7970,7 @@ where phantom_shared_secret, trampoline_shared_secret, blinded_failure, - outbound_hop: OutboundHop { amt_msat, .. 
}, + outbound_hop, }, } => { if htlc_resolution_in_holding_cell(htlc.htlc_id) { @@ -7991,7 +7991,7 @@ where counterparty_node_id: Some(counterparty_node_id), cltv_expiry: Some(htlc.cltv_expiry), }; - Some((htlc.payment_hash, prev_hop_data, *amt_msat)) + Some((htlc.payment_hash, prev_hop_data, *outbound_hop)) }, _ => None, }) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b7b39698bb4..897f10cf2f4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18610,11 +18610,11 @@ impl< // that it is handled. let mut already_forwarded_htlcs: HashMap< (ChannelId, PaymentHash), - Vec<(HTLCPreviousHopData, u64)>, + Vec<(HTLCPreviousHopData, OutboundHop)>, > = new_hash_map(); let prune_forwarded_htlc = |already_forwarded_htlcs: &mut HashMap< (ChannelId, PaymentHash), - Vec<(HTLCPreviousHopData, u64)>, + Vec<(HTLCPreviousHopData, OutboundHop)>, >, prev_hop: &HTLCPreviousHopData, payment_hash: &PaymentHash| { @@ -18663,13 +18663,13 @@ impl< .or_insert_with(Vec::new) .push(update_add_htlc); } - for (payment_hash, prev_hop, outbound_amt_msat) in + for (payment_hash, prev_hop, next_hop) in funded_chan.inbound_forwarded_htlcs() { already_forwarded_htlcs .entry((prev_hop.channel_id, payment_hash)) .or_insert_with(Vec::new) - .push((prev_hop, outbound_amt_msat)); + .push((prev_hop, next_hop)); } } } @@ -19378,34 +19378,33 @@ impl< if let Some(forwarded_htlcs) = already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) { - for (prev_hop, outbound_amt_msat) in forwarded_htlcs { + for (prev_hop, next_hop) in forwarded_htlcs { let new_pending_claim = !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _, _)| { matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) }); if new_pending_claim { - let counterparty_node_id = monitor.get_counterparty_node_id(); let is_downstream_closed = channel_manager .per_peer_state .read() .unwrap() - .get(&counterparty_node_id) + .get(&next_hop.node_id) .map_or(true, |peer_state_mtx| { !peer_state_mtx .lock() .unwrap() .channel_by_id - .contains_key(channel_id) + .contains_key(&next_hop.channel_id) }); pending_claims_to_replay.push(( HTLCSource::PreviousHopData(prev_hop), payment_preimage, - outbound_amt_msat, + next_hop.amt_msat, is_downstream_closed, - counterparty_node_id, - monitor.get_funding_txo(), - *channel_id, - None, + next_hop.node_id, + next_hop.funding_txo, + next_hop.channel_id, + Some(next_hop.user_channel_id), )); } } diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index c7e7175602d..42986bc41b1 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1958,14 +1958,8 @@ fn test_reload_node_with_preimage_in_monitor_claims_htlc() { ); // When the claim is reconstructed during reload, a PaymentForwarded event is generated. - // This event has next_user_channel_id as None since the outbound HTLC was already removed. // Fetching events triggers the pending monitor update (adding preimage) to be applied. - let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match &events[0] { - Event::PaymentForwarded { total_fee_earned_msat: Some(1000), .. } => {}, - _ => panic!("Expected PaymentForwarded event"), - } + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors(&nodes[1], 1); // Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell. 
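To make the fix above concrete: when a forward-claim is replayed from the inbound edge's monitor, that monitor's own identifiers describe the upstream channel, while `PaymentForwarded` should describe the downstream hop the payment was actually forwarded over. A hedged sketch with simplified stand-in types (not LDK's internals):

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct Hop {
        node_id: u8,    // stand-in for a PublicKey
        channel_id: u8, // stand-in for a ChannelId
        amt_msat: u64,
    }

    struct ReplayedForward {
        prev_hop: Hop, // inbound edge, where the monitor with the preimage lives
        next_hop: Hop, // outbound edge, persisted as `OutboundHop` in the prior patch
    }

    // The event must name the downstream edge; reporting the monitor's
    // (upstream) identifiers here was the bug this patch removes.
    fn forwarded_event_hop(f: &ReplayedForward) -> Hop {
        f.next_hop
    }

    fn main() {
        let f = ReplayedForward {
            prev_hop: Hop { node_id: 0, channel_id: 1, amt_msat: 150_000 },
            next_hop: Hop { node_id: 2, channel_id: 3, amt_msat: 149_000 },
        };
        assert_eq!(forwarded_event_hop(&f).channel_id, 3);
    }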
From 0d6dcc910558c558e763c02c17d609dc0410d835 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 10 Feb 2026 16:51:46 -0500 Subject: [PATCH 239/242] Fix missing user_channel_id in PaymentForwarded Previously, if a forwarding node reloaded mid-HTLC-forward with a preimage in the outbound edge monitor and the outbound edge channel still open, and subsequently reclaimed the inbound HTLC backwards, the PaymentForwarded event would be missing the next_user_channel_id field. --- lightning/src/ln/chanmon_update_fail_tests.rs | 7 ++++++- lightning/src/ln/channelmanager.rs | 12 +++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 5a0c37bd61d..e5f6b7259ff 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3938,7 +3938,12 @@ fn do_test_durable_preimages_on_closed_channel( let evs = nodes[1].node.get_and_clear_pending_events(); assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 }); for ev in evs { - if let Event::PaymentForwarded { .. } = ev { + if let Event::PaymentForwarded { claim_from_onchain_tx, next_user_channel_id, .. } = ev { + if !claim_from_onchain_tx { + // If the outbound channel is still open, the `next_user_channel_id` should be available. + // This was previously broken. + assert!(next_user_channel_id.is_some()) + } } else { panic!(); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 897f10cf2f4..40342d72700 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -18707,14 +18707,16 @@ impl< } } for (channel_id, monitor) in args.channel_monitors.iter() { - let mut is_channel_closed = true; + let (mut is_channel_closed, mut user_channel_id_opt) = (true, None); let counterparty_node_id = monitor.get_counterparty_node_id(); if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; - is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if reconstruct_manager_from_monitors && !is_channel_closed { - if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + is_channel_closed = false; + user_channel_id_opt = Some(chan.context().get_user_id()); + + if reconstruct_manager_from_monitors { if let Some(funded_chan) = chan.as_funded() { for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() { @@ -19014,7 +19016,7 @@ impl< Some((htlc_source, payment_preimage, htlc.amount_msat, is_channel_closed, monitor.get_counterparty_node_id(), - monitor.get_funding_txo(), monitor.channel_id(), None)) + monitor.get_funding_txo(), monitor.channel_id(), user_channel_id_opt)) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage From 5daf51c00764572f1cecd38c6f9dd16f69f172db Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 11 Feb 2026 16:14:19 -0500 Subject: [PATCH 240/242] Test restart-claim of two MPP holding cell HTLCs Test that if, prior to shutdown, two inbound MPP-part HTLCs received over the same channel were sitting in the holding cell, and the holding cell was then lost before restart, those HTLCs will still be claimed backwards.
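For intuition on why the payment must shard into exactly two HTLCs, here is a small sketch under the caps the test itself assumes: each 1M-sat outbound channel allows at most 100M msat in flight (per the test's own comments), so 150M msat cannot ride a single path, and both shards then share the one inbound channel. Constants are illustrative.

    // Assumed per-channel in-flight cap, mirroring the test's comment that a
    // 1M-sat channel allows 100M msat in flight.
    const MAX_IN_FLIGHT_MSAT: u64 = 100_000_000;

    // Greedy two-way split: fill the first channel, overflow into the second.
    fn split_two_ways(amt_msat: u64) -> Option<(u64, u64)> {
        if amt_msat <= MAX_IN_FLIGHT_MSAT {
            return Some((amt_msat, 0));
        }
        let overflow = amt_msat - MAX_IN_FLIGHT_MSAT;
        (overflow <= MAX_IN_FLIGHT_MSAT).then_some((MAX_IN_FLIGHT_MSAT, overflow))
    }

    fn main() {
        // 150M msat forces two MPP shards, as the test below relies on.
        assert_eq!(split_two_ways(150_000_000), Some((100_000_000, 50_000_000)));
    }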
Test largely written by Claude --- lightning/src/ln/chanmon_update_fail_tests.rs | 3 +- lightning/src/ln/functional_test_utils.rs | 22 ++- lightning/src/ln/reload_tests.rs | 145 ++++++++++++++++++ 3 files changed, 161 insertions(+), 9 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index e5f6b7259ff..b421114e911 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3519,8 +3519,9 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode .node .handle_commitment_signed_batch_test(node_a_id, &as_htlc_fulfill.commitment_signed); check_added_monitors(&nodes[1], 1); - let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + let (a, raa, holding_cell) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); assert!(a.is_none()); + assert!(holding_cell.is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index d5a29785a94..d3902b26201 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2672,20 +2672,23 @@ pub fn commitment_signed_dance_through_cp_raa( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, includes_claim: bool, ) -> Option { - let (extra_msg_option, bs_revoke_and_ack) = + let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) = do_main_commitment_signed_dance(node_a, node_b, fail_backwards); + assert!(node_b_holding_cell_htlcs.is_empty()); node_a.node.handle_revoke_and_ack(node_b.node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors(node_a, if includes_claim { 0 } else { 1 }); extra_msg_option } /// Does the main logic in the commitment_signed dance. After the first `commitment_signed` has -/// been delivered, this method picks up and delivers the response `revoke_and_ack` and -/// `commitment_signed`, returning the recipient's `revoke_and_ack` and any extra message it may -/// have included. +/// been delivered, delivers the response `revoke_and_ack` and `commitment_signed`, and returns: +/// - The recipient's `revoke_and_ack` +/// - The recipient's extra message (if any) after handling the commitment_signed +/// - Any messages released from the initiator's holding cell after handling the `revoke_and_ack` +/// (e.g., a second HTLC on the same channel) pub fn do_main_commitment_signed_dance( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, -) -> (Option, msgs::RevokeAndACK) { +) -> (Option, msgs::RevokeAndACK, Vec) { let node_a_id = node_a.node.get_our_node_id(); let node_b_id = node_b.node.get_our_node_id(); @@ -2693,7 +2696,9 @@ pub fn do_main_commitment_signed_dance( check_added_monitors(&node_b, 0); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); - assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); + // Handling the RAA may release HTLCs from node_b's holding cell (e.g., if multiple HTLCs + // were sent over the same channel and the second was queued behind the first). 
+ let node_b_holding_cell_htlcs = node_b.node.get_and_clear_pending_msg_events(); check_added_monitors(&node_b, 1); node_b.node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); let (bs_revoke_and_ack, extra_msg_option) = { @@ -2716,7 +2721,7 @@ pub fn do_main_commitment_signed_dance( assert!(node_a.node.get_and_clear_pending_events().is_empty()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); } - (extra_msg_option, bs_revoke_and_ack) + (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) } /// Runs the commitment_signed dance by delivering the commitment_signed and handling the @@ -2733,9 +2738,10 @@ pub fn commitment_signed_dance_return_raa( .node .handle_commitment_signed_batch_test(node_b.node.get_our_node_id(), commitment_signed); check_added_monitors(&node_a, 1); - let (extra_msg_option, bs_revoke_and_ack) = + let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) = do_main_commitment_signed_dance(&node_a, &node_b, fail_backwards); assert!(extra_msg_option.is_none()); + assert!(node_b_holding_cell_htlcs.is_empty()); bs_revoke_and_ack } diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 42986bc41b1..d1e34cb7c71 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -2082,3 +2082,148 @@ fn test_reload_node_without_preimage_fails_htlc() { // nodes[0] should now have received the failure and generate PaymentFailed. expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); } + +#[test] +fn test_reload_with_mpp_claims_on_same_channel() { + // Test that if a forwarding node has two HTLCs for the same MPP payment that were both + // irrevocably removed on the outbound edge via claim but are still forwarded-and-unresolved + // on the inbound edge, both HTLCs will be claimed backwards on restart. + // + // Topology: + // nodes[0] ----chan_0_1----> nodes[1] ----chan_1_2_a----> nodes[2] + // \----chan_1_2_b---/ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 2_000_000, 0); + let chan_1_2_a = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + let chan_1_2_b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2_a = chan_1_2_a.2; + let chan_id_1_2_b = chan_1_2_b.2; + + // Send an MPP payment large enough that the router must split it across both outbound channels. + // Each 1M sat outbound channel has 100M msat max in-flight, so 150M msat requires splitting. 
+ let amt_msat = 150_000_000; + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + + let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); + nodes[0].node.send_payment_with_route( + route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id, + ).unwrap(); + check_added_monitors(&nodes[0], 1); + + // Forward the first HTLC nodes[0] -> nodes[1] -> nodes[2]. Note that the second HTLC is released + // from the holding cell during the first HTLC's commitment_signed_dance. + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event_1 = SendEvent::from_event(events.remove(0)); + + nodes[1].node.handle_update_add_htlc(node_0_id, &payment_event_1.msgs[0]); + check_added_monitors(&nodes[1], 0); + nodes[1].node.handle_commitment_signed_batch_test(node_0_id, &payment_event_1.commitment_msg); + check_added_monitors(&nodes[1], 1); + let (_, raa, holding_cell_htlcs) = + do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + assert_eq!(holding_cell_htlcs.len(), 1); + let payment_event_2 = holding_cell_htlcs.into_iter().next().unwrap(); + nodes[1].node.handle_revoke_and_ack(node_0_id, &raa); + check_added_monitors(&nodes[1], 1); + + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 1); + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev_1_2 = events.remove(0); + pass_along_path( + &nodes[1], &[&nodes[2]], amt_msat, payment_hash, Some(payment_secret), ev_1_2, false, None, + ); + + // Second HTLC: full path nodes[0] -> nodes[1] -> nodes[2]. PaymentClaimable expected at end. + pass_along_path( + &nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret), + payment_event_2, true, None, + ); + + // Claim the HTLCs such that they're fully removed from the outbound edge, but disconnect + // node_0<>node_1 so that they can't be claimed backwards by node_1. + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[2], 2); + expect_payment_claimed!(nodes[2], payment_hash, amt_msat); + + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + let mut events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + for ev in events { + match ev { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates, .. } => { + assert_eq!(*node_id, node_1_id); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates.update_fulfill_htlcs[0].clone()); + check_added_monitors(&nodes[1], 1); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); + }, + _ => panic!("Unexpected event"), + } + } + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + for event in events { + expect_payment_forwarded( + event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false, + ); + } + + // Clear the holding cell's claim entries on chan_0_1 before serialization. + // This simulates a crash where both HTLCs were fully removed on the outbound edges but are + // still present on the inbound edge without a resolution. 
+ nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_a_serialized = get_monitor!(nodes[1], chan_id_1_2_a).encode(); + let mon_1_2_b_serialized = get_monitor!(nodes[1], chan_id_1_2_b).encode(); + + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_a_serialized, &mon_1_2_b_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized, + Some(true) + ); + + // When the claims are reconstructed during reload, PaymentForwarded events are regenerated. + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + for event in events { + expect_payment_forwarded( + event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false, + ); + } + // Fetching events triggers the pending monitor updates (one for each HTLC preimage) to be applied. + check_added_monitors(&nodes[1], 2); + + // Reconnect nodes[1] to nodes[0]. Both claims should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_claims = (0, 2); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received both fulfills and generate PaymentSent. + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} From ab0ba65923158791a9afa196e275c4a9d375a673 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 12 Feb 2026 15:30:16 -0500 Subject: [PATCH 241/242] Update RECONSTRUCT_HTLCS_FROM_CHANS_VERSION 5 -> 2 We previously had 5 due to wanting some flexibility to bump versions in between, but eventually concluded that wasn't necessary. --- lightning/src/ln/channelmanager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 40342d72700..5a4f569d879 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -16616,7 +16616,7 @@ const MIN_SERIALIZATION_VERSION: u8 = 1; // // If 0.3 or 0.4 reads this manager version, it knows that the legacy maps were not written and // acts accordingly. -const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 5; +const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 2; impl_writeable_tlv_based!(PhantomRouteHints, { (2, channels, required_vec), From d79566b599f3b0d7e03eeff56b14d523f08cd75c Mon Sep 17 00:00:00 2001 From: Samarth Goyal Date: Fri, 13 Feb 2026 23:12:18 +0530 Subject: [PATCH 242/242] channelmanager: improve block connection logging for easier debugging. Fixes #2348 --- lightning/src/ln/channelmanager.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 149d691404b..7d97f424181 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -14951,6 +14951,9 @@ impl< "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height"); } + log_info!(self.logger, "Block {} at height {} connected with {} relevant transactions", + header.block_hash(), height, txdata.len()); + self.transactions_confirmed(header, txdata, height); self.best_block_updated(header, height); } @@ -15000,7 +15003,16 @@ impl< // See the docs for `ChannelManagerReadArgs` for more. 
let block_hash = header.block_hash(); - log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height); + log_info!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height); + + // Log individual txids when the list is small enough to be useful for debugging + // block sync issues. Cap at 10 to avoid flooding the logs. + if !txdata.is_empty() && txdata.len() <= 10 { + for (_, tx) in txdata.iter() { + log_debug!(self.logger, " Confirmed txid {} in block {} at height {}", + tx.compute_txid(), block_hash, height); + } + } let _persistence_guard = PersistenceNotifierGuard::optionally_notify_skipping_background_events( @@ -15032,7 +15044,7 @@ impl< // See the docs for `ChannelManagerReadArgs` for more. let block_hash = header.block_hash(); - log_trace!(self.logger, "New best block: {} at height {}", block_hash, height); + log_info!(self.logger, "New best block: {} at height {}", block_hash, height); let _persistence_guard = PersistenceNotifierGuard::optionally_notify_skipping_background_events(