diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3c50b2a0041..c0593d43def 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -30,8 +30,14 @@ jobs: strategy: fail-fast: false matrix: - platform: [ self-hosted, windows-latest, macos-latest ] - toolchain: [ stable, beta, 1.75.0 ] # 1.75.0 is the MSRV for all crates + platform: >- + ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' + && fromJSON('["self-hosted","windows-latest","macos-latest"]') + || fromJSON('["self-hosted"]') }} + toolchain: >- + ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' + && fromJSON('["stable","beta","1.75.0"]') + || fromJSON('["1.75.0"]') }} exclude: - platform: windows-latest toolchain: 1.75.0 @@ -62,52 +68,35 @@ jobs: - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == '1.75.0'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - - name: Run CI script - shell: bash # Default on Winblows is powershell - run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh - - build-tx-sync: - strategy: - fail-fast: false - matrix: - platform: [ ubuntu-latest, macos-latest ] - toolchain: [ stable, beta, 1.75.0 ] - runs-on: ${{ matrix.platform }} - steps: - - name: Checkout source code - uses: actions/checkout@v4 - - name: Install Rust ${{ matrix.toolchain }} toolchain - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ matrix.toolchain }} - - name: Set RUSTFLAGS to deny warnings - if: "matrix.toolchain == '1.75.0'" - run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - name: Enable caching for bitcoind + if: matrix.platform != 'windows-latest' id: cache-bitcoind uses: actions/cache@v4 with: path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} key: bitcoind-${{ runner.os }}-${{ runner.arch }} - name: Enable caching for electrs + if: matrix.platform != 'windows-latest' id: cache-electrs uses: actions/cache@v4 with: path: 
bin/electrs-${{ runner.os }}-${{ runner.arch }} key: electrs-${{ runner.os }}-${{ runner.arch }} - name: Download bitcoind/electrs - if: "steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true'" + if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | source ./contrib/download_bitcoind_electrs.sh mkdir bin mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} - name: Set bitcoind/electrs environment variables + if: matrix.platform != 'windows-latest' run: | echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" - name: Run CI script shell: bash # Default on Winblows is powershell - run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tx-sync-tests.sh + run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh coverage: needs: fuzz @@ -337,3 +326,19 @@ jobs: run: cargo fmt --check - name: Run rustfmt checks on lightning-tests run: cd lightning-tests && cargo fmt --check + tor-connect: + runs-on: ubuntu-latest + env: + TOOLCHAIN: 1.75.0 + steps: + - name: Checkout source code + uses: actions/checkout@v4 + - name: Install tor + run: | + sudo apt install -y tor + - name: Install Rust ${{ env.TOOLCHAIN }} toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }} + - name: Test tor connections using lightning-net-tokio + run: | + TOR_PROXY="127.0.0.1:9050" RUSTFLAGS="--cfg=tor" cargo test --verbose --color always -p lightning-net-tokio diff --git a/.github/workflows/ldk-node-integration.yml b/.github/workflows/ldk-node-integration.yml index 136a60bd98a..446abd40a07 100644 --- a/.github/workflows/ldk-node-integration.yml +++ 
b/.github/workflows/ldk-node-integration.yml @@ -39,6 +39,19 @@ jobs: lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } lightning-macros = { path = "../rust-lightning/lightning-macros" } + + [patch."https://github.com/lightningdevkit/rust-lightning"] + lightning = { path = "../rust-lightning/lightning" } + lightning-types = { path = "../rust-lightning/lightning-types" } + lightning-invoice = { path = "../rust-lightning/lightning-invoice" } + lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } + lightning-persister = { path = "../rust-lightning/lightning-persister" } + lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } + lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } + lightning-block-sync = { path = "../rust-lightning/lightning-block-sync" } + lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } + lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } + lightning-macros = { path = "../rust-lightning/lightning-macros" } EOF cargo check cargo check --features uniffi diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml index 03017e19320..de10e562f98 100644 --- a/.github/workflows/semver.yml +++ b/.github/workflows/semver.yml @@ -17,40 +17,9 @@ jobs: run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable rustup override set stable - - name: Check SemVer with default features - uses: obi1kenobi/cargo-semver-checks-action@v2 - with: - feature-group: default-features - - name: Check SemVer *without* default features - uses: obi1kenobi/cargo-semver-checks-action@v2 - with: - feature-group: only-explicit-features - - name: Check lightning-background-processor SemVer - uses: obi1kenobi/cargo-semver-checks-action@v2 - with: 
- package: lightning-background-processor - feature-group: only-explicit-features - - name: Check lightning-block-sync SemVer - uses: obi1kenobi/cargo-semver-checks-action@v2 - with: - package: lightning-block-sync - feature-group: only-explicit-features - features: rpc-client,rest-client - - name: Check lightning-transaction-sync electrum SemVer - uses: obi1kenobi/cargo-semver-checks-action@v2 - with: - manifest-path: lightning-transaction-sync/Cargo.toml - feature-group: only-explicit-features - features: electrum - - name: Check lightning-transaction-sync esplora-blocking SemVer - uses: obi1kenobi/cargo-semver-checks-action@v2 - with: - manifest-path: lightning-transaction-sync/Cargo.toml - feature-group: only-explicit-features - features: esplora-blocking - - name: Check lightning-transaction-sync esplora-async SemVer - uses: obi1kenobi/cargo-semver-checks-action@v2 - with: - manifest-path: lightning-transaction-sync/Cargo.toml - feature-group: only-explicit-features - features: esplora-async + - name: Install SemVer Checker + run: cargo install cargo-semver-checks --locked + - name: Check SemVer with all features + run: cargo semver-checks + - name: Check SemVer without any non-default features + run: cargo semver-checks --only-explicit-features diff --git a/.gitignore b/.gitignore index ed10eb14387..56e94616eeb 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ lightning-dns-resolver/target ext-functional-test-demo/target no-std-check/target msrv-no-dev-deps-check/target +lightning-tests/target diff --git a/CHANGELOG.md b/CHANGELOG.md index a51c5fda8bd..6e83ef2a14d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,29 @@ +# 0.2.1 - Jan 29, 2026 - "Electrum Confirmations Logged" + +## API Updates + * The `AttributionData` struct is now public, correcting an issue where it was + accidentally sealed preventing construction of some messages (#4268). 
+ * The async background processor now exits even if work remains to be done as + soon as the sleeper returns the exit flag (#4259). + +## Bug Fixes + * The presence of unconfirmed transactions no longer causes + `ElectrumSyncClient` to spuriously fail to sync (#4341). + * `ChannelManager::splice_channel` now properly fails immediately if the + peer does not support splicing (#4262, #4274). + * A spurious debug assertion was removed which could fail in cases where an + HTLC fails to be forwarded after being accepted (#4312). + * Many log calls related to outbound payments were corrected to include a + `payment_hash` field (#4342). + + +# 0.1.9 - Jan 26, 2026 - "Electrum Confirmations" + +## Bug Fixes + * The presence of unconfirmed transactions no longer causes + `ElectrumSyncClient` to spuriously fail to sync (#4341). + + # 0.2 - Dec 2, 2025 - "Natively Asynchronous Splicing" ## API Updates diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000000..f87bc665bd4 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,21 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +See [CONTRIBUTING.md](CONTRIBUTING.md) for build commands, testing, code style, and development workflow. + +## Workspace Structure + +See [README.md](README.md) for the workspace layout and [ARCH.md](ARCH.md) for some additional remarks regarding important parts of LDK's architecture. + +## Development Rules + +- Always ensure tests pass before committing. To this end, you should run + `cargo +1.75.0 test` for all affected crates and/or features. Upon completion + of the full task you might prompt the user whether they want you to run the + full CI tests via `./ci/ci-tests.sh`. Note however that this script will run + for a very long time, so please don't timeout when you do. 
+- Run `cargo +1.75.0 fmt --all` after every code change +- Never add new dependencies unless explicitly requested +- Please always disclose the use of any AI tools in commit messages and PR descriptions using a `Co-Authored-By:` line. +- When adding new `.rs` files, please ensure to always add the licensing header as found, e.g., in `lightning/src/lib.rs` and other files. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1bc431a3110..ad25fb10558 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -176,6 +176,28 @@ Fuzzing is heavily encouraged: you will find all related material under `fuzz/` Mutation testing is work-in-progress; any contribution there would be warmly welcomed. +### Environment Variables + +* `LDK_TEST_CONNECT_STYLE` - Override the random block connect style used in tests for deterministic runs. Valid values: + * `BEST_BLOCK_FIRST` + * `BEST_BLOCK_FIRST_SKIPPING_BLOCKS` + * `BEST_BLOCK_FIRST_REORGS_ONLY_TIP` + * `TRANSACTIONS_FIRST` + * `TRANSACTIONS_FIRST_SKIPPING_BLOCKS` + * `TRANSACTIONS_DUPLICATIVELY_FIRST_SKIPPING_BLOCKS` + * `HIGHLY_REDUNDANT_TRANSACTIONS_FIRST_SKIPPING_BLOCKS` + * `TRANSACTIONS_FIRST_REORGS_ONLY_TIP` + * `FULL_BLOCK_VIA_LISTEN` + * `FULL_BLOCK_DISCONNECTIONS_SKIPPING_VIA_LISTEN` + +* `LDK_TEST_DETERMINISTIC_HASHES` - When set to `1`, uses deterministic hash map iteration order in tests. This ensures consistent test output across runs, useful for comparing logs before and after changes. + +* `LDK_TEST_REBUILD_MGR_FROM_MONITORS` - If set to `1`, on test node reload the `ChannelManager`'s + HTLC set will be reconstructed from `Channel{Monitor}` persisted data. If `0`, test nodes will be + reloaded from persisted `ChannelManager` data using legacy code paths. This ensures consistent + test output across runs, useful for comparing logs before and after changes, since otherwise the + selection of which codepaths to be used on reload will be chosen randomly. 
+ C/C++ Bindings -------------- diff --git a/Cargo.toml b/Cargo.toml index f9f7406339e..1eb7b572d8b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,11 +16,11 @@ members = [ "lightning-macros", "lightning-dns-resolver", "lightning-liquidity", + "lightning-transaction-sync", "possiblyrandom", ] exclude = [ - "lightning-transaction-sync", "lightning-tests", "ext-functional-test-demo", "no-std-check", @@ -67,4 +67,5 @@ check-cfg = [ "cfg(require_route_graph_test)", "cfg(simple_close)", "cfg(peer_storage)", + "cfg(tor)", ] diff --git a/ci/check-lint.sh b/ci/check-lint.sh index 39c10692310..c0724267bf8 100755 --- a/ci/check-lint.sh +++ b/ci/check-lint.sh @@ -13,6 +13,7 @@ CLIPPY() { -A clippy::unwrap-or-default \ -A clippy::upper_case_acronyms \ -A clippy::swap-with-temporary \ + -A clippy::assertions-on-constants \ `# Things where we do odd stuff on purpose ` \ -A clippy::unusual_byte_groupings \ -A clippy::unit_arg \ @@ -107,7 +108,8 @@ CLIPPY() { -A clippy::useless_conversion \ -A clippy::manual_repeat_n `# to be removed once we hit MSRV 1.86` \ -A clippy::manual_is_multiple_of `# to be removed once we hit MSRV 1.87` \ - -A clippy::uninlined-format-args + -A clippy::uninlined-format-args \ + -A clippy::manual-async-fn # Not really sure why this is even a warning when there's a Send bound } CLIPPY diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 91ead9903cb..83b2af277f5 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -2,7 +2,6 @@ #shellcheck disable=SC2002,SC2207 set -eox pipefail -# Currently unused as we don't have to pin anything for MSRV: RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }') # Some crates require pinning to meet our MSRV even for our downstream users, @@ -15,116 +14,133 @@ function PIN_RELEASE_DEPS { PIN_RELEASE_DEPS # pin the release dependencies in our main workspace # The backtrace v0.3.75 crate relies on rustc 1.82 -[ "$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p backtrace --precise "0.3.74" --verbose +[ 
"$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p backtrace --precise "0.3.74" --quiet -# proptest 1.9.0 requires rustc 1.82.0 -[ "$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p proptest --precise "1.8.0" --verbose +# Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0. +[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --quiet export RUST_BACKTRACE=1 echo -e "\n\nChecking the workspace, except lightning-transaction-sync." -cargo check --verbose --color always +cargo check --quiet --color always WORKSPACE_MEMBERS=( $(cat Cargo.toml | tr '\n' '\r' | sed 's/\r //g' | tr '\r' '\n' | grep '^members =' | sed 's/members.*=.*\[//' | tr -d '"' | tr ',' ' ') ) echo -e "\n\nTesting the workspace, except lightning-transaction-sync." -cargo test --verbose --color always +cargo test --quiet --color always echo -e "\n\nTesting upgrade from prior versions of LDK" pushd lightning-tests -cargo test +cargo test --quiet popd echo -e "\n\nChecking and building docs for all workspace members individually..." 
for DIR in "${WORKSPACE_MEMBERS[@]}"; do - cargo check -p "$DIR" --verbose --color always - cargo doc -p "$DIR" --document-private-items + cargo check -p "$DIR" --quiet --color always + cargo doc -p "$DIR" --quiet --document-private-items done echo -e "\n\nChecking and testing lightning with features" -cargo test -p lightning --verbose --color always --features dnssec -cargo check -p lightning --verbose --color always --features dnssec -cargo doc -p lightning --document-private-items --features dnssec +cargo test -p lightning --quiet --color always --features dnssec +cargo check -p lightning --quiet --color always --features dnssec +cargo doc -p lightning --quiet --document-private-items --features dnssec echo -e "\n\nChecking and testing Block Sync Clients with features" -cargo test -p lightning-block-sync --verbose --color always --features rest-client -cargo check -p lightning-block-sync --verbose --color always --features rest-client -cargo test -p lightning-block-sync --verbose --color always --features rpc-client -cargo check -p lightning-block-sync --verbose --color always --features rpc-client -cargo test -p lightning-block-sync --verbose --color always --features rpc-client,rest-client -cargo check -p lightning-block-sync --verbose --color always --features rpc-client,rest-client -cargo test -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio -cargo check -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio +cargo test -p lightning-block-sync --quiet --color always --features rest-client +cargo check -p lightning-block-sync --quiet --color always --features rest-client +cargo test -p lightning-block-sync --quiet --color always --features rpc-client +cargo check -p lightning-block-sync --quiet --color always --features rpc-client +cargo test -p lightning-block-sync --quiet --color always --features rpc-client,rest-client +cargo check -p lightning-block-sync --quiet --color always 
--features rpc-client,rest-client +cargo test -p lightning-block-sync --quiet --color always --features rpc-client,rest-client,tokio +cargo check -p lightning-block-sync --quiet --color always --features rpc-client,rest-client,tokio + +echo -e "\n\nChecking Transaction Sync Clients with features." +cargo check -p lightning-transaction-sync --quiet --color always --features esplora-blocking +cargo check -p lightning-transaction-sync --quiet --color always --features esplora-async +cargo check -p lightning-transaction-sync --quiet --color always --features esplora-async-https +cargo check -p lightning-transaction-sync --quiet --color always --features electrum + +if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then + echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." + cargo check -p lightning-transaction-sync --tests +else + echo -e "\n\nTesting Transaction Sync Clients with features." + cargo test -p lightning-transaction-sync --quiet --color always --features esplora-blocking + cargo test -p lightning-transaction-sync --quiet --color always --features esplora-async + cargo test -p lightning-transaction-sync --quiet --color always --features esplora-async-https + cargo test -p lightning-transaction-sync --quiet --color always --features electrum +fi echo -e "\n\nChecking and testing lightning-persister with features" -cargo test -p lightning-persister --verbose --color always --features tokio -cargo check -p lightning-persister --verbose --color always --features tokio -cargo doc -p lightning-persister --document-private-items --features tokio +cargo test -p lightning-persister --quiet --color always --features tokio +cargo check -p lightning-persister --quiet --color always --features tokio +cargo doc -p lightning-persister --quiet --document-private-items --features tokio echo -e "\n\nTest Custom Message Macros" -cargo test -p lightning-custom-message --verbose --color always +cargo test 
-p lightning-custom-message --quiet --color always [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean echo -e "\n\nTest backtrace-debug builds" -cargo test -p lightning --verbose --color always --features backtrace +cargo test -p lightning --quiet --color always --features backtrace echo -e "\n\nTesting no_std builds" for DIR in lightning-invoice lightning-rapid-gossip-sync lightning-liquidity; do - cargo test -p $DIR --verbose --color always --no-default-features + cargo test -p $DIR --quiet --color always --no-default-features done -cargo test -p lightning --verbose --color always --no-default-features -cargo test -p lightning-background-processor --verbose --color always --no-default-features +cargo test -p lightning --quiet --color always --no-default-features +cargo test -p lightning-background-processor --quiet --color always --no-default-features echo -e "\n\nTesting c_bindings builds" # Note that because `$RUSTFLAGS` is not passed through to doctest builds we cannot selectively # disable doctests in `c_bindings` so we skip doctests entirely here. -RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test --verbose --color always --lib --bins --tests +RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test --quiet --color always --lib --bins --tests for DIR in lightning-invoice lightning-rapid-gossip-sync; do # check if there is a conflict between no_std and the c_bindings cfg - RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p $DIR --verbose --color always --no-default-features + RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p $DIR --quiet --color always --no-default-features done # Note that because `$RUSTFLAGS` is not passed through to doctest builds we cannot selectively # disable doctests in `c_bindings` so we skip doctests entirely here. 
-RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p lightning-background-processor --verbose --color always --no-default-features --lib --bins --tests -RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p lightning --verbose --color always --no-default-features --lib --bins --tests +RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p lightning-background-processor --quiet --color always --no-default-features --lib --bins --tests +RUSTFLAGS="$RUSTFLAGS --cfg=c_bindings" cargo test -p lightning --quiet --color always --no-default-features --lib --bins --tests echo -e "\n\nTesting other crate-specific builds" # Note that outbound_commitment_test only runs in this mode because of hardcoded signature values -RUSTFLAGS="$RUSTFLAGS --cfg=ldk_test_vectors" cargo test -p lightning --verbose --color always --no-default-features --features=std +RUSTFLAGS="$RUSTFLAGS --cfg=ldk_test_vectors" cargo test -p lightning --quiet --color always --no-default-features --features=std # This one only works for lightning-invoice # check that compile with no_std and serde works in lightning-invoice -cargo test -p lightning-invoice --verbose --color always --no-default-features --features serde +cargo test -p lightning-invoice --quiet --color always --no-default-features --features serde echo -e "\n\nTesting no_std build on a downstream no-std crate" # check no-std compatibility across dependencies pushd no-std-check -cargo check --verbose --color always +cargo check --quiet --color always [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean popd # Test that we can build downstream code with only the "release pins". 
pushd msrv-no-dev-deps-check PIN_RELEASE_DEPS -cargo check +cargo check --quiet [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean popd if [ -f "$(which arm-none-eabi-gcc)" ]; then pushd no-std-check - cargo build --target=thumbv7m-none-eabi + cargo build --quiet --target=thumbv7m-none-eabi [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean popd fi echo -e "\n\nTest cfg-flag builds" -RUSTFLAGS="--cfg=taproot" cargo test --verbose --color always -p lightning +RUSTFLAGS="--cfg=taproot" cargo test --quiet --color always -p lightning [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean -RUSTFLAGS="--cfg=simple_close" cargo test --verbose --color always -p lightning +RUSTFLAGS="--cfg=simple_close" cargo test --quiet --color always -p lightning [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean -RUSTFLAGS="--cfg=lsps1_service" cargo test --verbose --color always -p lightning-liquidity +RUSTFLAGS="--cfg=lsps1_service" cargo test --quiet --color always -p lightning-liquidity [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean -RUSTFLAGS="--cfg=peer_storage" cargo test --verbose --color always -p lightning +RUSTFLAGS="--cfg=peer_storage" cargo test --quiet --color always -p lightning diff --git a/ci/ci-tx-sync-tests.sh b/ci/ci-tx-sync-tests.sh deleted file mode 100755 index 0839e2ced3d..00000000000 --- a/ci/ci-tx-sync-tests.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -set -eox pipefail - -RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }') - -pushd lightning-transaction-sync - -# Some crates require pinning to meet our MSRV even for our downstream users, -# which we do here. -# Further crates which appear only as dev-dependencies are pinned further down. -function PIN_RELEASE_DEPS { - return 0 # Don't fail the script if our rustc is higher than the last check -} - -PIN_RELEASE_DEPS # pin the release dependencies - -# Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0. 
-[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --verbose - -export RUST_BACKTRACE=1 - -echo -e "\n\nChecking Transaction Sync Clients with features." -cargo check --verbose --color always --features esplora-blocking -cargo check --verbose --color always --features esplora-async -cargo check --verbose --color always --features esplora-async-https -cargo check --verbose --color always --features electrum - -if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then - echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." - cargo check --tests -else - echo -e "\n\nTesting Transaction Sync Clients with features." - cargo test --verbose --color always --features esplora-blocking - cargo test --verbose --color always --features esplora-async - cargo test --verbose --color always --features esplora-async-https - cargo test --verbose --color always --features electrum -fi - -popd diff --git a/fuzz/README.md b/fuzz/README.md index 4b6e0d12457..cfdab4940bc 100644 --- a/fuzz/README.md +++ b/fuzz/README.md @@ -68,6 +68,19 @@ cargo +nightly fuzz run --features "libfuzzer_fuzz" msg_ping_target Note: If you encounter a `SIGKILL` during run/build check for OOM in kernel logs and consider increasing RAM size for VM. +##### Fast builds for development + +The default build uses LTO and single codegen unit, which is slow. For faster iteration during +development, use the `-D` (dev) flag: + +```shell +cargo +nightly fuzz run --features "libfuzzer_fuzz" -D msg_ping_target +``` + +The `-D` flag builds in development mode with faster compilation (still has optimizations via +`opt-level = 1`). The first build will be slow as it rebuilds the standard library with +sanitizer instrumentation, but subsequent builds will be fast. 
+ If you wish to just generate fuzzing binary executables for `libFuzzer` and not run them: ```shell cargo +nightly fuzz build --features "libfuzzer_fuzz" msg_ping_target diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index aca232471d6..87d58da4832 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -36,7 +36,9 @@ use bitcoin::WPubkeyHash; use lightning::blinded_path::message::{BlindedMessagePath, MessageContext, MessageForwardNode}; use lightning::blinded_path::payment::{BlindedPaymentPath, ReceiveTlvs}; use lightning::chain; -use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; +use lightning::chain::chaininterface::{ + TransactionType, BroadcasterInterface, ConfirmationTarget, FeeEstimator, +}; use lightning::chain::channelmonitor::{ChannelMonitor, MonitorEvent}; use lightning::chain::transaction::OutPoint; use lightning::chain::{ @@ -49,7 +51,6 @@ use lightning::ln::channel::{ use lightning::ln::channel_state::ChannelDetails; use lightning::ln::channelmanager::{ ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, RecentPaymentDetails, - RecipientOnionFields, }; use lightning::ln::functional_test_utils::*; use lightning::ln::funding::{FundingTxInput, SpliceContribution}; @@ -58,6 +59,7 @@ use lightning::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, CommitmentUpdate, Init, MessageSendEvent, UpdateAddHTLC, }; +use lightning::ln::outbound_payment::RecipientOnionFields; use lightning::ln::script::ShutdownScript; use lightning::ln::types::ChannelId; use lightning::offers::invoice::UnsignedBolt12Invoice; @@ -155,9 +157,15 @@ impl MessageRouter for FuzzRouter { } } -pub struct TestBroadcaster {} +pub struct TestBroadcaster { + txn_broadcasted: RefCell>, +} impl BroadcasterInterface for TestBroadcaster { - fn broadcast_transactions(&self, _txs: &[&Transaction]) {} + fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]) 
{ + for (tx, _broadcast_type) in txs { + self.txn_broadcasted.borrow_mut().push((*tx).clone()); + } + } } pub struct VecWriter(pub Vec); @@ -299,8 +307,10 @@ impl chain::Watch for TestChainMonitor { persisted_monitor: ser.0, pending_monitors: Vec::new(), }, - Ok(chain::ChannelMonitorUpdateStatus::InProgress) => { - panic!("The test currently doesn't test initial-persistence via the async pipeline") + Ok(chain::ChannelMonitorUpdateStatus::InProgress) => LatestMonitorState { + persisted_monitor_id: monitor_id, + persisted_monitor: Vec::new(), + pending_monitors: vec![(monitor_id, ser.0)], }, Ok(chain::ChannelMonitorUpdateStatus::UnrecoverableError) => panic!(), Err(()) => panic!(), @@ -332,7 +342,7 @@ impl chain::Watch for TestChainMonitor { deserialized_monitor .update_monitor( update, - &&TestBroadcaster {}, + &&TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger, ) @@ -536,44 +546,20 @@ type ChanMan<'a> = ChannelManager< >; #[inline] -fn get_payment_secret_hash( - dest: &ChanMan, payment_id: &mut u8, -) -> Option<(PaymentSecret, PaymentHash)> { - let mut payment_hash; - for _ in 0..256 { - payment_hash = PaymentHash(Sha256::hash(&[*payment_id; 1]).to_byte_array()); - if let Ok(payment_secret) = - dest.create_inbound_payment_for_hash(payment_hash, None, 3600, None) - { - return Some((payment_secret, payment_hash)); - } - *payment_id = payment_id.wrapping_add(1); - } - None -} - -#[inline] -fn send_noret( - source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, - payment_idx: &mut u64, -) { - send_payment(source, dest, dest_chan_id, amt, payment_id, payment_idx); +fn get_payment_secret_hash(dest: &ChanMan, payment_ctr: &mut u64) -> (PaymentSecret, PaymentHash) { + *payment_ctr += 1; + let payment_hash = PaymentHash(Sha256::hash(&[*payment_ctr as u8]).to_byte_array()); + let payment_secret = dest + .create_inbound_payment_for_hash(payment_hash, None, 3600, 
None) + .expect("create_inbound_payment_for_hash failed"); + (payment_secret, payment_hash) } #[inline] fn send_payment( - source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, - payment_idx: &mut u64, + source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_secret: PaymentSecret, + payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - let (payment_secret, payment_hash) = - if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { - (secret, hash) - } else { - return true; - }; - let mut payment_id = [0; 32]; - payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes()); - *payment_idx += 1; let (min_value_sendable, max_value_sendable) = source .list_usable_channels() .iter() @@ -600,7 +586,6 @@ fn send_payment( route_params: Some(route_params.clone()), }; let onion = RecipientOnionFields::secret_only(payment_secret); - let payment_id = PaymentId(payment_id); let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); match res { Err(err) => { @@ -615,41 +600,15 @@ fn send_payment( } } -#[inline] -fn send_hop_noret( - source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, - amt: u64, payment_id: &mut u8, payment_idx: &mut u64, -) { - send_hop_payment( - source, - middle, - middle_chan_id, - dest, - dest_chan_id, - amt, - payment_id, - payment_idx, - ); -} - #[inline] fn send_hop_payment( - source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, - amt: u64, payment_id: &mut u8, payment_idx: &mut u64, + source: &ChanMan, middle: &ChanMan, middle_scid: u64, dest: &ChanMan, dest_scid: u64, amt: u64, + payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - let (payment_secret, payment_hash) = - if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { - (secret, hash) - } else { - return true; - }; - let mut payment_id = [0; 32]; - 
payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes()); - *payment_idx += 1; let (min_value_sendable, max_value_sendable) = source .list_usable_channels() .iter() - .find(|chan| chan.short_channel_id == Some(middle_chan_id)) + .find(|chan| chan.short_channel_id == Some(middle_scid)) .map(|chan| (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat)) .unwrap_or((0, 0)); let first_hop_fee = 50_000; @@ -663,7 +622,7 @@ fn send_hop_payment( RouteHop { pubkey: middle.get_our_node_id(), node_features: middle.node_features(), - short_channel_id: middle_chan_id, + short_channel_id: middle_scid, channel_features: middle.channel_features(), fee_msat: first_hop_fee, cltv_expiry_delta: 100, @@ -672,7 +631,7 @@ fn send_hop_payment( RouteHop { pubkey: dest.get_our_node_id(), node_features: dest.node_features(), - short_channel_id: dest_chan_id, + short_channel_id: dest_scid, channel_features: dest.channel_features(), fee_msat: amt, cltv_expiry_delta: 200, @@ -684,7 +643,6 @@ fn send_hop_payment( route_params: Some(route_params.clone()), }; let onion = RecipientOnionFields::secret_only(payment_secret); - let payment_id = PaymentId(payment_id); let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); match res { Err(err) => { @@ -700,12 +658,151 @@ fn send_hop_payment( } } +/// Send an MPP payment directly from source to dest using multiple channels. 
+#[inline] +fn send_mpp_payment( + source: &ChanMan, dest: &ChanMan, dest_scids: &[u64], amt: u64, payment_secret: PaymentSecret, + payment_hash: PaymentHash, payment_id: PaymentId, +) -> bool { + let num_paths = dest_scids.len(); + if num_paths == 0 { + return false; + } + + let amt_per_path = amt / num_paths as u64; + let mut paths = Vec::with_capacity(num_paths); + + for (i, &dest_scid) in dest_scids.iter().enumerate() { + let path_amt = if i == num_paths - 1 { + amt - amt_per_path * (num_paths as u64 - 1) + } else { + amt_per_path + }; + + paths.push(Path { + hops: vec![RouteHop { + pubkey: dest.get_our_node_id(), + node_features: dest.node_features(), + short_channel_id: dest_scid, + channel_features: dest.channel_features(), + fee_msat: path_amt, + cltv_expiry_delta: 200, + maybe_announced_channel: true, + }], + blinded_tail: None, + }); + } + + let route_params = RouteParameters::from_payment_params_and_value( + PaymentParameters::from_node_id(dest.get_our_node_id(), TEST_FINAL_CLTV), + amt, + ); + let route = Route { paths, route_params: Some(route_params) }; + let onion = RecipientOnionFields::secret_only(payment_secret); + let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); + match res { + Err(_) => false, + Ok(()) => check_payment_send_events(source, payment_id), + } +} + +/// Send an MPP payment from source to dest via middle node. +/// Supports multiple channels on either or both hops. 
+#[inline] +fn send_mpp_hop_payment( + source: &ChanMan, middle: &ChanMan, middle_scids: &[u64], dest: &ChanMan, dest_scids: &[u64], + amt: u64, payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, +) -> bool { + // Create paths by pairing middle_scids with dest_scids + let num_paths = middle_scids.len().max(dest_scids.len()); + if num_paths == 0 { + return false; + } + + let first_hop_fee = 50_000; + let amt_per_path = amt / num_paths as u64; + let fee_per_path = first_hop_fee / num_paths as u64; + let mut paths = Vec::with_capacity(num_paths); + + for i in 0..num_paths { + let middle_scid = middle_scids[i % middle_scids.len()]; + let dest_scid = dest_scids[i % dest_scids.len()]; + + let path_amt = if i == num_paths - 1 { + amt - amt_per_path * (num_paths as u64 - 1) + } else { + amt_per_path + }; + let path_fee = if i == num_paths - 1 { + first_hop_fee - fee_per_path * (num_paths as u64 - 1) + } else { + fee_per_path + }; + + paths.push(Path { + hops: vec![ + RouteHop { + pubkey: middle.get_our_node_id(), + node_features: middle.node_features(), + short_channel_id: middle_scid, + channel_features: middle.channel_features(), + fee_msat: path_fee, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: dest.get_our_node_id(), + node_features: dest.node_features(), + short_channel_id: dest_scid, + channel_features: dest.channel_features(), + fee_msat: path_amt, + cltv_expiry_delta: 200, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }); + } + + let route_params = RouteParameters::from_payment_params_and_value( + PaymentParameters::from_node_id(dest.get_our_node_id(), TEST_FINAL_CLTV), + amt, + ); + let route = Route { paths, route_params: Some(route_params) }; + let onion = RecipientOnionFields::secret_only(payment_secret); + let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); + match res { + Err(_) => false, + Ok(()) => check_payment_send_events(source, 
payment_id), + } +} + #[inline] pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let out = SearchingOutput::new(underlying_out); - let broadcast = Arc::new(TestBroadcaster {}); + let broadcast = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); let router = FuzzRouter {}; + // Read initial monitor styles from fuzz input (1 byte: 2 bits per node) + let initial_mon_styles = if !data.is_empty() { data[0] } else { 0 }; + let mon_style = [ + RefCell::new(if initial_mon_styles & 0b01 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }), + RefCell::new(if initial_mon_styles & 0b10 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }), + RefCell::new(if initial_mon_styles & 0b100 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }), + ]; + macro_rules! make_node { ($node_id: expr, $fee_estimator: expr) => {{ let logger: Arc = @@ -725,7 +822,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister { - update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed), + update_ret: Mutex::new(mon_style[$node_id as usize].borrow().clone()), }), Arc::clone(&keys_manager), )); @@ -734,9 +831,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { config.channel_config.forwarding_fee_proportional_millionths = 0; config.channel_handshake_config.announce_for_forwarding = true; config.reject_inbound_splices = false; - if anchors { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - config.manually_accept_inbound_channels = true; + if !anchors { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; } let network = Network::Bitcoin; let best_block_timestamp = genesis_block(network).header.time; @@ -762,9 +858,6 @@ pub fn do_test(data: &[u8], underlying_out: 
Out, anchors: bool) { }}; } - let default_mon_style = RefCell::new(ChannelMonitorUpdateStatus::Completed); - let mon_style = [default_mon_style.clone(), default_mon_style.clone(), default_mon_style]; - let reload_node = |ser: &Vec, node_id: u8, old_monitors: &TestChainMonitor, @@ -788,9 +881,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { config.channel_config.forwarding_fee_proportional_millionths = 0; config.channel_handshake_config.announce_for_forwarding = true; config.reject_inbound_splices = false; - if anchors { - config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - config.manually_accept_inbound_channels = true; + if !anchors { + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; } let mut monitors = new_hash_map(); @@ -860,8 +952,21 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }; let mut channel_txn = Vec::new(); - macro_rules! make_channel { - ($source: expr, $dest: expr, $dest_keys_manager: expr, $chan_id: expr) => {{ + macro_rules! complete_all_pending_monitor_updates { + ($monitor: expr) => {{ + for (channel_id, state) in $monitor.latest_monitors.lock().unwrap().iter_mut() { + for (id, data) in state.pending_monitors.drain(..) { + $monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); + if id >= state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } + }}; + } + macro_rules! connect_peers { + ($source: expr, $dest: expr) => {{ let init_dest = Init { features: $dest.init_features(), networks: None, @@ -874,7 +979,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { remote_network_address: None, }; $dest.peer_connected($source.get_our_node_id(), &init_src, false).unwrap(); - + }}; + } + macro_rules! 
make_channel { + ($source: expr, $dest: expr, $source_monitor: expr, $dest_monitor: expr, $dest_keys_manager: expr, $chan_id: expr) => {{ $source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None, None).unwrap(); let open_channel = { let events = $source.get_and_clear_pending_msg_events(); @@ -888,30 +996,28 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { $dest.handle_open_channel($source.get_our_node_id(), &open_channel); let accept_channel = { - if anchors { - let events = $dest.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - if let events::Event::OpenChannelRequest { - ref temporary_channel_id, - ref counterparty_node_id, - .. - } = events[0] - { - let mut random_bytes = [0u8; 16]; - random_bytes - .copy_from_slice(&$dest_keys_manager.get_secure_random_bytes()[..16]); - let user_channel_id = u128::from_be_bytes(random_bytes); - $dest - .accept_inbound_channel( - temporary_channel_id, - counterparty_node_id, - user_channel_id, - None, - ) - .unwrap(); - } else { - panic!("Wrong event type"); - } + let events = $dest.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + if let events::Event::OpenChannelRequest { + ref temporary_channel_id, + ref counterparty_node_id, + .. 
+ } = events[0] + { + let mut random_bytes = [0u8; 16]; + random_bytes + .copy_from_slice(&$dest_keys_manager.get_secure_random_bytes()[..16]); + let user_channel_id = u128::from_be_bytes(random_bytes); + $dest + .accept_inbound_channel( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + None, + ) + .unwrap(); + } else { + panic!("Wrong event type"); } let events = $dest.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -965,12 +1071,14 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }; $dest.handle_funding_created($source.get_our_node_id(), &funding_created); + // Complete any pending monitor updates for dest after watch_channel + complete_all_pending_monitor_updates!($dest_monitor); - let funding_signed = { + let (funding_signed, channel_id) = { let events = $dest.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] { - msg.clone() + (msg.clone(), msg.channel_id.clone()) } else { panic!("Wrong event type"); } @@ -984,19 +1092,22 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } $source.handle_funding_signed($dest.get_our_node_id(), &funding_signed); + // Complete any pending monitor updates for source after watch_channel + complete_all_pending_monitor_updates!($source_monitor); + let events = $source.get_and_clear_pending_events(); assert_eq!(events.len(), 1); - let channel_id = if let events::Event::ChannelPending { + if let events::Event::ChannelPending { ref counterparty_node_id, - ref channel_id, + channel_id: ref event_channel_id, .. 
} = events[0] { assert_eq!(counterparty_node_id, &$dest.get_our_node_id()); - channel_id.clone() + assert_eq!(*event_channel_id, channel_id); } else { panic!("Wrong event type"); - }; + } channel_id }}; @@ -1087,8 +1198,30 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut nodes = [node_a, node_b, node_c]; - let chan_1_id = make_channel!(nodes[0], nodes[1], keys_manager_b, 0); - let chan_2_id = make_channel!(nodes[1], nodes[2], keys_manager_c, 1); + // Connect peers first, then create channels + connect_peers!(nodes[0], nodes[1]); + connect_peers!(nodes[1], nodes[2]); + + // Create 3 channels between A-B and 3 channels between B-C (6 total). + // + // Use version numbers 1-6 to avoid txid collisions under fuzz hashing. + // Fuzz mode uses XOR-based hashing (all bytes XOR to one byte), and + // versions 0-5 cause collisions between A-B and B-C channel pairs + // (e.g., A-B with Version(1) collides with B-C with Version(3)). + let chan_ab_ids = [ + make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 1), + make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 2), + make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 3), + ]; + let chan_bc_ids = [ + make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 4), + make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 5), + make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 6), + ]; + + // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions + // during normal operation in `test_return`. 
+ broadcast.txn_broadcasted.borrow_mut().clear(); for node in nodes.iter() { confirm_txn!(node); @@ -1096,16 +1229,34 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { lock_fundings!(nodes); - let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap(); - let chan_a_id = nodes[0].list_usable_channels()[0].channel_id; - let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap(); - let chan_b_id = nodes[2].list_usable_channels()[0].channel_id; - - let mut p_id: u8 = 0; - let mut p_idx: u64 = 0; - - let mut chan_a_disconnected = false; - let mut chan_b_disconnected = false; + // Get SCIDs for all A-B channels (from node A's perspective) + let node_a_chans: Vec<_> = nodes[0].list_usable_channels(); + let chan_ab_scids: [u64; 3] = [ + node_a_chans[0].short_channel_id.unwrap(), + node_a_chans[1].short_channel_id.unwrap(), + node_a_chans[2].short_channel_id.unwrap(), + ]; + let chan_ab_chan_ids: [ChannelId; 3] = + [node_a_chans[0].channel_id, node_a_chans[1].channel_id, node_a_chans[2].channel_id]; + // Get SCIDs for all B-C channels (from node C's perspective) + let node_c_chans: Vec<_> = nodes[2].list_usable_channels(); + let chan_bc_scids: [u64; 3] = [ + node_c_chans[0].short_channel_id.unwrap(), + node_c_chans[1].short_channel_id.unwrap(), + node_c_chans[2].short_channel_id.unwrap(), + ]; + let chan_bc_chan_ids: [ChannelId; 3] = + [node_c_chans[0].channel_id, node_c_chans[1].channel_id, node_c_chans[2].channel_id]; + // Keep old names for backward compatibility in existing code + let chan_a = chan_ab_scids[0]; + let chan_a_id = chan_ab_chan_ids[0]; + let chan_b = chan_bc_scids[0]; + let chan_b_id = chan_bc_chan_ids[0]; + + let mut p_ctr: u64 = 0; + + let mut peers_ab_disconnected = false; + let mut peers_bc_disconnected = false; let mut ab_events = Vec::new(); let mut ba_events = Vec::new(); let mut bc_events = Vec::new(); @@ -1115,16 +1266,24 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut 
node_b_ser = nodes[1].encode(); let mut node_c_ser = nodes[2].encode(); + let pending_payments = RefCell::new([Vec::new(), Vec::new(), Vec::new()]); + let resolved_payments = RefCell::new([Vec::new(), Vec::new(), Vec::new()]); + macro_rules! test_return { () => {{ - assert_eq!(nodes[0].list_channels().len(), 1); - assert_eq!(nodes[1].list_channels().len(), 2); - assert_eq!(nodes[2].list_channels().len(), 1); + assert_eq!(nodes[0].list_channels().len(), 3); + assert_eq!(nodes[1].list_channels().len(), 6); + assert_eq!(nodes[2].list_channels().len(), 3); + + // At no point should we have broadcasted any transactions after the initial channel + // opens. + assert!(broadcast.txn_broadcasted.borrow().is_empty()); + return; }}; } - let mut read_pos = 0; + let mut read_pos = 1; // First byte was consumed for initial mon_style macro_rules! get_slice { ($len: expr) => {{ let slice_len = $len as usize; @@ -1509,6 +1668,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut claim_set = new_hash_map(); let mut events = nodes[$node].get_and_clear_pending_events(); let had_events = !events.is_empty(); + let mut pending_payments = pending_payments.borrow_mut(); + let mut resolved_payments = resolved_payments.borrow_mut(); for event in events.drain(..) { match event { events::Event::PaymentClaimable { payment_hash, .. } => { @@ -1520,11 +1681,32 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } } }, - events::Event::PaymentSent { .. } => {}, + events::Event::PaymentSent { payment_id, .. } => { + let sent_id = payment_id.unwrap(); + let idx_opt = + pending_payments[$node].iter().position(|id| *id == sent_id); + if let Some(idx) = idx_opt { + pending_payments[$node].remove(idx); + resolved_payments[$node].push(sent_id); + } else { + assert!(resolved_payments[$node].contains(&sent_id)); + } + }, + events::Event::PaymentFailed { payment_id, .. 
} => { + let idx_opt = + pending_payments[$node].iter().position(|id| *id == payment_id); + if let Some(idx) = idx_opt { + pending_payments[$node].remove(idx); + resolved_payments[$node].push(payment_id); + } else if !resolved_payments[$node].contains(&payment_id) { + // Payment failed immediately on send, so it was never added to + // pending_payments. Add it to resolved_payments to track it. + resolved_payments[$node].push(payment_id); + } + }, events::Event::PaymentClaimed { .. } => {}, events::Event::PaymentPathSuccessful { .. } => {}, events::Event::PaymentPathFailed { .. } => {}, - events::Event::PaymentFailed { .. } => {}, events::Event::ProbeSuccessful { .. } | events::Event::ProbeFailed { .. } => { // Even though we don't explicitly send probes, because probes are @@ -1613,6 +1795,99 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }; + let send = + |source_idx: usize, dest_idx: usize, dest_chan_id, amt, payment_ctr: &mut u64| { + let source = &nodes[source_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); + let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); + if succeeded { + pending_payments.borrow_mut()[source_idx].push(id); + } + succeeded + }; + let send_noret = |source_idx, dest_idx, dest_chan_id, amt, payment_ctr: &mut u64| { + send(source_idx, dest_idx, dest_chan_id, amt, payment_ctr); + }; + + let send_hop_noret = |source_idx: usize, + middle_idx: usize, + middle_scid: u64, + dest_idx: usize, + dest_scid: u64, + amt: u64, + payment_ctr: &mut u64| { + let source = &nodes[source_idx]; + let middle = &nodes[middle_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); + let succeeded = send_hop_payment( + source, + 
middle, + middle_scid, + dest, + dest_scid, + amt, + secret, + hash, + id, + ); + if succeeded { + pending_payments.borrow_mut()[source_idx].push(id); + } + }; + + // Direct MPP payment (no hop) + let send_mpp_direct = |source_idx: usize, + dest_idx: usize, + dest_scids: &[u64], + amt: u64, + payment_ctr: &mut u64| { + let source = &nodes[source_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); + let succeeded = send_mpp_payment(source, dest, dest_scids, amt, secret, hash, id); + if succeeded { + pending_payments.borrow_mut()[source_idx].push(id); + } + }; + + // MPP payment via hop - splits payment across multiple channels on either or both hops + let send_mpp_hop = |source_idx: usize, + middle_idx: usize, + middle_scids: &[u64], + dest_idx: usize, + dest_scids: &[u64], + amt: u64, + payment_ctr: &mut u64| { + let source = &nodes[source_idx]; + let middle = &nodes[middle_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); + let succeeded = send_mpp_hop_payment( + source, + middle, + middle_scids, + dest, + dest_scids, + amt, + secret, + hash, + id, + ); + if succeeded { + pending_payments.borrow_mut()[source_idx].push(id); + } + }; + let v = get_slice!(1)[0]; out.locked_write(format!("READ A BYTE! 
HANDLING INPUT {:x}...........\n", v).as_bytes()); match v { @@ -1638,29 +1913,45 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { *mon_style[2].borrow_mut() = ChannelMonitorUpdateStatus::Completed; }, - 0x08 => complete_all_monitor_updates(&monitor_a, &chan_1_id), - 0x09 => complete_all_monitor_updates(&monitor_b, &chan_1_id), - 0x0a => complete_all_monitor_updates(&monitor_b, &chan_2_id), - 0x0b => complete_all_monitor_updates(&monitor_c, &chan_2_id), + 0x08 => { + for id in &chan_ab_ids { + complete_all_monitor_updates(&monitor_a, id); + } + }, + 0x09 => { + for id in &chan_ab_ids { + complete_all_monitor_updates(&monitor_b, id); + } + }, + 0x0a => { + for id in &chan_bc_ids { + complete_all_monitor_updates(&monitor_b, id); + } + }, + 0x0b => { + for id in &chan_bc_ids { + complete_all_monitor_updates(&monitor_c, id); + } + }, 0x0c => { - if !chan_a_disconnected { + if !peers_ab_disconnected { nodes[0].peer_disconnected(nodes[1].get_our_node_id()); nodes[1].peer_disconnected(nodes[0].get_our_node_id()); - chan_a_disconnected = true; + peers_ab_disconnected = true; drain_msg_events_on_disconnect!(0); } }, 0x0d => { - if !chan_b_disconnected { + if !peers_bc_disconnected { nodes[1].peer_disconnected(nodes[2].get_our_node_id()); nodes[2].peer_disconnected(nodes[1].get_our_node_id()); - chan_b_disconnected = true; + peers_bc_disconnected = true; drain_msg_events_on_disconnect!(2); } }, 0x0e => { - if chan_a_disconnected { + if peers_ab_disconnected { let init_1 = Init { features: nodes[1].init_features(), networks: None, @@ -1673,11 +1964,11 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { remote_network_address: None, }; nodes[1].peer_connected(nodes[0].get_our_node_id(), &init_0, false).unwrap(); - chan_a_disconnected = false; + peers_ab_disconnected = false; } }, 0x0f => { - if chan_b_disconnected { + if peers_bc_disconnected { let init_2 = Init { features: nodes[2].init_features(), networks: None, @@ -1690,7 +1981,7 @@ pub 
fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { remote_network_address: None, }; nodes[2].peer_connected(nodes[1].get_our_node_id(), &init_1, false).unwrap(); - chan_b_disconnected = false; + peers_bc_disconnected = false; } }, @@ -1725,93 +2016,73 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0x27 => process_ev_noret!(2, false), // 1/10th the channel size: - 0x30 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_id, &mut p_idx), - 0x31 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx), - 0x32 => send_noret(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx), - 0x33 => send_noret(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx), - 0x34 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx, - ), - 0x35 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx, - ), - - 0x38 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut p_id, &mut p_idx), - 0x39 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx), - 0x3a => send_noret(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx), - 0x3b => send_noret(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut p_id, &mut p_idx), - 0x3c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx, - ), - 0x3d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx, - ), - - 0x40 => send_noret(&nodes[0], &nodes[1], chan_a, 100_000, &mut p_id, &mut p_idx), - 0x41 => send_noret(&nodes[1], &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx), - 0x42 => send_noret(&nodes[1], &nodes[2], chan_b, 100_000, &mut p_id, &mut p_idx), - 0x43 => send_noret(&nodes[2], &nodes[1], chan_b, 100_000, &mut p_id, &mut p_idx), - 0x44 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut p_id, &mut 
p_idx, - ), - 0x45 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx, - ), - - 0x48 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000, &mut p_id, &mut p_idx), - 0x49 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000, &mut p_id, &mut p_idx), - 0x4a => send_noret(&nodes[1], &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx), - 0x4b => send_noret(&nodes[2], &nodes[1], chan_b, 10_000, &mut p_id, &mut p_idx), - 0x4c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx, - ), - 0x4d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut p_id, &mut p_idx, - ), - - 0x50 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000, &mut p_id, &mut p_idx), - 0x51 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx), - 0x52 => send_noret(&nodes[1], &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx), - 0x53 => send_noret(&nodes[2], &nodes[1], chan_b, 1_000, &mut p_id, &mut p_idx), - 0x54 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx, - ), - 0x55 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx, - ), - - 0x58 => send_noret(&nodes[0], &nodes[1], chan_a, 100, &mut p_id, &mut p_idx), - 0x59 => send_noret(&nodes[1], &nodes[0], chan_a, 100, &mut p_id, &mut p_idx), - 0x5a => send_noret(&nodes[1], &nodes[2], chan_b, 100, &mut p_id, &mut p_idx), - 0x5b => send_noret(&nodes[2], &nodes[1], chan_b, 100, &mut p_id, &mut p_idx), - 0x5c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut p_id, &mut p_idx, - ), - 0x5d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut p_id, &mut p_idx, - ), - - 0x60 => send_noret(&nodes[0], &nodes[1], chan_a, 10, &mut p_id, &mut p_idx), - 0x61 => send_noret(&nodes[1], &nodes[0], chan_a, 10, &mut p_id, &mut p_idx), - 0x62 => send_noret(&nodes[1], &nodes[2], chan_b, 10, 
&mut p_id, &mut p_idx), - 0x63 => send_noret(&nodes[2], &nodes[1], chan_b, 10, &mut p_id, &mut p_idx), - 0x64 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10, &mut p_id, &mut p_idx, - ), - 0x65 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut p_id, &mut p_idx, - ), - - 0x68 => send_noret(&nodes[0], &nodes[1], chan_a, 1, &mut p_id, &mut p_idx), - 0x69 => send_noret(&nodes[1], &nodes[0], chan_a, 1, &mut p_id, &mut p_idx), - 0x6a => send_noret(&nodes[1], &nodes[2], chan_b, 1, &mut p_id, &mut p_idx), - 0x6b => send_noret(&nodes[2], &nodes[1], chan_b, 1, &mut p_id, &mut p_idx), - 0x6c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut p_id, &mut p_idx, - ), - 0x6d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut p_id, &mut p_idx, - ), + 0x30 => send_noret(0, 1, chan_a, 10_000_000, &mut p_ctr), + 0x31 => send_noret(1, 0, chan_a, 10_000_000, &mut p_ctr), + 0x32 => send_noret(1, 2, chan_b, 10_000_000, &mut p_ctr), + 0x33 => send_noret(2, 1, chan_b, 10_000_000, &mut p_ctr), + 0x34 => send_hop_noret(0, 1, chan_a, 2, chan_b, 10_000_000, &mut p_ctr), + 0x35 => send_hop_noret(2, 1, chan_b, 0, chan_a, 10_000_000, &mut p_ctr), + + 0x38 => send_noret(0, 1, chan_a, 1_000_000, &mut p_ctr), + 0x39 => send_noret(1, 0, chan_a, 1_000_000, &mut p_ctr), + 0x3a => send_noret(1, 2, chan_b, 1_000_000, &mut p_ctr), + 0x3b => send_noret(2, 1, chan_b, 1_000_000, &mut p_ctr), + 0x3c => send_hop_noret(0, 1, chan_a, 2, chan_b, 1_000_000, &mut p_ctr), + 0x3d => send_hop_noret(2, 1, chan_b, 0, chan_a, 1_000_000, &mut p_ctr), + + 0x40 => send_noret(0, 1, chan_a, 100_000, &mut p_ctr), + 0x41 => send_noret(1, 0, chan_a, 100_000, &mut p_ctr), + 0x42 => send_noret(1, 2, chan_b, 100_000, &mut p_ctr), + 0x43 => send_noret(2, 1, chan_b, 100_000, &mut p_ctr), + 0x44 => send_hop_noret(0, 1, chan_a, 2, chan_b, 100_000, &mut p_ctr), + 0x45 => send_hop_noret(2, 1, chan_b, 0, chan_a, 100_000, &mut 
p_ctr), + + 0x48 => send_noret(0, 1, chan_a, 10_000, &mut p_ctr), + 0x49 => send_noret(1, 0, chan_a, 10_000, &mut p_ctr), + 0x4a => send_noret(1, 2, chan_b, 10_000, &mut p_ctr), + 0x4b => send_noret(2, 1, chan_b, 10_000, &mut p_ctr), + 0x4c => send_hop_noret(0, 1, chan_a, 2, chan_b, 10_000, &mut p_ctr), + 0x4d => send_hop_noret(2, 1, chan_b, 0, chan_a, 10_000, &mut p_ctr), + + 0x50 => send_noret(0, 1, chan_a, 1_000, &mut p_ctr), + 0x51 => send_noret(1, 0, chan_a, 1_000, &mut p_ctr), + 0x52 => send_noret(1, 2, chan_b, 1_000, &mut p_ctr), + 0x53 => send_noret(2, 1, chan_b, 1_000, &mut p_ctr), + 0x54 => send_hop_noret(0, 1, chan_a, 2, chan_b, 1_000, &mut p_ctr), + 0x55 => send_hop_noret(2, 1, chan_b, 0, chan_a, 1_000, &mut p_ctr), + + 0x58 => send_noret(0, 1, chan_a, 100, &mut p_ctr), + 0x59 => send_noret(1, 0, chan_a, 100, &mut p_ctr), + 0x5a => send_noret(1, 2, chan_b, 100, &mut p_ctr), + 0x5b => send_noret(2, 1, chan_b, 100, &mut p_ctr), + 0x5c => send_hop_noret(0, 1, chan_a, 2, chan_b, 100, &mut p_ctr), + 0x5d => send_hop_noret(2, 1, chan_b, 0, chan_a, 100, &mut p_ctr), + + 0x60 => send_noret(0, 1, chan_a, 10, &mut p_ctr), + 0x61 => send_noret(1, 0, chan_a, 10, &mut p_ctr), + 0x62 => send_noret(1, 2, chan_b, 10, &mut p_ctr), + 0x63 => send_noret(2, 1, chan_b, 10, &mut p_ctr), + 0x64 => send_hop_noret(0, 1, chan_a, 2, chan_b, 10, &mut p_ctr), + 0x65 => send_hop_noret(2, 1, chan_b, 0, chan_a, 10, &mut p_ctr), + + 0x68 => send_noret(0, 1, chan_a, 1, &mut p_ctr), + 0x69 => send_noret(1, 0, chan_a, 1, &mut p_ctr), + 0x6a => send_noret(1, 2, chan_b, 1, &mut p_ctr), + 0x6b => send_noret(2, 1, chan_b, 1, &mut p_ctr), + 0x6c => send_hop_noret(0, 1, chan_a, 2, chan_b, 1, &mut p_ctr), + 0x6d => send_hop_noret(2, 1, chan_b, 0, chan_a, 1, &mut p_ctr), + + // MPP payments + // 0x70: direct MPP from 0 to 1 (multi A-B channels) + 0x70 => send_mpp_direct(0, 1, &chan_ab_scids, 1_000_000, &mut p_ctr), + // 0x71: MPP 0->1->2, multi channels on first hop (A-B) + 0x71 => 
send_mpp_hop(0, 1, &chan_ab_scids, 2, &[chan_b], 1_000_000, &mut p_ctr), + // 0x72: MPP 0->1->2, multi channels on both hops (A-B and B-C) + 0x72 => send_mpp_hop(0, 1, &chan_ab_scids, 2, &chan_bc_scids, 1_000_000, &mut p_ctr), + // 0x73: MPP 0->1->2, multi channels on second hop (B-C) + 0x73 => send_mpp_hop(0, 1, &[chan_a], 2, &chan_bc_scids, 1_000_000, &mut p_ctr), + // 0x74: direct MPP from 0 to 1, multi parts over single channel + 0x74 => send_mpp_direct(0, 1, &[chan_a, chan_a, chan_a], 1_000_000, &mut p_ctr), 0x80 => { let mut max_feerate = last_htlc_clear_fee_a; @@ -1860,11 +2131,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0xa0 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap(); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(10_000), - inputs: vec![input], - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_a.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[0].splice_channel( &chan_a_id, @@ -1882,11 +2150,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0xa1 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 1).unwrap(); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(10_000), - inputs: vec![input], - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[1].splice_channel( &chan_a_id, @@ -1904,11 +2169,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0xa2 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap(); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(10_000), - inputs: vec![input], - change_script: None, - }; + let 
contribution = + SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[1].splice_channel( &chan_b_id, @@ -1926,11 +2188,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0xa3 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 1).unwrap(); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(10_000), - inputs: vec![input], - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_c.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[2].splice_channel( &chan_b_id, @@ -1958,12 +2217,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .map(|chan| chan.outbound_capacity_msat) .unwrap(); if outbound_capacity_msat >= 20_000_000 { - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: coinbase_tx.output[0].script_pubkey.clone(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[0].script_pubkey.clone(), + }]); let funding_feerate_sat_per_kw = fee_est_a.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[0].splice_channel( @@ -1989,12 +2246,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .map(|chan| chan.outbound_capacity_msat) .unwrap(); if outbound_capacity_msat >= 20_000_000 { - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: 
Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), + }]); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[1].splice_channel( @@ -2020,12 +2275,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .map(|chan| chan.outbound_capacity_msat) .unwrap(); if outbound_capacity_msat >= 20_000_000 { - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), + }]); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[1].splice_channel( @@ -2051,12 +2304,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .map(|chan| chan.outbound_capacity_msat) .unwrap(); if outbound_capacity_msat >= 20_000_000 { - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: coinbase_tx.output[2].script_pubkey.clone(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[2].script_pubkey.clone(), + }]); let funding_feerate_sat_per_kw = fee_est_c.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[2].splice_channel( @@ -2078,9 +2329,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0xb0 | 0xb1 | 0xb2 => { // Restart node A, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. 
- if !chan_a_disconnected { + if !peers_ab_disconnected { nodes[1].peer_disconnected(nodes[0].get_our_node_id()); - chan_a_disconnected = true; + peers_ab_disconnected = true; push_excess_b_events!( nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0) @@ -2096,16 +2347,16 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0xb3..=0xbb => { // Restart node B, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. - if !chan_a_disconnected { + if !peers_ab_disconnected { nodes[0].peer_disconnected(nodes[1].get_our_node_id()); - chan_a_disconnected = true; + peers_ab_disconnected = true; nodes[0].get_and_clear_pending_msg_events(); ab_events.clear(); ba_events.clear(); } - if !chan_b_disconnected { + if !peers_bc_disconnected { nodes[2].peer_disconnected(nodes[1].get_our_node_id()); - chan_b_disconnected = true; + peers_bc_disconnected = true; nodes[2].get_and_clear_pending_msg_events(); bc_events.clear(); cb_events.clear(); @@ -2118,9 +2369,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0xbc | 0xbd | 0xbe => { // Restart node C, picking among the in-flight `ChannelMonitor`s to use based on // the value of `v` we're matching. 
- if !chan_b_disconnected { + if !peers_bc_disconnected { nodes[1].peer_disconnected(nodes[2].get_our_node_id()); - chan_b_disconnected = true; + peers_bc_disconnected = true; push_excess_b_events!( nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2) @@ -2134,28 +2385,76 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { monitor_c = new_monitor_c; }, - 0xf0 => complete_monitor_update(&monitor_a, &chan_1_id, &complete_first), - 0xf1 => complete_monitor_update(&monitor_a, &chan_1_id, &complete_second), - 0xf2 => complete_monitor_update(&monitor_a, &chan_1_id, &Vec::pop), + 0xf0 => { + for id in &chan_ab_ids { + complete_monitor_update(&monitor_a, id, &complete_first); + } + }, + 0xf1 => { + for id in &chan_ab_ids { + complete_monitor_update(&monitor_a, id, &complete_second); + } + }, + 0xf2 => { + for id in &chan_ab_ids { + complete_monitor_update(&monitor_a, id, &Vec::pop); + } + }, - 0xf4 => complete_monitor_update(&monitor_b, &chan_1_id, &complete_first), - 0xf5 => complete_monitor_update(&monitor_b, &chan_1_id, &complete_second), - 0xf6 => complete_monitor_update(&monitor_b, &chan_1_id, &Vec::pop), + 0xf4 => { + for id in &chan_ab_ids { + complete_monitor_update(&monitor_b, id, &complete_first); + } + }, + 0xf5 => { + for id in &chan_ab_ids { + complete_monitor_update(&monitor_b, id, &complete_second); + } + }, + 0xf6 => { + for id in &chan_ab_ids { + complete_monitor_update(&monitor_b, id, &Vec::pop); + } + }, - 0xf8 => complete_monitor_update(&monitor_b, &chan_2_id, &complete_first), - 0xf9 => complete_monitor_update(&monitor_b, &chan_2_id, &complete_second), - 0xfa => complete_monitor_update(&monitor_b, &chan_2_id, &Vec::pop), + 0xf8 => { + for id in &chan_bc_ids { + complete_monitor_update(&monitor_b, id, &complete_first); + } + }, + 0xf9 => { + for id in &chan_bc_ids { + complete_monitor_update(&monitor_b, id, &complete_second); + } + }, + 0xfa => { + for id in &chan_bc_ids { + complete_monitor_update(&monitor_b, id, &Vec::pop); 
+ } + }, - 0xfc => complete_monitor_update(&monitor_c, &chan_2_id, &complete_first), - 0xfd => complete_monitor_update(&monitor_c, &chan_2_id, &complete_second), - 0xfe => complete_monitor_update(&monitor_c, &chan_2_id, &Vec::pop), + 0xfc => { + for id in &chan_bc_ids { + complete_monitor_update(&monitor_c, id, &complete_first); + } + }, + 0xfd => { + for id in &chan_bc_ids { + complete_monitor_update(&monitor_c, id, &complete_second); + } + }, + 0xfe => { + for id in &chan_bc_ids { + complete_monitor_update(&monitor_c, id, &Vec::pop); + } + }, 0xff => { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. // First, make sure peers are all connected to each other - if chan_a_disconnected { + if peers_ab_disconnected { let init_1 = Init { features: nodes[1].init_features(), networks: None, @@ -2168,9 +2467,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { remote_network_address: None, }; nodes[1].peer_connected(nodes[0].get_our_node_id(), &init_0, false).unwrap(); - chan_a_disconnected = false; + peers_ab_disconnected = false; } - if chan_b_disconnected { + if peers_bc_disconnected { let init_2 = Init { features: nodes[2].init_features(), networks: None, @@ -2183,7 +2482,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { remote_network_address: None, }; nodes[2].peer_connected(nodes[1].get_our_node_id(), &init_1, false).unwrap(); - chan_b_disconnected = false; + peers_bc_disconnected = false; } macro_rules! 
process_all_events { @@ -2194,10 +2493,14 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { panic!("It may take may iterations to settle the state, but it should not take forever"); } // Next, make sure no monitor updates are pending - complete_all_monitor_updates(&monitor_a, &chan_1_id); - complete_all_monitor_updates(&monitor_b, &chan_1_id); - complete_all_monitor_updates(&monitor_b, &chan_2_id); - complete_all_monitor_updates(&monitor_c, &chan_2_id); + for id in &chan_ab_ids { + complete_all_monitor_updates(&monitor_a, id); + complete_all_monitor_updates(&monitor_b, id); + } + for id in &chan_bc_ids { + complete_all_monitor_updates(&monitor_b, id); + complete_all_monitor_updates(&monitor_c, id); + } // Then, make sure any current forwards make their way to their destination if process_msg_events!(0, false, ProcessMessages::AllMessages) { last_pass_no_updates = false; @@ -2241,19 +2544,29 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { process_all_events!(); + // Verify no payments are stuck - all should have resolved + for (idx, pending) in pending_payments.borrow().iter().enumerate() { + assert!( + pending.is_empty(), + "Node {} has {} stuck pending payments after settling all state", + idx, + pending.len() + ); + } + // Finally, make sure that at least one end of each channel can make a substantial payment - assert!( - send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_id, &mut p_idx) - || send_payment( - &nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx - ) - ); - assert!( - send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx) - || send_payment( - &nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx - ) - ); + for &scid in &chan_ab_scids { + assert!( + send(0, 1, scid, 10_000_000, &mut p_ctr) + || send(1, 0, scid, 10_000_000, &mut p_ctr) + ); + } + for &scid in &chan_bc_scids { + assert!( + send(1, 2, scid, 10_000_000, &mut p_ctr) + || send(2, 1, scid, 
10_000_000, &mut p_ctr) + ); + } last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire); last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire); diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index 722db37bbd6..f7f912cfd48 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -21,7 +21,7 @@ use bitcoin::network::Network; use bitcoin::opcodes; use bitcoin::script::{Builder, ScriptBuf}; use bitcoin::transaction::Version; -use bitcoin::transaction::{Transaction, TxOut}; +use bitcoin::transaction::{Transaction, TxIn, TxOut}; use bitcoin::hash_types::{BlockHash, Txid}; use bitcoin::hashes::sha256::Hash as Sha256; @@ -30,20 +30,24 @@ use bitcoin::hashes::Hash as _; use bitcoin::hex::FromHex; use bitcoin::WPubkeyHash; +use lightning::ln::funding::{FundingTxInput, SpliceContribution}; + use lightning::blinded_path::message::{BlindedMessagePath, MessageContext, MessageForwardNode}; use lightning::blinded_path::payment::{BlindedPaymentPath, ReceiveTlvs}; use lightning::chain; -use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; +use lightning::chain::chaininterface::{ + TransactionType, BroadcasterInterface, ConfirmationTarget, FeeEstimator, +}; use lightning::chain::chainmonitor; use lightning::chain::transaction::OutPoint; use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen}; +use lightning::events::bump_transaction::sync::WalletSourceSync; use lightning::events::Event; use lightning::ln::channel_state::ChannelDetails; -use lightning::ln::channelmanager::{ - ChainParameters, ChannelManager, InterceptId, PaymentId, RecipientOnionFields, Retry, -}; +use lightning::ln::channelmanager::{ChainParameters, ChannelManager, InterceptId, PaymentId}; use lightning::ln::functional_test_utils::*; use lightning::ln::inbound_payment::ExpandedKey; +use lightning::ln::outbound_payment::{RecipientOnionFields, Retry}; use lightning::ln::peer_handler::{ 
IgnoringMessageHandler, MessageHandler, PeerManager, SocketDescriptor, }; @@ -62,11 +66,11 @@ use lightning::sign::{ }; use lightning::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::util::config::{ChannelConfig, UserConfig}; -use lightning::util::errors::APIError; use lightning::util::hash_tables::*; use lightning::util::logger::Logger; use lightning::util::ser::{Readable, Writeable}; use lightning::util::test_channel_signer::{EnforcementState, TestChannelSigner}; +use lightning::util::test_utils::TestWalletSource; use lightning_invoice::RawBolt11Invoice; @@ -184,8 +188,8 @@ struct TestBroadcaster { txn_broadcasted: Mutex>, } impl BroadcasterInterface for TestBroadcaster { - fn broadcast_transactions(&self, txs: &[&Transaction]) { - let owned_txs: Vec = txs.iter().map(|tx| (*tx).clone()).collect(); + fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]) { + let owned_txs: Vec = txs.iter().map(|(tx, _)| (*tx).clone()).collect(); self.txn_broadcasted.lock().unwrap().extend(owned_txs); } } @@ -648,6 +652,26 @@ pub fn do_test(mut data: &[u8], logger: &Arc) { let mut pending_funding_generation: Vec<(ChannelId, PublicKey, u64, ScriptBuf)> = Vec::new(); let mut pending_funding_signatures = new_hash_map(); + // Set up a wallet with a coinbase transaction for splice funding + let wallet_secret = SecretKey::from_slice(&[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 2, + ]) + .unwrap(); + let wallet = TestWalletSource::new(wallet_secret); + let coinbase_tx = Transaction { + version: Version::TWO, + lock_time: LockTime::ZERO, + input: vec![TxIn { ..Default::default() }], + output: vec![TxOut { + value: Amount::from_sat(1_000_000), + script_pubkey: wallet.get_change_script().unwrap(), + }], + }; + let coinbase_txid = coinbase_tx.compute_txid(); + wallet + .add_utxo(bitcoin::OutPoint { txid: coinbase_txid, vout: 0 }, Amount::from_sat(1_000_000)); + loop { match get_slice!(1)[0] 
{ 0 => { @@ -985,6 +1009,71 @@ pub fn do_test(mut data: &[u8], logger: &Arc) { rng_output.copy_from_slice(&get_slice!(32)); *keys_manager.rng_output.borrow_mut() = rng_output; }, + // Splice-in: add funds to a channel + 50 => { + let mut channels = channelmanager.list_channels(); + let channel_id_idx = get_slice!(1)[0] as usize; + if channel_id_idx >= channels.len() { + return; + } + channels.sort_by(|a, b| a.channel_id.cmp(&b.channel_id)); + let chan = &channels[channel_id_idx]; + // Only splice funded channels + if chan.funding_txo.is_none() { + continue; + } + let splice_in_sats = slice_to_be24(get_slice!(3)) as u64; + if splice_in_sats == 0 { + continue; + } + // Create a funding input from the coinbase transaction + if let Ok(input) = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0) { + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_sats.min(900_000)), // Cap at available funds minus fees + vec![input], + Some(wallet.get_change_script().unwrap()), + ); + let _ = channelmanager.splice_channel( + &chan.channel_id, + &chan.counterparty.node_id, + contribution, + 253, // funding_feerate_per_kw + None, + ); + } + }, + // Splice-out: remove funds from a channel + 51 => { + let mut channels = channelmanager.list_channels(); + let channel_id_idx = get_slice!(1)[0] as usize; + if channel_id_idx >= channels.len() { + return; + } + channels.sort_by(|a, b| a.channel_id.cmp(&b.channel_id)); + let chan = &channels[channel_id_idx]; + // Only splice funded channels with sufficient capacity + if chan.funding_txo.is_none() || chan.channel_value_satoshis < 20_000 { + continue; + } + let splice_out_sats = slice_to_be24(get_slice!(3)) as u64; + if splice_out_sats == 0 { + continue; + } + // Cap splice-out at a reasonable portion of channel capacity + let max_splice_out = chan.channel_value_satoshis / 4; + let splice_out_sats = splice_out_sats.min(max_splice_out).max(546); // At least dust limit + let contribution = 
SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(splice_out_sats), + script_pubkey: wallet.get_change_script().unwrap(), + }]); + let _ = channelmanager.splice_channel( + &chan.channel_id, + &chan.counterparty.node_id, + contribution, + 253, // funding_feerate_per_kw + None, + ); + }, _ => return, } loss_detector.handler.process_events(); @@ -1013,6 +1102,37 @@ pub fn do_test(mut data: &[u8], logger: &Arc) { intercepted_htlcs.push(intercept_id); } }, + Event::FundingTransactionReadyForSigning { + channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } => { + // Sign the funding transaction and provide it back to the channel manager + let signed_tx = wallet.sign_tx(unsigned_transaction).unwrap(); + let _ = channelmanager.funding_transaction_signed( + &channel_id, + &counterparty_node_id, + signed_tx, + ); + }, + Event::SplicePending { .. } => { + // Splice negotiation completed, waiting for confirmation + }, + Event::SpliceFailed { .. } => { + // Splice failed, inputs can be re-spent + }, + Event::OpenChannelRequest { + temporary_channel_id, counterparty_node_id, .. 
+ } => { + let _ = loss_detector.manager.accept_inbound_channel( + &temporary_channel_id, + &counterparty_node_id, + 0, + None, + ); + loss_detector.handler.process_events(); + }, _ => {}, } } @@ -1050,7 +1170,7 @@ fn two_peer_forwarding_seed() -> Vec { // our network key ext_from_hex("0100000000000000000000000000000000000000000000000000000000000000", &mut test); // config - ext_from_hex("000000000090000000000000000064000100000000000100ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff000100000000000000", &mut test); + ext_from_hex("000000000090000000000000000064000100000000000100ffff00000000000000ffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000000000", &mut test); // new outbound connection with id 0 ext_from_hex("00", &mut test); @@ -1504,7 +1624,7 @@ fn gossip_exchange_seed() -> Vec { // our network key ext_from_hex("0100000000000000000000000000000000000000000000000000000000000000", &mut test); // config - ext_from_hex("000000000090000000000000000064000100000000000100ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff000100000000000000", &mut test); + ext_from_hex("000000000090000000000000000064000100000000000100ffff00000000000000ffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000000000", &mut test); // new outbound connection with id 0 ext_from_hex("00", &mut test); @@ -1578,6 +1698,221 @@ fn gossip_exchange_seed() -> Vec { test } +fn splice_seed() -> Vec { + // This seed sets up a channel between two peers and attempts a splice-in operation that becomes + // locked. 
+ let mut test = Vec::new(); + + // our network key + ext_from_hex("0100000000000000000000000000000000000000000000000000000000000000", &mut test); + // config + ext_from_hex("000000000090000000000000000064000100000000000100ffff00000000000000ffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000000000", &mut test); + + // new outbound connection with id 0 + ext_from_hex("00", &mut test); + // peer's pubkey + ext_from_hex("030000000000000000000000000000000000000000000000000000000000000002", &mut test); + // inbound read from peer id 0 of len 50 + ext_from_hex("030032", &mut test); + // noise act two (0||pubkey||mac) + ext_from_hex("00 030000000000000000000000000000000000000000000000000000000000000002 03000000000000000000000000000000", &mut test); + + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 28 (init with extended features for splicing) + // init message = type(2) + global_len(2) + global(2) + features_len(2) + features(20) = 28 = 0x1c + ext_from_hex("001c 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 44 (28 message + 16 MAC) + ext_from_hex("03002c", &mut test); + // init message (type 16) with splicing (bit 155) and quiescence (bit 35) enabled + // Features: 20 bytes with bit 155 (splicing) and bit 35 (quiescence) set + // Wire format (big-endian): 0x08 at byte 0 for bit 155, zeros for bytes 1-11, original 8 bytes at 12-19 + // 20 bytes = 08 + 11 zeros + 8 original bytes = 080000000000000000000000 + aaa210aa2a0a9aaa + ext_from_hex("0010 00021aaa 0014 080000000000000000000000aaa210aa2a0a9aaa 03000000000000000000000000000000", &mut test); + + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 327 + ext_from_hex("0147 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 
of len 254 + ext_from_hex("0300fe", &mut test); + // beginning of open_channel message + ext_from_hex("0020 6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 0000000000000162 ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004", &mut test); + // inbound read from peer id 0 of len 89 + ext_from_hex("030059", &mut test); + // rest of open_channel and mac + ext_from_hex("030000000000000000000000000000000000000000000000000000000000000005 020900000000000000000000000000000000000000000000000000000000000000 01 0000 01021000 03000000000000000000000000000000", &mut test); + + // client should now respond with accept_channel + + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 132 + ext_from_hex("0084 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 148 + ext_from_hex("030094", &mut test); + // funding_created and mac + ext_from_hex("0022 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 c000000000000000000000000000000000000000000000000000000000000000 0000 00000000000000000000000000000000000000000000000000000000000000dc0100000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + // client should now respond with funding_signed + + // connect a block with one transaction of len 94 + ext_from_hex("0c005e", &mut test); + // the funding transaction + 
ext_from_hex("020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020530000000000000000000000000000000000000000000000000000000000000000000000", &mut test); + // connect blocks to confirm the funding transaction (need minimum_depth confirmations) + for _ in 0..12 { + ext_from_hex("0c0000", &mut test); + } + // by now client should have sent a channel_ready + + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 67 + ext_from_hex("0043 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 83 + ext_from_hex("030053", &mut test); + // channel_ready and mac + ext_from_hex("0024 c000000000000000000000000000000000000000000000000000000000000000 020800000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + + // Channel is now established and ready for splicing! + + // Initiate splice-in on channel 0 (opcode 50) + // Format: 50 + // Channel index 0, splice amount 0x010000 (65536 sats) + ext_from_hex("32 00 010000", &mut test); + + // After splice_channel is called, we should receive a SendStfu event. + // The peer needs to respond with stfu to acknowledge quiescence. 
+ // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 35 (stfu message: type(2) + channel_id(32) + initiator(1) = 35 = 0x23) + ext_from_hex("0023 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 51 (35 message + 16 MAC) + ext_from_hex("030033", &mut test); + // stfu message (type 2): channel_id (32 bytes) + initiator (1 byte) + mac + // channel_id = c000...0000, initiator = 0 (peer is not initiator, responding to our stfu) + ext_from_hex("0002 c000000000000000000000000000000000000000000000000000000000000000 00 03000000000000000000000000000000", &mut test); + + // After receiving peer's stfu, we send SpliceInit. Peer responds with SpliceAck. + // Message type IDs: SpliceAck = 81 (0x0051) + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 75 (SpliceAck: type(2) + channel_id(32) + funding_contribution(8) + funding_pubkey(33) = 75 = 0x4b) + ext_from_hex("004b 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 91 (75 message + 16 MAC) + ext_from_hex("03005b", &mut test); + // SpliceAck message (type 81 = 0x0051): channel_id + funding_contribution + funding_pubkey + mac + // channel_id = c000...0000, funding_contribution = 0 (i64), funding_pubkey = valid 33-byte compressed pubkey + ext_from_hex("0051 c000000000000000000000000000000000000000000000000000000000000000 0000000000000000 030000000000000000000000000000000000000000000000000000000000000001 03000000000000000000000000000000", &mut test); + + // Now we're in interactive tx negotiation. We send TxAddInput for our new funding input. + // Peer responds with TxComplete (they have no inputs/outputs to add). 
+ // Message type IDs: TxComplete = 70 (0x0046) + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 34 (TxComplete: type(2) + channel_id(32) = 34 = 0x22) + ext_from_hex("0022 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 50 (34 message + 16 MAC) + ext_from_hex("030032", &mut test); + // TxComplete message (type 70 = 0x0046): channel_id + mac + ext_from_hex("0046 c000000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + + // After peer's first TxComplete, we send another TxAddInput (for the shared input - existing funding). + // We also send TxAddOutput for the new funding output. + // Peer needs to respond with another TxComplete. + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 34 (TxComplete) + ext_from_hex("0022 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 50 (34 message + 16 MAC) + ext_from_hex("030032", &mut test); + // TxComplete message + ext_from_hex("0046 c000000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + + // We continue sending our inputs/outputs, peer continues with TxComplete. 
+ // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 34 (TxComplete) + ext_from_hex("0022 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 50 (34 message + 16 MAC) + ext_from_hex("030032", &mut test); + // TxComplete message + ext_from_hex("0046 c000000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + + // More TxComplete responses as we add our outputs + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 34 (TxComplete) + ext_from_hex("0022 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 50 (34 message + 16 MAC) + ext_from_hex("030032", &mut test); + // TxComplete message + ext_from_hex("0046 c000000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + + // After we send our TxComplete, the interactive tx negotiation completes. + // Both sides now need to exchange commitment_signed messages. + // Message type IDs: CommitmentSigned = 132 (0x0084) + // For splice, we need to include the funding_txid TLV. + // Message format: type(2) + channel_id(32) + signature(64) + num_htlcs(2) + TLV(type=0, len=32, txid=32) = 134 bytes + // The signature must encode the sighash first byte (f7) in r, following the fuzz pattern. + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 134 (0x86) + ext_from_hex("0086 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 150 (134 message + 16 MAC) + ext_from_hex("030096", &mut test); + // CommitmentSigned message with proper signature (r=f7, s=01...) 
and funding_txid TLV + // signature r encodes sighash first byte f7, s follows the pattern from funding_created + // TLV type 1 (odd/optional) for funding_txid as per impl_writeable_msg!(CommitmentSigned, ...) + // Note: txid is encoded in reverse byte order (Bitcoin standard), so to get display 0000...0033, encode 3300...0000 + ext_from_hex("0084 c000000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000f7 0100000000000000000000000000000000000000000000000000000000000000 0000 01 20 3300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + + // After commitment_signed exchange, we need to exchange tx_signatures. + // Message type IDs: TxSignatures = 71 (0x0047) + // TxSignatures: type(2) + channel_id(32) + txid(32) + num_witnesses(2) + TLV(type=0, len=64, shared_input_sig) + // With shared_input_signature: 2 + 32 + 32 + 2 + 1 + 1 + 64 = 134 = 0x86 + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 134 (0x86) + ext_from_hex("0086 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 150 (134 message + 16 MAC) + ext_from_hex("030096", &mut test); + // TxSignatures message with shared_input_signature TLV (type 0) + // txid must match the splice funding txid (0x33 in reverse byte order) + // shared_input_signature: 64-byte fuzz signature for the shared input + ext_from_hex("0047 c000000000000000000000000000000000000000000000000000000000000000 3300000000000000000000000000000000000000000000000000000000000000 0000 00 40 00000000000000000000000000000000000000000000000000000000000000dc 0100000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + + // Connect a block with the splice funding transaction to confirm it + // The splice funding tx: version(4) + input_count(1) + txid(32) + vout(4) + 
script_len(1) + sequence(4) + // + output_count(1) + value(8) + script_len(1) + script(34) + locktime(4) = 94 bytes = 0x5e + // Transaction structure from FundingTransactionReadyForSigning: + // - Input: spending c000...00:0 with sequence 0xfffffffd + // - Output: 115536 sats to OP_0 PUSH32 6e00...00 + // - Locktime: 13 + ext_from_hex("0c005e", &mut test); + ext_from_hex("02000000 01 c000000000000000000000000000000000000000000000000000000000000000 00000000 00 fdffffff 01 50c3010000000000 22 00206e00000000000000000000000000000000000000000000000000000000000000 0d000000", &mut test); + + // Connect additional blocks to reach minimum_depth confirmations + for _ in 0..5 { + ext_from_hex("0c0000", &mut test); + } + + // After confirmation, exchange splice_locked messages. + // Message type IDs: SpliceLocked = 77 (0x004d) + // SpliceLocked: type(2) + channel_id(32) + splice_txid(32) = 66 = 0x42 + // inbound read from peer id 0 of len 18 + ext_from_hex("030012", &mut test); + // message header indicating message length 66 + ext_from_hex("0042 03000000000000000000000000000000", &mut test); + // inbound read from peer id 0 of len 82 (66 message + 16 MAC) + ext_from_hex("030052", &mut test); + // SpliceLocked message (type 77 = 0x004d): channel_id + splice_txid + mac + // splice_txid must match the splice funding txid (0x33 in reverse byte order) + ext_from_hex("004d c000000000000000000000000000000000000000000000000000000000000000 3300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test); + + test +} + pub fn write_fst_seeds(path: &str) { use std::fs::File; use std::io::Write; @@ -1589,6 +1924,10 @@ pub fn write_fst_seeds(path: &str) { let mut f = File::create(path.to_owned() + "/gossip_exchange_seed").unwrap(); let gossip_exchange = gossip_exchange_seed(); f.write_all(&gossip_exchange).unwrap(); + + let mut f = File::create(path.to_owned() + "/splice_seed").unwrap(); + let splice = splice_seed(); + 
f.write_all(&splice).unwrap(); } #[cfg(test)] @@ -1666,4 +2005,38 @@ mod tests { assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Sending message to all peers except Some(PublicKey(0000000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000002)): ChannelUpdate { signature: 3026020200a602200303030303030303030303030303030303030303030303030303030303030303, contents: UnsignedChannelUpdate { chain_hash: 6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000, short_channel_id: 42, timestamp: 44, message_flags: 1, channel_flags: 0, cltv_expiry_delta: 40, htlc_minimum_msat: 0, htlc_maximum_msat: 100000000, fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: [] } }".to_string())), Some(&1)); assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Sending message to all peers except Some(PublicKey(0000000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000002)) or the announced node: NodeAnnouncement { signature: 302502012802200303030303030303030303030303030303030303030303030303030303030303, contents: UnsignedNodeAnnouncement { features: [], timestamp: 43, node_id: NodeId(030303030303030303030303030303030303030303030303030303030303030303), rgb: [0, 0, 0], alias: NodeAlias([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), addresses: [], excess_address_data: [], excess_data: [] } }".to_string())), Some(&1)); } + + #[test] + fn test_splice_seed() { + let test = super::splice_seed(); + + let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) }); + super::do_test(&test, &(Arc::clone(&logger) as Arc)); + + let log_entries = logger.lines.lock().unwrap(); + + // Channel open + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 
030000000000000000000000000000000000000000000000000000000000000002 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingSigned event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendChannelReady event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + + // Quiescence + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendStfu event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + + // Splice handshake + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendSpliceInit event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + + // Interactive transaction negotiation + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendTxAddInput event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&2)); // One for the shared input, one for the wallet input + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendTxAddOutput event in peer_handler for node 
030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&2)); // One for the shared output, one for the change output + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendTxComplete event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + + // Transaction signing + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 0 fulfills, 0 fails, 1 commits for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendTxSignatures event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + + // Splice locked + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendSpliceLocked event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel c000000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + assert_eq!(log_entries.get(&("lightning::ln::channel".to_string(), "Promoting splice funding txid 0000000000000000000000000000000000000000000000000000000000000033".to_string())), Some(&1)); + } } diff --git a/fuzz/src/process_onion_failure.rs b/fuzz/src/process_onion_failure.rs index 1bc9900718a..ac70562c006 100644 --- a/fuzz/src/process_onion_failure.rs +++ b/fuzz/src/process_onion_failure.rs @@ -9,10 +9,12 @@ use lightning::{ ln::{ channelmanager::{HTLCSource, 
PaymentId}, msgs::OnionErrorPacket, + onion_utils, }, routing::router::{BlindedTail, Path, RouteHop, TrampolineHop}, types::features::{ChannelFeatures, NodeFeatures}, util::logger::Logger, + util::ser::Readable, }; // Imports that need to be added manually @@ -126,19 +128,18 @@ fn do_test(data: &[u8], out: Out) { let failure_data = get_slice!(failure_len); let attribution_data = if get_bool!() { - Some(lightning::ln::AttributionData { - hold_times: get_slice!(80).try_into().unwrap(), - hmacs: get_slice!(840).try_into().unwrap(), - }) + let mut bytes = get_slice!(80 + 840); + let data: onion_utils::AttributionData = Readable::read(&mut bytes).unwrap(); + Some(data) } else { None }; let encrypted_packet = OnionErrorPacket { data: failure_data.into(), attribution_data: attribution_data.clone() }; - lightning::ln::process_onion_failure(&secp_ctx, &logger, &htlc_source, encrypted_packet); + onion_utils::process_onion_failure(&secp_ctx, &logger, &htlc_source, encrypted_packet); if let Some(attribution_data) = attribution_data { - lightning::ln::decode_fulfill_attribution_data( + onion_utils::decode_fulfill_attribution_data( &secp_ctx, &logger, &path, diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs index af29a0221a9..2e5b15fc7f4 100644 --- a/fuzz/src/router.rs +++ b/fuzz/src/router.rs @@ -31,6 +31,7 @@ use lightning::types::features::{BlindedHopFeatures, Bolt12InvoiceFeatures}; use lightning::util::config::UserConfig; use lightning::util::hash_tables::*; use lightning::util::ser::LengthReadable; +use lightning::util::wakers::Notifier; use bitcoin::hashes::Hash; use bitcoin::network::Network; @@ -88,12 +89,11 @@ impl InputData { } } -struct FuzzChainSource<'a, 'b, Out: test_logger::Output> { +struct FuzzChainSource { input: Arc, - net_graph: &'a NetworkGraph<&'b test_logger::TestLogger>, } -impl UtxoLookup for FuzzChainSource<'_, '_, Out> { - fn get_utxo(&self, _chain_hash: &ChainHash, _short_channel_id: u64) -> UtxoResult { +impl UtxoLookup for FuzzChainSource { + fn 
get_utxo(&self, _chain_hash: &ChainHash, _scid: u64, notifier: Arc) -> UtxoResult { let input_slice = self.input.get_slice(2); if input_slice.is_none() { return UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)); @@ -107,17 +107,17 @@ impl UtxoLookup for FuzzChainSource<'_, '_, Out> { &[0, _] => UtxoResult::Sync(Err(UtxoLookupError::UnknownChain)), &[1, _] => UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)), &[2, _] => { - let future = UtxoFuture::new(); - future.resolve_without_forwarding(self.net_graph, Ok(txo_res)); + let future = UtxoFuture::new(notifier); + future.resolve(Ok(txo_res)); UtxoResult::Async(future.clone()) }, &[3, _] => { - let future = UtxoFuture::new(); - future.resolve_without_forwarding(self.net_graph, Err(UtxoLookupError::UnknownTx)); + let future = UtxoFuture::new(notifier); + future.resolve(Err(UtxoLookupError::UnknownTx)); UtxoResult::Async(future.clone()) }, &[4, _] => { - UtxoResult::Async(UtxoFuture::new()) // the future will never resolve + UtxoResult::Async(UtxoFuture::new(notifier)) // the future will never resolve }, &[..] 
=> UtxoResult::Sync(Ok(txo_res)), } @@ -197,7 +197,7 @@ pub fn do_test(data: &[u8], out: Out) { let our_pubkey = get_pubkey!(); let net_graph = NetworkGraph::new(Network::Bitcoin, &logger); - let chain_source = FuzzChainSource { input: Arc::clone(&input), net_graph: &net_graph }; + let chain_source = FuzzChainSource { input: Arc::clone(&input) }; let mut node_pks = new_hash_map(); let mut scid = 42; @@ -335,9 +335,7 @@ pub fn do_test(data: &[u8], out: Out) { node_pks.insert(get_pubkey_from_node_id!(msg.node_id_1), ()); node_pks.insert(get_pubkey_from_node_id!(msg.node_id_2), ()); let _ = net_graph - .update_channel_from_unsigned_announcement::<&FuzzChainSource<'_, '_, Out>>( - &msg, &None, - ); + .update_channel_from_unsigned_announcement::<&FuzzChainSource>(&msg, &None); }, 2 => { let msg = diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml index ef0a9840613..e958919f4d8 100644 --- a/lightning-background-processor/Cargo.toml +++ b/lightning-background-processor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-background-processor" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Valentine Wallace "] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -25,16 +25,16 @@ bitcoin = { version = "0.32.2", default-features = false } bitcoin_hashes = { version = "0.14.0", default-features = false } bitcoin-io = { version = "0.1.2", default-features = false } lightning = { version = "0.3.0", path = "../lightning", default-features = false } -lightning-rapid-gossip-sync = { version = "0.2.0", path = "../lightning-rapid-gossip-sync", default-features = false } -lightning-liquidity = { version = "0.2.0", path = "../lightning-liquidity", default-features = false } +lightning-rapid-gossip-sync = { version = "0.3.0", path = "../lightning-rapid-gossip-sync", default-features = false } +lightning-liquidity = { version = "0.3.0", path = "../lightning-liquidity", 
default-features = false } possiblyrandom = { version = "0.2", path = "../possiblyrandom", default-features = false } [dev-dependencies] tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] } lightning = { version = "0.3.0", path = "../lightning", features = ["_test_utils"] } -lightning-invoice = { version = "0.34.0", path = "../lightning-invoice" } -lightning-liquidity = { version = "0.2.0", path = "../lightning-liquidity", default-features = false, features = ["_test_utils"] } -lightning-persister = { version = "0.2.0", path = "../lightning-persister" } +lightning-invoice = { version = "0.35.0", path = "../lightning-invoice" } +lightning-liquidity = { version = "0.3.0", path = "../lightning-liquidity", default-features = false, features = ["_test_utils"] } +lightning-persister = { version = "0.3.0", path = "../lightning-persister" } [lints] workspace = true diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index cdf1b2e5aa3..f052f3d8d4c 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -30,9 +30,11 @@ mod fwd_batch; use fwd_batch::BatchDelay; +#[cfg(not(c_bindings))] use lightning::chain; +#[cfg(not(c_bindings))] use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; -use lightning::chain::chainmonitor::{ChainMonitor, Persist}; +use lightning::chain::chainmonitor::AChainMonitor; #[cfg(feature = "std")] use lightning::events::EventHandler; #[cfg(feature = "std")] @@ -41,6 +43,8 @@ use lightning::events::ReplayEvent; use lightning::events::{Event, PathFailure}; use lightning::util::ser::Writeable; +#[cfg(not(c_bindings))] +use lightning::io::Error; use lightning::ln::channelmanager::AChannelManager; use lightning::ln::msgs::OnionMessageHandler; use lightning::ln::peer_handler::APeerManager; @@ -48,9 +52,11 @@ use lightning::onion_message::messenger::AOnionMessenger; use 
lightning::routing::gossip::{NetworkGraph, P2PGossipSync}; use lightning::routing::scoring::{ScoreUpdate, WriteableScore}; use lightning::routing::utxo::UtxoLookup; -use lightning::sign::{ - ChangeDestinationSource, ChangeDestinationSourceSync, EntropySource, OutputSpender, -}; +#[cfg(not(c_bindings))] +use lightning::sign::EntropySource; +use lightning::sign::{ChangeDestinationSource, ChangeDestinationSourceSync, OutputSpender}; +#[cfg(not(c_bindings))] +use lightning::util::async_poll::MaybeSend; use lightning::util::logger::Logger; use lightning::util::persist::{ KVStore, KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_PERSISTENCE_KEY, @@ -60,6 +66,7 @@ use lightning::util::persist::{ SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, }; use lightning::util::sweep::{OutputSweeper, OutputSweeperSync}; +use lightning::util::wakers::Future; #[cfg(feature = "std")] use lightning::util::wakers::Sleeper; use lightning_rapid_gossip_sync::RapidGossipSync; @@ -83,7 +90,11 @@ use std::time::Instant; #[cfg(not(feature = "std"))] use alloc::boxed::Box; #[cfg(all(not(c_bindings), not(feature = "std")))] +use alloc::string::String; +#[cfg(all(not(c_bindings), not(feature = "std")))] use alloc::sync::Arc; +#[cfg(all(not(c_bindings), not(feature = "std")))] +use alloc::vec::Vec; /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep /// Rust-Lightning running properly, and (2) either can or should be run in the background. 
Its @@ -109,6 +120,7 @@ use alloc::sync::Arc; /// /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager /// [`ChannelManager::timer_tick_occurred`]: lightning::ln::channelmanager::ChannelManager::timer_tick_occurred +/// [`ChainMonitor::rebroadcast_pending_claims`]: lightning::chain::chainmonitor::ChainMonitor::rebroadcast_pending_claims /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor /// [`Event`]: lightning::events::Event /// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred @@ -163,6 +175,16 @@ const SWEEPER_TIMER: Duration = Duration::from_secs(30); #[cfg(test)] const SWEEPER_TIMER: Duration = Duration::from_secs(1); +#[cfg(not(test))] +const FIRST_ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::from_secs(15); +#[cfg(test)] +const FIRST_ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::ZERO; + +#[cfg(not(test))] +const ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::from_secs(60 * 10); +#[cfg(test)] +const ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::from_secs(1); + /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement const fn min_duration(a: Duration, b: Duration) -> Duration { if a.as_nanos() < b.as_nanos() { @@ -181,12 +203,9 @@ pub enum GossipSync< P: Deref>, R: Deref>, G: Deref>, - U: Deref, - L: Deref, -> where - U::Target: UtxoLookup, - L::Target: Logger, -{ + U: UtxoLookup, + L: Logger, +> { /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7. P2P(P), /// Rapid gossip sync from a trusted server. 
@@ -199,12 +218,9 @@ impl< P: Deref>, R: Deref>, G: Deref>, - U: Deref, - L: Deref, + U: UtxoLookup, + L: Logger, > GossipSync -where - U::Target: UtxoLookup, - L::Target: Logger, { fn network_graph(&self) -> Option<&G> { match self { @@ -227,18 +243,23 @@ where GossipSync::None => None, } } + + fn validation_completion_future(&self) -> Option { + match self { + GossipSync::P2P(gossip_sync) => Some(gossip_sync.validation_completion_future()), + GossipSync::Rapid(_) => None, + GossipSync::None => None, + } + } } /// This is not exported to bindings users as the bindings concretize everything and have constructors for us impl< P: Deref>, G: Deref>, - U: Deref, - L: Deref, + U: UtxoLookup, + L: Logger, > GossipSync, G, U, L> -where - U::Target: UtxoLookup, - L::Target: Logger, { /// Initializes a new [`GossipSync::P2P`] variant. pub fn p2p(gossip_sync: P) -> Self { @@ -251,7 +272,7 @@ impl< 'a, R: Deref>, G: Deref>, - L: Deref, + L: Logger, > GossipSync< &P2PGossipSync, @@ -259,8 +280,7 @@ impl< G, &'a (dyn UtxoLookup + Send + Sync), L, - > where - L::Target: Logger, + > { /// Initializes a new [`GossipSync::Rapid`] variant. pub fn rapid(gossip_sync: R) -> Self { @@ -269,15 +289,14 @@ impl< } /// This is not exported to bindings users as the bindings concretize everything and have constructors for us -impl<'a, L: Deref> +impl<'a, L: Logger> GossipSync< &P2PGossipSync<&'a NetworkGraph, &'a (dyn UtxoLookup + Send + Sync), L>, &RapidGossipSync<&'a NetworkGraph, L>, &'a NetworkGraph, &'a (dyn UtxoLookup + Send + Sync), L, - > where - L::Target: Logger, + > { /// Initializes a new [`GossipSync::None`] variant. pub fn none() -> Self { @@ -285,10 +304,7 @@ impl<'a, L: Deref> } } -fn handle_network_graph_update(network_graph: &NetworkGraph, event: &Event) -where - L::Target: Logger, -{ +fn handle_network_graph_update(network_graph: &NetworkGraph, event: &Event) { if let Event::PaymentPathFailed { failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. 
@@ -393,51 +409,68 @@ type DynChannelManager = lightning::ln::channelmanager::ChannelManager< pub const NO_ONION_MESSENGER: Option< Arc< dyn AOnionMessenger< - EntropySource = dyn EntropySource + Send + Sync, - ES = &(dyn EntropySource + Send + Sync), - NodeSigner = dyn lightning::sign::NodeSigner + Send + Sync, - NS = &(dyn lightning::sign::NodeSigner + Send + Sync), - Logger = dyn Logger + Send + Sync, - L = &'static (dyn Logger + Send + Sync), - NodeIdLookUp = DynChannelManager, + EntropySource = &(dyn EntropySource + Send + Sync), + NodeSigner = &(dyn lightning::sign::NodeSigner + Send + Sync), + Logger = &'static (dyn Logger + Send + Sync), NL = &'static DynChannelManager, - MessageRouter = DynMessageRouter, - MR = &'static DynMessageRouter, - OffersMessageHandler = lightning::ln::peer_handler::IgnoringMessageHandler, - OMH = &'static lightning::ln::peer_handler::IgnoringMessageHandler, - AsyncPaymentsMessageHandler = lightning::ln::peer_handler::IgnoringMessageHandler, - APH = &'static lightning::ln::peer_handler::IgnoringMessageHandler, - DNSResolverMessageHandler = lightning::ln::peer_handler::IgnoringMessageHandler, - DRH = &'static lightning::ln::peer_handler::IgnoringMessageHandler, - CustomOnionMessageHandler = lightning::ln::peer_handler::IgnoringMessageHandler, - CMH = &'static lightning::ln::peer_handler::IgnoringMessageHandler, + MessageRouter = &'static DynMessageRouter, + OMH = lightning::ln::peer_handler::IgnoringMessageHandler, + APH = lightning::ln::peer_handler::IgnoringMessageHandler, + DRH = lightning::ln::peer_handler::IgnoringMessageHandler, + CMH = lightning::ln::peer_handler::IgnoringMessageHandler, > + Send + Sync, >, > = None; +#[cfg(not(c_bindings))] +/// A panicking implementation of [`KVStore`] that is used in [`NO_LIQUIDITY_MANAGER`]. 
+pub struct DummyKVStore; + +#[cfg(not(c_bindings))] +impl KVStore for DummyKVStore { + fn read( + &self, _: &str, _: &str, _: &str, + ) -> impl core::future::Future, Error>> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn write( + &self, _: &str, _: &str, _: &str, _: Vec, + ) -> impl core::future::Future> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn remove( + &self, _: &str, _: &str, _: &str, _: bool, + ) -> impl core::future::Future> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn list( + &self, _: &str, _: &str, + ) -> impl core::future::Future, Error>> + MaybeSend + 'static { + async { unimplemented!() } + } +} + /// When initializing a background processor without a liquidity manager, this can be used to avoid /// specifying a concrete `LiquidityManager` type. #[cfg(not(c_bindings))] pub const NO_LIQUIDITY_MANAGER: Option< Arc< dyn ALiquidityManager< - EntropySource = dyn EntropySource + Send + Sync, - ES = &(dyn EntropySource + Send + Sync), - NodeSigner = dyn lightning::sign::NodeSigner + Send + Sync, - NS = &(dyn lightning::sign::NodeSigner + Send + Sync), + EntropySource = &(dyn EntropySource + Send + Sync), + NodeSigner = &(dyn lightning::sign::NodeSigner + Send + Sync), AChannelManager = DynChannelManager, CM = &DynChannelManager, - Filter = dyn chain::Filter + Send + Sync, C = &(dyn chain::Filter + Send + Sync), - KVStore = dyn lightning::util::persist::KVStore + Send + Sync, - K = &(dyn lightning::util::persist::KVStore + Send + Sync), + K = &DummyKVStore, TimeProvider = dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync, TP = &(dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync), - BroadcasterInterface = dyn lightning::chain::chaininterface::BroadcasterInterface - + Send - + Sync, - T = &(dyn BroadcasterInterface + Send + Sync), + BroadcasterInterface = &(dyn lightning::chain::chaininterface::BroadcasterInterface + + Send + + Sync), > + Send + Sync, >, @@ -449,22 +482,18 @@ 
pub const NO_LIQUIDITY_MANAGER: Option< pub const NO_LIQUIDITY_MANAGER_SYNC: Option< Arc< dyn ALiquidityManagerSync< - EntropySource = dyn EntropySource + Send + Sync, - ES = &(dyn EntropySource + Send + Sync), - NodeSigner = dyn lightning::sign::NodeSigner + Send + Sync, - NS = &(dyn lightning::sign::NodeSigner + Send + Sync), + EntropySource = &(dyn EntropySource + Send + Sync), + NodeSigner = &(dyn lightning::sign::NodeSigner + Send + Sync), AChannelManager = DynChannelManager, CM = &DynChannelManager, - Filter = dyn chain::Filter + Send + Sync, C = &(dyn chain::Filter + Send + Sync), KVStoreSync = dyn lightning::util::persist::KVStoreSync + Send + Sync, KS = &(dyn lightning::util::persist::KVStoreSync + Send + Sync), TimeProvider = dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync, TP = &(dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync), - BroadcasterInterface = dyn lightning::chain::chaininterface::BroadcasterInterface - + Send - + Sync, - T = &(dyn BroadcasterInterface + Send + Sync), + BroadcasterInterface = &(dyn lightning::chain::chaininterface::BroadcasterInterface + + Send + + Sync), > + Send + Sync, >, @@ -481,12 +510,14 @@ pub(crate) mod futures_util { C: Future + Unpin, D: Future + Unpin, E: Future + Unpin, + F: Future + Unpin, > { pub a: A, pub b: B, pub c: C, pub d: D, pub e: E, + pub f: F, } pub(crate) enum SelectorOutput { @@ -495,6 +526,7 @@ pub(crate) mod futures_util { C, D, E, + F, } impl< @@ -503,7 +535,8 @@ pub(crate) mod futures_util { C: Future + Unpin, D: Future + Unpin, E: Future + Unpin, - > Future for Selector + F: Future + Unpin, + > Future for Selector { type Output = SelectorOutput; fn poll( @@ -541,6 +574,12 @@ pub(crate) mod futures_util { }, Poll::Pending => {}, } + match Pin::new(&mut self.f).poll(ctx) { + Poll::Ready(()) => { + return Poll::Ready(SelectorOutput::F); + }, + Poll::Pending => {}, + } Poll::Pending } } @@ -567,6 +606,12 @@ pub(crate) mod futures_util { } } + impl + Unpin> From> for 
OptionalSelector { + fn from(optional_future: Option) -> Self { + Self { optional_future } + } + } + // If we want to poll a future without an async context to figure out if it has completed or // not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values // but sadly there's a good bit of boilerplate here. @@ -880,17 +925,12 @@ use futures_util::{dummy_waker, Joiner, OptionalSelector, Selector, SelectorOutp ///``` pub async fn process_events_async< 'a, - UL: Deref, - CF: Deref, - T: Deref, - F: Deref, + UL: UtxoLookup, G: Deref>, - L: Deref, - P: Deref, + L: Logger, EventHandlerFuture: core::future::Future>, EventHandler: Fn(Event) -> EventHandlerFuture, - ES: Deref, - M: Deref::Signer, CF, T, F, L, P, ES>>, + M: Deref, CM: Deref, OM: Deref, PGS: Deref>, @@ -898,9 +938,19 @@ pub async fn process_events_async< PM: Deref, LM: Deref, D: Deref, - O: Deref, - K: Deref, - OS: Deref>, + O: OutputSpender, + K: KVStore, + OS: Deref< + Target = OutputSweeper< + ::Broadcaster, + D, + ::FeeEstimator, + ::Filter, + K, + L, + O, + >, + >, S: Deref, SC: for<'b> WriteableScore<'b>, SleepFuture: core::future::Future + core::marker::Unpin, @@ -913,20 +963,12 @@ pub async fn process_events_async< sleeper: Sleeper, mobile_interruptable_platform: bool, fetch_time: FetchTime, ) -> Result<(), lightning::io::Error> where - UL::Target: UtxoLookup, - CF::Target: chain::Filter, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - P::Target: Persist<::Signer>, - ES::Target: EntropySource, + M::Target: AChainMonitor::Signer, Logger = L>, CM::Target: AChannelManager, OM::Target: AOnionMessenger, PM::Target: APeerManager, LM::Target: ALiquidityManager, - O::Target: OutputSpender, D::Target: ChangeDestinationSource, - K::Target: KVStore, { let async_event_handler = |event| { let network_graph = gossip_sync.network_graph(); @@ -970,7 +1012,7 @@ where log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup"); 
channel_manager.get_cm().timer_tick_occurred(); log_trace!(logger, "Rebroadcasting monitor's pending claims on startup"); - chain_monitor.rebroadcast_pending_claims(); + chain_monitor.get_cm().rebroadcast_pending_claims(); let mut last_freshness_call = sleeper(FRESHNESS_TIMER); let mut last_onion_message_handler_call = sleeper(ONION_MESSAGE_HANDLER_TIMER); @@ -979,14 +1021,16 @@ where let mut last_scorer_persist_call = sleeper(SCORER_PERSIST_TIMER); let mut last_rebroadcast_call = sleeper(REBROADCAST_TIMER); let mut last_sweeper_call = sleeper(SWEEPER_TIMER); + let mut last_archive_call = sleeper(FIRST_ARCHIVE_STALE_MONITORS_TIMER); let mut have_pruned = false; let mut have_decayed_scorer = false; + let mut have_archived = false; let mut last_forwards_processing_call = sleeper(batch_delay.get()); loop { channel_manager.get_cm().process_pending_events_async(async_event_handler).await; - chain_monitor.process_pending_events_async(async_event_handler).await; + chain_monitor.get_cm().process_pending_events_async(async_event_handler).await; if let Some(om) = &onion_messenger { om.get_om().process_pending_events_async(async_event_handler).await } @@ -1019,18 +1063,13 @@ where if mobile_interruptable_platform { await_start = Some(sleeper(Duration::from_secs(1))); } - let om_fut = if let Some(om) = onion_messenger.as_ref() { - let fut = om.get_om().get_update_future(); - OptionalSelector { optional_future: Some(fut) } - } else { - OptionalSelector { optional_future: None } - }; - let lm_fut = if let Some(lm) = liquidity_manager.as_ref() { - let fut = lm.get_lm().get_pending_msgs_or_needs_persist_future(); - OptionalSelector { optional_future: Some(fut) } - } else { - OptionalSelector { optional_future: None } - }; + let om_fut: OptionalSelector<_> = + onion_messenger.as_ref().map(|om| om.get_om().get_update_future()).into(); + let lm_fut: OptionalSelector<_> = liquidity_manager + .as_ref() + .map(|lm| lm.get_lm().get_pending_msgs_or_needs_persist_future()) + .into(); + let 
gv_fut: OptionalSelector<_> = gossip_sync.validation_completion_future().into(); let needs_processing = channel_manager.get_cm().needs_pending_htlc_processing(); let sleep_delay = match (needs_processing, mobile_interruptable_platform) { (true, true) => batch_delay.get().min(Duration::from_millis(100)), @@ -1041,12 +1080,17 @@ where let fut = Selector { a: sleeper(sleep_delay), b: channel_manager.get_cm().get_event_or_persistence_needed_future(), - c: chain_monitor.get_update_future(), + c: chain_monitor.get_cm().get_update_future(), d: om_fut, e: lm_fut, + f: gv_fut, }; match fut.await { - SelectorOutput::B | SelectorOutput::C | SelectorOutput::D | SelectorOutput::E => {}, + SelectorOutput::B + | SelectorOutput::C + | SelectorOutput::D + | SelectorOutput::E + | SelectorOutput::F => {}, SelectorOutput::A(exit) => { if exit { break; @@ -1108,11 +1152,31 @@ where log_trace!(logger, "Done persisting ChannelManager."); } - // Note that we want to run a graph prune once not long after startup before - // falling back to our usual hourly prunes. This avoids short-lived clients never - // pruning their network graph. We run once 60 seconds after startup before - // continuing our normal cadence. For RGS, since 60 seconds is likely too long, - // we prune after an initial sync completes. + // Note that we want to archive stale ChannelMonitors and run a network graph prune once + // not long after startup before falling back to their usual infrequent runs. This avoids + // short-lived clients never archiving stale ChannelMonitors or pruning their network + // graph. For network graph pruning, in the case of RGS sync, we run a prune immediately + // after initial sync completes, otherwise we do so on a timer which should be long enough + // to give us a chance to get most of the network graph from our peers. 
+ let archive_timer = if have_archived { + ARCHIVE_STALE_MONITORS_TIMER + } else { + FIRST_ARCHIVE_STALE_MONITORS_TIMER + }; + let archive_timer_elapsed = { + match check_and_reset_sleeper(&mut last_archive_call, || sleeper(archive_timer)) { + Some(false) => true, + Some(true) => break, + None => false, + } + }; + if archive_timer_elapsed { + log_trace!(logger, "Archiving stale ChannelMonitors."); + chain_monitor.get_cm().archive_fully_resolved_channel_monitors(); + have_archived = true; + log_trace!(logger, "Archived stale ChannelMonitors."); + } + let prune_timer = if gossip_sync.prunable_network_graph().is_some() { NETWORK_PRUNE_TIMER } else { @@ -1298,7 +1362,7 @@ where match check_and_reset_sleeper(&mut last_rebroadcast_call, || sleeper(REBROADCAST_TIMER)) { Some(false) => { log_trace!(logger, "Rebroadcasting monitor's pending claims"); - chain_monitor.rebroadcast_pending_claims(); + chain_monitor.get_cm().rebroadcast_pending_claims(); }, Some(true) => break, None => {}, @@ -1359,17 +1423,12 @@ fn check_and_reset_sleeper< /// Async events processor that is based on [`process_events_async`] but allows for [`KVStoreSync`] to be used for /// synchronous background persistence. 
pub async fn process_events_async_with_kv_store_sync< - UL: Deref, - CF: Deref, - T: Deref, - F: Deref, + UL: UtxoLookup, G: Deref>, - L: Deref, - P: Deref, + L: Logger, EventHandlerFuture: core::future::Future>, EventHandler: Fn(Event) -> EventHandlerFuture, - ES: Deref, - M: Deref::Signer, CF, T, F, L, P, ES>>, + M: Deref, CM: Deref, OM: Deref, PGS: Deref>, @@ -1377,9 +1436,19 @@ pub async fn process_events_async_with_kv_store_sync< PM: Deref, LM: Deref, D: Deref, - O: Deref, + O: OutputSpender, K: Deref, - OS: Deref>, + OS: Deref< + Target = OutputSweeperSync< + ::Broadcaster, + D, + ::FeeEstimator, + ::Filter, + K, + L, + O, + >, + >, S: Deref, SC: for<'b> WriteableScore<'b>, SleepFuture: core::future::Future + core::marker::Unpin, @@ -1392,18 +1461,11 @@ pub async fn process_events_async_with_kv_store_sync< sleeper: Sleeper, mobile_interruptable_platform: bool, fetch_time: FetchTime, ) -> Result<(), lightning::io::Error> where - UL::Target: UtxoLookup, - CF::Target: chain::Filter, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - P::Target: Persist<::Signer>, - ES::Target: EntropySource, + M::Target: AChainMonitor::Signer, Logger = L>, CM::Target: AChannelManager, OM::Target: AOnionMessenger, PM::Target: APeerManager, LM::Target: ALiquidityManager, - O::Target: OutputSpender, D::Target: ChangeDestinationSourceSync, K::Target: KVStoreSync, { @@ -1473,21 +1535,11 @@ impl BackgroundProcessor { /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable pub fn start< 'a, - UL: 'static + Deref, - CF: 'static + Deref, - T: 'static + Deref, - F: 'static + Deref + Send, + UL: 'static + UtxoLookup, G: 'static + Deref>, L: 'static + Deref + Send, - P: 'static + Deref, EH: 'static + EventHandler + Send, - ES: 'static + Deref + Send, - M: 'static - + Deref< - Target = ChainMonitor<::Signer, CF, T, F, L, P, ES>, - > - + Send - + Sync, + M: 'static + Deref + Send + Sync, CM: 'static + Deref + Send, OM: 'static + 
Deref + Send, PGS: 'static + Deref> + Send, @@ -1497,28 +1549,34 @@ impl BackgroundProcessor { S: 'static + Deref + Send + Sync, SC: for<'b> WriteableScore<'b>, D: 'static + Deref, - O: 'static + Deref, + O: 'static + OutputSpender, K: 'static + Deref + Send, - OS: 'static + Deref> + Send, + OS: 'static + + Deref< + Target = OutputSweeperSync< + ::Broadcaster, + D, + ::FeeEstimator, + ::Filter, + K, + L, + O, + >, + > + + Send, >( kv_store: K, event_handler: EH, chain_monitor: M, channel_manager: CM, onion_messenger: Option, gossip_sync: GossipSync, peer_manager: PM, liquidity_manager: Option, sweeper: Option, logger: L, scorer: Option, ) -> Self where - UL::Target: 'static + UtxoLookup, - CF::Target: 'static + chain::Filter, - T::Target: 'static + BroadcasterInterface, - F::Target: 'static + FeeEstimator, L::Target: 'static + Logger, - P::Target: 'static + Persist<::Signer>, - ES::Target: 'static + EntropySource, + M::Target: AChainMonitor::Signer, Logger = L>, CM::Target: AChannelManager, OM::Target: AOnionMessenger, PM::Target: APeerManager, LM::Target: ALiquidityManagerSync, D::Target: ChangeDestinationSourceSync, - O::Target: 'static + OutputSpender, K::Target: 'static + KVStoreSync, { let stop_thread = Arc::new(AtomicBool::new(false)); @@ -1553,7 +1611,7 @@ impl BackgroundProcessor { log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup"); channel_manager.get_cm().timer_tick_occurred(); log_trace!(logger, "Rebroadcasting monitor's pending claims on startup"); - chain_monitor.rebroadcast_pending_claims(); + chain_monitor.get_cm().rebroadcast_pending_claims(); let mut last_freshness_call = Instant::now(); let mut last_onion_message_handler_call = Instant::now(); @@ -1562,15 +1620,17 @@ impl BackgroundProcessor { let mut last_scorer_persist_call = Instant::now(); let mut last_rebroadcast_call = Instant::now(); let mut last_sweeper_call = Instant::now(); + let mut last_archive_call = Instant::now(); let mut have_pruned = false; let mut 
have_decayed_scorer = false; + let mut have_archived = false; let mut cur_batch_delay = batch_delay.get(); let mut last_forwards_processing_call = Instant::now(); loop { channel_manager.get_cm().process_pending_events(&event_handler); - chain_monitor.process_pending_events(&event_handler); + chain_monitor.get_cm().process_pending_events(&event_handler); if let Some(om) = &onion_messenger { om.get_om().process_pending_events(&event_handler) }; @@ -1596,28 +1656,18 @@ impl BackgroundProcessor { log_trace!(logger, "Terminating background processor."); break; } - let sleeper = match (onion_messenger.as_ref(), liquidity_manager.as_ref()) { - (Some(om), Some(lm)) => Sleeper::from_four_futures( - &channel_manager.get_cm().get_event_or_persistence_needed_future(), - &chain_monitor.get_update_future(), - &om.get_om().get_update_future(), - &lm.get_lm().get_pending_msgs_or_needs_persist_future(), - ), - (Some(om), None) => Sleeper::from_three_futures( - &channel_manager.get_cm().get_event_or_persistence_needed_future(), - &chain_monitor.get_update_future(), - &om.get_om().get_update_future(), - ), - (None, Some(lm)) => Sleeper::from_three_futures( - &channel_manager.get_cm().get_event_or_persistence_needed_future(), - &chain_monitor.get_update_future(), - &lm.get_lm().get_pending_msgs_or_needs_persist_future(), - ), - (None, None) => Sleeper::from_two_futures( - &channel_manager.get_cm().get_event_or_persistence_needed_future(), - &chain_monitor.get_update_future(), - ), - }; + let om_fut = onion_messenger.as_ref().map(|om| om.get_om().get_update_future()); + let lm_fut = liquidity_manager + .as_ref() + .map(|lm| lm.get_lm().get_pending_msgs_or_needs_persist_future()); + let gv_fut = gossip_sync.validation_completion_future(); + let always_futures = [ + channel_manager.get_cm().get_event_or_persistence_needed_future(), + chain_monitor.get_cm().get_update_future(), + ]; + let futures = always_futures.into_iter().chain(om_fut).chain(lm_fut).chain(gv_fut); + let sleeper = 
Sleeper::from_futures(futures); + let batch_delay = if channel_manager.get_cm().needs_pending_htlc_processing() { batch_delay.get() } else { @@ -1652,11 +1702,26 @@ impl BackgroundProcessor { }); } - // Note that we want to run a graph prune once not long after startup before - // falling back to our usual hourly prunes. This avoids short-lived clients never - // pruning their network graph. We run once 60 seconds after startup before - // continuing our normal cadence. For RGS, since 60 seconds is likely too long, - // we prune after an initial sync completes. + // Note that we want to archive stale ChannelMonitors and run a network graph prune once + // not long after startup before falling back to their usual infrequent runs. This avoids + // short-lived clients never archiving stale ChannelMonitors or pruning their network + // graph. For network graph pruning, in the case of RGS sync, we run a prune immediately + // after initial sync completes, otherwise we do so on a timer which should be long enough + // to give us a chance to get most of the network graph from our peers. 
+ let archive_timer = if have_archived { + ARCHIVE_STALE_MONITORS_TIMER + } else { + FIRST_ARCHIVE_STALE_MONITORS_TIMER + }; + let archive_timer_elapsed = last_archive_call.elapsed() > archive_timer; + if archive_timer_elapsed { + log_trace!(logger, "Archiving stale ChannelMonitors."); + chain_monitor.get_cm().archive_fully_resolved_channel_monitors(); + have_archived = true; + last_archive_call = Instant::now(); + log_trace!(logger, "Archived stale ChannelMonitors."); + } + let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER }; let prune_timer_elapsed = last_prune_call.elapsed() > prune_timer; @@ -1736,7 +1801,7 @@ impl BackgroundProcessor { } if last_rebroadcast_call.elapsed() > REBROADCAST_TIMER { log_trace!(logger, "Rebroadcasting monitor's pending claims"); - chain_monitor.rebroadcast_pending_claims(); + chain_monitor.get_cm().rebroadcast_pending_claims(); last_rebroadcast_call = Instant::now(); } } @@ -1881,6 +1946,26 @@ mod tests { const EVENT_DEADLINE: Duration = Duration::from_millis(5 * (FRESHNESS_TIMER.as_millis() as u64)); + /// Reads a directory and returns only non-`.tmp` files. + /// The file system may return files in any order, and during persistence + /// operations there may be temporary `.tmp` files present. + fn list_monitor_files(dir: &str) -> Vec { + std::fs::read_dir(dir) + .unwrap() + .filter_map(|entry| { + let entry = entry.unwrap(); + let path_str = entry.path().to_str().unwrap().to_lowercase(); + // Skip any .tmp files that may exist during persistence. + // On Windows, ReplaceFileW creates backup files with .TMP (uppercase). 
+ if path_str.ends_with(".tmp") { + None + } else { + Some(entry) + } + }) + .collect() + } + #[derive(Clone, Hash, PartialEq, Eq)] struct TestDescriptor {} impl SocketDescriptor for TestDescriptor { @@ -2362,6 +2447,8 @@ mod tests { )); let best_block = BestBlock::from_network(network); let params = ChainParameters { network, best_block }; + let mut config = UserConfig::default(); + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; let manager = Arc::new(ChannelManager::new( Arc::clone(&fee_estimator), Arc::clone(&chain_monitor), @@ -2372,7 +2459,7 @@ mod tests { Arc::clone(&keys_manager), Arc::clone(&keys_manager), Arc::clone(&keys_manager), - UserConfig::default(), + config, params, genesis_block.header.time, )); @@ -2525,6 +2612,25 @@ mod tests { $node_b.node.get_our_node_id() ); $node_b.node.handle_open_channel($node_a.node.get_our_node_id(), &msg_a); + let events = $node_b.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::OpenChannelRequest { + temporary_channel_id, counterparty_node_id, .. 
+ } => { + $node_b + .node + .accept_inbound_channel( + temporary_channel_id, + counterparty_node_id, + 42, + None, + ) + .unwrap(); + }, + _ => panic!("Unexpected event"), + }; + let msg_b = get_event_msg!( $node_b, MessageSendEvent::SendAcceptChannel, @@ -3025,10 +3131,16 @@ mod tests { let event = receiver.recv_timeout(EVENT_DEADLINE).expect("Events not handled within deadline"); match event { - Event::SpendableOutputs { outputs, channel_id } => { + Event::SpendableOutputs { outputs, channel_id, counterparty_node_id } => { nodes[0] .sweeper - .track_spendable_outputs(outputs, channel_id, false, Some(153)) + .track_spendable_outputs( + outputs, + channel_id, + counterparty_node_id, + false, + Some(153), + ) .unwrap(); }, _ => panic!("Unexpected event: {:?}", event), @@ -3659,4 +3771,107 @@ mod tests { exit_sender.send(()).unwrap(); t1.await.unwrap().unwrap(); } + + #[test] + fn test_monitor_archive() { + let (persist_dir, nodes) = create_nodes(2, "test_monitor_archive"); + // Open a channel, but don't confirm it so that it prunes immediately on FC. 
+ open_channel!(nodes[0], nodes[1], 100000); + + let data_dir = nodes[1].kv_store.get_data_dir(); + let persister = Arc::new(Persister::new(data_dir)); + let event_handler = |_: _| Ok(()); + let bp = BackgroundProcessor::start( + persister, + event_handler, + Arc::clone(&nodes[1].chain_monitor), + Arc::clone(&nodes[1].node), + Some(Arc::clone(&nodes[1].messenger)), + nodes[1].p2p_gossip_sync(), + Arc::clone(&nodes[1].peer_manager), + Some(Arc::clone(&nodes[1].liquidity_manager)), + Some(Arc::clone(&nodes[1].sweeper)), + Arc::clone(&nodes[1].logger), + Some(Arc::clone(&nodes[1].scorer)), + ); + + let dir = format!("{}_persister_1/monitors", &persist_dir); + let mut mons = list_monitor_files(&dir); + assert_eq!(mons.len(), 1); + let mon = mons.pop().unwrap(); + + // Because the channel wasn't funded, we'll archive the ChannelMonitor immedaitely after + // its force-closed (at least on node B, which didn't put their money into it). + nodes[1].node.force_close_all_channels_broadcasting_latest_txn("".to_owned()); + loop { + let mons = list_monitor_files(&dir); + if mons.is_empty() { + break; + } + assert_eq!(mons.len(), 1); + assert_eq!(mons[0].path(), mon.path()); + } + + bp.stop().unwrap(); + } + + #[tokio::test] + #[cfg(not(c_bindings))] + async fn test_monitor_archive_async() { + let (persist_dir, nodes) = create_nodes(2, "test_monitor_archive_async"); + // Open a channel, but don't confirm it so that it prunes immediately on FC. 
+ open_channel!(nodes[0], nodes[1], 100000); + + let kv_store = KVStoreSyncWrapper(Arc::clone(&nodes[0].kv_store)); + let sweeper_async: &'static OutputSweeper<_, _, _, _, _, _, _> = unsafe { + &*(nodes[0].sweeper.sweeper_async() as *const OutputSweeper<_, _, _, _, _, _, _>) + as &'static OutputSweeper<_, _, _, _, _, _, _> + }; + let (exit_sender, exit_receiver) = tokio::sync::watch::channel(()); + let bp_future = tokio::spawn(super::process_events_async( + kv_store, + move |_: Event| async move { Ok(()) }, + Arc::clone(&nodes[1].chain_monitor), + Arc::clone(&nodes[1].node), + crate::NO_ONION_MESSENGER, + nodes[1].no_gossip_sync(), + Arc::clone(&nodes[1].peer_manager), + crate::NO_LIQUIDITY_MANAGER, + Some(sweeper_async), + Arc::clone(&nodes[1].logger), + Some(Arc::clone(&nodes[1].scorer)), + move |dur: Duration| { + let mut exit_receiver = exit_receiver.clone(); + Box::pin(async move { + tokio::select! { + _ = tokio::time::sleep(dur) => false, + _ = exit_receiver.changed() => true, + } + }) + }, + false, + || Some(Duration::ZERO), + )); + + let dir = format!("{}_persister_1/monitors", &persist_dir); + let mut mons = list_monitor_files(&dir); + assert_eq!(mons.len(), 1); + let mon = mons.pop().unwrap(); + + // Because the channel wasn't funded, we'll archive the ChannelMonitor immedaitely after + // its force-closed (at least on node B, which didn't put their money into it). 
+ nodes[1].node.force_close_all_channels_broadcasting_latest_txn("".to_owned()); + loop { + let mons = list_monitor_files(&dir); + if mons.is_empty() { + break; + } + assert_eq!(mons.len(), 1); + assert_eq!(mons[0].path(), mon.path()); + tokio::task::yield_now().await; + } + + exit_sender.send(()).unwrap(); + bp_future.await.unwrap().unwrap(); + } } diff --git a/lightning-block-sync/Cargo.toml b/lightning-block-sync/Cargo.toml index 51b19e3901e..97f199963ac 100644 --- a/lightning-block-sync/Cargo.toml +++ b/lightning-block-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-block-sync" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Jeffrey Czyz", "Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs index 0fe221b9231..477e2785782 100644 --- a/lightning-block-sync/src/gossip.rs +++ b/lightning-block-sync/src/gossip.rs @@ -2,23 +2,21 @@ //! current UTXO set. This module defines an implementation of the LDK API required to do so //! against a [`BlockSource`] which implements a few additional methods for accessing the UTXO set. 
-use crate::{AsyncBlockSourceResult, BlockData, BlockSource, BlockSourceError}; +use crate::{BlockData, BlockSource, BlockSourceError, BlockSourceResult}; use bitcoin::block::Block; use bitcoin::constants::ChainHash; use bitcoin::hash_types::BlockHash; use bitcoin::transaction::{OutPoint, TxOut}; -use lightning::ln::peer_handler::APeerManager; -use lightning::routing::gossip::{NetworkGraph, P2PGossipSync}; use lightning::routing::utxo::{UtxoFuture, UtxoLookup, UtxoLookupError, UtxoResult}; -use lightning::util::logger::Logger; use lightning::util::native_async::FutureSpawner; +use lightning::util::wakers::Notifier; use std::collections::VecDeque; use std::future::Future; use std::ops::Deref; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::{Arc, Mutex}; use std::task::Poll; @@ -35,11 +33,13 @@ pub trait UtxoSource: BlockSource + 'static { /// for gossip validation. fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash>; + ) -> impl Future> + Send + 'a; /// Returns true if the given output has *not* been spent, i.e. is a member of the current UTXO /// set. - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool>; + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a; } #[cfg(feature = "tokio")] @@ -47,42 +47,49 @@ pub trait UtxoSource: BlockSource + 'static { pub struct TokioSpawner; #[cfg(feature = "tokio")] impl FutureSpawner for TokioSpawner { - fn spawn + Send + 'static>(&self, future: T) { - tokio::spawn(future); + type E = tokio::task::JoinError; + type SpawnedFutureResult = tokio::task::JoinHandle; + fn spawn + Send + 'static>( + &self, future: F, + ) -> Self::SpawnedFutureResult { + tokio::spawn(future) } } /// A trivial future which joins two other futures and polls them at the same time, returning only /// once both complete. 
pub(crate) struct Joiner< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, + 'a, + A: Future), BlockSourceError>>, + B: Future>, > { - pub a: A, - pub b: B, + pub a: Pin<&'a mut A>, + pub b: Pin<&'a mut B>, a_res: Option<(BlockHash, Option)>, b_res: Option, } impl< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, - > Joiner + 'a, + A: Future), BlockSourceError>>, + B: Future>, + > Joiner<'a, A, B> { - fn new(a: A, b: B) -> Self { + fn new(a: Pin<&'a mut A>, b: Pin<&'a mut B>) -> Self { Self { a, b, a_res: None, b_res: None } } } impl< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, - > Future for Joiner + 'a, + A: Future), BlockSourceError>>, + B: Future>, + > Future for Joiner<'a, A, B> { type Output = Result<((BlockHash, Option), BlockHash), BlockSourceError>; fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll { if self.a_res.is_none() { - match Pin::new(&mut self.a).poll(ctx) { + match self.a.as_mut().poll(ctx) { Poll::Ready(res) => { if let Ok(ok) = res { self.a_res = Some(ok); @@ -94,7 +101,7 @@ impl< } } if self.b_res.is_none() { - match Pin::new(&mut self.b).poll(ctx) { + match self.b.as_mut().poll(ctx) { Poll::Ready(res) => { if let Ok(ok) = res { self.b_res = Some(ok); @@ -122,46 +129,28 @@ impl< /// value of 1024 should more than suffice), and ensure you have sufficient file descriptors /// available on both Bitcoin Core and your LDK application for each request to hold its own /// connection. 
-pub struct GossipVerifier< - S: FutureSpawner, - Blocks: Deref + Send + Sync + 'static + Clone, - L: Deref + Send + Sync + 'static, -> where +pub struct GossipVerifier +where Blocks::Target: UtxoSource, - L::Target: Logger, { source: Blocks, - peer_manager_wake: Arc, - gossiper: Arc>, Arc, L>>, spawn: S, block_cache: Arc>>, } const BLOCK_CACHE_SIZE: usize = 5; -impl - GossipVerifier +impl GossipVerifier where Blocks::Target: UtxoSource, - L::Target: Logger, { - /// Constructs a new [`GossipVerifier`]. + /// Constructs a new [`GossipVerifier`] for use in a [`P2PGossipSync`]. /// - /// This is expected to be given to a [`P2PGossipSync`] (initially constructed with `None` for - /// the UTXO lookup) via [`P2PGossipSync::add_utxo_lookup`]. - pub fn new( - source: Blocks, spawn: S, gossiper: Arc>, Arc, L>>, - peer_manager: APM, - ) -> Self - where - APM::Target: APeerManager, - { - let peer_manager_wake = Arc::new(move || peer_manager.as_ref().process_events()); + /// [`P2PGossipSync`]: lightning::routing::gossip::P2PGossipSync + pub fn new(source: Blocks, spawn: S) -> Self { Self { source, spawn, - gossiper, - peer_manager_wake, block_cache: Arc::new(Mutex::new(VecDeque::with_capacity(BLOCK_CACHE_SIZE))), } } @@ -200,10 +189,12 @@ where } } - let ((_, tip_height_opt), block_hash) = - Joiner::new(source.get_best_block(), source.get_block_hash_by_height(block_height)) - .await - .map_err(|_| UtxoLookupError::UnknownTx)?; + let ((_, tip_height_opt), block_hash) = Joiner::new( + pin!(source.get_best_block()), + pin!(source.get_block_hash_by_height(block_height)), + ) + .await + .map_err(|_| UtxoLookupError::UnknownTx)?; if let Some(tip_height) = tip_height_opt { // If the block doesn't yet have five confirmations, error out. 
// @@ -248,35 +239,18 @@ where } } -impl Deref - for GossipVerifier -where - Blocks::Target: UtxoSource, - L::Target: Logger, -{ - type Target = Self; - fn deref(&self) -> &Self { - self - } -} - -impl UtxoLookup - for GossipVerifier +impl UtxoLookup for GossipVerifier where Blocks::Target: UtxoSource, - L::Target: Logger, { - fn get_utxo(&self, _chain_hash: &ChainHash, short_channel_id: u64) -> UtxoResult { - let res = UtxoFuture::new(); + fn get_utxo(&self, _chain_hash: &ChainHash, scid: u64, notifier: Arc) -> UtxoResult { + let res = UtxoFuture::new(notifier); let fut = res.clone(); let source = self.source.clone(); - let gossiper = Arc::clone(&self.gossiper); let block_cache = Arc::clone(&self.block_cache); - let pmw = Arc::clone(&self.peer_manager_wake); - self.spawn.spawn(async move { - let res = Self::retrieve_utxo(source, block_cache, short_channel_id).await; - fut.resolve(gossiper.network_graph(), &*gossiper, res); - (pmw)(); + let _not_polled = self.spawn.spawn(async move { + let res = Self::retrieve_utxo(source, block_cache, scid).await; + fut.resolve(res); }); UtxoResult::Async(res) } diff --git a/lightning-block-sync/src/lib.rs b/lightning-block-sync/src/lib.rs index 8656ba6ec6b..02593047658 100644 --- a/lightning-block-sync/src/lib.rs +++ b/lightning-block-sync/src/lib.rs @@ -53,7 +53,6 @@ use lightning::chain::{BestBlock, Listen}; use std::future::Future; use std::ops::Deref; -use std::pin::Pin; /// Abstract type for retrieving block headers and data. pub trait BlockSource: Sync + Send { @@ -65,12 +64,13 @@ pub trait BlockSource: Sync + Send { /// when `height_hint` is `None`. fn get_header<'a>( &'a self, header_hash: &'a BlockHash, height_hint: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData>; + ) -> impl Future> + Send + 'a; /// Returns the block for a given hash. A headers-only block source should return a `Transient` /// error. 
- fn get_block<'a>(&'a self, header_hash: &'a BlockHash) - -> AsyncBlockSourceResult<'a, BlockData>; + fn get_block<'a>( + &'a self, header_hash: &'a BlockHash, + ) -> impl Future> + Send + 'a; /// Returns the hash of the best block and, optionally, its height. /// @@ -78,18 +78,14 @@ pub trait BlockSource: Sync + Send { /// to allow for a more efficient lookup. /// /// [`get_header`]: Self::get_header - fn get_best_block(&self) -> AsyncBlockSourceResult<'_, (BlockHash, Option)>; + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a; } /// Result type for `BlockSource` requests. pub type BlockSourceResult = Result; -// TODO: Replace with BlockSourceResult once `async` trait functions are supported. For details, -// see: https://areweasyncyet.rs. -/// Result type for asynchronous `BlockSource` requests. -pub type AsyncBlockSourceResult<'a, T> = - Pin> + 'a + Send>>; - /// Error type for `BlockSource` requests. /// /// Transient errors may be resolved when re-polling, but no attempt will be made to re-poll on diff --git a/lightning-block-sync/src/poll.rs b/lightning-block-sync/src/poll.rs index 843cc961899..13e0403c3b6 100644 --- a/lightning-block-sync/src/poll.rs +++ b/lightning-block-sync/src/poll.rs @@ -1,14 +1,12 @@ //! Adapters that make one or more [`BlockSource`]s simpler to poll for new chain tip transitions. -use crate::{ - AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, BlockSourceError, - BlockSourceResult, -}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use lightning::chain::BestBlock; +use std::future::Future; use std::ops::Deref; /// The `Poll` trait defines behavior for polling block sources for a chain tip and retrieving @@ -22,17 +20,17 @@ pub trait Poll { /// Returns a chain tip in terms of its relationship to the provided chain tip. 
fn poll_chain_tip<'a>( &'a self, best_known_chain_tip: ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ChainTip>; + ) -> impl Future> + Send + 'a; /// Returns the header that preceded the given header in the chain. fn look_up_previous_header<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlockHeader>; + ) -> impl Future> + Send + 'a; /// Returns the block associated with the given header. fn fetch_block<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlock>; + ) -> impl Future> + Send + 'a; } /// A chain tip relative to another chain tip in terms of block hash and chainwork. @@ -217,8 +215,8 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll { fn poll_chain_tip<'a>( &'a self, best_known_chain_tip: ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ChainTip> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let (block_hash, height) = self.block_source.get_best_block().await?; if block_hash == best_known_chain_tip.header.block_hash() { return Ok(ChainTip::Common); @@ -231,13 +229,13 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll } else { Ok(ChainTip::Worse(chain_tip)) } - }) + } } fn look_up_previous_header<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlockHeader> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { if header.height == 0 { return Err(BlockSourceError::persistent("genesis block reached")); } @@ -252,15 +250,13 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll header.check_builds_on(&previous_header, self.network)?; Ok(previous_header) - }) + } } fn fetch_block<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlock> { - Box::pin(async move { - self.block_source.get_block(&header.block_hash).await?.validate(header.block_hash) - }) + ) -> impl Future> + Send + 'a { + async move { 
self.block_source.get_block(&header.block_hash).await?.validate(header.block_hash) } } } diff --git a/lightning-block-sync/src/rest.rs b/lightning-block-sync/src/rest.rs index 1f79ab4a0b0..619981bb4d0 100644 --- a/lightning-block-sync/src/rest.rs +++ b/lightning-block-sync/src/rest.rs @@ -4,13 +4,14 @@ use crate::convert::GetUtxosResponse; use crate::gossip::UtxoSource; use crate::http::{BinaryResponse, HttpClient, HttpEndpoint, JsonResponse}; -use crate::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::OutPoint; use std::convert::TryFrom; use std::convert::TryInto; +use std::future::Future; use std::sync::Mutex; /// A simple REST client for requesting resources using HTTP `GET`. @@ -49,49 +50,51 @@ impl RestClient { impl BlockSource for RestClient { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("headers/1/{}.json", header_hash.to_string()); Ok(self.request_resource::(&resource_path).await?) - }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("block/{}.bin", header_hash.to_string()); Ok(BlockData::FullBlock( self.request_resource::(&resource_path).await?, )) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin( - async move { Ok(self.request_resource::("chaininfo.json").await?) }, - ) + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { Ok(self.request_resource::("chaininfo.json").await?) 
} } } impl UtxoSource for RestClient { fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("blockhashbyheight/{}.bin", block_height); Ok(self.request_resource::(&resource_path).await?) - }) + } } - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool> { - Box::pin(async move { + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("getutxos/{}-{}.json", outpoint.txid.to_string(), outpoint.vout); let utxo_result = self.request_resource::(&resource_path).await?; Ok(utxo_result.hit_bitmap_nonempty) - }) + } } } diff --git a/lightning-block-sync/src/rpc.rs b/lightning-block-sync/src/rpc.rs index 3df50a2267b..d851ba2ccf0 100644 --- a/lightning-block-sync/src/rpc.rs +++ b/lightning-block-sync/src/rpc.rs @@ -3,7 +3,7 @@ use crate::gossip::UtxoSource; use crate::http::{HttpClient, HttpEndpoint, HttpError, JsonResponse}; -use crate::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::OutPoint; @@ -16,6 +16,7 @@ use std::convert::TryFrom; use std::convert::TryInto; use std::error::Error; use std::fmt; +use std::future::Future; use std::sync::atomic::{AtomicUsize, Ordering}; /// An error returned by the RPC server. @@ -135,47 +136,51 @@ impl RpcClient { impl BlockSource for RpcClient { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let header_hash = serde_json::json!(header_hash.to_string()); Ok(self.call_method("getblockheader", &[header_hash]).await?) 
- }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let header_hash = serde_json::json!(header_hash.to_string()); let verbosity = serde_json::json!(0); Ok(BlockData::FullBlock(self.call_method("getblock", &[header_hash, verbosity]).await?)) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin(async move { Ok(self.call_method("getblockchaininfo", &[]).await?) }) + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { Ok(self.call_method("getblockchaininfo", &[]).await?) } } } impl UtxoSource for RpcClient { fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let height_param = serde_json::json!(block_height); Ok(self.call_method("getblockhash", &[height_param]).await?) 
- }) + } } - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool> { - Box::pin(async move { + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a { + async move { let txid_param = serde_json::json!(outpoint.txid.to_string()); let vout_param = serde_json::json!(outpoint.vout); let include_mempool = serde_json::json!(false); let utxo_opt: serde_json::Value = self.call_method("gettxout", &[txid_param, vout_param, include_mempool]).await?; Ok(!utxo_opt.is_null()) - }) + } } } diff --git a/lightning-block-sync/src/test_utils.rs b/lightning-block-sync/src/test_utils.rs index d307c4506eb..40788e4d08c 100644 --- a/lightning-block-sync/src/test_utils.rs +++ b/lightning-block-sync/src/test_utils.rs @@ -1,7 +1,6 @@ use crate::poll::{Validate, ValidatedBlockHeader}; use crate::{ - AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, BlockSourceError, - UnboundedCache, + BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceResult, UnboundedCache, }; use bitcoin::block::{Block, Header, Version}; @@ -17,6 +16,7 @@ use lightning::chain::BestBlock; use std::cell::RefCell; use std::collections::VecDeque; +use std::future::Future; #[derive(Default)] pub struct Blockchain { @@ -141,8 +141,8 @@ impl Blockchain { impl BlockSource for Blockchain { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height_hint: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { if self.without_headers { return Err(BlockSourceError::persistent("header not found")); } @@ -158,13 +158,13 @@ impl BlockSource for Blockchain { } } Err(BlockSourceError::transient("header not found")) - }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { for (height, block) in self.blocks.iter().enumerate() 
{ if block.header.block_hash() == *header_hash { if let Some(without_blocks) = &self.without_blocks { @@ -181,11 +181,13 @@ impl BlockSource for Blockchain { } } Err(BlockSourceError::transient("block not found")) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin(async move { + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { match self.blocks.last() { None => Err(BlockSourceError::transient("empty chain")), Some(block) => { @@ -193,7 +195,7 @@ impl BlockSource for Blockchain { Ok((block.block_hash(), Some(height))) }, } - }) + } } } diff --git a/lightning-custom-message/Cargo.toml b/lightning-custom-message/Cargo.toml index ba13aef35c4..854127f9175 100644 --- a/lightning-custom-message/Cargo.toml +++ b/lightning-custom-message/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-custom-message" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Jeffrey Czyz"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" diff --git a/lightning-dns-resolver/Cargo.toml b/lightning-dns-resolver/Cargo.toml index 44caf273ff2..5299b2f4676 100644 --- a/lightning-dns-resolver/Cargo.toml +++ b/lightning-dns-resolver/Cargo.toml @@ -11,7 +11,7 @@ rust-version = "1.75" [dependencies] lightning = { version = "0.3.0", path = "../lightning", default-features = false } -lightning-types = { version = "0.3.0", path = "../lightning-types", default-features = false } +lightning-types = { version = "0.4.0", path = "../lightning-types", default-features = false } dnssec-prover = { version = "0.6", default-features = false, features = [ "std", "tokio" ] } tokio = { version = "1.0", default-features = false, features = ["rt"] } diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index 125d4316d12..e9578844cf8 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -6,7 +6,6 @@ 
#![deny(rustdoc::private_intra_doc_links)] use std::net::SocketAddr; -use std::ops::Deref; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; @@ -30,10 +29,7 @@ const WE_REQUIRE_32_OR_64_BIT_USIZE: u8 = 424242; /// A resolver which implements [`DNSResolverMessageHandler`] and replies to [`DNSSECQuery`] /// messages with with [`DNSSECProof`]s. -pub struct OMDomainResolver -where - PH::Target: DNSResolverMessageHandler, -{ +pub struct OMDomainResolver { state: Arc, proof_handler: Option, runtime_handle: Mutex>, @@ -56,10 +52,7 @@ impl OMDomainResolver { } } -impl OMDomainResolver -where - PH::Target: DNSResolverMessageHandler, -{ +impl OMDomainResolver { /// Creates a new [`OMDomainResolver`] given the [`SocketAddr`] of a DNS resolver listening on /// TCP (e.g. 8.8.8.8:53, 1.1.1.1:53 or your local DNS resolver). /// @@ -103,10 +96,7 @@ where } } -impl DNSResolverMessageHandler for OMDomainResolver -where - PH::Target: DNSResolverMessageHandler, -{ +impl DNSResolverMessageHandler for OMDomainResolver { fn handle_dnssec_proof(&self, proof: DNSSECProof, context: DNSResolverContext) { if let Some(proof_handler) = &self.proof_handler { proof_handler.handle_dnssec_proof(proof, context); @@ -169,12 +159,12 @@ mod test { use lightning::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, Init, OnionMessageHandler, }; - use lightning::ln::peer_handler::IgnoringMessageHandler; use lightning::offers::offer::Offer; use lightning::onion_message::dns_resolution::{HumanReadableName, OMNameResolver}; use lightning::onion_message::messenger::{ AOnionMessenger, Destination, MessageRouter, OnionMessagePath, OnionMessenger, }; + use lightning::routing::router::DEFAULT_PAYMENT_DUMMY_HOPS; use lightning::sign::{KeysManager, NodeSigner, ReceiveAuthKey, Recipient}; use lightning::types::features::InitFeatures; use lightning::types::payment::PaymentHash; @@ -183,7 +173,6 @@ mod test { use lightning::expect_payment_claimed; use 
lightning_types::string::UntrustedString; - use std::ops::Deref; use std::sync::Mutex; use std::time::{Duration, Instant, SystemTime}; @@ -195,12 +184,6 @@ mod test { eprintln!("{:<8} {}", self.node, record); } } - impl Deref for TestLogger { - type Target = TestLogger; - fn deref(&self) -> &TestLogger { - self - } - } struct DummyNodeLookup {} impl NodeIdLookUp for DummyNodeLookup { @@ -208,12 +191,6 @@ mod test { None } } - impl Deref for DummyNodeLookup { - type Target = DummyNodeLookup; - fn deref(&self) -> &DummyNodeLookup { - self - } - } struct DirectlyConnectedRouter {} impl MessageRouter for DirectlyConnectedRouter { @@ -236,17 +213,12 @@ mod test { recipient, local_node_receive_key, context, + false, &keys, secp_ctx, )]) } } - impl Deref for DirectlyConnectedRouter { - type Target = DirectlyConnectedRouter; - fn deref(&self) -> &DirectlyConnectedRouter { - self - } - } struct URIResolver { resolved_uri: Mutex>, @@ -345,6 +317,7 @@ mod test { payer_id, receive_key, query_context, + false, &*payer_keys, &secp_ctx, ); @@ -419,6 +392,12 @@ mod test { let updates = get_htlc_update_msgs(&nodes[0], &payee_id); nodes[1].node.handle_update_add_htlc(payer_id, &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + for _ in 0..DEFAULT_PAYMENT_DUMMY_HOPS { + assert!(nodes[1].node.needs_pending_htlc_processing()); + nodes[1].node.process_pending_htlc_forwards(); + } + expect_and_process_pending_htlcs(&nodes[1], false); let claimable_events = nodes[1].node.get_and_clear_pending_events(); diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml index f92d8b999df..2b5d570f43f 100644 --- a/lightning-invoice/Cargo.toml +++ b/lightning-invoice/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lightning-invoice" description = "Data structures to parse and serialize BOLT11 lightning invoices" -version = "0.34.0+git" +version = "0.35.0+git" authors = ["Sebastian Geisler "] documentation = 
"https://docs.rs/lightning-invoice/" license = "MIT OR Apache-2.0" @@ -20,7 +20,7 @@ std = [] [dependencies] bech32 = { version = "0.11.0", default-features = false } -lightning-types = { version = "0.3.0", path = "../lightning-types", default-features = false } +lightning-types = { version = "0.4.0", path = "../lightning-types", default-features = false } serde = { version = "1.0", optional = true, default-features = false, features = ["alloc"] } bitcoin = { version = "0.32.4", default-features = false, features = ["secp-recovery"] } diff --git a/lightning-invoice/src/de.rs b/lightning-invoice/src/de.rs index 0747015a457..f1bbe29440a 100644 --- a/lightning-invoice/src/de.rs +++ b/lightning-invoice/src/de.rs @@ -16,7 +16,7 @@ use crate::Bolt11Bech32; use bitcoin::hashes::sha256; use bitcoin::hashes::Hash; use bitcoin::{PubkeyHash, ScriptHash, WitnessVersion}; -use lightning_types::payment::PaymentSecret; +use lightning_types::payment::{PaymentHash, PaymentSecret}; use lightning_types::routing::{RouteHint, RouteHintHop, RoutingFees}; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; @@ -89,6 +89,18 @@ impl FromBase32 for PaymentSecret { } } +impl FromBase32 for PaymentHash { + type Err = Bolt11ParseError; + + fn from_base32(field_data: &[Fe32]) -> Result { + if field_data.len() != 52 { + return Err(Bolt11ParseError::InvalidSliceLength(field_data.len(), 52, "PaymentHash")); + } + let data_bytes = <[u8; 32]>::from_base32(field_data)?; + Ok(PaymentHash(data_bytes)) + } +} + impl FromBase32 for Bolt11InvoiceFeatures { type Err = Bolt11ParseError; @@ -540,7 +552,7 @@ impl FromBase32 for TaggedField { match tag.to_u8() { constants::TAG_PAYMENT_HASH => { - Ok(TaggedField::PaymentHash(Sha256::from_base32(field_data)?)) + Ok(TaggedField::PaymentHash(PaymentHash::from_base32(field_data)?)) }, constants::TAG_DESCRIPTION => { Ok(TaggedField::Description(Description::from_base32(field_data)?)) @@ -1068,8 +1080,9 @@ mod test { use crate::TaggedField::*; use 
crate::{ Bolt11InvoiceSignature, Currency, PositiveTimestamp, RawBolt11Invoice, RawDataPart, - RawHrp, Sha256, SiPrefix, SignedRawBolt11Invoice, + RawHrp, SiPrefix, SignedRawBolt11Invoice, }; + use bitcoin::hex::FromHex; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; use lightning_types::features::Bolt11InvoiceFeatures; @@ -1077,45 +1090,51 @@ mod test { let expected_features = Bolt11InvoiceFeatures::from_le_bytes(vec![0, 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8]); let invoice_str = "lnbc25m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5vdhkven9v5sxyetpdeessp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygs9q5sqqqqqqqqqqqqqqqpqsq67gye39hfg3zd8rgc80k32tvy9xk2xunwm5lzexnvpx6fd77en8qaq424dxgt56cag2dpt359k3ssyhetktkpqh24jqnjyw6uqd08sgptq44qu"; - let invoice = - SignedRawBolt11Invoice { - raw_invoice: RawBolt11Invoice { - hrp: RawHrp { - currency: Currency::Bitcoin, - raw_amount: Some(25), - si_prefix: Some(SiPrefix::Milli), - }, - data: RawDataPart { - timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), - tagged_fields: vec ! [ - PaymentHash(Sha256(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap())).into(), + let invoice = SignedRawBolt11Invoice { + raw_invoice: RawBolt11Invoice { + hrp: RawHrp { + currency: Currency::Bitcoin, + raw_amount: Some(25), + si_prefix: Some(SiPrefix::Milli), + }, + data: RawDataPart { + timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), + tagged_fields: vec ! 
[ + crate::TaggedField::PaymentHash(crate::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) + .into(), Description(crate::Description::new("coffee beans".to_owned()).unwrap()).into(), PaymentSecret(crate::PaymentSecret([17; 32])).into(), Features(expected_features).into()], - }, }, - hash: [ - 0xb1, 0x96, 0x46, 0xc3, 0xbc, 0x56, 0x76, 0x1d, 0x20, 0x65, 0x6e, 0x0e, 0x32, - 0xec, 0xd2, 0x69, 0x27, 0xb7, 0x62, 0x6e, 0x2a, 0x8b, 0xe6, 0x97, 0x71, 0x9f, - 0xf8, 0x7e, 0x44, 0x54, 0x55, 0xb9, - ], - signature: Bolt11InvoiceSignature( - RecoverableSignature::from_compact( - &[ - 0xd7, 0x90, 0x4c, 0xc4, 0xb7, 0x4a, 0x22, 0x26, 0x9c, 0x68, 0xc1, 0xdf, - 0x68, 0xa9, 0x6c, 0x21, 0x4d, 0x65, 0x1b, 0x93, 0x76, 0xe9, 0xf1, 0x64, - 0xd3, 0x60, 0x4d, 0xa4, 0xb7, 0xde, 0xcc, 0xce, 0x0e, 0x82, 0xaa, 0xab, - 0x4c, 0x85, 0xd3, 0x58, 0xea, 0x14, 0xd0, 0xae, 0x34, 0x2d, 0xa3, 0x08, - 0x12, 0xf9, 0x5d, 0x97, 0x60, 0x82, 0xea, 0xac, 0x81, 0x39, 0x11, 0xda, - 0xe0, 0x1a, 0xf3, 0xc1, - ], - RecoveryId::from_i32(1).unwrap(), - ) - .unwrap(), - ), - }; + }, + hash: [ + 0xb1, 0x96, 0x46, 0xc3, 0xbc, 0x56, 0x76, 0x1d, 0x20, 0x65, 0x6e, 0x0e, 0x32, 0xec, + 0xd2, 0x69, 0x27, 0xb7, 0x62, 0x6e, 0x2a, 0x8b, 0xe6, 0x97, 0x71, 0x9f, 0xf8, 0x7e, + 0x44, 0x54, 0x55, 0xb9, + ], + signature: Bolt11InvoiceSignature( + RecoverableSignature::from_compact( + &[ + 0xd7, 0x90, 0x4c, 0xc4, 0xb7, 0x4a, 0x22, 0x26, 0x9c, 0x68, 0xc1, 0xdf, + 0x68, 0xa9, 0x6c, 0x21, 0x4d, 0x65, 0x1b, 0x93, 0x76, 0xe9, 0xf1, 0x64, + 0xd3, 0x60, 0x4d, 0xa4, 0xb7, 0xde, 0xcc, 0xce, 0x0e, 0x82, 0xaa, 0xab, + 0x4c, 0x85, 0xd3, 0x58, 0xea, 0x14, 0xd0, 0xae, 0x34, 0x2d, 0xa3, 0x08, + 0x12, 0xf9, 0x5d, 0x97, 0x60, 0x82, 0xea, 0xac, 0x81, 0x39, 0x11, 0xda, + 0xe0, 0x1a, 0xf3, 0xc1, + ], + RecoveryId::from_i32(1).unwrap(), + ) + .unwrap(), + ), + }; assert_eq!(invoice_str, invoice.to_string()); assert_eq!(invoice_str.parse(), 
Ok(invoice)); } @@ -1125,8 +1144,9 @@ mod test { use crate::TaggedField::*; use crate::{ Bolt11InvoiceSignature, Currency, PositiveTimestamp, RawBolt11Invoice, RawDataPart, - RawHrp, Sha256, SignedRawBolt11Invoice, + RawHrp, SignedRawBolt11Invoice, }; + use bitcoin::hex::FromHex; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; assert_eq!( @@ -1143,9 +1163,16 @@ mod test { data: RawDataPart { timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), tagged_fields: vec ! [ - PaymentHash(Sha256(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap())).into(), + crate::TaggedField::PaymentHash(crate::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) + .into(), Description( crate::Description::new( "Please consider supporting this project".to_owned() @@ -1289,9 +1316,10 @@ mod test { use crate::TaggedField::*; use crate::{ Bolt11Invoice, Bolt11InvoiceFeatures, Bolt11InvoiceSignature, Currency, - PositiveTimestamp, RawBolt11Invoice, RawDataPart, RawHrp, RawTaggedField, Sha256, + PositiveTimestamp, RawBolt11Invoice, RawDataPart, RawHrp, RawTaggedField, SignedRawBolt11Invoice, }; + use bitcoin::hex::FromHex; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; use bitcoin::secp256k1::PublicKey; use lightning_types::routing::{RouteHint, RouteHintHop, RoutingFees}; @@ -1310,10 +1338,13 @@ mod test { } // Invoice fields - let payment_hash = sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102", - ) - .unwrap(); + let payment_hash = crate::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex("0001020304050607080900010203040506070809000102030405060708090102") + .unwrap(), + ) + .unwrap(), + ); let description = "A".repeat(639); let fallback_addr = crate::Fallback::SegWitProgram { version: bitcoin::WitnessVersion::V0, @@ -1346,7 
+1377,7 @@ mod test { data: RawDataPart { timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), tagged_fields: vec![ - PaymentHash(Sha256(payment_hash)).into(), + crate::TaggedField::PaymentHash(payment_hash).into(), Description(crate::Description::new(description).unwrap()).into(), PayeePubKey(crate::PayeePubKey(payee_pk)).into(), ExpiryTime(crate::ExpiryTime(std::time::Duration::from_secs(u64::MAX))).into(), @@ -1414,4 +1445,18 @@ mod test { assert!(parse_is_code_length_err(&too_long)); assert!(!parse_is_code_length_err(&too_long[..too_long.len() - 1])); } + + #[test] + fn test_payment_hash_from_base32_invalid_len() { + use crate::PaymentHash; + + // PaymentHash must be 52 base32 characters (32 bytes). + // Test with 51 characters (too short). + let input = vec![Fe32::try_from(0).unwrap(); 51]; + assert!(PaymentHash::from_base32(&input).is_err()); + + // Test with 53 characters (too long). + let input = vec![Fe32::try_from(0).unwrap(); 53]; + assert!(PaymentHash::from_base32(&input).is_err()); + } } diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index 0dbb3bdb0e8..4ee9acb5f27 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -40,7 +40,6 @@ use bitcoin::secp256k1::ecdsa::RecoverableSignature; use bitcoin::secp256k1::PublicKey; use bitcoin::secp256k1::{Message, Secp256k1}; -use alloc::boxed::Box; use alloc::string; use core::cmp::Ordering; use core::fmt::{self, Display, Formatter}; @@ -55,7 +54,7 @@ use core::time::Duration; use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; #[doc(no_inline)] -pub use lightning_types::payment::PaymentSecret; +pub use lightning_types::payment::{PaymentHash, PaymentSecret}; #[doc(no_inline)] pub use lightning_types::routing::{RouteHint, RouteHintHop, RoutingFees}; use lightning_types::string::UntrustedString; @@ -190,7 +189,7 @@ impl Checksum for Bolt11Bech32 { /// /// use lightning_types::payment::PaymentSecret; /// -/// use 
lightning_invoice::{Currency, InvoiceBuilder}; +/// use lightning_invoice::{Currency, InvoiceBuilder, PaymentHash}; /// /// # #[cfg(not(feature = "std"))] /// # fn main() {} @@ -204,7 +203,7 @@ impl Checksum for Bolt11Bech32 { /// ][..] /// ).unwrap(); /// -/// let payment_hash = sha256::Hash::from_slice(&[0; 32][..]).unwrap(); +/// let payment_hash = PaymentHash([0; 32]); /// let payment_secret = PaymentSecret([42u8; 32]); /// /// let invoice = InvoiceBuilder::new(Currency::Bitcoin) @@ -522,7 +521,7 @@ impl Ord for RawTaggedField { #[allow(missing_docs)] #[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)] pub enum TaggedField { - PaymentHash(Sha256), + PaymentHash(PaymentHash), Description(Description), PayeePubKey(PayeePubKey), DescriptionHash(Sha256), @@ -794,8 +793,8 @@ impl InvoiceBuilder { /// Set the payment hash. This function is only available if no payment hash was set. - pub fn payment_hash(mut self, hash: sha256::Hash) -> InvoiceBuilder { - self.tagged_fields.push(TaggedField::PaymentHash(Sha256(hash))); + pub fn payment_hash(mut self, hash: PaymentHash) -> InvoiceBuilder { + self.tagged_fields.push(TaggedField::PaymentHash(hash)); self.set_flags() } } @@ -1080,8 +1079,8 @@ macro_rules! 
find_all_extract { #[allow(missing_docs)] impl RawBolt11Invoice { /// Hash the HRP (as bytes) and signatureless data part (as Fe32 iterator) - fn hash_from_parts<'s>( - hrp_bytes: &[u8], data_without_signature: Box + 's>, + fn hash_from_parts<'s, I: Iterator + 's>( + hrp_bytes: &[u8], data_without_signature: I, ) -> [u8; 32] { use crate::bech32::Fe32IterExt; use bitcoin::hashes::HashEngine; @@ -1159,7 +1158,7 @@ impl RawBolt11Invoice { self.data.tagged_fields.iter().filter_map(match_raw) } - pub fn payment_hash(&self) -> Option<&Sha256> { + pub fn payment_hash(&self) -> Option<&PaymentHash> { find_extract!(self.known_tagged_fields(), TaggedField::PaymentHash(ref x), x) } @@ -1461,8 +1460,8 @@ impl Bolt11Invoice { } /// Returns the hash to which we will receive the preimage on completion of the payment - pub fn payment_hash(&self) -> &sha256::Hash { - &self.signed_invoice.payment_hash().expect("checked by constructor").0 + pub fn payment_hash(&self) -> PaymentHash { + *self.signed_invoice.payment_hash().expect("checked by constructor") } /// Return the description or a hash of it for longer ones @@ -1925,10 +1924,7 @@ impl<'de> Deserialize<'de> for Bolt11Invoice { #[cfg(test)] mod test { - use bitcoin::hashes::sha256; use bitcoin::ScriptBuf; - use std::str::FromStr; - #[test] fn test_system_time_bounds_assumptions() { assert_eq!( @@ -1940,16 +1936,22 @@ mod test { #[test] fn test_calc_invoice_hash() { use crate::TaggedField::*; - use crate::{Currency, PositiveTimestamp, RawBolt11Invoice, RawDataPart, RawHrp}; + use crate::{ + Currency, PaymentHash, PositiveTimestamp, RawBolt11Invoice, RawDataPart, RawHrp, + }; + use bitcoin::hex::FromHex; let invoice = RawBolt11Invoice { hrp: RawHrp { currency: Currency::Bitcoin, raw_amount: None, si_prefix: None }, data: RawDataPart { timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), tagged_fields: vec![ - PaymentHash(crate::Sha256( - sha256::Hash::from_str( - 
"0001020304050607080900010203040506070809000102030405060708090102", + crate::TaggedField::PaymentHash(PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), ) .unwrap(), )) @@ -1978,51 +1980,58 @@ mod test { fn test_check_signature() { use crate::TaggedField::*; use crate::{ - Bolt11InvoiceSignature, Currency, PositiveTimestamp, RawBolt11Invoice, RawDataPart, - RawHrp, Sha256, SignedRawBolt11Invoice, + Bolt11InvoiceSignature, Currency, PaymentHash, PositiveTimestamp, RawBolt11Invoice, + RawDataPart, RawHrp, SignedRawBolt11Invoice, }; + use bitcoin::hex::FromHex; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1::{PublicKey, SecretKey}; - let invoice = - SignedRawBolt11Invoice { - raw_invoice: RawBolt11Invoice { - hrp: RawHrp { currency: Currency::Bitcoin, raw_amount: None, si_prefix: None }, - data: RawDataPart { - timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), - tagged_fields: vec ! [ - PaymentHash(Sha256(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap())).into(), + let invoice = SignedRawBolt11Invoice { + raw_invoice: RawBolt11Invoice { + hrp: RawHrp { currency: Currency::Bitcoin, raw_amount: None, si_prefix: None }, + data: RawDataPart { + timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), + tagged_fields: vec ! 
[ + crate::TaggedField::PaymentHash(PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) + .into(), Description( crate::Description::new( "Please consider supporting this project".to_owned() ).unwrap() ).into(), ], - }, }, - hash: [ - 0xc3, 0xd4, 0xe8, 0x3f, 0x64, 0x6f, 0xa7, 0x9a, 0x39, 0x3d, 0x75, 0x27, 0x7b, - 0x1d, 0x85, 0x8d, 0xb1, 0xd1, 0xf7, 0xab, 0x71, 0x37, 0xdc, 0xb7, 0x83, 0x5d, - 0xb2, 0xec, 0xd5, 0x18, 0xe1, 0xc9, - ], - signature: Bolt11InvoiceSignature( - RecoverableSignature::from_compact( - &[ - 0x38u8, 0xec, 0x68, 0x91, 0x34, 0x5e, 0x20, 0x41, 0x45, 0xbe, 0x8a, - 0x3a, 0x99, 0xde, 0x38, 0xe9, 0x8a, 0x39, 0xd6, 0xa5, 0x69, 0x43, 0x4e, - 0x18, 0x45, 0xc8, 0xaf, 0x72, 0x05, 0xaf, 0xcf, 0xcc, 0x7f, 0x42, 0x5f, - 0xcd, 0x14, 0x63, 0xe9, 0x3c, 0x32, 0x88, 0x1e, 0xad, 0x0d, 0x6e, 0x35, - 0x6d, 0x46, 0x7e, 0xc8, 0xc0, 0x25, 0x53, 0xf9, 0xaa, 0xb1, 0x5e, 0x57, - 0x38, 0xb1, 0x1f, 0x12, 0x7f, - ], - RecoveryId::from_i32(0).unwrap(), - ) - .unwrap(), - ), - }; + }, + hash: [ + 0xc3, 0xd4, 0xe8, 0x3f, 0x64, 0x6f, 0xa7, 0x9a, 0x39, 0x3d, 0x75, 0x27, 0x7b, 0x1d, + 0x85, 0x8d, 0xb1, 0xd1, 0xf7, 0xab, 0x71, 0x37, 0xdc, 0xb7, 0x83, 0x5d, 0xb2, 0xec, + 0xd5, 0x18, 0xe1, 0xc9, + ], + signature: Bolt11InvoiceSignature( + RecoverableSignature::from_compact( + &[ + 0x38u8, 0xec, 0x68, 0x91, 0x34, 0x5e, 0x20, 0x41, 0x45, 0xbe, 0x8a, 0x3a, + 0x99, 0xde, 0x38, 0xe9, 0x8a, 0x39, 0xd6, 0xa5, 0x69, 0x43, 0x4e, 0x18, + 0x45, 0xc8, 0xaf, 0x72, 0x05, 0xaf, 0xcf, 0xcc, 0x7f, 0x42, 0x5f, 0xcd, + 0x14, 0x63, 0xe9, 0x3c, 0x32, 0x88, 0x1e, 0xad, 0x0d, 0x6e, 0x35, 0x6d, + 0x46, 0x7e, 0xc8, 0xc0, 0x25, 0x53, 0xf9, 0xaa, 0xb1, 0x5e, 0x57, 0x38, + 0xb1, 0x1f, 0x12, 0x7f, + ], + RecoveryId::from_i32(0).unwrap(), + ) + .unwrap(), + ), + }; assert!(invoice.check_signature()); @@ -2050,9 +2059,10 @@ mod test { fn test_check_feature_bits() { use crate::TaggedField::*; use crate::{ - 
Bolt11Invoice, Bolt11SemanticError, Currency, PositiveTimestamp, RawBolt11Invoice, - RawDataPart, RawHrp, Sha256, + Bolt11Invoice, Bolt11SemanticError, Currency, PaymentHash, PositiveTimestamp, + RawBolt11Invoice, RawDataPart, RawHrp, }; + use bitcoin::hex::FromHex; use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1::SecretKey; use lightning_types::features::Bolt11InvoiceFeatures; @@ -2064,9 +2074,12 @@ mod test { data: RawDataPart { timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), tagged_fields: vec![ - PaymentHash(Sha256( - sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102", + crate::TaggedField::PaymentHash(PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), ) .unwrap(), )) @@ -2174,7 +2187,7 @@ mod test { let builder = InvoiceBuilder::new(Currency::Bitcoin) .description("Test".into()) - .payment_hash(sha256::Hash::from_slice(&[0; 32][..]).unwrap()) + .payment_hash(PaymentHash([0; 32])) .duration_since_epoch(Duration::from_secs(1234567)); let invoice = builder.clone().amount_milli_satoshis(1500).build_raw().unwrap(); @@ -2196,7 +2209,7 @@ mod test { use std::iter::FromIterator; let builder = InvoiceBuilder::new(Currency::Bitcoin) - .payment_hash(sha256::Hash::from_slice(&[0; 32][..]).unwrap()) + .payment_hash(PaymentHash([0; 32])) .duration_since_epoch(Duration::from_secs(1234567)) .min_final_cltv_expiry_delta(144); @@ -2300,7 +2313,7 @@ mod test { .private_route(route_1.clone()) .private_route(route_2.clone()) .description_hash(sha256::Hash::from_slice(&[3; 32][..]).unwrap()) - .payment_hash(sha256::Hash::from_slice(&[21; 32][..]).unwrap()) + .payment_hash(PaymentHash([21; 32])) .payment_secret(PaymentSecret([42; 32])) .basic_mpp(); @@ -2340,7 +2353,7 @@ mod test { sha256::Hash::from_slice(&[3; 32][..]).unwrap() )) ); - assert_eq!(invoice.payment_hash(), &sha256::Hash::from_slice(&[21; 
32][..]).unwrap()); + assert_eq!(invoice.payment_hash(), PaymentHash([21; 32])); assert_eq!(invoice.payment_secret(), &PaymentSecret([42; 32])); let mut expected_features = Bolt11InvoiceFeatures::empty(); @@ -2361,7 +2374,7 @@ mod test { let signed_invoice = InvoiceBuilder::new(Currency::Bitcoin) .description("Test".into()) - .payment_hash(sha256::Hash::from_slice(&[0; 32][..]).unwrap()) + .payment_hash(PaymentHash([0; 32])) .payment_secret(PaymentSecret([0; 32])) .duration_since_epoch(Duration::from_secs(1234567)) .build_raw() @@ -2387,7 +2400,7 @@ mod test { let signed_invoice = InvoiceBuilder::new(Currency::Bitcoin) .description("Test".into()) - .payment_hash(sha256::Hash::from_slice(&[0; 32][..]).unwrap()) + .payment_hash(PaymentHash([0; 32])) .payment_secret(PaymentSecret([0; 32])) .duration_since_epoch(Duration::from_secs(1234567)) .build_raw() @@ -2428,13 +2441,13 @@ mod test { #[test] fn raw_tagged_field_ordering() { - use crate::{ - sha256, Description, Fe32, RawTaggedField, Sha256, TaggedField, UntrustedString, - }; + use crate::{Description, Fe32, PaymentHash, RawTaggedField, TaggedField, UntrustedString}; + use bitcoin::hex::FromHex; - let field10 = RawTaggedField::KnownSemantics(TaggedField::PaymentHash(Sha256( - sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102", + let field10 = RawTaggedField::KnownSemantics(crate::TaggedField::PaymentHash(PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex("0001020304050607080900010203040506070809000102030405060708090102") + .unwrap(), ) .unwrap(), ))); diff --git a/lightning-invoice/src/ser.rs b/lightning-invoice/src/ser.rs index 5c93fa84ae0..35d5dc024ea 100644 --- a/lightning-invoice/src/ser.rs +++ b/lightning-invoice/src/ser.rs @@ -1,4 +1,3 @@ -use alloc::boxed::Box; use core::fmt; use core::fmt::{Display, Formatter}; use core::{array, iter}; @@ -8,19 +7,33 @@ use bech32::{ByteIterExt, Fe32, Fe32IterExt}; use super::{ constants, Bolt11Invoice, Bolt11InvoiceFeatures, 
Bolt11InvoiceSignature, Currency, Description, - ExpiryTime, Fallback, MinFinalCltvExpiryDelta, PayeePubKey, PaymentSecret, PositiveTimestamp, - PrivateRoute, RawDataPart, RawHrp, RawTaggedField, RouteHintHop, Sha256, SiPrefix, - SignedRawBolt11Invoice, TaggedField, + ExpiryTime, Fallback, MinFinalCltvExpiryDelta, PayeePubKey, PaymentHash, PaymentSecret, + PositiveTimestamp, PrivateRoute, RawDataPart, RawHrp, RawTaggedField, RouteHintHop, Sha256, + SiPrefix, SignedRawBolt11Invoice, TaggedField, }; +macro_rules! define_iterator_enum { + ($name: ident, $($n: ident),*) => { + enum $name<$($n: Iterator,)*> { + $($n($n),)* + } + impl<$($n: Iterator,)*> Iterator for $name<$($n,)*> { + type Item = Fe32; + fn next(&mut self) -> Option { + match self { + $(Self::$n(iter) => iter.next(),)* + } + } + } + } +} + /// Objects that can be encoded to base32 (bech32). /// -/// Private to this crate to avoid polluting the API. +/// Private to this crate (except in fuzzing) to avoid polluting the API. pub trait Base32Iterable { - /// apoelstra: In future we want to replace this Box with an explicit - /// associated type, to avoid the allocation. But we cannot do this until - /// Rust 1.65 and GATs since the iterator may contain a reference to self. - fn fe_iter<'s>(&'s self) -> Box + 's>; + /// Serialize this object, returning an iterator over bech32 field elements. + fn fe_iter<'s>(&'s self) -> impl Iterator + 's; } /// Interface to calculate the length of the base32 representation before actually serializing @@ -32,7 +45,7 @@ pub(crate) trait Base32Len: Base32Iterable { // Base32Iterable & Base32Len implementations are here, because the traits are in this module. 
impl Base32Iterable for [u8; N] { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { self[..].fe_iter() } } @@ -45,8 +58,8 @@ impl Base32Len for [u8; N] { } impl Base32Iterable for [u8] { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.iter().copied().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.iter().copied().bytes_to_fes() } } @@ -58,8 +71,8 @@ impl Base32Len for [u8] { } impl Base32Iterable for Vec { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.iter().copied().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.iter().copied().bytes_to_fes() } } @@ -71,8 +84,8 @@ impl Base32Len for Vec { } impl Base32Iterable for PaymentSecret { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0[..].fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0[..].fe_iter() } } @@ -82,13 +95,25 @@ impl Base32Len for PaymentSecret { } } +impl Base32Iterable for PaymentHash { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0[..].fe_iter() + } +} + +impl Base32Len for PaymentHash { + fn base32_len(&self) -> usize { + 52 + } +} + impl Base32Iterable for Bolt11InvoiceFeatures { /// Convert to 5-bit values, by unpacking the 5 bit groups, /// putting the bytes from right-to-left, /// starting from the rightmost bit, /// and taking the resulting 5-bit values in reverse (left-to-right), /// with the leading 0's skipped. 
- fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { // Fe32 conversion cannot be used, because this packs from right, right-to-left let mut input_iter = self.le_flags().iter(); // Carry bits, 0..7 bits @@ -126,7 +151,7 @@ impl Base32Iterable for Bolt11InvoiceFeatures { output.push(Fe32::try_from(next_out8 & 31u8).expect("<32")) } // Take result in reverse order, and skip leading 0s - Box::new(output.into_iter().rev().skip_while(|e| *e == Fe32::Q)) + output.into_iter().rev().skip_while(|e| *e == Fe32::Q) } } @@ -241,36 +266,35 @@ fn encoded_int_be_base32_size(int: u64) -> usize { } impl Base32Iterable for RawDataPart { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let ts_iter = self.timestamp.fe_iter(); let fields_iter = self.tagged_fields.iter().map(RawTaggedField::fe_iter).flatten(); - Box::new(ts_iter.chain(fields_iter)) + ts_iter.chain(fields_iter) } } impl Base32Iterable for PositiveTimestamp { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let fes = encode_int_be_base32(self.as_unix_timestamp()); debug_assert!(fes.len() <= 7, "Invalid timestamp length"); let to_pad = 7 - fes.len(); - Box::new(core::iter::repeat(Fe32::Q).take(to_pad).chain(fes)) + core::iter::repeat(Fe32::Q).take(to_pad).chain(fes) } } impl Base32Iterable for RawTaggedField { - fn fe_iter<'s>(&'s self) -> Box + 's> { - // Annoyingly, when we move to explicit types, we will need an - // explicit enum holding the two iterator variants. 
+ fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + define_iterator_enum!(TwoIters, A, B); match *self { - RawTaggedField::UnknownSemantics(ref content) => Box::new(content.iter().copied()), - RawTaggedField::KnownSemantics(ref tagged_field) => tagged_field.fe_iter(), + RawTaggedField::UnknownSemantics(ref content) => TwoIters::A(content.iter().copied()), + RawTaggedField::KnownSemantics(ref tagged_field) => TwoIters::B(tagged_field.fe_iter()), } } } impl Base32Iterable for Sha256 { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0[..].fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0[..].fe_iter() } } @@ -281,8 +305,8 @@ impl Base32Len for Sha256 { } impl Base32Iterable for Description { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0 .0.as_bytes().fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0 .0.as_bytes().fe_iter() } } @@ -293,8 +317,8 @@ impl Base32Len for Description { } impl Base32Iterable for PayeePubKey { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.serialize().into_iter().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.serialize().into_iter().bytes_to_fes() } } @@ -305,8 +329,8 @@ impl Base32Len for PayeePubKey { } impl Base32Iterable for ExpiryTime { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(encode_int_be_base32(self.as_seconds())) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + encode_int_be_base32(self.as_seconds()) } } @@ -317,8 +341,8 @@ impl Base32Len for ExpiryTime { } impl Base32Iterable for MinFinalCltvExpiryDelta { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(encode_int_be_base32(self.0)) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + encode_int_be_base32(self.0) } } @@ -329,8 +353,8 @@ impl Base32Len for MinFinalCltvExpiryDelta { } impl Base32Iterable for Fallback { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(match *self { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + match *self { 
Fallback::SegWitProgram { version: v, program: ref p } => { let v = Fe32::try_from(v.to_num()).expect("valid version"); core::iter::once(v).chain(p[..].fe_iter()) @@ -343,7 +367,7 @@ impl Base32Iterable for Fallback { // 18 'J' core::iter::once(Fe32::J).chain(hash[..].fe_iter()) }, - }) + } } } @@ -371,7 +395,7 @@ type RouteHintHopIter = iter::Chain< >; impl Base32Iterable for PrivateRoute { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { fn serialize_to_iter(hop: &RouteHintHop) -> RouteHintHopIter { let i1 = hop.src_node_id.serialize().into_iter(); let i2 = u64::to_be_bytes(hop.short_channel_id).into_iter(); @@ -381,7 +405,7 @@ impl Base32Iterable for PrivateRoute { i1.chain(i2).chain(i3).chain(i4).chain(i5) } - Box::new(self.0 .0.iter().map(serialize_to_iter).flatten().bytes_to_fes()) + self.0 .0.iter().map(serialize_to_iter).flatten().bytes_to_fes() } } @@ -391,16 +415,11 @@ impl Base32Len for PrivateRoute { } } -// Shorthand type -type TaggedFieldIter = core::iter::Chain, I>; - impl Base32Iterable for TaggedField { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { /// Writes a tagged field: tag, length and data. `tag` should be in `0..32` otherwise the /// function will panic. 
- fn write_tagged_field<'s, P>( - tag: u8, payload: &'s P, - ) -> TaggedFieldIter + 's>> + fn write_tagged_field<'s, P>(tag: u8, payload: &'s P) -> impl Iterator + 's where P: Base32Iterable + Base32Len + ?Sized, { @@ -416,54 +435,49 @@ impl Base32Iterable for TaggedField { .chain(payload.fe_iter()) } - // we will also need a giant enum for this - Box::new(match *self { + define_iterator_enum!(ManyIters, A, B, C, D, E, F, G, H, I, J, K); + match *self { TaggedField::PaymentHash(ref hash) => { - write_tagged_field(constants::TAG_PAYMENT_HASH, hash) + ManyIters::A(write_tagged_field(constants::TAG_PAYMENT_HASH, hash)) }, TaggedField::Description(ref description) => { - write_tagged_field(constants::TAG_DESCRIPTION, description) + ManyIters::B(write_tagged_field(constants::TAG_DESCRIPTION, description)) }, TaggedField::PayeePubKey(ref pub_key) => { - write_tagged_field(constants::TAG_PAYEE_PUB_KEY, pub_key) + ManyIters::C(write_tagged_field(constants::TAG_PAYEE_PUB_KEY, pub_key)) }, TaggedField::DescriptionHash(ref hash) => { - write_tagged_field(constants::TAG_DESCRIPTION_HASH, hash) + ManyIters::D(write_tagged_field(constants::TAG_DESCRIPTION_HASH, hash)) }, TaggedField::ExpiryTime(ref duration) => { - write_tagged_field(constants::TAG_EXPIRY_TIME, duration) + ManyIters::E(write_tagged_field(constants::TAG_EXPIRY_TIME, duration)) }, TaggedField::MinFinalCltvExpiryDelta(ref expiry) => { - write_tagged_field(constants::TAG_MIN_FINAL_CLTV_EXPIRY_DELTA, expiry) + ManyIters::F(write_tagged_field(constants::TAG_MIN_FINAL_CLTV_EXPIRY_DELTA, expiry)) }, TaggedField::Fallback(ref fallback_address) => { - write_tagged_field(constants::TAG_FALLBACK, fallback_address) + ManyIters::G(write_tagged_field(constants::TAG_FALLBACK, fallback_address)) }, TaggedField::PrivateRoute(ref route_hops) => { - write_tagged_field(constants::TAG_PRIVATE_ROUTE, route_hops) + ManyIters::H(write_tagged_field(constants::TAG_PRIVATE_ROUTE, route_hops)) }, TaggedField::PaymentSecret(ref 
payment_secret) => { - write_tagged_field(constants::TAG_PAYMENT_SECRET, payment_secret) + ManyIters::I(write_tagged_field(constants::TAG_PAYMENT_SECRET, payment_secret)) }, TaggedField::PaymentMetadata(ref payment_metadata) => { - write_tagged_field(constants::TAG_PAYMENT_METADATA, payment_metadata) + ManyIters::J(write_tagged_field(constants::TAG_PAYMENT_METADATA, payment_metadata)) }, TaggedField::Features(ref features) => { - write_tagged_field(constants::TAG_FEATURES, features) + ManyIters::K(write_tagged_field(constants::TAG_FEATURES, features)) }, - }) + } } } impl Base32Iterable for Bolt11InvoiceSignature { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let (recovery_id, signature) = self.0.serialize_compact(); - Box::new( - signature - .into_iter() - .chain(core::iter::once(recovery_id.to_i32() as u8)) - .bytes_to_fes(), - ) + signature.into_iter().chain(core::iter::once(recovery_id.to_i32() as u8)).bytes_to_fes() } } diff --git a/lightning-invoice/src/test_ser_de.rs b/lightning-invoice/src/test_ser_de.rs index e2e4d764ac2..efeed83980e 100644 --- a/lightning-invoice/src/test_ser_de.rs +++ b/lightning-invoice/src/test_ser_de.rs @@ -1,7 +1,11 @@ use crate::de::FromBase32; use crate::ser::{Base32Iterable, Base32Len}; -use crate::{sha256, PayeePubKey, PaymentSecret, PositiveTimestamp, RawDataPart, Sha256}; +use crate::{ + sha256, PayeePubKey, PaymentHash, PaymentSecret, PositiveTimestamp, RawDataPart, + RawTaggedField, Sha256, TaggedField, +}; use bech32::Fe32; +use bitcoin::hex::FromHex; use core::fmt::Debug; use std::str::FromStr; @@ -173,11 +177,12 @@ fn bolt11_invoice_features() { #[test] fn raw_tagged_field() { - use crate::TaggedField::PaymentHash; - - let field = PaymentHash(Sha256( - sha256::Hash::from_str("0001020304050607080900010203040506070809000102030405060708090102") - .unwrap(), + let field = TaggedField::PaymentHash(PaymentHash( + <[u8; 32]>::try_from( + 
Vec::from_hex("0001020304050607080900010203040506070809000102030405060708090102") + .unwrap(), + ) + .unwrap(), )); ser_de_test(field, "pp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypq"); } @@ -202,17 +207,15 @@ fn description() { #[test] fn raw_data_part() { - use crate::TaggedField::PaymentHash; - let raw_data_part = RawDataPart { timestamp: PositiveTimestamp::from_unix_timestamp(10000).unwrap(), - tagged_fields: vec![PaymentHash(Sha256( - sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102", + tagged_fields: vec![RawTaggedField::KnownSemantics(TaggedField::PaymentHash(PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex("0001020304050607080900010203040506070809000102030405060708090102") + .unwrap(), ) .unwrap(), - )) - .into()], + )))], }; ser_de_test(raw_data_part, "qqqqfcspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypq"); } diff --git a/lightning-invoice/tests/ser_de.rs b/lightning-invoice/tests/ser_de.rs index b4d3fa758e1..353878a9c52 100644 --- a/lightning-invoice/tests/ser_de.rs +++ b/lightning-invoice/tests/ser_de.rs @@ -18,9 +18,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { InvoiceBuilder::new(Currency::Bitcoin) .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .description("Please consider supporting this project".to_owned()) .build_raw() .unwrap() @@ -39,9 +45,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .amount_milli_satoshis(250_000_000) .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) - 
.payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .description("1 cup coffee".to_owned()) .expiry_time(Duration::from_secs(60)) .build_raw() @@ -61,9 +73,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .amount_milli_satoshis(250_000_000) .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .description("ナンセンス 1杯".to_owned()) .expiry_time(Duration::from_secs(60)) .build_raw() @@ -84,9 +102,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .build_raw() .unwrap() .sign(|_| { @@ -105,9 +129,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .duration_since_epoch(Duration::from_secs(1496314658)) 
.description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .fallback(Fallback::PubKeyHash(PubkeyHash::from_slice(&[49, 114, 181, 101, 79, 102, 131, 200, 251, 20, 105, 89, 211, 71, 206, 48, 60, 174, 76, 167]).unwrap())) .build_raw() .unwrap() @@ -127,9 +157,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .fallback(Fallback::PubKeyHash(PubkeyHash::from_slice(&[4, 182, 31, 125, 193, 234, 13, 201, 148, 36, 70, 76, 196, 6, 77, 197, 100, 217, 30, 137]).unwrap())) .private_route(RouteHint(vec![RouteHintHop { src_node_id: PublicKey::from_slice(&>::from_hex( @@ -166,9 +202,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One 
piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .fallback(Fallback::ScriptHash(ScriptHash::from_slice(&[143, 85, 86, 59, 154, 25, 243, 33, 194, 17, 233, 185, 243, 140, 223, 104, 110, 160, 120, 69]).unwrap())) .build_raw() .unwrap() @@ -188,9 +230,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .fallback(Fallback::SegWitProgram { version: WitnessVersion::V0, program: vec![117, 30, 118, 232, 25, 145, 150, 212, 84, 148, 28, 69, 209, 179, 163, 35, 241, 67, 59, 214] }) @@ -212,9 +260,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .duration_since_epoch(Duration::from_secs(1496314658)) .description_hash(sha256::Hash::hash(b"One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one 
piece of cherry pie, one sausage, one cupcake, and one slice of watermelon")) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .fallback(Fallback::SegWitProgram { version: WitnessVersion::V0, program: vec![24, 99, 20, 60, 20, 197, 22, 104, 4, 189, 25, 32, 51, 86, 218, 19, 108, 152, 86, 120, 205, 77, 39, 161, 184, 198, 50, 150, 4, 144, 50, 98] }) @@ -235,9 +289,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .amount_milli_satoshis(967878534) .duration_since_epoch(Duration::from_secs(1572468703)) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "462264ede7e14047e9b249da94fefc47f41f7d02ee9b091815a5506bc8abf75f" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "462264ede7e14047e9b249da94fefc47f41f7d02ee9b091815a5506bc8abf75f", + ) + .unwrap(), + ) + .unwrap(), + )) .expiry_time(Duration::from_secs(604800)) .min_final_cltv_expiry_delta(10) .description("Blockstream Store: 88.85 USD for Blockstream Ledger Nano S x 1, \"Back In My Day\" Sticker x 2, \"I Got Lightning Working\" Sticker x 2 and 1 more items".to_owned()) @@ -267,9 +327,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .amount_milli_satoshis(2_500_000_000) .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), 
+ ) + .unwrap(), + )) .description("coffee beans".to_owned()) .build_raw() .unwrap() @@ -288,9 +354,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .amount_milli_satoshis(2_500_000_000) .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .description("coffee beans".to_owned()) .build_raw() .unwrap() @@ -309,9 +381,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { .amount_milli_satoshis(2_500_000_000) .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .description("coffee beans".to_owned()) .build_raw() .unwrap() @@ -329,9 +407,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(1_000_000_000) .duration_since_epoch(Duration::from_secs(1496314658)) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .description("payment metadata inside".to_owned()) .payment_metadata(>::from_hex("01fafaf0").unwrap()) .require_payment_metadata() 
@@ -355,9 +439,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { InvoiceBuilder::new(Currency::Bitcoin) .amount_milli_satoshis(1_000_000_000) .duration_since_epoch(Duration::from_secs(1496314658)) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .description("payment metadata inside".to_owned()) .payment_metadata(>::from_hex("01fafaf0").unwrap()) .require_payment_metadata() @@ -378,9 +468,15 @@ fn get_test_tuples() -> Vec<(String, SignedRawBolt11Invoice, bool, bool)> { InvoiceBuilder::new(Currency::Bitcoin) .duration_since_epoch(Duration::from_secs(1496314658)) .payment_secret(PaymentSecret([0x11; 32])) - .payment_hash(sha256::Hash::from_str( - "0001020304050607080900010203040506070809000102030405060708090102" - ).unwrap()) + .payment_hash(lightning_invoice::PaymentHash( + <[u8; 32]>::try_from( + Vec::from_hex( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(), + ) + .unwrap(), + )) .description("Please consider supporting this project".to_owned()) .build_raw() .unwrap() diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml index 2f83077cabc..61f41c15d38 100644 --- a/lightning-liquidity/Cargo.toml +++ b/lightning-liquidity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-liquidity" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["John Cantrell ", "Elias Rohrer "] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" @@ -23,8 +23,8 @@ _test_utils = [] [dependencies] lightning = { version = "0.3.0", path = "../lightning", default-features = false } -lightning-types = { version = "0.3.0", path = "../lightning-types", default-features = false } -lightning-invoice = { version = 
"0.34.0", path = "../lightning-invoice", default-features = false, features = ["serde"] } +lightning-types = { version = "0.4.0", path = "../lightning-types", default-features = false } +lightning-invoice = { version = "0.35.0", path = "../lightning-invoice", default-features = false, features = ["serde"] } lightning-macros = { version = "0.2", path = "../lightning-macros" } bitcoin = { version = "0.32.2", default-features = false, features = ["serde"] } @@ -36,10 +36,9 @@ backtrace = { version = "0.3", optional = true } [dev-dependencies] lightning = { version = "0.3.0", path = "../lightning", default-features = false, features = ["_test_utils"] } -lightning-invoice = { version = "0.34.0", path = "../lightning-invoice", default-features = false, features = ["serde", "std"] } -lightning-persister = { version = "0.2.0", path = "../lightning-persister", default-features = false } +lightning-invoice = { version = "0.35.0", path = "../lightning-invoice", default-features = false, features = ["serde", "std"] } +lightning-persister = { version = "0.3.0", path = "../lightning-persister", default-features = false } -proptest = "1.0.0" tokio = { version = "1.35", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } parking_lot = { version = "0.12", default-features = false } diff --git a/lightning-liquidity/src/events/event_queue.rs b/lightning-liquidity/src/events/event_queue.rs index 0d6e3a0ec54..9fb8a250a9a 100644 --- a/lightning-liquidity/src/events/event_queue.rs +++ b/lightning-liquidity/src/events/event_queue.rs @@ -12,7 +12,6 @@ use alloc::collections::VecDeque; use alloc::vec::Vec; use core::future::Future; -use core::ops::Deref; use core::task::{Poll, Waker}; use lightning::ln::msgs::DecodeError; @@ -25,10 +24,7 @@ use lightning::util::wakers::Notifier; /// The maximum queue size we allow before starting to drop events. 
pub const MAX_EVENT_QUEUE_SIZE: usize = 1000; -pub(crate) struct EventQueue -where - K::Target: KVStore, -{ +pub(crate) struct EventQueue { state: Mutex, waker: Mutex>, #[cfg(feature = "std")] @@ -37,10 +33,7 @@ where persist_notifier: Arc, } -impl EventQueue -where - K::Target: KVStore, -{ +impl EventQueue { pub fn new( queue: VecDeque, kv_store: K, persist_notifier: Arc, ) -> Self { @@ -164,14 +157,9 @@ struct QueueState { // A guard type that will notify about new events when dropped. #[must_use] -pub(crate) struct EventQueueNotifierGuard<'a, K: Deref + Clone>(&'a EventQueue) -where - K::Target: KVStore; - -impl<'a, K: Deref + Clone> EventQueueNotifierGuard<'a, K> -where - K::Target: KVStore, -{ +pub(crate) struct EventQueueNotifierGuard<'a, K: KVStore + Clone>(&'a EventQueue); + +impl<'a, K: KVStore + Clone> EventQueueNotifierGuard<'a, K> { pub fn enqueue>(&self, event: E) { let mut state_lock = self.0.state.lock().unwrap(); if state_lock.queue.len() < MAX_EVENT_QUEUE_SIZE { @@ -183,10 +171,7 @@ where } } -impl<'a, K: Deref + Clone> Drop for EventQueueNotifierGuard<'a, K> -where - K::Target: KVStore, -{ +impl<'a, K: KVStore + Clone> Drop for EventQueueNotifierGuard<'a, K> { fn drop(&mut self) { let (should_notify, should_persist_notify) = { let state_lock = self.0.state.lock().unwrap(); @@ -208,14 +193,9 @@ where } } -struct EventFuture<'a, K: Deref + Clone>(&'a EventQueue) -where - K::Target: KVStore; +struct EventFuture<'a, K: KVStore + Clone>(&'a EventQueue); -impl Future for EventFuture<'_, K> -where - K::Target: KVStore, -{ +impl Future for EventFuture<'_, K> { type Output = LiquidityEvent; fn poll( diff --git a/lightning-liquidity/src/lsps0/client.rs b/lightning-liquidity/src/lsps0/client.rs index d300936e2b2..298cb304b51 100644 --- a/lightning-liquidity/src/lsps0/client.rs +++ b/lightning-liquidity/src/lsps0/client.rs @@ -22,24 +22,14 @@ use lightning::util::persist::KVStore; use bitcoin::secp256k1::PublicKey; -use core::ops::Deref; - /// A message 
handler capable of sending and handling bLIP-50 / LSPS0 messages. -pub struct LSPS0ClientHandler -where - ES::Target: EntropySource, - K::Target: KVStore, -{ +pub struct LSPS0ClientHandler { entropy_source: ES, pending_messages: Arc, pending_events: Arc>, } -impl LSPS0ClientHandler -where - ES::Target: EntropySource, - K::Target: KVStore, -{ +impl LSPS0ClientHandler { /// Returns a new instance of [`LSPS0ClientHandler`]. pub(crate) fn new( entropy_source: ES, pending_messages: Arc, pending_events: Arc>, @@ -89,10 +79,8 @@ where } } -impl LSPSProtocolMessageHandler for LSPS0ClientHandler -where - ES::Target: EntropySource, - K::Target: KVStore, +impl LSPSProtocolMessageHandler + for LSPS0ClientHandler { type ProtocolMessage = LSPS0Message; const PROTOCOL_NUMBER: Option = None; diff --git a/lightning-liquidity/src/lsps1/client.rs b/lightning-liquidity/src/lsps1/client.rs index 4a79fb64887..2cbfb04c86a 100644 --- a/lightning-liquidity/src/lsps1/client.rs +++ b/lightning-liquidity/src/lsps1/client.rs @@ -30,8 +30,6 @@ use lightning::util::persist::KVStore; use bitcoin::secp256k1::PublicKey; use bitcoin::Address; -use core::ops::Deref; - /// Client-side configuration options for bLIP-51 / LSPS1 channel requests. #[derive(Clone, Debug)] pub struct LSPS1ClientConfig { @@ -47,11 +45,7 @@ struct PeerState { } /// The main object allowing to send and receive bLIP-51 / LSPS1 messages. -pub struct LSPS1ClientHandler -where - ES::Target: EntropySource, - K::Target: KVStore, -{ +pub struct LSPS1ClientHandler { entropy_source: ES, pending_messages: Arc, pending_events: Arc>, @@ -59,11 +53,7 @@ where config: LSPS1ClientConfig, } -impl LSPS1ClientHandler -where - ES::Target: EntropySource, - K::Target: KVStore, -{ +impl LSPS1ClientHandler { /// Constructs an `LSPS1ClientHandler`. 
pub(crate) fn new( entropy_source: ES, pending_messages: Arc, @@ -432,10 +422,8 @@ where } } -impl LSPSProtocolMessageHandler for LSPS1ClientHandler -where - ES::Target: EntropySource, - K::Target: KVStore, +impl LSPSProtocolMessageHandler + for LSPS1ClientHandler { type ProtocolMessage = LSPS1Message; const PROTOCOL_NUMBER: Option = Some(1); diff --git a/lightning-liquidity/src/lsps1/service.rs b/lightning-liquidity/src/lsps1/service.rs index 8afea1b4345..d7010652c37 100644 --- a/lightning-liquidity/src/lsps1/service.rs +++ b/lightning-liquidity/src/lsps1/service.rs @@ -132,12 +132,9 @@ impl PeerState { } /// The main object allowing to send and receive bLIP-51 / LSPS1 messages. -pub struct LSPS1ServiceHandler +pub struct LSPS1ServiceHandler where - ES::Target: EntropySource, CM::Target: AChannelManager, - C::Target: Filter, - K::Target: KVStore, { entropy_source: ES, channel_manager: CM, @@ -148,13 +145,10 @@ where config: LSPS1ServiceConfig, } -impl LSPS1ServiceHandler +impl + LSPS1ServiceHandler where - ES::Target: EntropySource, CM::Target: AChannelManager, - C::Target: Filter, - ES::Target: EntropySource, - K::Target: KVStore, { /// Constructs a `LSPS1ServiceHandler`. 
pub(crate) fn new( @@ -421,13 +415,10 @@ where } } -impl LSPSProtocolMessageHandler +impl LSPSProtocolMessageHandler for LSPS1ServiceHandler where - ES::Target: EntropySource, CM::Target: AChannelManager, - C::Target: Filter, - K::Target: KVStore, { type ProtocolMessage = LSPS1Message; const PROTOCOL_NUMBER: Option = Some(1); diff --git a/lightning-liquidity/src/lsps2/client.rs b/lightning-liquidity/src/lsps2/client.rs index 83aa7e3e99f..21b57162010 100644 --- a/lightning-liquidity/src/lsps2/client.rs +++ b/lightning-liquidity/src/lsps2/client.rs @@ -13,7 +13,6 @@ use alloc::string::{String, ToString}; use lightning::util::persist::KVStore; use core::default::Default; -use core::ops::Deref; use crate::events::EventQueue; use crate::lsps0::ser::{LSPSProtocolMessageHandler, LSPSRequestId, LSPSResponseError}; @@ -68,11 +67,7 @@ impl PeerState { /// opened. Please refer to the [`bLIP-52 / LSPS2 specification`] for more information. /// /// [`bLIP-52 / LSPS2 specification`]: https://github.com/lightning/blips/blob/master/blip-0052.md#trust-models -pub struct LSPS2ClientHandler -where - ES::Target: EntropySource, - K::Target: KVStore, -{ +pub struct LSPS2ClientHandler { entropy_source: ES, pending_messages: Arc, pending_events: Arc>, @@ -80,11 +75,7 @@ where config: LSPS2ClientConfig, } -impl LSPS2ClientHandler -where - ES::Target: EntropySource, - K::Target: KVStore, -{ +impl LSPS2ClientHandler { /// Constructs an `LSPS2ClientHandler`. 
pub(crate) fn new( entropy_source: ES, pending_messages: Arc, @@ -375,10 +366,8 @@ where } } -impl LSPSProtocolMessageHandler for LSPS2ClientHandler -where - ES::Target: EntropySource, - K::Target: KVStore, +impl LSPSProtocolMessageHandler + for LSPS2ClientHandler { type ProtocolMessage = LSPS2Message; const PROTOCOL_NUMBER: Option = Some(2); diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index b1aaa119161..35942dcd624 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -9,7 +9,6 @@ //! Contains the main bLIP-52 / LSPS2 server-side object, [`LSPS2ServiceHandler`]. -use alloc::boxed::Box; use alloc::string::{String, ToString}; use alloc::vec::Vec; use lightning::util::persist::KVStore; @@ -17,6 +16,7 @@ use lightning::util::persist::KVStore; use core::cmp::Ordering as CmpOrdering; use core::future::Future as StdFuture; use core::ops::Deref; +use core::pin::pin; use core::sync::atomic::{AtomicUsize, Ordering}; use core::task; @@ -40,7 +40,7 @@ use crate::prelude::{new_hash_map, HashMap}; use crate::sync::{Arc, Mutex, MutexGuard, RwLock}; use crate::utils::async_poll::dummy_waker; -use lightning::chain::chaininterface::BroadcasterInterface; +use lightning::chain::chaininterface::{BroadcasterInterface, TransactionType}; use lightning::events::HTLCHandlingFailureType; use lightning::ln::channelmanager::{AChannelManager, FailureCode, InterceptId}; use lightning::ln::msgs::{ErrorAction, LightningError}; @@ -702,11 +702,9 @@ macro_rules! get_or_insert_peer_state_entry { } /// The main object allowing to send and receive bLIP-52 / LSPS2 messages. 
-pub struct LSPS2ServiceHandler +pub struct LSPS2ServiceHandler where CM::Target: AChannelManager, - K::Target: KVStore, - T::Target: BroadcasterInterface, { channel_manager: CM, kv_store: K, @@ -721,11 +719,9 @@ where persistence_in_flight: AtomicUsize, } -impl LSPS2ServiceHandler +impl LSPS2ServiceHandler where CM::Target: AChannelManager, - K::Target: KVStore, - T::Target: BroadcasterInterface, { /// Constructs a `LSPS2ServiceHandler`. pub(crate) fn new( @@ -2023,33 +2019,33 @@ where // (for example when a forwarded HTLC nears expiry). Broadcasting funding after a // close could then confirm the commitment and trigger unintended on‑chain handling. // To avoid this, we check ChannelManager’s view (`is_channel_ready`) before broadcasting. - let channel_id_opt = jit_channel.get_channel_id(); - if let Some(ch_id) = channel_id_opt { - let is_channel_ready = self + if let Some(ch_id) = jit_channel.get_channel_id() { + let channel_details = self .channel_manager .get_cm() .list_channels() .into_iter() - .any(|cd| cd.channel_id == ch_id && cd.is_channel_ready); - if !is_channel_ready { - return; - } - } else { - return; - } + .find(|cd| cd.channel_id == ch_id && cd.is_channel_ready); + + let counterparty_node_id = match channel_details { + Some(cd) => cd.counterparty.node_id, + None => return, + }; - if let Some(funding_tx) = jit_channel.get_funding_tx() { - self.tx_broadcaster.broadcast_transactions(&[funding_tx]); + if let Some(funding_tx) = jit_channel.get_funding_tx() { + self.tx_broadcaster.broadcast_transactions(&[( + funding_tx, + TransactionType::Funding { channels: vec![(counterparty_node_id, ch_id)] }, + )]); + } } } } -impl LSPSProtocolMessageHandler +impl LSPSProtocolMessageHandler for LSPS2ServiceHandler where CM::Target: AChannelManager, - K::Target: KVStore, - T::Target: BroadcasterInterface, { type ProtocolMessage = LSPS2Message; const PROTOCOL_NUMBER: Option = Some(2); @@ -2119,20 +2115,21 @@ fn calculate_amount_to_forward_per_htlc( /// A synchroneous 
wrapper around [`LSPS2ServiceHandler`] to be used in contexts where async is not /// available. -pub struct LSPS2ServiceHandlerSync<'a, CM: Deref, K: Deref + Clone, T: Deref + Clone> -where +pub struct LSPS2ServiceHandlerSync< + 'a, + CM: Deref, + K: KVStore + Clone, + T: BroadcasterInterface + Clone, +> where CM::Target: AChannelManager, - K::Target: KVStore, - T::Target: BroadcasterInterface, { inner: &'a LSPS2ServiceHandler, } -impl<'a, CM: Deref, K: Deref + Clone, T: Deref + Clone> LSPS2ServiceHandlerSync<'a, CM, K, T> +impl<'a, CM: Deref, K: KVStore + Clone, T: BroadcasterInterface + Clone> + LSPS2ServiceHandlerSync<'a, CM, K, T> where CM::Target: AChannelManager, - K::Target: KVStore, - T::Target: BroadcasterInterface, { pub(crate) fn from_inner(inner: &'a LSPS2ServiceHandler) -> Self { Self { inner } @@ -2176,7 +2173,7 @@ where &self, counterparty_node_id: &PublicKey, request_id: LSPSRequestId, intercept_scid: u64, cltv_expiry_delta: u32, client_trusts_lsp: bool, user_channel_id: u128, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.invoice_parameters_generated( + let mut fut = pin!(self.inner.invoice_parameters_generated( counterparty_node_id, request_id, intercept_scid, @@ -2205,7 +2202,7 @@ where &self, intercept_scid: u64, intercept_id: InterceptId, expected_outbound_amount_msat: u64, payment_hash: PaymentHash, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.htlc_intercepted( + let mut fut = pin!(self.inner.htlc_intercepted( intercept_scid, intercept_id, expected_outbound_amount_msat, @@ -2231,7 +2228,7 @@ where pub fn htlc_handling_failed( &self, failure_type: HTLCHandlingFailureType, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.htlc_handling_failed(failure_type)); + let mut fut = pin!(self.inner.htlc_handling_failed(failure_type)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2252,7 +2249,7 @@ where pub fn payment_forwarded( &self, next_channel_id: ChannelId, 
skimmed_fee_msat: u64, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.payment_forwarded(next_channel_id, skimmed_fee_msat)); + let mut fut = pin!(self.inner.payment_forwarded(next_channel_id, skimmed_fee_msat)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2293,7 +2290,7 @@ where &self, counterparty_node_id: &PublicKey, user_channel_id: u128, ) -> Result<(), APIError> { let mut fut = - Box::pin(self.inner.channel_open_abandoned(counterparty_node_id, user_channel_id)); + pin!(self.inner.channel_open_abandoned(counterparty_node_id, user_channel_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2312,8 +2309,7 @@ where pub fn channel_open_failed( &self, counterparty_node_id: &PublicKey, user_channel_id: u128, ) -> Result<(), APIError> { - let mut fut = - Box::pin(self.inner.channel_open_failed(counterparty_node_id, user_channel_id)); + let mut fut = pin!(self.inner.channel_open_failed(counterparty_node_id, user_channel_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2335,7 +2331,7 @@ where &self, user_channel_id: u128, channel_id: &ChannelId, counterparty_node_id: &PublicKey, ) -> Result<(), APIError> { let mut fut = - Box::pin(self.inner.channel_ready(user_channel_id, channel_id, counterparty_node_id)); + pin!(self.inner.channel_ready(user_channel_id, channel_id, counterparty_node_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2355,64 +2351,58 @@ mod tests { use crate::lsps0::ser::LSPSDateTime; - use proptest::prelude::*; - use bitcoin::{absolute::LockTime, transaction::Version}; use core::str::FromStr; const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000; - fn arb_forward_amounts() -> impl Strategy { - (1u64..MAX_VALUE_MSAT, 1u64..MAX_VALUE_MSAT, 1u64..MAX_VALUE_MSAT, 1u64..MAX_VALUE_MSAT) - .prop_map(|(a, b, c, d)| { - (a, b, c, core::cmp::min(d, 
a.saturating_add(b).saturating_add(c))) - }) - } + #[test] + fn rand_test_calculate_amount_to_forward() { + use std::collections::hash_map::RandomState; + use std::hash::{BuildHasher, Hasher}; + + let total_fee_msat = RandomState::new().build_hasher().finish() % MAX_VALUE_MSAT; + let htlc_count = (RandomState::new().build_hasher().finish() % 10) as u8; + + let mut htlcs = Vec::new(); + let mut total_received_msat = 0; + let mut htlc_values = Vec::new(); + for i in 0..htlc_count { + let expected_outbound_amount_msat = + RandomState::new().build_hasher().finish() % MAX_VALUE_MSAT; + if total_received_msat + expected_outbound_amount_msat > MAX_VALUE_MSAT { + break; + } + total_received_msat += expected_outbound_amount_msat; + htlc_values.push(total_received_msat); + htlcs.push(InterceptedHTLC { + intercept_id: InterceptId([i; 32]), + expected_outbound_amount_msat, + payment_hash: PaymentHash([i; 32]), + }); + } - proptest! { - #[test] - fn proptest_calculate_amount_to_forward((o_0, o_1, o_2, total_fee_msat) in arb_forward_amounts()) { - let htlcs = vec![ - InterceptedHTLC { - intercept_id: InterceptId([0; 32]), - expected_outbound_amount_msat: o_0, - payment_hash: PaymentHash([0; 32]), - }, - InterceptedHTLC { - intercept_id: InterceptId([1; 32]), - expected_outbound_amount_msat: o_1, - payment_hash: PaymentHash([0; 32]), - }, - InterceptedHTLC { - intercept_id: InterceptId([2; 32]), - expected_outbound_amount_msat: o_2, - payment_hash: PaymentHash([0; 32]), - }, - ]; + if total_fee_msat > total_received_msat { + return; + } - let result = calculate_amount_to_forward_per_htlc(&htlcs, total_fee_msat); - let total_received_msat = o_0 + o_1 + o_2; + let result = calculate_amount_to_forward_per_htlc(&htlcs, total_fee_msat); - if total_received_msat < total_fee_msat { - assert_eq!(result.len(), 0); - } else { - assert_ne!(result.len(), 0); - assert_eq!(result[0].0, htlcs[0].intercept_id); - assert_eq!(result[1].0, htlcs[1].intercept_id); - assert_eq!(result[2].0, 
htlcs[2].intercept_id); - assert!(result[0].1 <= o_0); - assert!(result[1].1 <= o_1); - assert!(result[2].1 <= o_2); - - let result_sum = result.iter().map(|(_, f)| f).sum::(); - assert_eq!(total_received_msat - result_sum, total_fee_msat); - let five_pct = result_sum as f32 * 0.05; - let fair_share_0 = (o_0 as f32 / total_received_msat as f32) * result_sum as f32; - assert!(result[0].1 as f32 <= fair_share_0 + five_pct); - let fair_share_1 = (o_1 as f32 / total_received_msat as f32) * result_sum as f32; - assert!(result[1].1 as f32 <= fair_share_1 + five_pct); - let fair_share_2 = (o_2 as f32 / total_received_msat as f32) * result_sum as f32; - assert!(result[2].1 as f32 <= fair_share_2 + five_pct); + if total_received_msat < total_fee_msat { + assert_eq!(result.len(), 0); + } else { + assert_eq!(result.len(), htlcs.len()); + let result_sum = result.iter().map(|(_, f)| f).sum::(); + assert_eq!(total_received_msat - result_sum, total_fee_msat); + let five_pct = result_sum as f32 * 0.05; + + for ((htlc, htlc_value), res) in htlcs.iter().zip(htlc_values).zip(result.iter()) { + assert_eq!(res.0, htlc.intercept_id); + assert!(res.1 <= htlc_value); + + let fair_share = + (htlc_value as f32 / total_received_msat as f32) * result_sum as f32; + assert!(res.1 as f32 <= fair_share + five_pct); } } } diff --git a/lightning-liquidity/src/lsps2/utils.rs b/lightning-liquidity/src/lsps2/utils.rs index 9f75a869a0e..998b1d2964d 100644 --- a/lightning-liquidity/src/lsps2/utils.rs +++ b/lightning-liquidity/src/lsps2/utils.rs @@ -60,33 +60,6 @@ pub fn compute_opening_fee( ) -> Option { payment_size_msat .checked_mul(opening_fee_proportional) - .and_then(|f| f.checked_add(999999)) - .and_then(|f| f.checked_div(1000000)) + .map(|f| f.div_ceil(1_000_000)) .map(|f| core::cmp::max(f, opening_fee_min_fee_msat)) } - -#[cfg(test)] -mod tests { - use super::*; - use proptest::prelude::*; - - const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000; - - fn arb_opening_fee_params() -> impl Strategy 
{ - (0u64..MAX_VALUE_MSAT, 0u64..MAX_VALUE_MSAT, 0u64..MAX_VALUE_MSAT) - } - - proptest! { - #[test] - fn test_compute_opening_fee((payment_size_msat, opening_fee_min_fee_msat, opening_fee_proportional) in arb_opening_fee_params()) { - if let Some(res) = compute_opening_fee(payment_size_msat, opening_fee_min_fee_msat, opening_fee_proportional) { - assert!(res >= opening_fee_min_fee_msat); - assert_eq!(res as f32, (payment_size_msat as f32 * opening_fee_proportional as f32)); - } else { - // Check we actually overflowed. - let max_value = u64::MAX as u128; - assert!((payment_size_msat as u128 * opening_fee_proportional as u128) > max_value); - } - } - } -} diff --git a/lightning-liquidity/src/lsps5/client.rs b/lightning-liquidity/src/lsps5/client.rs index 1c6f8b8a250..26c0b180421 100644 --- a/lightning-liquidity/src/lsps5/client.rs +++ b/lightning-liquidity/src/lsps5/client.rs @@ -35,8 +35,6 @@ use alloc::collections::VecDeque; use alloc::string::String; use lightning::util::persist::KVStore; -use core::ops::Deref; - impl PartialEq for (LSPSRequestId, (LSPS5AppName, LSPS5WebhookUrl)) { fn eq(&self, other: &LSPSRequestId) -> bool { &self.0 == other @@ -125,11 +123,7 @@ impl PeerState { /// [`lsps5.list_webhooks`]: super::msgs::LSPS5Request::ListWebhooks /// [`lsps5.remove_webhook`]: super::msgs::LSPS5Request::RemoveWebhook /// [`LSPS5Validator`]: super::validator::LSPS5Validator -pub struct LSPS5ClientHandler -where - ES::Target: EntropySource, - K::Target: KVStore, -{ +pub struct LSPS5ClientHandler { pending_messages: Arc, pending_events: Arc>, entropy_source: ES, @@ -137,11 +131,7 @@ where _config: LSPS5ClientConfig, } -impl LSPS5ClientHandler -where - ES::Target: EntropySource, - K::Target: KVStore, -{ +impl LSPS5ClientHandler { /// Constructs an `LSPS5ClientHandler`. 
pub(crate) fn new( entropy_source: ES, pending_messages: Arc, @@ -426,10 +416,8 @@ where } } -impl LSPSProtocolMessageHandler for LSPS5ClientHandler -where - ES::Target: EntropySource, - K::Target: KVStore, +impl LSPSProtocolMessageHandler + for LSPS5ClientHandler { type ProtocolMessage = LSPS5Message; const PROTOCOL_NUMBER: Option = Some(5); diff --git a/lightning-liquidity/src/lsps5/event.rs b/lightning-liquidity/src/lsps5/event.rs index c12273808ef..30e3aea5687 100644 --- a/lightning-liquidity/src/lsps5/event.rs +++ b/lightning-liquidity/src/lsps5/event.rs @@ -30,9 +30,9 @@ pub enum LSPS5ServiceEvent { /// via their registered webhook. /// /// The LSP should send an HTTP POST to the [`url`], using the - /// JSON-serialized [`notification`] as the body and including the `headers`. - /// If the HTTP request fails, the LSP may implement a retry policy according to its - /// implementation preferences. + /// JSON-serialized [`notification`] (via [`WebhookNotification::to_request_body`]) as the body + /// and including the `headers`. If the HTTP request fails, the LSP may implement a retry + /// policy according to its implementation preferences. /// /// The notification is signed using the LSP's node ID to ensure authenticity /// when received by the client. The client verifies this signature using diff --git a/lightning-liquidity/src/lsps5/msgs.rs b/lightning-liquidity/src/lsps5/msgs.rs index e457c299bfe..363a3255f92 100644 --- a/lightning-liquidity/src/lsps5/msgs.rs +++ b/lightning-liquidity/src/lsps5/msgs.rs @@ -565,6 +565,12 @@ impl WebhookNotification { pub fn onion_message_incoming() -> Self { Self { method: WebhookNotificationMethod::LSPS5OnionMessageIncoming } } + + /// Encodes this notification into JSON which can be sent as the body of an HTTP request to + /// deliver the notification. 
+ pub fn to_request_body(&self) -> String { + serde_json::to_string(self).unwrap() + } } impl Serialize for WebhookNotification { diff --git a/lightning-liquidity/src/lsps5/service.rs b/lightning-liquidity/src/lsps5/service.rs index 53fa96ee565..4678d38dc9a 100644 --- a/lightning-liquidity/src/lsps5/service.rs +++ b/lightning-liquidity/src/lsps5/service.rs @@ -125,11 +125,9 @@ impl Default for LSPS5ServiceConfig { /// [`LSPS5ServiceEvent::SendWebhookNotification`]: super::event::LSPS5ServiceEvent::SendWebhookNotification /// [`app_name`]: super::msgs::LSPS5AppName /// [`lsps5.webhook_registered`]: super::msgs::WebhookNotificationMethod::LSPS5WebhookRegistered -pub struct LSPS5ServiceHandler +pub struct LSPS5ServiceHandler where CM::Target: AChannelManager, - NS::Target: NodeSigner, - K::Target: KVStore, TP::Target: TimeProvider, { config: LSPS5ServiceConfig, @@ -144,11 +142,9 @@ where persistence_in_flight: AtomicUsize, } -impl LSPS5ServiceHandler +impl LSPS5ServiceHandler where CM::Target: AChannelManager, - NS::Target: NodeSigner, - K::Target: KVStore, TP::Target: TimeProvider, { /// Constructs a `LSPS5ServiceHandler` using the given time provider. 
@@ -694,12 +690,10 @@ where } } -impl LSPSProtocolMessageHandler +impl LSPSProtocolMessageHandler for LSPS5ServiceHandler where CM::Target: AChannelManager, - NS::Target: NodeSigner, - K::Target: KVStore, TP::Target: TimeProvider, { type ProtocolMessage = LSPS5Message; diff --git a/lightning-liquidity/src/lsps5/url_utils.rs b/lightning-liquidity/src/lsps5/url_utils.rs index c9d5f9e79c7..2d49c10ff08 100644 --- a/lightning-liquidity/src/lsps5/url_utils.rs +++ b/lightning-liquidity/src/lsps5/url_utils.rs @@ -102,138 +102,3 @@ impl Readable for LSPSUrl { Ok(Self(Readable::read(reader)?)) } } - -#[cfg(test)] -mod tests { - use super::*; - use crate::alloc::string::ToString; - use alloc::vec::Vec; - use proptest::prelude::*; - - #[test] - fn test_extremely_long_url() { - let url_str = format!("https://{}/path", "a".repeat(1000)).to_string(); - let url_chars = url_str.chars().count(); - let result = LSPSUrl::parse(url_str); - - assert!(result.is_ok()); - let url = result.unwrap(); - assert_eq!(url.0 .0.chars().count(), url_chars); - } - - #[test] - fn test_parse_http_url() { - let url_str = "http://example.com/path".to_string(); - let url = LSPSUrl::parse(url_str).unwrap_err(); - assert_eq!(url, LSPS5ProtocolError::UnsupportedProtocol); - } - - #[test] - fn valid_lsps_url() { - let test_vec: Vec<&'static str> = vec![ - "https://www.example.org/push?l=1234567890abcopqrstuv&c=best", - "https://www.example.com/path", - "https://example.org", - "https://example.com:8080/path", - "https://api.example.com/v1/resources", - "https://example.com/page#section1", - "https://example.com/search?q=test#results", - "https://user:pass@example.com/", - "https://192.168.1.1/admin", - "https://example.com://path", - "https://example.com/path%20with%20spaces", - "https://example_example.com/path?query=with&spaces=true", - ]; - for url_str in test_vec { - let url = LSPSUrl::parse(url_str.to_string()); - assert!(url.is_ok(), "Failed to parse URL: {}", url_str); - } - } - - #[test] - fn 
invalid_lsps_url() { - let test_vec = vec![ - "ftp://ftp.example.org/pub/files/document.pdf", - "sftp://user:password@sftp.example.com:22/uploads/", - "ssh://username@host.com:2222", - "lightning://03a.example.com/invoice?amount=10000", - "ftp://user@ftp.example.com/files/", - "https://例子.测试/path", - "a123+-.://example.com", - "a123+-.://example.com", - "https:\\\\example.com\\path", - "https:///whatever", - "https://example.com/path with spaces", - ]; - for url_str in test_vec { - let url = LSPSUrl::parse(url_str.to_string()); - assert!(url.is_err(), "Expected error for URL: {}", url_str); - } - } - - #[test] - fn parsing_errors() { - let test_vec = vec![ - "example.com/path", - "https://bad domain.com/", - "https://example.com\0/path", - "https://", - "ht@ps://example.com", - "http!://example.com", - "1https://example.com", - "https://://example.com", - "https://example.com:port/path", - "https://:8080/path", - "https:", - "://", - "https://example.com\0/path", - ]; - for url_str in test_vec { - let url = LSPSUrl::parse(url_str.to_string()); - assert!(url.is_err(), "Expected error for URL: {}", url_str); - } - } - - fn host_strategy() -> impl Strategy { - prop_oneof![ - proptest::string::string_regex( - "[a-z0-9]+(?:-[a-z0-9]+)*(?:\\.[a-z0-9]+(?:-[a-z0-9]+)*)*" - ) - .unwrap(), - (0u8..=255u8, 0u8..=255u8, 0u8..=255u8, 0u8..=255u8) - .prop_map(|(a, b, c, d)| format!("{}.{}.{}.{}", a, b, c, d)) - ] - } - - proptest! 
{ - #[test] - fn proptest_parse_round_trip( - host in host_strategy(), - port in proptest::option::of(0u16..=65535u16), - path in proptest::option::of(proptest::string::string_regex("[a-zA-Z0-9._%&=:@/-]{0,20}").unwrap()), - query in proptest::option::of(proptest::string::string_regex("[a-zA-Z0-9._%&=:@/-]{0,20}").unwrap()), - fragment in proptest::option::of(proptest::string::string_regex("[a-zA-Z0-9._%&=:@/-]{0,20}").unwrap()) - ) { - let mut url = format!("https://{}", host); - if let Some(p) = port { - url.push_str(&format!(":{}", p)); - } - if let Some(pth) = &path { - url.push('/'); - url.push_str(pth); - } - if let Some(q) = &query { - url.push('?'); - url.push_str(q); - } - if let Some(f) = &fragment { - url.push('#'); - url.push_str(f); - } - - let parsed = LSPSUrl::parse(url.clone()).expect("should parse"); - prop_assert_eq!(parsed.url(), url.as_str()); - prop_assert_eq!(parsed.url_length(), url.chars().count()); - } - } -} diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index 60fb94c5af2..1f11fc8add7 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -7,7 +7,6 @@ // You may not use this file except in accordance with one or both of these // licenses. -use alloc::boxed::Box; use alloc::string::ToString; use alloc::vec::Vec; @@ -61,6 +60,7 @@ use bitcoin::secp256k1::PublicKey; use core::future::Future as StdFuture; use core::ops::Deref; +use core::pin::pin; use core::task; const LSPS_FEATURE_BIT: usize = 729; @@ -104,71 +104,59 @@ pub struct LiquidityClientConfig { /// languages. pub trait ALiquidityManager { /// A type implementing [`EntropySource`] - type EntropySource: EntropySource + ?Sized; - /// A type that may be dereferenced to [`Self::EntropySource`]. - type ES: Deref + Clone; + type EntropySource: EntropySource + Clone; /// A type implementing [`NodeSigner`] - type NodeSigner: NodeSigner + ?Sized; - /// A type that may be dereferenced to [`Self::NodeSigner`]. 
- type NS: Deref + Clone; + type NodeSigner: NodeSigner + Clone; /// A type implementing [`AChannelManager`] type AChannelManager: AChannelManager + ?Sized; /// A type that may be dereferenced to [`Self::AChannelManager`]. type CM: Deref + Clone; /// A type implementing [`Filter`]. - type Filter: Filter + ?Sized; - /// A type that may be dereferenced to [`Self::Filter`]. - type C: Deref + Clone; + type C: Filter + Clone; /// A type implementing [`KVStore`]. - type KVStore: KVStore + ?Sized; - /// A type that may be dereferenced to [`Self::KVStore`]. - type K: Deref + Clone; + type K: KVStore + Clone; /// A type implementing [`TimeProvider`]. type TimeProvider: TimeProvider + ?Sized; /// A type that may be dereferenced to [`Self::TimeProvider`]. type TP: Deref + Clone; /// A type implementing [`BroadcasterInterface`]. - type BroadcasterInterface: BroadcasterInterface + ?Sized; - /// A type that may be dereferenced to [`Self::BroadcasterInterface`]. - type T: Deref + Clone; + type BroadcasterInterface: BroadcasterInterface + Clone; /// Returns a reference to the actual [`LiquidityManager`] object. 
fn get_lm( &self, - ) -> &LiquidityManager; + ) -> &LiquidityManager< + Self::EntropySource, + Self::NodeSigner, + Self::CM, + Self::C, + Self::K, + Self::TP, + Self::BroadcasterInterface, + >; } impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, - K: Deref + Clone, + C: Filter + Clone, + K: KVStore + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > ALiquidityManager for LiquidityManager where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { - type EntropySource = ES::Target; - type ES = ES; - type NodeSigner = NS::Target; - type NS = NS; + type EntropySource = ES; + type NodeSigner = NS; type AChannelManager = CM::Target; type CM = CM; - type Filter = C::Target; type C = C; - type KVStore = K::Target; type K = K; type TimeProvider = TP::Target; type TP = TP; - type BroadcasterInterface = T::Target; - type T = T; + type BroadcasterInterface = T; fn get_lm(&self) -> &LiquidityManager { self } @@ -180,21 +168,15 @@ where /// languages. pub trait ALiquidityManagerSync { /// A type implementing [`EntropySource`] - type EntropySource: EntropySource + ?Sized; - /// A type that may be dereferenced to [`Self::EntropySource`]. - type ES: Deref + Clone; + type EntropySource: EntropySource + Clone; /// A type implementing [`NodeSigner`] - type NodeSigner: NodeSigner + ?Sized; - /// A type that may be dereferenced to [`Self::NodeSigner`]. - type NS: Deref + Clone; + type NodeSigner: NodeSigner + Clone; /// A type implementing [`AChannelManager`] type AChannelManager: AChannelManager + ?Sized; /// A type that may be dereferenced to [`Self::AChannelManager`]. type CM: Deref + Clone; /// A type implementing [`Filter`]. - type Filter: Filter + ?Sized; - /// A type that may be dereferenced to [`Self::Filter`]. 
- type C: Deref + Clone; + type C: Filter + Clone; /// A type implementing [`KVStoreSync`]. type KVStoreSync: KVStoreSync + ?Sized; /// A type that may be dereferenced to [`Self::KVStoreSync`]. @@ -204,72 +186,70 @@ pub trait ALiquidityManagerSync { /// A type that may be dereferenced to [`Self::TimeProvider`]. type TP: Deref + Clone; /// A type implementing [`BroadcasterInterface`]. - type BroadcasterInterface: BroadcasterInterface + ?Sized; - /// A type that may be dereferenced to [`Self::BroadcasterInterface`]. - type T: Deref + Clone; + type BroadcasterInterface: BroadcasterInterface + Clone; /// Returns the inner async [`LiquidityManager`] for testing purposes. #[cfg(any(test, feature = "_test_utils"))] fn get_lm_async( &self, ) -> &LiquidityManager< - Self::ES, - Self::NS, + Self::EntropySource, + Self::NodeSigner, Self::CM, Self::C, KVStoreSyncWrapper, Self::TP, - Self::T, + Self::BroadcasterInterface, >; /// Returns a reference to the actual [`LiquidityManager`] object. fn get_lm( &self, - ) -> &LiquidityManagerSync; + ) -> &LiquidityManagerSync< + Self::EntropySource, + Self::NodeSigner, + Self::CM, + Self::C, + Self::KS, + Self::TP, + Self::BroadcasterInterface, + >; } impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > ALiquidityManagerSync for LiquidityManagerSync where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { - type EntropySource = ES::Target; - type ES = ES; - type NodeSigner = NS::Target; - type NS = NS; + type EntropySource = ES; + type NodeSigner = NS; type AChannelManager = CM::Target; type CM = CM; - type Filter = C::Target; type C = C; type KVStoreSync = KS::Target; type KS = KS; type 
TimeProvider = TP::Target; type TP = TP; - type BroadcasterInterface = T::Target; - type T = T; + type BroadcasterInterface = T; /// Returns the inner async [`LiquidityManager`] for testing purposes. #[cfg(any(test, feature = "_test_utils"))] fn get_lm_async( &self, ) -> &LiquidityManager< - Self::ES, - Self::NS, + Self::EntropySource, + Self::NodeSigner, Self::CM, Self::C, KVStoreSyncWrapper, Self::TP, - Self::T, + Self::BroadcasterInterface, > { &self.inner } @@ -298,21 +278,16 @@ where /// [`Event::HTLCHandlingFailed`]: lightning::events::Event::HTLCHandlingFailed /// [`Event::PaymentForwarded`]: lightning::events::Event::PaymentForwarded pub struct LiquidityManager< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, - K: Deref + Clone, + C: Filter + Clone, + K: KVStore + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { pending_messages: Arc, pending_events: Arc>, @@ -337,20 +312,15 @@ pub struct LiquidityManager< #[cfg(feature = "time")] impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, - K: Deref + Clone, - T: Deref + Clone, + C: Filter + Clone, + K: KVStore + Clone, + T: BroadcasterInterface + Clone, > LiquidityManager where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, - K::Target: KVStore, - T::Target: BroadcasterInterface, { /// Constructor for the [`LiquidityManager`] using the default system clock /// @@ -378,22 +348,17 @@ where } impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, - K: Deref + Clone, + C: 
Filter + Clone, + K: KVStore + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > LiquidityManager where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { /// Constructor for the [`LiquidityManager`] with a custom time provider. /// @@ -805,22 +770,17 @@ where } impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, - K: Deref + Clone, + C: Filter + Clone, + K: KVStore + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > CustomMessageReader for LiquidityManager where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { type CustomMessage = RawLSPSMessage; @@ -837,22 +797,17 @@ where } impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, - K: Deref + Clone, + C: Filter + Clone, + K: KVStore + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > CustomMessageHandler for LiquidityManager where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { fn handle_custom_message( &self, msg: Self::CustomMessage, sender_node_id: PublicKey, @@ -971,22 +926,17 @@ where } impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, - K: Deref + Clone, + C: Filter + Clone, + K: KVStore + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > Listen for LiquidityManager where - ES::Target: EntropySource, 
- NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { fn filtered_block_connected( &self, header: &bitcoin::block::Header, txdata: &chain::transaction::TransactionData, @@ -1017,22 +967,17 @@ where } impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, - K: Deref + Clone, + C: Filter + Clone, + K: KVStore + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > Confirm for LiquidityManager where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, - K::Target: KVStore, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { fn transactions_confirmed( &self, _header: &bitcoin::block::Header, _txdata: &chain::transaction::TransactionData, @@ -1063,41 +1008,33 @@ where /// A synchroneous wrapper around [`LiquidityManager`] to be used in contexts where async is not /// available. 
pub struct LiquidityManagerSync< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { inner: LiquidityManager, TP, T>, } #[cfg(feature = "time")] impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > LiquidityManagerSync where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, KS::Target: KVStoreSync, - C::Target: Filter, - T::Target: BroadcasterInterface, { /// Constructor for the [`LiquidityManagerSync`] using the default system clock /// @@ -1110,7 +1047,7 @@ where ) -> Result { let kv_store = KVStoreSyncWrapper(kv_store_sync); - let mut fut = Box::pin(LiquidityManager::new( + let mut fut = pin!(LiquidityManager::new( entropy_source, node_signer, channel_manager, @@ -1136,22 +1073,18 @@ where } impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > LiquidityManagerSync where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { /// Constructor for the [`LiquidityManagerSync`] with a custom time provider. 
/// @@ -1163,7 +1096,7 @@ where client_config: Option, time_provider: TP, ) -> Result { let kv_store = KVStoreSyncWrapper(kv_store_sync); - let mut fut = Box::pin(LiquidityManager::new_with_custom_time_provider( + let mut fut = pin!(LiquidityManager::new_with_custom_time_provider( entropy_source, node_signer, channel_manager, @@ -1295,7 +1228,7 @@ where pub fn persist(&self) -> Result { let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match Box::pin(self.inner.persist()).as_mut().poll(&mut ctx) { + match pin!(self.inner.persist()).as_mut().poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { // In a sync context, we can't wait for the future to complete. @@ -1306,22 +1239,18 @@ where } impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > CustomMessageReader for LiquidityManagerSync where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { type CustomMessage = RawLSPSMessage; @@ -1333,22 +1262,18 @@ where } impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > CustomMessageHandler for LiquidityManagerSync where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { fn handle_custom_message( &self, msg: Self::CustomMessage, sender_node_id: PublicKey, @@ -1380,22 +1305,18 @@ where } impl< - ES: Deref + Clone, - NS: Deref + Clone, + 
ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > Listen for LiquidityManagerSync where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { fn filtered_block_connected( &self, header: &bitcoin::block::Header, txdata: &chain::transaction::TransactionData, @@ -1410,22 +1331,18 @@ where } impl< - ES: Deref + Clone, - NS: Deref + Clone, + ES: EntropySource + Clone, + NS: NodeSigner + Clone, CM: Deref + Clone, - C: Deref + Clone, + C: Filter + Clone, KS: Deref + Clone, TP: Deref + Clone, - T: Deref + Clone, + T: BroadcasterInterface + Clone, > Confirm for LiquidityManagerSync where - ES::Target: EntropySource, - NS::Target: NodeSigner, CM::Target: AChannelManager, - C::Target: Filter, KS::Target: KVStoreSync, TP::Target: TimeProvider, - T::Target: BroadcasterInterface, { fn transactions_confirmed( &self, header: &bitcoin::block::Header, txdata: &chain::transaction::TransactionData, diff --git a/lightning-liquidity/src/persist.rs b/lightning-liquidity/src/persist.rs index ec0d5a6ddd3..d0199440514 100644 --- a/lightning-liquidity/src/persist.rs +++ b/lightning-liquidity/src/persist.rs @@ -22,8 +22,6 @@ use lightning::util::ser::Readable; use bitcoin::secp256k1::PublicKey; use alloc::collections::VecDeque; - -use core::ops::Deref; use core::str::FromStr; /// The primary namespace under which the [`LiquidityManager`] will be persisted. 
@@ -51,12 +49,9 @@ pub const LSPS2_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE: &str = "lsps2_service"; /// [`LSPS5ServiceHandler`]: crate::lsps5::service::LSPS5ServiceHandler pub const LSPS5_SERVICE_PERSISTENCE_SECONDARY_NAMESPACE: &str = "lsps5_service"; -pub(crate) async fn read_event_queue( +pub(crate) async fn read_event_queue( kv_store: K, -) -> Result>, lightning::io::Error> -where - K::Target: KVStore, -{ +) -> Result>, lightning::io::Error> { let read_fut = kv_store.read( LIQUIDITY_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, LIQUIDITY_MANAGER_EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, @@ -85,12 +80,9 @@ where Ok(Some(queue.0)) } -pub(crate) async fn read_lsps2_service_peer_states( +pub(crate) async fn read_lsps2_service_peer_states( kv_store: K, -) -> Result>, lightning::io::Error> -where - K::Target: KVStore, -{ +) -> Result>, lightning::io::Error> { let mut res = new_hash_map(); for stored_key in kv_store @@ -129,12 +121,9 @@ where Ok(res) } -pub(crate) async fn read_lsps5_service_peer_states( +pub(crate) async fn read_lsps5_service_peer_states( kv_store: K, -) -> Result, lightning::io::Error> -where - K::Target: KVStore, -{ +) -> Result, lightning::io::Error> { let mut res = new_hash_map(); for stored_key in kv_store diff --git a/lightning-liquidity/src/utils/mod.rs b/lightning-liquidity/src/utils/mod.rs index b66d3eb7ead..32b50443350 100644 --- a/lightning-liquidity/src/utils/mod.rs +++ b/lightning-liquidity/src/utils/mod.rs @@ -1,7 +1,7 @@ //! Utilities for LSPS5 service. 
use alloc::string::String; -use core::{fmt::Write, ops::Deref}; +use core::fmt::Write; use lightning::sign::EntropySource; @@ -23,10 +23,7 @@ pub fn scid_from_human_readable_string(human_readable_scid: &str) -> Result(entropy_source: &ES) -> LSPSRequestId -where - ES::Target: EntropySource, -{ +pub(crate) fn generate_request_id(entropy_source: &ES) -> LSPSRequestId { let bytes = entropy_source.get_secure_random_bytes(); LSPSRequestId(hex_str(&bytes[0..16])) } diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index 82f93b5990c..33a6dd697cf 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -7,19 +7,10 @@ use common::{ get_lsps_message, LSPSNodes, LSPSNodesWithPayer, LiquidityNode, }; -use lightning::check_added_monitors; use lightning::events::{ClosureReason, Event}; use lightning::get_event_msg; -use lightning::ln::channelmanager::PaymentId; -use lightning::ln::channelmanager::Retry; -use lightning::ln::functional_test_utils::create_funding_transaction; -use lightning::ln::functional_test_utils::do_commitment_signed_dance; -use lightning::ln::functional_test_utils::expect_channel_pending_event; -use lightning::ln::functional_test_utils::expect_channel_ready_event; -use lightning::ln::functional_test_utils::expect_payment_sent; -use lightning::ln::functional_test_utils::test_default_channel_config; -use lightning::ln::functional_test_utils::SendEvent; -use lightning::ln::functional_test_utils::{connect_blocks, create_chan_between_nodes_with_value}; +use lightning::ln::channelmanager::{OptionalBolt11PaymentParams, PaymentId}; +use lightning::ln::functional_test_utils::*; use lightning::ln::msgs::BaseMessageHandler; use lightning::ln::msgs::ChannelMessageHandler; use lightning::ln::msgs::MessageSendEvent; @@ -46,6 +37,7 @@ use lightning::ln::peer_handler::CustomMessageHandler; use lightning::log_error; use 
lightning::routing::router::{RouteHint, RouteHintHop}; use lightning::sign::NodeSigner; +use lightning::util::config::HTLCInterceptionFlags; use lightning::util::errors::APIError; use lightning::util::logger::Logger; use lightning::util::test_utils::{TestBroadcaster, TestStore}; @@ -54,7 +46,6 @@ use lightning_invoice::{Bolt11Invoice, InvoiceBuilder, RoutingFees}; use lightning_types::payment::PaymentHash; -use bitcoin::hashes::{sha256, Hash}; use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; use bitcoin::Network; use lightning_types::payment::PaymentPreimage; @@ -145,10 +136,6 @@ fn create_jit_invoice( htlc_maximum_msat: None, }]); - let payment_hash = sha256::Hash::from_slice(&payment_hash.0).map_err(|e| { - log_error!(node.logger, "Invalid payment hash: {:?}", e); - })?; - let currency = Network::Bitcoin.into(); let mut invoice_builder = InvoiceBuilder::new(currency) .description(description.to_string()) @@ -1165,10 +1152,9 @@ fn client_trusts_lsp_end_to_end_test() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut service_node_config = test_default_channel_config(); - service_node_config.accept_intercept_htlcs = true; + service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let mut client_node_config = test_default_channel_config(); - client_node_config.manually_accept_inbound_channels = true; client_node_config.channel_config.accept_underpaying_htlcs = true; let node_chanmgrs = create_node_chanmgrs( 3, @@ -1219,14 +1205,13 @@ fn client_trusts_lsp_end_to_end_test() { .node .pay_for_bolt11_invoice( &invoice, - PaymentId(invoice.payment_hash().to_byte_array()), + PaymentId(invoice.payment_hash().0), None, - Default::default(), - Retry::Attempts(3), + OptionalBolt11PaymentParams::default(), ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = 
SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); @@ -1566,7 +1551,7 @@ fn create_channel_with_manual_broadcast( let funding_created = get_event_msg!(service_node, MessageSendEvent::SendFundingCreated, *client_node_id); client_node.node.handle_funding_created(*service_node_id, &funding_created); - check_added_monitors!(client_node.inner, 1); + check_added_monitors(&client_node.inner, 1); let bs_signed_locked = client_node.node.get_and_clear_pending_msg_events(); assert_eq!(bs_signed_locked.len(), 2); @@ -1602,7 +1587,7 @@ fn create_channel_with_manual_broadcast( _ => panic!("Unexpected event"), } expect_channel_pending_event(&client_node, &service_node_id); - check_added_monitors!(service_node.inner, 1); + check_added_monitors(&service_node.inner, 1); as_channel_ready = get_event_msg!(service_node, MessageSendEvent::SendChannelReady, *client_node_id); @@ -1638,10 +1623,9 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut service_node_config = test_default_channel_config(); - service_node_config.accept_intercept_htlcs = true; + service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let mut client_node_config = test_default_channel_config(); - client_node_config.manually_accept_inbound_channels = true; client_node_config.channel_config.accept_underpaying_htlcs = true; let node_chanmgrs = create_node_chanmgrs( @@ -1692,14 +1676,13 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { .node .pay_for_bolt11_invoice( &invoice, - PaymentId(invoice.payment_hash().to_byte_array()), + PaymentId(invoice.payment_hash().0), None, - Default::default(), - Retry::Attempts(3), + OptionalBolt11PaymentParams::default(), ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = 
payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); @@ -1722,7 +1705,7 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { *requested_next_hop_scid, *intercept_id, *expected_outbound_amount_msat, - PaymentHash(invoice.payment_hash().to_byte_array()), + invoice.payment_hash(), ) .unwrap(); }, @@ -1829,10 +1812,9 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut service_node_config = test_default_channel_config(); - service_node_config.accept_intercept_htlcs = true; + service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let mut client_node_config = test_default_channel_config(); - client_node_config.manually_accept_inbound_channels = true; client_node_config.channel_config.accept_underpaying_htlcs = true; let node_chanmgrs = create_node_chanmgrs( @@ -1883,14 +1865,13 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { .node .pay_for_bolt11_invoice( &invoice, - PaymentId(invoice.payment_hash().to_byte_array()), + PaymentId(invoice.payment_hash().0), None, - Default::default(), - Retry::Attempts(3), + OptionalBolt11PaymentParams::default(), ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); @@ -1913,7 +1894,7 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { *requested_next_hop_scid, *intercept_id, *expected_outbound_amount_msat, - PaymentHash(invoice.payment_hash().to_byte_array()), + invoice.payment_hash(), ) .unwrap(); }, @@ -1992,7 +1973,7 @@ fn 
htlc_timeout_before_client_claim_results_in_handling_failed() { match &client_events[0] { Event::HTLCHandlingFailed { failure_type, .. } => match failure_type { lightning::events::HTLCHandlingFailureType::Receive { payment_hash } => { - assert_eq!(*payment_hash, PaymentHash(invoice.payment_hash().to_byte_array())); + assert_eq!(*payment_hash, invoice.payment_hash()); }, _ => panic!("Unexpected failure_type: {:?}", failure_type), }, @@ -2165,10 +2146,9 @@ fn client_trusts_lsp_partial_fee_does_not_trigger_broadcast() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut service_node_config = test_default_channel_config(); - service_node_config.accept_intercept_htlcs = true; + service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let mut client_node_config = test_default_channel_config(); - client_node_config.manually_accept_inbound_channels = true; client_node_config.channel_config.accept_underpaying_htlcs = true; let node_chanmgrs = create_node_chanmgrs( @@ -2220,14 +2200,13 @@ fn client_trusts_lsp_partial_fee_does_not_trigger_broadcast() { .node .pay_for_bolt11_invoice( &invoice, - PaymentId(invoice.payment_hash().to_byte_array()), + PaymentId(invoice.payment_hash().0), None, - Default::default(), - Retry::Attempts(3), + OptionalBolt11PaymentParams::default(), ) .unwrap(); - check_added_monitors!(payer_node, 1); + check_added_monitors(&payer_node, 1); let events = payer_node.node.get_and_clear_pending_msg_events(); let ev = SendEvent::from_event(events[0].clone()); service_node.inner.node.handle_update_add_htlc(payer_node_id, &ev.msgs[0]); diff --git a/lightning-macros/Cargo.toml b/lightning-macros/Cargo.toml index 8a20670bad4..822b50816df 100644 --- a/lightning-macros/Cargo.toml +++ b/lightning-macros/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-macros" -version = "0.2.0+git" +version = "0.2.2+git" authors = ["Elias Rohrer"] license = "MIT OR Apache-2.0" repository 
= "https://github.com/lightningdevkit/rust-lightning/" diff --git a/lightning-macros/src/lib.rs b/lightning-macros/src/lib.rs index e784acf72fb..778da45ee8f 100644 --- a/lightning-macros/src/lib.rs +++ b/lightning-macros/src/lib.rs @@ -138,7 +138,7 @@ fn process_fields(group: Group) -> proc_macro::TokenStream { if let TokenTree::Group(group) = ty_info { let first_group_tok = group.stream().into_iter().next().unwrap(); if let TokenTree::Ident(ident) = first_group_tok { - if ident.to_string() == "legacy" { + if ident.to_string() == "legacy" || ident.to_string() == "custom" { continue; } } @@ -155,13 +155,13 @@ fn process_fields(group: Group) -> proc_macro::TokenStream { computed_fields } -/// Scans a match statement for legacy fields which should be skipped. +/// Scans a match statement for legacy or custom fields which should be skipped. /// /// This is used internally in LDK's TLV serialization logic and is not expected to be used by /// other crates. /// /// Wraps a `match self {..}` statement and scans the fields in the match patterns (in the form -/// `ref $field_name: $field_ty`) for types marked `legacy`, skipping those fields. +/// `ref $field_name: $field_ty`) for types marked `legacy` or `custom`, skipping those fields. /// /// Specifically, it expects input like the following, simply dropping `field3` and the /// `: $field_ty` after each field name. 
diff --git a/lightning-net-tokio/Cargo.toml b/lightning-net-tokio/Cargo.toml index 6c45f40e3c8..79b227f44dc 100644 --- a/lightning-net-tokio/Cargo.toml +++ b/lightning-net-tokio/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-net-tokio" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" @@ -19,7 +19,7 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] bitcoin = "0.32.2" lightning = { version = "0.3.0", path = "../lightning" } -tokio = { version = "1.35", features = [ "rt", "sync", "net", "time" ] } +tokio = { version = "1.35", features = [ "rt", "sync", "net", "time", "io-util" ] } [dev-dependencies] tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] } diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 038b251f3b8..ee129669410 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -37,13 +37,14 @@ use lightning::ln::msgs::SocketAddress; use lightning::ln::peer_handler; use lightning::ln::peer_handler::APeerManager; use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait; +use lightning::sign::EntropySource; use std::future::Future; use std::hash::Hash; use std::net::SocketAddr; use std::net::TcpStream as StdTcpStream; use std::ops::Deref; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::task::{self, Poll}; @@ -51,6 +52,9 @@ use std::time::Duration; static ID_COUNTER: AtomicU64 = AtomicU64::new(0); +const CONNECT_OUTBOUND_TIMEOUT: u64 = 10; +const TOR_CONNECT_OUTBOUND_TIMEOUT: u64 = 30; + // We only need to select over multiple futures in one place, and taking on the full `tokio/macros` // dependency tree in order to do so (which has broken our MSRV before) is excessive. 
Instead, we // define a trivial two- and three- select macro with the specific types we need and just use that. @@ -205,18 +209,17 @@ impl Connection { } us_lock.read_paused }; - // TODO: Drop the Box'ing of the futures once Rust has pin-on-stack support. let select_result = if read_paused { TwoSelector { - a: Box::pin(write_avail_receiver.recv()), - b: Box::pin(read_wake_receiver.recv()), + a: pin!(write_avail_receiver.recv()), + b: pin!(read_wake_receiver.recv()), } .await } else { ThreeSelector { - a: Box::pin(write_avail_receiver.recv()), - b: Box::pin(read_wake_receiver.recv()), - c: Box::pin(reader.readable()), + a: pin!(write_avail_receiver.recv()), + b: pin!(read_wake_receiver.recv()), + c: pin!(reader.readable()), } .await }; @@ -381,6 +384,16 @@ where PM::Target: APeerManager, { let remote_addr = get_addr_from_stream(&stream); + setup_outbound_internal(peer_manager, their_node_id, stream, remote_addr) +} + +fn setup_outbound_internal( + peer_manager: PM, their_node_id: PublicKey, stream: StdTcpStream, + remote_addr: Option, +) -> impl std::future::Future +where + PM::Target: APeerManager, +{ let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream); #[cfg(test)] let last_us = Arc::clone(&us); @@ -463,13 +476,174 @@ where PM::Target: APeerManager, { let connect_fut = async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }; - if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), connect_fut).await { + if let Ok(Ok(stream)) = + time::timeout(Duration::from_secs(CONNECT_OUTBOUND_TIMEOUT), connect_fut).await + { Some(setup_outbound(peer_manager, their_node_id, stream)) } else { None } } +/// Routes [`connect_outbound`] through Tor. Implements stream isolation for each connection +/// using a stream isolation parameter sourced from [`EntropySource::get_secure_random_bytes`]. 
+/// +/// The `addr` parameter will be set to the [`PeerDetails::socket_address`] for that peer in +/// [`PeerManager::list_peers`], and if it is not a private IPv4 or IPv6 address, it will also +/// reported to the peer in our init message. +/// +/// Returns a future (as the fn is async) that yields another future, see [`connect_outbound`] for +/// details on this return value. +/// +/// [`PeerDetails::socket_address`]: lightning::ln::peer_handler::PeerDetails::socket_address +/// [`PeerManager::list_peers`]: lightning::ln::peer_handler::PeerManager::list_peers +pub async fn tor_connect_outbound( + peer_manager: PM, their_node_id: PublicKey, addr: SocketAddress, tor_proxy_addr: SocketAddr, + entropy_source: ES, +) -> Option> +where + PM::Target: APeerManager, +{ + let connect_fut = async { + tor_connect(addr.clone(), tor_proxy_addr, entropy_source) + .await + .map(|s| s.into_std().unwrap()) + }; + if let Ok(Ok(stream)) = + time::timeout(Duration::from_secs(TOR_CONNECT_OUTBOUND_TIMEOUT), connect_fut).await + { + Some(setup_outbound_internal(peer_manager, their_node_id, stream, Some(addr))) + } else { + None + } +} + +async fn tor_connect( + addr: SocketAddress, tor_proxy_addr: SocketAddr, entropy_source: ES, +) -> Result { + use std::io::Write; + use tokio::io::AsyncReadExt; + + const IPV4_ADDR_LEN: usize = 4; + const IPV6_ADDR_LEN: usize = 16; + const HOSTNAME_MAX_LEN: usize = u8::MAX as usize; + + // Constants defined in RFC 1928 and RFC 1929 + const VERSION: u8 = 5; + const NMETHODS: u8 = 1; + const USERNAME_PASSWORD_AUTH: u8 = 2; + const METHOD_SELECT_REPLY_LEN: usize = 2; + const USERNAME_PASSWORD_VERSION: u8 = 1; + const USERNAME_PASSWORD_REPLY_LEN: usize = 2; + const CMD_CONNECT: u8 = 1; + const RSV: u8 = 0; + const ATYP_IPV4: u8 = 1; + const ATYP_DOMAINNAME: u8 = 3; + const ATYP_IPV6: u8 = 4; + const SUCCESS: u8 = 0; + + // Tor extensions, see https://spec.torproject.org/socks-extensions.html for further details + const USERNAME: &[u8] = b"0"; + const 
USERNAME_LEN: usize = USERNAME.len(); + const PASSWORD_ENTROPY_LEN: usize = 32; + // We encode the password as a hex string on the wire. RFC 1929 allows arbitrary byte sequences but we choose to be conservative. + const PASSWORD_LEN: usize = PASSWORD_ENTROPY_LEN * 2; + + const USERNAME_PASSWORD_REQUEST_LEN: usize = + 1 /* VER */ + 1 /* ULEN */ + USERNAME_LEN + 1 /* PLEN */ + PASSWORD_LEN; + const SOCKS5_REQUEST_MAX_LEN: usize = 1 /* VER */ + 1 /* CMD */ + 1 /* RSV */ + 1 /* ATYP */ + + 1 /* HOSTNAME len */ + HOSTNAME_MAX_LEN /* HOSTNAME */ + 2 /* PORT */; + const SOCKS5_REPLY_HEADER_LEN: usize = 1 /* VER */ + 1 /* REP */ + 1 /* RSV */ + 1 /* ATYP */; + + let method_selection_request = [VERSION, NMETHODS, USERNAME_PASSWORD_AUTH]; + let mut tcp_stream = TcpStream::connect(&tor_proxy_addr).await.map_err(|_| ())?; + tokio::io::AsyncWriteExt::write_all(&mut tcp_stream, &method_selection_request) + .await + .map_err(|_| ())?; + + let mut method_selection_reply = [0u8; METHOD_SELECT_REPLY_LEN]; + tcp_stream.read_exact(&mut method_selection_reply).await.map_err(|_| ())?; + if method_selection_reply != [VERSION, USERNAME_PASSWORD_AUTH] { + return Err(()); + } + + let password: [u8; PASSWORD_ENTROPY_LEN] = entropy_source.get_secure_random_bytes(); + let mut username_password_request = [0u8; USERNAME_PASSWORD_REQUEST_LEN]; + let mut stream = &mut username_password_request[..]; + stream.write_all(&[USERNAME_PASSWORD_VERSION, USERNAME_LEN as u8]).unwrap(); + stream.write_all(USERNAME).unwrap(); + stream.write_all(&[PASSWORD_LEN as u8]).unwrap(); + // Encode the password as a hex string even if RFC 1929 allows arbitrary sequences + for byte in password { + write!(stream, "{:02x}", byte).unwrap(); + } + debug_assert!(stream.is_empty()); + tokio::io::AsyncWriteExt::write_all(&mut tcp_stream, &username_password_request) + .await + .map_err(|_| ())?; + + let mut username_password_reply = [0u8; USERNAME_PASSWORD_REPLY_LEN]; + tcp_stream.read_exact(&mut 
username_password_reply).await.map_err(|_| ())?; + if username_password_reply != [USERNAME_PASSWORD_VERSION, SUCCESS] { + return Err(()); + } + + let mut socks5_request = [0u8; SOCKS5_REQUEST_MAX_LEN]; + let mut stream = &mut socks5_request[..]; + stream.write_all(&[VERSION, CMD_CONNECT, RSV]).unwrap(); + match addr { + SocketAddress::TcpIpV4 { addr, port } => { + stream.write_all(&[ATYP_IPV4]).unwrap(); + stream.write_all(&addr).unwrap(); + stream.write_all(&port.to_be_bytes()).unwrap(); + }, + SocketAddress::TcpIpV6 { addr, port } => { + stream.write_all(&[ATYP_IPV6]).unwrap(); + stream.write_all(&addr).unwrap(); + stream.write_all(&port.to_be_bytes()).unwrap(); + }, + ref onion_v3 @ SocketAddress::OnionV3 { port, .. } => { + let onion_v3_url = onion_v3.to_string(); + let hostname = onion_v3_url.split_once(':').ok_or(())?.0.as_bytes(); + stream.write_all(&[ATYP_DOMAINNAME, hostname.len() as u8]).unwrap(); + stream.write_all(hostname).unwrap(); + stream.write_all(&port.to_be_bytes()).unwrap(); + }, + SocketAddress::Hostname { hostname, port } => { + stream.write_all(&[ATYP_DOMAINNAME, hostname.len()]).unwrap(); + stream.write_all(hostname.as_bytes()).unwrap(); + stream.write_all(&port.to_be_bytes()).unwrap(); + }, + SocketAddress::OnionV2 { .. } => return Err(()), + }; + let bytes_remaining = stream.len(); + tokio::io::AsyncWriteExt::write_all( + &mut tcp_stream, + &socks5_request[..socks5_request.len() - bytes_remaining], + ) + .await + .map_err(|_| ())?; + + let mut socks5_reply_header = [0u8; SOCKS5_REPLY_HEADER_LEN]; + tcp_stream.read_exact(&mut socks5_reply_header).await.map_err(|_| ())?; + if socks5_reply_header[..3] != [VERSION, SUCCESS, RSV] { + return Err(()); + } + match socks5_reply_header[3] { + ATYP_IPV4 => tcp_stream.read_exact(&mut [0u8; IPV4_ADDR_LEN]).await.map_err(|_| ())?, + ATYP_DOMAINNAME => { + let hostname_len = tcp_stream.read_u8().await.map_err(|_| ())? 
as usize; + let mut hostname_buffer = [0u8; HOSTNAME_MAX_LEN]; + tcp_stream.read_exact(&mut hostname_buffer[..hostname_len]).await.map_err(|_| ())? + }, + ATYP_IPV6 => tcp_stream.read_exact(&mut [0u8; IPV6_ADDR_LEN]).await.map_err(|_| ())?, + _ => return Err(()), + }; + tcp_stream.read_u16().await.map_err(|_| ())?; + + Ok(tcp_stream) +} + const SOCK_WAKER_VTABLE: task::RawWakerVTable = task::RawWakerVTable::new( clone_socket_waker, wake_socket_waker, @@ -617,7 +791,7 @@ mod tests { use lightning::ln::types::ChannelId; use lightning::routing::gossip::NodeId; use lightning::types::features::*; - use lightning::util::test_utils::TestNodeSigner; + use lightning::util::test_utils::{TestLogger, TestNodeSigner}; use tokio::sync::mpsc; @@ -626,13 +800,6 @@ mod tests { use std::sync::{Arc, Mutex}; use std::time::Duration; - pub struct TestLogger(); - impl lightning::util::logger::Logger for TestLogger { - fn log(&self, record: lightning::util::logger::Record) { - println!("{}", record); - } - } - struct MsgHandler { expected_pubkey: PublicKey, pubkey_connected: mpsc::Sender<()>, @@ -826,7 +993,7 @@ mod tests { a_msg_handler, 0, &[1; 32], - Arc::new(TestLogger()), + Arc::new(TestLogger::new()), Arc::new(TestNodeSigner::new(a_key)), )); @@ -850,7 +1017,7 @@ mod tests { b_msg_handler, 0, &[2; 32], - Arc::new(TestLogger()), + Arc::new(TestLogger::new()), Arc::new(TestNodeSigner::new(b_key)), )); @@ -913,7 +1080,7 @@ mod tests { a_msg_handler, 0, &[1; 32], - Arc::new(TestLogger()), + Arc::new(TestLogger::new()), Arc::new(TestNodeSigner::new(a_key)), )); @@ -942,4 +1109,193 @@ mod tests { async fn unthreaded_race_disconnect_accept() { race_disconnect_accept().await; } + + #[cfg(tor)] + #[tokio::test] + async fn test_tor_connect() { + use super::tor_connect; + use lightning::sign::EntropySource; + use std::net::SocketAddr; + + // Set TOR_PROXY=127.0.0.1:9050 + let tor_proxy_addr: SocketAddr = std::env!("TOR_PROXY").parse().unwrap(); + + struct TestEntropySource; + + impl 
EntropySource for TestEntropySource { + fn get_secure_random_bytes(&self) -> [u8; 32] { + [0xffu8; 32] + } + } + + let entropy_source = TestEntropySource; + + // Success cases + + for addr_str in [ + // google.com + "142.250.189.196:80", + // google.com + "[2607:f8b0:4005:813::2004]:80", + // torproject.org + "torproject.org:80", + // torproject.org + "2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion:80", + ] { + let addr: SocketAddress = addr_str.parse().unwrap(); + let tcp_stream = tor_connect(addr, tor_proxy_addr, &entropy_source).await.unwrap(); + assert_eq!( + tcp_stream.try_read(&mut [0u8; 1]).unwrap_err().kind(), + std::io::ErrorKind::WouldBlock + ); + } + + // Failure cases + + for addr_str in [ + // google.com, with some invalid port + "142.250.189.196:1234", + // google.com, with some invalid port + "[2607:f8b0:4005:813::2004]:1234", + // torproject.org, with some invalid port + "torproject.org:1234", + // torproject.org, with a typo + "3gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion:80", + ] { + let addr: SocketAddress = addr_str.parse().unwrap(); + assert!(tor_connect(addr, tor_proxy_addr, &entropy_source).await.is_err()); + } + } + + async fn test_remote_address_with_override(b_addr_override: Option) { + let secp_ctx = Secp256k1::new(); + let a_key = SecretKey::from_slice(&[1; 32]).unwrap(); + let b_key = SecretKey::from_slice(&[1; 32]).unwrap(); + let a_pub = PublicKey::from_secret_key(&secp_ctx, &a_key); + let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key); + + let (a_connected_sender, mut a_connected) = mpsc::channel(1); + let (a_disconnected_sender, _a_disconnected) = mpsc::channel(1); + let a_handler = Arc::new(MsgHandler { + expected_pubkey: b_pub, + pubkey_connected: a_connected_sender, + pubkey_disconnected: a_disconnected_sender, + disconnected_flag: AtomicBool::new(false), + msg_events: Mutex::new(Vec::new()), + }); + let a_msg_handler = MessageHandler { + chan_handler: Arc::clone(&a_handler), + 
route_handler: Arc::clone(&a_handler), + onion_message_handler: Arc::new(IgnoringMessageHandler {}), + custom_message_handler: Arc::new(IgnoringMessageHandler {}), + send_only_message_handler: Arc::new(IgnoringMessageHandler {}), + }; + let a_logger = Arc::new(TestLogger::new()); + let a_manager = Arc::new(PeerManager::new( + a_msg_handler, + 0, + &[1; 32], + Arc::clone(&a_logger), + Arc::new(TestNodeSigner::new(a_key)), + )); + + let (b_connected_sender, mut b_connected) = mpsc::channel(1); + let (b_disconnected_sender, _b_disconnected) = mpsc::channel(1); + let b_handler = Arc::new(MsgHandler { + expected_pubkey: a_pub, + pubkey_connected: b_connected_sender, + pubkey_disconnected: b_disconnected_sender, + disconnected_flag: AtomicBool::new(false), + msg_events: Mutex::new(Vec::new()), + }); + let b_msg_handler = MessageHandler { + chan_handler: Arc::clone(&b_handler), + route_handler: Arc::clone(&b_handler), + onion_message_handler: Arc::new(IgnoringMessageHandler {}), + custom_message_handler: Arc::new(IgnoringMessageHandler {}), + send_only_message_handler: Arc::new(IgnoringMessageHandler {}), + }; + let b_logger = Arc::new(TestLogger::new()); + let b_manager = Arc::new(PeerManager::new( + b_msg_handler, + 0, + &[2; 32], + Arc::clone(&b_logger), + Arc::new(TestNodeSigner::new(b_key)), + )); + + // We bind on localhost, hoping the environment is properly configured with a local + // address. This may not always be the case in containers and the like, so if this test is + // failing for you check that you have a loopback interface and it is configured with + // 127.0.0.1. + let (conn_a, conn_b) = make_tcp_connection(); + + // Given that `make_tcp_connection` binds the peer to 127.0.0.1, + // `get_addr_from_stream` always returns a private address, and will not be reported to the peer + // in the init message. 
+ let b_addr = b_addr_override + .clone() + .unwrap_or_else(|| super::get_addr_from_stream(&conn_a).unwrap()); + let _fut_a = super::setup_outbound_internal( + Arc::clone(&a_manager), + b_pub, + conn_a, + Some(b_addr.clone()), + ); + let _fut_b = super::setup_inbound(Arc::clone(&b_manager), conn_b); + + tokio::time::timeout(Duration::from_secs(10), a_connected.recv()).await.unwrap(); + tokio::time::timeout(Duration::from_secs(1), b_connected.recv()).await.unwrap(); + + // Check `PeerDetails::socket_address` + + let mut peers = a_manager.list_peers(); + assert_eq!(peers.len(), 1); + let peer = peers.pop().unwrap(); + assert_eq!(peer.socket_address, Some(b_addr)); + + // Check the init message sent to the peer + + let mainnet_hash = ChainHash::using_genesis_block(Network::Testnet); + let a_init_msg = Init { + features: InitFeatures::empty(), + networks: Some(vec![mainnet_hash]), + // We set it to the override here because addresses from the stream are private addresses, + // so they are filtered out and not reported to the peer + remote_network_address: b_addr_override, + }; + a_logger.assert_log( + "lightning::ln::peer_handler", + format!("Enqueueing message Init({:?})", a_init_msg), + 1, + ); + } + + #[tokio::test] + async fn test_remote_address() { + // Test that the remote address of the peer passed to `setup_outbound_internal` is set correctly in the + // corresponding `PeerDetails::socket_address` returned from `PeerManager::list_peers`, and if it is + // not a private address, that it is reported to the peer in the init message. 
+ + // This tests a private address read from `get_addr_from_stream` + test_remote_address_with_override(None).await; + // Make sure these are not private IPv4 or IPv6 addresses; we assert they are present in the init message + test_remote_address_with_override(Some(SocketAddress::TcpIpV4 { + addr: [0xab; 4], + port: 0xabab, + })) + .await; + test_remote_address_with_override(Some(SocketAddress::TcpIpV6 { + addr: [0x2a; 16], + port: 0x2a2a, + })) + .await; + let torproject_onion_addr_str = + "2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion:80"; + let torproject_onion_addr: SocketAddress = torproject_onion_addr_str.parse().unwrap(); + test_remote_address_with_override(Some(torproject_onion_addr)).await; + let torproject_addr_str = "torproject.org:80"; + let torproject_addr: SocketAddress = torproject_addr_str.parse().unwrap(); + test_remote_address_with_override(Some(torproject_addr)).await; + } } diff --git a/lightning-persister/Cargo.toml b/lightning-persister/Cargo.toml index e06803c6b89..19c5ac2545e 100644 --- a/lightning-persister/Cargo.toml +++ b/lightning-persister/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-persister" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Valentine Wallace", "Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs index dacc8523116..3129748afda 100644 --- a/lightning-persister/src/fs_store.rs +++ b/lightning-persister/src/fs_store.rs @@ -14,8 +14,6 @@ use std::sync::{Arc, Mutex, RwLock}; #[cfg(feature = "tokio")] use core::future::Future; #[cfg(feature = "tokio")] -use core::pin::Pin; -#[cfg(feature = "tokio")] use lightning::util::persist::KVStore; #[cfg(target_os = "windows")] @@ -464,93 +462,85 @@ impl FilesystemStoreInner { impl KVStore for FilesystemStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> Pin, 
lightning::io::Error>> + 'static + Send>> { + ) -> impl Future, lightning::io::Error>> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( + let path = this.get_checked_dest_file_path( primary_namespace, secondary_namespace, Some(key), "read", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; + ); - Box::pin(async move { + async move { + let path = match path { + Ok(path) => path, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || this.read(path)).await.unwrap_or_else(|e| { Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e)) }) - }) + } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> Pin> + 'static + Send>> { + ) -> impl Future> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - Some(key), - "write", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; - - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone()); - Box::pin(async move { + let path = this + .get_checked_dest_file_path(primary_namespace, secondary_namespace, Some(key), "write") + .map(|path| (self.get_new_version_and_lock_ref(path.clone()), path)); + + async move { + let ((inner_lock_ref, version), path) = match path { + Ok(res) => res, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || { this.write_version(inner_lock_ref, path, buf, version) }) .await .unwrap_or_else(|e| Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e))) - }) + } } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> Pin> + 'static + Send>> { + ) -> impl Future> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - Some(key), - 
"remove", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; - - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone()); - Box::pin(async move { + let path = this + .get_checked_dest_file_path(primary_namespace, secondary_namespace, Some(key), "remove") + .map(|path| (self.get_new_version_and_lock_ref(path.clone()), path)); + + async move { + let ((inner_lock_ref, version), path) = match path { + Ok(res) => res, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || { this.remove_version(inner_lock_ref, path, lazy, version) }) .await .unwrap_or_else(|e| Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e))) - }) + } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> Pin, lightning::io::Error>> + 'static + Send>> { + ) -> impl Future, lightning::io::Error>> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - None, - "list", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; + let path = + this.get_checked_dest_file_path(primary_namespace, secondary_namespace, None, "list"); - Box::pin(async move { + async move { + let path = match path { + Ok(path) => path, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || this.list(path)).await.unwrap_or_else(|e| { Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e)) }) - }) + } } } @@ -570,15 +560,15 @@ fn dir_entry_is_key(dir_entry: &fs::DirEntry) -> Result = fs_store.clone(); + let async_fs_store = Arc::clone(&fs_store); let data1 = vec![42u8; 32]; let data2 = vec![43u8; 32]; - let primary_namespace = "testspace"; - let secondary_namespace = "testsubspace"; + let primary = "testspace"; + let secondary = "testsubspace"; let key = "testkey"; // Test writing the same key twice with different data. 
Execute the asynchronous part out of order to ensure // that eventual consistency works. - let fut1 = async_fs_store.write(primary_namespace, secondary_namespace, key, data1); + let fut1 = KVStore::write(&*async_fs_store, primary, secondary, key, data1); assert_eq!(fs_store.state_size(), 1); - let fut2 = async_fs_store.remove(primary_namespace, secondary_namespace, key, false); + let fut2 = KVStore::remove(&*async_fs_store, primary, secondary, key, false); assert_eq!(fs_store.state_size(), 1); - let fut3 = async_fs_store.write(primary_namespace, secondary_namespace, key, data2.clone()); + let fut3 = KVStore::write(&*async_fs_store, primary, secondary, key, data2.clone()); assert_eq!(fs_store.state_size(), 1); fut3.await.unwrap(); @@ -787,21 +777,18 @@ mod tests { assert_eq!(fs_store.state_size(), 0); // Test list. - let listed_keys = - async_fs_store.list(primary_namespace, secondary_namespace).await.unwrap(); + let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap(); assert_eq!(listed_keys.len(), 1); assert_eq!(listed_keys[0], key); // Test read. We expect to read data2, as the write call was initiated later. - let read_data = - async_fs_store.read(primary_namespace, secondary_namespace, key).await.unwrap(); + let read_data = KVStore::read(&*async_fs_store, primary, secondary, key).await.unwrap(); assert_eq!(data2, &*read_data); // Test remove. 
- async_fs_store.remove(primary_namespace, secondary_namespace, key, false).await.unwrap(); + KVStore::remove(&*async_fs_store, primary, secondary, key, false).await.unwrap(); - let listed_keys = - async_fs_store.list(primary_namespace, secondary_namespace).await.unwrap(); + let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap(); assert_eq!(listed_keys.len(), 0); } diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index 1de51f44cb2..48b383ad1ea 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -1,14 +1,11 @@ +use lightning::check_closed_broadcast; use lightning::events::ClosureReason; -use lightning::ln::functional_test_utils::{ - check_closed_event, connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, - create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs, send_payment, -}; +use lightning::ln::functional_test_utils::*; use lightning::util::persist::{ migrate_kv_store_data, read_channel_monitors, KVStoreSync, MigratableKVStore, KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, }; use lightning::util::test_utils; -use lightning::{check_added_monitors, check_closed_broadcast}; use std::panic::RefUnwindSafe; @@ -135,7 +132,9 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { ); node_cfgs[0].chain_monitor = chain_mon_0; node_cfgs[1].chain_monitor = chain_mon_1; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -190,7 +189,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 
1, reason, &[node_b_id], 100000); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); @@ -206,7 +205,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { check_closed_broadcast!(nodes[1], true); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Make sure everything is persisted as expected after close. check_persisted_data!(11); diff --git a/lightning-rapid-gossip-sync/Cargo.toml b/lightning-rapid-gossip-sync/Cargo.toml index b2cc623ab5b..b623a5aed13 100644 --- a/lightning-rapid-gossip-sync/Cargo.toml +++ b/lightning-rapid-gossip-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-rapid-gossip-sync" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Arik Sosman "] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" diff --git a/lightning-rapid-gossip-sync/src/lib.rs b/lightning-rapid-gossip-sync/src/lib.rs index 429a3560be0..a9653754655 100644 --- a/lightning-rapid-gossip-sync/src/lib.rs +++ b/lightning-rapid-gossip-sync/src/lib.rs @@ -132,19 +132,13 @@ impl From for GraphSyncError { /// See [crate-level documentation] for usage. /// /// [crate-level documentation]: crate -pub struct RapidGossipSync>, L: Deref> -where - L::Target: Logger, -{ +pub struct RapidGossipSync>, L: Logger> { network_graph: NG, logger: L, is_initial_sync_complete: AtomicBool, } -impl>, L: Deref> RapidGossipSync -where - L::Target: Logger, -{ +impl>, L: Logger> RapidGossipSync { /// Instantiate a new [`RapidGossipSync`] instance. 
pub fn new(network_graph: NG, logger: L) -> Self { Self { network_graph, logger, is_initial_sync_complete: AtomicBool::new(false) } diff --git a/lightning-rapid-gossip-sync/src/processing.rs b/lightning-rapid-gossip-sync/src/processing.rs index 8319506b574..9d3287969f2 100644 --- a/lightning-rapid-gossip-sync/src/processing.rs +++ b/lightning-rapid-gossip-sync/src/processing.rs @@ -37,10 +37,7 @@ const MAX_INITIAL_NODE_ID_VECTOR_CAPACITY: u32 = 50_000; /// suggestion. const STALE_RGS_UPDATE_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14; -impl>, L: Deref> RapidGossipSync -where - L::Target: Logger, -{ +impl>, L: Logger> RapidGossipSync { #[cfg(feature = "std")] pub(crate) fn update_network_graph_from_byte_stream( &self, read_cursor: &mut R, diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml index 439157e528b..4e8d330089d 100644 --- a/lightning-tests/Cargo.toml +++ b/lightning-tests/Cargo.toml @@ -15,6 +15,7 @@ lightning-types = { path = "../lightning-types", features = ["_test_utils"] } lightning-invoice = { path = "../lightning-invoice", default-features = false } lightning-macros = { path = "../lightning-macros" } lightning = { path = "../lightning", features = ["_test_utils"] } +lightning_0_2 = { package = "lightning", version = "0.2.0", features = ["_test_utils"] } lightning_0_1 = { package = "lightning", version = "0.1.7", features = ["_test_utils"] } lightning_0_0_125 = { package = "lightning", version = "0.0.125", features = ["_test_utils"] } diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index cef180fbd4e..14b0a5c5822 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -10,6 +10,16 @@ //! Tests which test upgrading from previous versions of LDK or downgrading to previous versions of //! LDK. 
+use lightning_0_2::commitment_signed_dance as commitment_signed_dance_0_2; +use lightning_0_2::events::Event as Event_0_2; +use lightning_0_2::get_monitor as get_monitor_0_2; +use lightning_0_2::ln::channelmanager::PaymentId as PaymentId_0_2; +use lightning_0_2::ln::channelmanager::RecipientOnionFields as RecipientOnionFields_0_2; +use lightning_0_2::ln::functional_test_utils as lightning_0_2_utils; +use lightning_0_2::ln::msgs::ChannelMessageHandler as _; +use lightning_0_2::routing::router as router_0_2; +use lightning_0_2::util::ser::Writeable as _; + use lightning_0_1::commitment_signed_dance as commitment_signed_dance_0_1; use lightning_0_1::events::ClosureReason as ClosureReason_0_1; use lightning_0_1::expect_pending_htlcs_forwardable_ignore as expect_pending_htlcs_forwardable_ignore_0_1; @@ -298,7 +308,9 @@ fn test_0_1_legacy_remote_key_derivation() { connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let mut spendable_event = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(spendable_event.len(), 1); - if let Event::SpendableOutputs { outputs, channel_id: ev_id } = spendable_event.pop().unwrap() { + if let Event::SpendableOutputs { outputs, channel_id: ev_id, counterparty_node_id: _ } = + spendable_event.pop().unwrap() + { assert_eq!(ev_id.unwrap().0, channel_id); assert_eq!(outputs.len(), 1); let spk = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(); @@ -441,12 +453,10 @@ fn do_test_0_1_htlc_forward_after_splice(fail_htlc: bool) { reconnect_b_c_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_b_c_args); - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); let splice_tx = 
splice_channel(&nodes[0], &nodes[1], ChannelId(chan_id_bytes_a), contribution); for node in nodes.iter() { mine_transaction(node, &splice_tx); @@ -498,3 +508,194 @@ fn test_0_1_htlc_forward_after_splice() { do_test_0_1_htlc_forward_after_splice(true); do_test_0_1_htlc_forward_after_splice(false); } + +#[derive(PartialEq, Eq)] +enum MidHtlcForwardCase { + // Restart the upgraded node after locking an HTLC forward into the inbound edge, but before + // decoding the onion. + PreOnionDecode, + // Restart the upgraded node after locking an HTLC forward into the inbound edge + decoding the + // onion. + PostOnionDecode, + // Restart the upgraded node after the HTLC has been decoded and placed in the pending intercepted + // HTLCs map. + Intercept, +} + +#[test] +fn upgrade_pre_htlc_forward_onion_decode() { + do_upgrade_mid_htlc_forward(MidHtlcForwardCase::PreOnionDecode); +} + +#[test] +fn upgrade_mid_htlc_forward() { + do_upgrade_mid_htlc_forward(MidHtlcForwardCase::PostOnionDecode); +} + +#[test] +fn upgrade_mid_htlc_intercept_forward() { + do_upgrade_mid_htlc_forward(MidHtlcForwardCase::Intercept); +} + +fn do_upgrade_mid_htlc_forward(test: MidHtlcForwardCase) { + // In 0.3, we started reconstructing the `ChannelManager`'s HTLC forwards maps from the HTLCs + // contained in `Channel`s, as part of removing the requirement to regularly persist the + // `ChannelManager`. However, HTLC forwards can only be reconstructed this way if they were + // received on 0.3 or higher. Test that HTLC forwards that were serialized on <=0.2 will still + // succeed when read on 0.3+. 
+ let (node_a_ser, node_b_ser, node_c_ser, mon_a_1_ser, mon_b_1_ser, mon_b_2_ser, mon_c_1_ser); + let (node_a_id, node_b_id, node_c_id); + let (payment_secret_bytes, payment_hash_bytes, payment_preimage_bytes); + let chan_id_bytes_b_c; + + { + let chanmon_cfgs = lightning_0_2_utils::create_chanmon_cfgs(3); + let node_cfgs = lightning_0_2_utils::create_node_cfgs(3, &chanmon_cfgs); + + let mut intercept_cfg = lightning_0_2_utils::test_default_channel_config(); + intercept_cfg.accept_intercept_htlcs = true; + let cfgs = &[None, Some(intercept_cfg), None]; + let node_chanmgrs = lightning_0_2_utils::create_node_chanmgrs(3, &node_cfgs, cfgs); + let nodes = lightning_0_2_utils::create_network(3, &node_cfgs, &node_chanmgrs); + + node_a_id = nodes[0].node.get_our_node_id(); + node_b_id = nodes[1].node.get_our_node_id(); + node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_a = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 10_000_000, 0, + ) + .2; + + let chan_id_b = lightning_0_2_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 2, 50_000, 0, + ) + .2; + chan_id_bytes_b_c = chan_id_b.0; + + // Ensure all nodes are at the same initial height. 
+ let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + lightning_0_2_utils::connect_blocks(node, blocks_to_mine); + } + } + + // Initiate an HTLC to be sent over node_a -> node_b -> node_c + let (preimage, hash, secret) = + lightning_0_2_utils::get_payment_preimage_hash(&nodes[2], Some(1_000_000), None); + payment_preimage_bytes = preimage.0; + payment_hash_bytes = hash.0; + payment_secret_bytes = secret.0; + + let pay_params = router_0_2::PaymentParameters::from_node_id( + node_c_id, + lightning_0_2_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_2::RouteParameters::from_payment_params_and_value(pay_params, 1_000_000); + let mut route = lightning_0_2_utils::get_route(&nodes[0], &route_params).unwrap(); + + if test == MidHtlcForwardCase::Intercept { + route.paths[0].hops[1].short_channel_id = nodes[1].node.get_intercept_scid(); + } + + let onion = RecipientOnionFields_0_2::secret_only(secret); + let id = PaymentId_0_2(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + lightning_0_2_utils::check_added_monitors(&nodes[0], 1); + let send_event = lightning_0_2_utils::SendEvent::from_node(&nodes[0]); + + // Lock in the HTLC on the inbound edge of node_b without initiating the outbound edge. + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + commitment_signed_dance_0_2!(nodes[1], nodes[0], send_event.commitment_msg, false); + if test != MidHtlcForwardCase::PreOnionDecode { + nodes[1].node.test_process_pending_update_add_htlcs(); + } + let events = nodes[1].node.get_and_clear_pending_events(); + if test == MidHtlcForwardCase::Intercept { + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event_0_2::HTLCIntercepted { .. 
})); + } else { + assert!(events.is_empty()); + } + + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + node_c_ser = nodes[2].node.encode(); + mon_a_1_ser = get_monitor_0_2!(nodes[0], chan_id_a).encode(); + mon_b_1_ser = get_monitor_0_2!(nodes[1], chan_id_a).encode(); + mon_b_2_ser = get_monitor_0_2!(nodes[1], chan_id_b).encode(); + mon_c_1_ser = get_monitor_0_2!(nodes[2], chan_id_b).encode(); + } + + // Create a dummy node to reload over with the 0.2 state + let mut chanmon_cfgs = create_chanmon_cfgs(3); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let (persister_a, persister_b, persister_c, chain_mon_a, chain_mon_b, chain_mon_c); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (node_a, node_b, node_c); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let config = test_default_channel_config(); + let a_mons = &[&mon_a_1_ser[..]]; + reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + let b_mons = &[&mon_b_1_ser[..], &mon_b_2_ser[..]]; + reload_node!(nodes[1], config.clone(), &node_b_ser, b_mons, persister_b, chain_mon_b, node_b); + let c_mons = &[&mon_c_1_ser[..]]; + reload_node!(nodes[2], config, &node_c_ser, c_mons, persister_c, chain_mon_c, node_c); + + reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + let mut reconnect_b_c_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_b_c_args.send_channel_ready = (true, true); + reconnect_b_c_args.send_announcement_sigs = (true, true); + reconnect_nodes(reconnect_b_c_args); + + // Now release the HTLC from node_b to node_c, to be claimed back to node_a + 
nodes[1].node.process_pending_htlc_forwards(); + + if test == MidHtlcForwardCase::Intercept { + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + let (intercept_id, expected_outbound_amt_msat) = match events[0] { + Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => { + (intercept_id, expected_outbound_amount_msat) + }, + _ => panic!(), + }; + nodes[1] + .node + .forward_intercepted_htlc( + intercept_id, + &ChannelId(chan_id_bytes_b_c), + nodes[2].node.get_our_node_id(), + expected_outbound_amt_msat, + ) + .unwrap(); + nodes[1].node.process_pending_htlc_forwards(); + } + + let pay_secret = PaymentSecret(payment_secret_bytes); + let pay_hash = PaymentHash(payment_hash_bytes); + let pay_preimage = PaymentPreimage(payment_preimage_bytes); + + check_added_monitors(&nodes[1], 1); + let forward_event = SendEvent::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &forward_event.msgs[0]); + let commitment = &forward_event.commitment_msg; + do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, false); + + expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], pay_hash, pay_secret, 1_000_000); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], pay_preimage); +} diff --git a/lightning-transaction-sync/Cargo.toml b/lightning-transaction-sync/Cargo.toml index 1a5a56212ba..4bc37d7ff48 100644 --- a/lightning-transaction-sync/Cargo.toml +++ b/lightning-transaction-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-transaction-sync" -version = "0.2.0+git" +version = "0.3.0+git" authors = ["Elias Rohrer"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" diff --git a/lightning-transaction-sync/src/electrum.rs b/lightning-transaction-sync/src/electrum.rs index 47489df69bb..1905456d281 100644 --- a/lightning-transaction-sync/src/electrum.rs +++ b/lightning-transaction-sync/src/electrum.rs @@ 
-37,20 +37,14 @@ use std::time::Instant; /// [`ChainMonitor`]: lightning::chain::chainmonitor::ChainMonitor /// [`Watch::watch_channel`]: lightning::chain::Watch::watch_channel /// [`Filter`]: lightning::chain::Filter -pub struct ElectrumSyncClient -where - L::Target: Logger, -{ +pub struct ElectrumSyncClient { sync_state: Mutex, queue: Mutex, client: Arc, logger: L, } -impl ElectrumSyncClient -where - L::Target: Logger, -{ +impl ElectrumSyncClient { /// Returns a new [`ElectrumSyncClient`] object. pub fn new(server_url: String, logger: L) -> Result { let client = Arc::new(ElectrumClient::new(&server_url).map_err(|e| { @@ -336,10 +330,21 @@ where script_history.iter().filter(|h| h.tx_hash == **txid); if let Some(history) = filtered_history.next() { let prob_conf_height = history.height as u32; + if prob_conf_height <= 0 { + // Skip if it's an unconfirmed entry. + continue; + } let confirmed_tx = self.get_confirmed_tx(tx, prob_conf_height)?; confirmed_txs.push(confirmed_tx); } - debug_assert!(filtered_history.next().is_none()); + if filtered_history.next().is_some() { + log_error!( + self.logger, + "Failed due to server returning multiple history entries for Tx {}.", + txid + ); + return Err(InternalError::Failed); + } } for (watched_output, script_history) in @@ -347,6 +352,7 @@ where { for possible_output_spend in script_history { if possible_output_spend.height <= 0 { + // Skip if it's an unconfirmed entry. 
continue; } @@ -494,10 +500,7 @@ where } } -impl Filter for ElectrumSyncClient -where - L::Target: Logger, -{ +impl Filter for ElectrumSyncClient { fn register_tx(&self, txid: &Txid, _script_pubkey: &Script) { let mut locked_queue = self.queue.lock().unwrap(); locked_queue.transactions.insert(*txid); diff --git a/lightning-transaction-sync/src/esplora.rs b/lightning-transaction-sync/src/esplora.rs index a191260bc01..6caf7a6a7ee 100644 --- a/lightning-transaction-sync/src/esplora.rs +++ b/lightning-transaction-sync/src/esplora.rs @@ -42,20 +42,14 @@ use std::collections::HashSet; /// [`ChainMonitor`]: lightning::chain::chainmonitor::ChainMonitor /// [`Watch::watch_channel`]: lightning::chain::Watch::watch_channel /// [`Filter`]: lightning::chain::Filter -pub struct EsploraSyncClient -where - L::Target: Logger, -{ +pub struct EsploraSyncClient { sync_state: MutexType, queue: std::sync::Mutex, client: EsploraClientType, logger: L, } -impl EsploraSyncClient -where - L::Target: Logger, -{ +impl EsploraSyncClient { /// Returns a new [`EsploraSyncClient`] object. 
pub fn new(server_url: String, logger: L) -> Self { let builder = Builder::new(&server_url); @@ -472,10 +466,7 @@ type EsploraClientType = AsyncClient; #[cfg(not(feature = "async-interface"))] type EsploraClientType = BlockingClient; -impl Filter for EsploraSyncClient -where - L::Target: Logger, -{ +impl Filter for EsploraSyncClient { fn register_tx(&self, txid: &Txid, _script_pubkey: &Script) { let mut locked_queue = self.queue.lock().unwrap(); locked_queue.transactions.insert(*txid); diff --git a/lightning-types/Cargo.toml b/lightning-types/Cargo.toml index 89bd919836f..eddd3d27fb0 100644 --- a/lightning-types/Cargo.toml +++ b/lightning-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-types" -version = "0.3.0+git" +version = "0.4.0+git" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs index 05a504ab8ca..22493efc556 100644 --- a/lightning-types/src/features.rs +++ b/lightning-types/src/features.rs @@ -166,7 +166,7 @@ mod sealed { // Byte 6 ZeroConf, // Byte 7 - Trampoline | SimpleClose | SpliceProduction, + Trampoline | SimpleClose | Splice, // Byte 8 - 16 ,,,,,,,,, // Byte 17 @@ -174,7 +174,7 @@ mod sealed { // Byte 18 , // Byte 19 - HtlcHold | SplicePrototype, + HtlcHold, ] ); define_context!( @@ -195,7 +195,7 @@ mod sealed { // Byte 6 ZeroConf | Keysend, // Byte 7 - Trampoline | SimpleClose | SpliceProduction, + Trampoline | SimpleClose | Splice, // Byte 8 - 16 ,,,,,,,,, // Byte 17 @@ -203,7 +203,7 @@ mod sealed { // Byte 18 , // Byte 19 - HtlcHold | SplicePrototype, + HtlcHold, // Byte 20 - 31 ,,,,,,,,,,,, // Byte 32 @@ -687,14 +687,14 @@ mod sealed { ); define_feature!( 63, - SpliceProduction, + Splice, [InitContext, NodeContext], "Feature flags for channel splicing.", - set_splicing_production_optional, - set_splicing_production_required, - clear_splicing_production, - 
supports_splicing_production, - requires_splicing_production + set_splicing_optional, + set_splicing_required, + clear_splicing, + supports_splicing, + requires_splicing ); // By default, allocate enough bytes to cover up to Splice. Update this as new features are // added which we expect to appear commonly across contexts. @@ -721,17 +721,6 @@ mod sealed { supports_htlc_hold, requires_htlc_hold ); - define_feature!( - 155, // Splice prototype feature bit as listed in https://github.com/lightning/bolts/issues/605#issuecomment-877237519. - SplicePrototype, - [InitContext, NodeContext], - "Feature flags for channel splicing.", - set_splicing_optional, - set_splicing_required, - clear_splicing, - supports_splicing, - requires_splicing - ); define_feature!( 259, DnsResolver, @@ -1441,8 +1430,8 @@ mod tests { // - onion_messages // - option_channel_type | option_scid_alias // - option_zeroconf - // - option_simple_close | option_splice - assert_eq!(node_features.flags.len(), 20); + // - option_simple_close + assert_eq!(node_features.flags.len(), 8); assert_eq!(node_features.flags[0], 0b00000001); assert_eq!(node_features.flags[1], 0b01010001); assert_eq!(node_features.flags[2], 0b10001010); @@ -1450,19 +1439,7 @@ mod tests { assert_eq!(node_features.flags[4], 0b10001000); assert_eq!(node_features.flags[5], 0b10100000); assert_eq!(node_features.flags[6], 0b00001000); - assert_eq!(node_features.flags[7], 0b00100000); - assert_eq!(node_features.flags[8], 0b00000000); - assert_eq!(node_features.flags[9], 0b00000000); - assert_eq!(node_features.flags[10], 0b00000000); - assert_eq!(node_features.flags[11], 0b00000000); - assert_eq!(node_features.flags[12], 0b00000000); - assert_eq!(node_features.flags[13], 0b00000000); - assert_eq!(node_features.flags[14], 0b00000000); - assert_eq!(node_features.flags[15], 0b00000000); - assert_eq!(node_features.flags[16], 0b00000000); - assert_eq!(node_features.flags[17], 0b00000000); - assert_eq!(node_features.flags[18], 0b00000000); - 
assert_eq!(node_features.flags[19], 0b00001000); + assert_eq!(node_features.flags[7], 0b10100000); } // Check that cleared flags are kept blank when converting back: diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index b3b597029da..fd6c5052359 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -34,8 +34,8 @@ grind_signatures = [] default = ["std", "grind_signatures"] [dependencies] -lightning-types = { version = "0.3.0", path = "../lightning-types", default-features = false } -lightning-invoice = { version = "0.34.0", path = "../lightning-invoice", default-features = false } +lightning-types = { version = "0.4.0", path = "../lightning-types", default-features = false } +lightning-invoice = { version = "0.35.0", path = "../lightning-invoice", default-features = false } lightning-macros = { version = "0.2", path = "../lightning-macros" } bech32 = { version = "0.11.0", default-features = false } @@ -53,7 +53,7 @@ inventory = { version = "0.3", optional = true } [dev-dependencies] regex = "1.5.6" -lightning-types = { version = "0.3.0", path = "../lightning-types", features = ["_test_utils"] } +lightning-types = { version = "0.4.0", path = "../lightning-types", features = ["_test_utils"] } lightning-macros = { path = "../lightning-macros" } parking_lot = { version = "0.12", default-features = false } diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs index ed55ca5dc9b..7bcbe80a965 100644 --- a/lightning/src/blinded_path/message.rs +++ b/lightning/src/blinded_path/message.rs @@ -31,7 +31,6 @@ use crate::types::payment::PaymentHash; use crate::util::scid_utils; use crate::util::ser::{FixedLengthReader, LengthReadableArgs, Readable, Writeable, Writer}; -use core::ops::Deref; use core::time::Duration; use core::{cmp, mem}; @@ -54,31 +53,43 @@ impl Readable for BlindedMessagePath { impl BlindedMessagePath { /// Create a one-hop blinded path for a message. 
- pub fn one_hop( + /// + /// `compact_padding` selects between space-inefficient padding which better hides contents and + /// a space-constrained padding which does very little to hide the contents, especially for the + /// last hop. It should only be set when the blinded path needs to be as compact as possible. + pub fn one_hop( recipient_node_id: PublicKey, local_node_receive_key: ReceiveAuthKey, - context: MessageContext, entropy_source: ES, secp_ctx: &Secp256k1, - ) -> Self - where - ES::Target: EntropySource, - { - Self::new(&[], recipient_node_id, local_node_receive_key, context, entropy_source, secp_ctx) + context: MessageContext, compact_padding: bool, entropy_source: ES, + secp_ctx: &Secp256k1, + ) -> Self { + Self::new( + &[], + recipient_node_id, + local_node_receive_key, + context, + compact_padding, + entropy_source, + secp_ctx, + ) } /// Create a path for an onion message, to be forwarded along `node_pks`. - pub fn new( + /// + /// `compact_padding` selects between space-inefficient padding which better hides contents and + /// a space-constrained padding which does very little to hide the contents, especially for the + /// last hop. It should only be set when the blinded path needs to be as compact as possible. + pub fn new( intermediate_nodes: &[MessageForwardNode], recipient_node_id: PublicKey, - local_node_receive_key: ReceiveAuthKey, context: MessageContext, entropy_source: ES, - secp_ctx: &Secp256k1, - ) -> Self - where - ES::Target: EntropySource, - { + local_node_receive_key: ReceiveAuthKey, context: MessageContext, compact_padding: bool, + entropy_source: ES, secp_ctx: &Secp256k1, + ) -> Self { BlindedMessagePath::new_with_dummy_hops( intermediate_nodes, recipient_node_id, 0, local_node_receive_key, context, + compact_padding, entropy_source, secp_ctx, ) @@ -86,16 +97,19 @@ impl BlindedMessagePath { /// Same as [`BlindedMessagePath::new`], but allows specifying a number of dummy hops. 
/// - /// Note: - /// At most [`MAX_DUMMY_HOPS_COUNT`] dummy hops can be added to the blinded path. - pub fn new_with_dummy_hops( + /// `compact_padding` selects between space-inefficient padding which better hides contents and + /// a space-constrained padding which does very little to hide the contents, especially for the + /// last hop. It should only be set when the blinded path needs to be as compact as possible. + /// + /// Note: At most [`MAX_DUMMY_HOPS_COUNT`] dummy hops can be added to the blinded path. + pub fn new_with_dummy_hops< + ES: EntropySource, + T: secp256k1::Signing + secp256k1::Verification, + >( intermediate_nodes: &[MessageForwardNode], recipient_node_id: PublicKey, dummy_hop_count: usize, local_node_receive_key: ReceiveAuthKey, context: MessageContext, - entropy_source: ES, secp_ctx: &Secp256k1, - ) -> Self - where - ES::Target: EntropySource, - { + compact_padding: bool, entropy_source: ES, secp_ctx: &Secp256k1, + ) -> Self { let introduction_node = IntroductionNode::NodeId( intermediate_nodes.first().map_or(recipient_node_id, |n| n.node_id), ); @@ -114,6 +128,7 @@ impl BlindedMessagePath { context, &blinding_secret, local_node_receive_key, + compact_padding, ), }) } @@ -176,12 +191,10 @@ impl BlindedMessagePath { /// introduction node. /// /// Will only modify `self` when returning `Ok`. - pub fn advance_path_by_one( + pub fn advance_path_by_one( &mut self, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, ) -> Result<(), ()> where - NS::Target: NodeSigner, - NL::Target: NodeIdLookUp, T: secp256k1::Signing + secp256k1::Verification, { let control_tlvs_ss = node_signer.ecdh(Recipient::Node, &self.0.blinding_point, None)?; @@ -416,28 +429,45 @@ pub enum OffersContext { /// Useful to timeout async recipients that are no longer supported as clients. path_absolute_expiry: Duration, }, - /// Context used by a [`BlindedMessagePath`] within a [`Refund`] or as a reply path for an - /// [`InvoiceRequest`]. 
+ /// Context used by a [`BlindedMessagePath`] within a [`Refund`]. /// /// This variant is intended to be received when handling a [`Bolt12Invoice`] or an /// [`InvoiceError`]. /// /// [`Refund`]: crate::offers::refund::Refund - /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`InvoiceError`]: crate::offers::invoice_error::InvoiceError - OutboundPayment { - /// Payment ID used when creating a [`Refund`] or [`InvoiceRequest`]. + OutboundPaymentForRefund { + /// Payment ID used when creating a [`Refund`]. /// /// [`Refund`]: crate::offers::refund::Refund - /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest payment_id: PaymentId, - /// A nonce used for authenticating that a [`Bolt12Invoice`] is for a valid [`Refund`] or - /// [`InvoiceRequest`] and for deriving their signing keys. + /// A nonce used for authenticating that a [`Bolt12Invoice`] is for a valid [`Refund`] and + /// for deriving its signing keys. /// /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`Refund`]: crate::offers::refund::Refund + nonce: Nonce, + }, + /// Context used by a [`BlindedMessagePath`] as a reply path for an [`InvoiceRequest`]. + /// + /// This variant is intended to be received when handling a [`Bolt12Invoice`] or an + /// [`InvoiceError`]. + /// + /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest + /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice + /// [`InvoiceError`]: crate::offers::invoice_error::InvoiceError + OutboundPaymentForOffer { + /// Payment ID used when creating an [`InvoiceRequest`]. + /// + /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest + payment_id: PaymentId, + + /// A nonce used for authenticating that a [`Bolt12Invoice`] is for a valid + /// [`InvoiceRequest`] and for deriving its signing keys. 
+ /// + /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest nonce: Nonce, }, @@ -619,7 +649,7 @@ impl_writeable_tlv_based_enum!(OffersContext, (0, InvoiceRequest) => { (0, nonce, required), }, - (1, OutboundPayment) => { + (1, OutboundPaymentForRefund) => { (0, payment_id, required), (1, nonce, required), }, @@ -631,6 +661,10 @@ impl_writeable_tlv_based_enum!(OffersContext, (2, invoice_slot, required), (4, path_absolute_expiry, required), }, + (4, OutboundPaymentForOffer) => { + (0, payment_id, required), + (1, nonce, required), + }, ); impl_writeable_tlv_based_enum!(AsyncPaymentsContext, @@ -693,7 +727,7 @@ pub const MAX_DUMMY_HOPS_COUNT: usize = 10; pub(super) fn blinded_hops( secp_ctx: &Secp256k1, intermediate_nodes: &[MessageForwardNode], recipient_node_id: PublicKey, dummy_hop_count: usize, context: MessageContext, - session_priv: &SecretKey, local_node_receive_key: ReceiveAuthKey, + session_priv: &SecretKey, local_node_receive_key: ReceiveAuthKey, compact_padding: bool, ) -> Vec { let dummy_count = cmp::min(dummy_hop_count, MAX_DUMMY_HOPS_COUNT); let pks = intermediate_nodes @@ -703,9 +737,8 @@ pub(super) fn blinded_hops( core::iter::repeat((recipient_node_id, Some(local_node_receive_key))).take(dummy_count), ) .chain(core::iter::once((recipient_node_id, Some(local_node_receive_key)))); - let is_compact = intermediate_nodes.iter().any(|node| node.short_channel_id.is_some()); - let tlvs = pks + let intermediate_tlvs = pks .clone() .skip(1) // The first node's TLVs contains the next node's pubkey .zip(intermediate_nodes.iter().map(|node| node.short_channel_id)) @@ -716,18 +749,43 @@ pub(super) fn blinded_hops( .map(|next_hop| { ControlTlvs::Forward(ForwardTlvs { next_hop, next_blinding_override: None }) }) - .chain((0..dummy_count).map(|_| ControlTlvs::Dummy)) - .chain(core::iter::once(ControlTlvs::Receive(ReceiveTlvs { context: Some(context) }))); - - if is_compact { - let path = 
pks.zip(tlvs); - utils::construct_blinded_hops(secp_ctx, path, session_priv) + .chain((0..dummy_count).map(|_| ControlTlvs::Dummy)); + + let max_intermediate_len = + intermediate_tlvs.clone().map(|tlvs| tlvs.serialized_length()).max().unwrap_or(0); + let have_intermediate_one_byte_smaller = + intermediate_tlvs.clone().any(|tlvs| tlvs.serialized_length() == max_intermediate_len - 1); + + let round_off = if compact_padding { + // We can only pad by a minimum of two bytes (we can only go from no-TLV to a type + length + // byte). Thus, if there are any intermediate hops that need to be padded by exactly one + // byte, we have to instead pad everything by two. + if have_intermediate_one_byte_smaller { + max_intermediate_len + 2 + } else { + max_intermediate_len + } } else { - let path = - pks.zip(tlvs.map(|tlv| BlindedPathWithPadding { - tlvs: tlv, - round_off: MESSAGE_PADDING_ROUND_OFF, - })); - utils::construct_blinded_hops(secp_ctx, path, session_priv) - } + MESSAGE_PADDING_ROUND_OFF + }; + + let tlvs = intermediate_tlvs + .map(|tlvs| { + let res = BlindedPathWithPadding { tlvs, round_off }; + if compact_padding { + debug_assert_eq!(res.serialized_length(), max_intermediate_len); + } else { + // We don't currently ever push extra fields to intermediate hops, so they should + // never go over `MESSAGE_PADDING_ROUND_OFF`. 
+ debug_assert_eq!(res.serialized_length(), MESSAGE_PADDING_ROUND_OFF); + } + res + }) + .chain(core::iter::once(BlindedPathWithPadding { + tlvs: ControlTlvs::Receive(ReceiveTlvs { context: Some(context) }), + round_off: if compact_padding { 0 } else { MESSAGE_PADDING_ROUND_OFF }, + })); + + let path = pks.zip(tlvs); + utils::construct_blinded_hops(secp_ctx, path, session_priv) } diff --git a/lightning/src/blinded_path/mod.rs b/lightning/src/blinded_path/mod.rs index 2f9b1b9a411..d1f58c8c1d9 100644 --- a/lightning/src/blinded_path/mod.rs +++ b/lightning/src/blinded_path/mod.rs @@ -88,10 +88,9 @@ impl NodeIdLookUp for EmptyNodeIdLookUp { } } -impl Deref for EmptyNodeIdLookUp { - type Target = EmptyNodeIdLookUp; - fn deref(&self) -> &Self { - self +impl> NodeIdLookUp for N { + fn next_node_id(&self, short_channel_id: u64) -> Option { + self.deref().next_node_id(short_channel_id) } } diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index 13ade222f5b..27292bacf4d 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -33,9 +33,6 @@ use crate::util::ser::{ Writeable, Writer, }; -use core::mem; -use core::ops::Deref; - #[allow(unused_imports)] use crate::prelude::*; @@ -88,13 +85,10 @@ pub struct BlindedPaymentPath { impl BlindedPaymentPath { /// Create a one-hop blinded path for a payment. - pub fn one_hop( + pub fn one_hop( payee_node_id: PublicKey, local_node_receive_key: ReceiveAuthKey, payee_tlvs: ReceiveTlvs, min_final_cltv_expiry_delta: u16, entropy_source: ES, secp_ctx: &Secp256k1, - ) -> Result - where - ES::Target: EntropySource, - { + ) -> Result { // This value is not considered in pathfinding for 1-hop blinded paths, because it's intended to // be in relation to a specific channel. 
let htlc_maximum_msat = u64::max_value(); @@ -116,14 +110,63 @@ impl BlindedPaymentPath { /// * [`BlindedPayInfo`] calculation results in an integer overflow /// * any unknown features are required in the provided [`ForwardTlvs`] // TODO: make all payloads the same size with padding + add dummy hops - pub fn new( + pub fn new( intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, local_node_receive_key: ReceiveAuthKey, payee_tlvs: ReceiveTlvs, htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, secp_ctx: &Secp256k1, - ) -> Result - where - ES::Target: EntropySource, - { + ) -> Result { + BlindedPaymentPath::new_inner( + intermediate_nodes, + payee_node_id, + local_node_receive_key, + &[], + payee_tlvs, + htlc_maximum_msat, + min_final_cltv_expiry_delta, + entropy_source, + secp_ctx, + ) + } + + /// Same as [`BlindedPaymentPath::new`], but allows specifying a number of dummy hops. + /// + /// Dummy TLVs allow callers to override the payment relay values used for dummy hops. + /// Any additional fees introduced by these dummy hops are ultimately paid to the final + /// recipient as part of the total amount. + /// + /// This improves privacy by making path-length analysis based on fee and CLTV delta + /// values less reliable. + /// + /// TODO: Add end-to-end tests validating fee aggregation, CLTV deltas, and + /// HTLC bounds when dummy hops are present, before exposing this API publicly. 
+ pub(crate) fn new_with_dummy_hops< + ES: EntropySource, + T: secp256k1::Signing + secp256k1::Verification, + >( + intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, + dummy_tlvs: &[DummyTlvs], local_node_receive_key: ReceiveAuthKey, payee_tlvs: ReceiveTlvs, + htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, + secp_ctx: &Secp256k1, + ) -> Result { + BlindedPaymentPath::new_inner( + intermediate_nodes, + payee_node_id, + local_node_receive_key, + dummy_tlvs, + payee_tlvs, + htlc_maximum_msat, + min_final_cltv_expiry_delta, + entropy_source, + secp_ctx, + ) + } + + fn new_inner( + intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, + local_node_receive_key: ReceiveAuthKey, dummy_tlvs: &[DummyTlvs], payee_tlvs: ReceiveTlvs, + htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, + secp_ctx: &Secp256k1, + ) -> Result { let introduction_node = IntroductionNode::NodeId( intermediate_nodes.first().map_or(payee_node_id, |n| n.node_id), ); @@ -133,6 +176,7 @@ impl BlindedPaymentPath { let blinded_payinfo = compute_payinfo( intermediate_nodes, + dummy_tlvs, &payee_tlvs, htlc_maximum_msat, min_final_cltv_expiry_delta, @@ -145,6 +189,7 @@ impl BlindedPaymentPath { secp_ctx, intermediate_nodes, payee_node_id, + dummy_tlvs, payee_tlvs, &blinding_secret, local_node_receive_key, @@ -183,44 +228,42 @@ impl BlindedPaymentPath { /// introduction node. /// /// Will only modify `self` when returning `Ok`. - pub fn advance_path_by_one( + pub fn advance_path_by_one( &mut self, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, ) -> Result<(), ()> where - NS::Target: NodeSigner, - NL::Target: NodeIdLookUp, T: secp256k1::Signing + secp256k1::Verification, { - match self.decrypt_intro_payload::(node_signer) { - Ok(( - BlindedPaymentTlvs::Forward(ForwardTlvs { short_channel_id, .. 
}), - control_tlvs_ss, - )) => { - let next_node_id = match node_id_lookup.next_node_id(short_channel_id) { - Some(node_id) => node_id, - None => return Err(()), - }; - let mut new_blinding_point = onion_utils::next_hop_pubkey( - secp_ctx, - self.inner_path.blinding_point, - control_tlvs_ss.as_ref(), - ) - .map_err(|_| ())?; - mem::swap(&mut self.inner_path.blinding_point, &mut new_blinding_point); - self.inner_path.introduction_node = IntroductionNode::NodeId(next_node_id); - self.inner_path.blinded_hops.remove(0); - Ok(()) - }, - _ => Err(()), - } + let (next_node_id, control_tlvs_ss) = + match self.decrypt_intro_payload::(node_signer).map_err(|_| ())? { + (BlindedPaymentTlvs::Forward(ForwardTlvs { short_channel_id, .. }), ss) => { + let node_id = node_id_lookup.next_node_id(short_channel_id).ok_or(())?; + (node_id, ss) + }, + (BlindedPaymentTlvs::Dummy(_), ss) => { + let node_id = node_signer.get_node_id(Recipient::Node)?; + (node_id, ss) + }, + _ => return Err(()), + }; + + let new_blinding_point = onion_utils::next_hop_pubkey( + secp_ctx, + self.inner_path.blinding_point, + control_tlvs_ss.as_ref(), + ) + .map_err(|_| ())?; + + self.inner_path.blinding_point = new_blinding_point; + self.inner_path.introduction_node = IntroductionNode::NodeId(next_node_id); + self.inner_path.blinded_hops.remove(0); + + Ok(()) } - pub(crate) fn decrypt_intro_payload( + pub(crate) fn decrypt_intro_payload( &self, node_signer: &NS, - ) -> Result<(BlindedPaymentTlvs, SharedSecret), ()> - where - NS::Target: NodeSigner, - { + ) -> Result<(BlindedPaymentTlvs, SharedSecret), ()> { let control_tlvs_ss = node_signer.ecdh(Recipient::Node, &self.inner_path.blinding_point, None)?; let rho = onion_utils::gen_rho_from_shared_secret(&control_tlvs_ss.secret_bytes()); @@ -234,9 +277,9 @@ impl BlindedPaymentPath { .map_err(|_| ())?; match (&readable, used_aad) { - (BlindedPaymentTlvs::Forward(_), false) | (BlindedPaymentTlvs::Receive(_), true) => { - Ok((readable, control_tlvs_ss)) - }, + 
(BlindedPaymentTlvs::Forward(_), false) + | (BlindedPaymentTlvs::Dummy(_), true) + | (BlindedPaymentTlvs::Receive(_), true) => Ok((readable, control_tlvs_ss)), _ => Err(()), } } @@ -328,6 +371,37 @@ pub struct TrampolineForwardTlvs { pub next_blinding_override: Option, } +/// TLVs carried by a dummy hop within a blinded payment path. +/// +/// Dummy hops do not correspond to real forwarding decisions, but are processed +/// identically to real hops at the protocol level. The TLVs contained here define +/// the relay requirements and constraints that must be satisfied for the payment +/// to continue through this hop. +/// +/// By enforcing realistic relay semantics on dummy hops, the payment path remains +/// indistinguishable from a fully real route with respect to fees, CLTV deltas, and +/// validation behavior. +#[derive(Clone, Copy)] +pub struct DummyTlvs { + /// Relay requirements (fees and CLTV delta) that must be satisfied when + /// processing this dummy hop. + pub payment_relay: PaymentRelay, + /// Constraints that apply to the payment when relaying over this dummy hop. + pub payment_constraints: PaymentConstraints, +} + +impl Default for DummyTlvs { + fn default() -> Self { + let payment_relay = + PaymentRelay { cltv_expiry_delta: 0, fee_proportional_millionths: 0, fee_base_msat: 0 }; + + let payment_constraints = + PaymentConstraints { max_cltv_expiry: u32::MAX, htlc_minimum_msat: 0 }; + + Self { payment_relay, payment_constraints } + } +} + /// Data to construct a [`BlindedHop`] for receiving a payment. This payload is custom to LDK and /// may not be valid if received by another lightning implementation. #[derive(Clone, Debug)] @@ -346,6 +420,8 @@ pub struct ReceiveTlvs { pub(crate) enum BlindedPaymentTlvs { /// This blinded payment data is for a forwarding node. Forward(ForwardTlvs), + /// This blinded payment data is dummy and is to be peeled by receiving node. + Dummy(DummyTlvs), /// This blinded payment data is for the receiving node. 
Receive(ReceiveTlvs), } @@ -361,15 +437,17 @@ pub(crate) enum BlindedTrampolineTlvs { } // Used to include forward and receive TLVs in the same iterator for encoding. +#[derive(Clone)] enum BlindedPaymentTlvsRef<'a> { Forward(&'a ForwardTlvs), + Dummy(&'a DummyTlvs), Receive(&'a ReceiveTlvs), } /// Parameters for relaying over a given [`BlindedHop`]. /// /// [`BlindedHop`]: crate::blinded_path::BlindedHop -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq)] pub struct PaymentRelay { /// Number of blocks subtracted from an incoming HTLC's `cltv_expiry` for this [`BlindedHop`]. pub cltv_expiry_delta: u16, @@ -383,7 +461,7 @@ pub struct PaymentRelay { /// Constraints for relaying over a given [`BlindedHop`]. /// /// [`BlindedHop`]: crate::blinded_path::BlindedHop -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq)] pub struct PaymentConstraints { /// The maximum total CLTV that is acceptable when relaying a payment over this [`BlindedHop`]. pub max_cltv_expiry: u32, @@ -512,6 +590,17 @@ impl Writeable for TrampolineForwardTlvs { } } +impl Writeable for DummyTlvs { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + encode_tlv_stream!(w, { + (10, self.payment_relay, required), + (12, self.payment_constraints, required), + (65539, (), required), + }); + Ok(()) + } +} + // Note: The `authentication` TLV field was removed in LDK v0.3 following // the introduction of `ReceiveAuthKey`-based authentication for inbound // `BlindedPaymentPaths`s. 
Because we do not support receiving to those @@ -532,6 +621,7 @@ impl<'a> Writeable for BlindedPaymentTlvsRef<'a> { fn write(&self, w: &mut W) -> Result<(), io::Error> { match self { Self::Forward(tlvs) => tlvs.write(w)?, + Self::Dummy(tlvs) => tlvs.write(w)?, Self::Receive(tlvs) => tlvs.write(w)?, } Ok(()) @@ -552,28 +642,41 @@ impl Readable for BlindedPaymentTlvs { (14, features, (option, encoding: (BlindedHopFeatures, WithoutLength))), (65536, payment_secret, option), (65537, payment_context, option), + (65539, is_dummy, option) }); - if let Some(short_channel_id) = scid { - if payment_secret.is_some() { - return Err(DecodeError::InvalidValue); - } - Ok(BlindedPaymentTlvs::Forward(ForwardTlvs { - short_channel_id, - payment_relay: payment_relay.ok_or(DecodeError::InvalidValue)?, - payment_constraints: payment_constraints.0.unwrap(), - next_blinding_override, - features: features.unwrap_or_else(BlindedHopFeatures::empty), - })) - } else { - if payment_relay.is_some() || features.is_some() { - return Err(DecodeError::InvalidValue); - } - Ok(BlindedPaymentTlvs::Receive(ReceiveTlvs { - payment_secret: payment_secret.ok_or(DecodeError::InvalidValue)?, - payment_constraints: payment_constraints.0.unwrap(), - payment_context: payment_context.ok_or(DecodeError::InvalidValue)?, - })) + match ( + scid, + next_blinding_override, + payment_relay, + features, + payment_secret, + payment_context, + is_dummy, + ) { + (Some(short_channel_id), next_override, Some(relay), features, None, None, None) => { + Ok(BlindedPaymentTlvs::Forward(ForwardTlvs { + short_channel_id, + payment_relay: relay, + payment_constraints: payment_constraints.0.unwrap(), + next_blinding_override: next_override, + features: features.unwrap_or_else(BlindedHopFeatures::empty), + })) + }, + (None, None, None, None, Some(secret), Some(context), None) => { + Ok(BlindedPaymentTlvs::Receive(ReceiveTlvs { + payment_secret: secret, + payment_constraints: payment_constraints.0.unwrap(), + payment_context: context, 
+ })) + }, + (None, None, Some(relay), None, None, None, Some(())) => { + Ok(BlindedPaymentTlvs::Dummy(DummyTlvs { + payment_relay: relay, + payment_constraints: payment_constraints.0.unwrap(), + })) + }, + _ => return Err(DecodeError::InvalidValue), } } } @@ -620,21 +723,46 @@ pub(crate) const PAYMENT_PADDING_ROUND_OFF: usize = 30; /// Construct blinded payment hops for the given `intermediate_nodes` and payee info. pub(super) fn blinded_hops( secp_ctx: &Secp256k1, intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, - payee_tlvs: ReceiveTlvs, session_priv: &SecretKey, local_node_receive_key: ReceiveAuthKey, + dummy_tlvs: &[DummyTlvs], payee_tlvs: ReceiveTlvs, session_priv: &SecretKey, + local_node_receive_key: ReceiveAuthKey, ) -> Vec { let pks = intermediate_nodes .iter() .map(|node| (node.node_id, None)) + .chain(dummy_tlvs.iter().map(|_| (payee_node_id, Some(local_node_receive_key)))) .chain(core::iter::once((payee_node_id, Some(local_node_receive_key)))); let tlvs = intermediate_nodes .iter() .map(|node| BlindedPaymentTlvsRef::Forward(&node.tlvs)) + .chain(dummy_tlvs.iter().map(|tlvs| BlindedPaymentTlvsRef::Dummy(tlvs))) .chain(core::iter::once(BlindedPaymentTlvsRef::Receive(&payee_tlvs))); let path = pks.zip( tlvs.map(|tlv| BlindedPathWithPadding { tlvs: tlv, round_off: PAYMENT_PADDING_ROUND_OFF }), ); + // Debug invariant: all non-final hops must have identical serialized size. 
+ #[cfg(debug_assertions)] + { + let mut iter = path.clone(); + if let Some((_, first)) = iter.next() { + let remaining = iter.clone().count(); // includes intermediate + final + + // At least one intermediate hop + if remaining > 1 { + let expected = first.serialized_length(); + + // skip final hop: take(remaining - 1) + for (_, hop) in iter.take(remaining - 1) { + debug_assert!( + hop.serialized_length() == expected, + "All intermediate blinded hops must have identical serialized size" + ); + } + } + } + } + utils::construct_blinded_hops(secp_ctx, path, session_priv) } @@ -694,14 +822,22 @@ where } pub(super) fn compute_payinfo( - intermediate_nodes: &[PaymentForwardNode], payee_tlvs: &ReceiveTlvs, + intermediate_nodes: &[PaymentForwardNode], dummy_tlvs: &[DummyTlvs], payee_tlvs: &ReceiveTlvs, payee_htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, ) -> Result { - let (aggregated_base_fee, aggregated_prop_fee) = - compute_aggregated_base_prop_fee(intermediate_nodes.iter().map(|node| RoutingFees { + let routing_fees = intermediate_nodes + .iter() + .map(|node| RoutingFees { base_msat: node.tlvs.payment_relay.fee_base_msat, proportional_millionths: node.tlvs.payment_relay.fee_proportional_millionths, - }))?; + }) + .chain(dummy_tlvs.iter().map(|tlvs| RoutingFees { + base_msat: tlvs.payment_relay.fee_base_msat, + proportional_millionths: tlvs.payment_relay.fee_proportional_millionths, + })); + + let (aggregated_base_fee, aggregated_prop_fee) = + compute_aggregated_base_prop_fee(routing_fees)?; let mut htlc_minimum_msat: u64 = 1; let mut htlc_maximum_msat: u64 = 21_000_000 * 100_000_000 * 1_000; // Total bitcoin supply @@ -730,6 +866,16 @@ pub(super) fn compute_payinfo( ) .ok_or(())?; // If underflow occurs, we cannot send to this hop without exceeding their max } + for dummy_tlvs in dummy_tlvs.iter() { + cltv_expiry_delta = + cltv_expiry_delta.checked_add(dummy_tlvs.payment_relay.cltv_expiry_delta).ok_or(())?; + + htlc_minimum_msat = amt_to_forward_msat( + 
core::cmp::max(dummy_tlvs.payment_constraints.htlc_minimum_msat, htlc_minimum_msat), + &dummy_tlvs.payment_relay, + ) + .unwrap_or(1); // If underflow occurs, we definitely reached this node's min + } htlc_minimum_msat = core::cmp::max(payee_tlvs.payment_constraints.htlc_minimum_msat, htlc_minimum_msat); htlc_maximum_msat = core::cmp::min(payee_htlc_maximum_msat, htlc_maximum_msat); @@ -874,7 +1020,7 @@ mod tests { }; let htlc_maximum_msat = 100_000; let blinded_payinfo = - super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat, 12) + super::compute_payinfo(&intermediate_nodes[..], &[], &recv_tlvs, htlc_maximum_msat, 12) .unwrap(); assert_eq!(blinded_payinfo.fee_base_msat, 201); assert_eq!(blinded_payinfo.fee_proportional_millionths, 1001); @@ -891,7 +1037,7 @@ mod tests { payment_context: PaymentContext::Bolt12Refund(Bolt12RefundContext {}), }; let blinded_payinfo = - super::compute_payinfo(&[], &recv_tlvs, 4242, TEST_FINAL_CLTV as u16).unwrap(); + super::compute_payinfo(&[], &[], &recv_tlvs, 4242, TEST_FINAL_CLTV as u16).unwrap(); assert_eq!(blinded_payinfo.fee_base_msat, 0); assert_eq!(blinded_payinfo.fee_proportional_millionths, 0); assert_eq!(blinded_payinfo.cltv_expiry_delta, TEST_FINAL_CLTV as u16); @@ -950,6 +1096,7 @@ mod tests { let htlc_maximum_msat = 100_000; let blinded_payinfo = super::compute_payinfo( &intermediate_nodes[..], + &[], &recv_tlvs, htlc_maximum_msat, TEST_FINAL_CLTV as u16, @@ -1009,6 +1156,7 @@ mod tests { let htlc_minimum_msat = 3798; assert!(super::compute_payinfo( &intermediate_nodes[..], + &[], &recv_tlvs, htlc_minimum_msat - 1, TEST_FINAL_CLTV as u16 @@ -1018,6 +1166,7 @@ mod tests { let htlc_maximum_msat = htlc_minimum_msat + 1; let blinded_payinfo = super::compute_payinfo( &intermediate_nodes[..], + &[], &recv_tlvs, htlc_maximum_msat, TEST_FINAL_CLTV as u16, @@ -1078,6 +1227,7 @@ mod tests { let blinded_payinfo = super::compute_payinfo( &intermediate_nodes[..], + &[], &recv_tlvs, 10_000, TEST_FINAL_CLTV as 
u16, diff --git a/lightning/src/blinded_path/utils.rs b/lightning/src/blinded_path/utils.rs index 8894f37ad33..339b4337eb3 100644 --- a/lightning/src/blinded_path/utils.rs +++ b/lightning/src/blinded_path/utils.rs @@ -256,9 +256,12 @@ impl Writeable for BlindedPathWithPadding { let tlv_length = self.tlvs.serialized_length(); let total_length = tlv_length + TLV_OVERHEAD; - let padding_length = total_length.div_ceil(self.round_off) * self.round_off - total_length; - - let padding = Some(BlindedPathPadding::new(padding_length)); + let padding = if self.round_off == 0 || tlv_length % self.round_off == 0 { + None + } else { + let length = total_length.div_ceil(self.round_off) * self.round_off - total_length; + Some(BlindedPathPadding::new(length)) + }; encode_tlv_stream!(writer, { (1, padding, option), diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 117e9b3af05..806e947c153 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -15,10 +15,108 @@ use core::{cmp, ops::Deref}; +use crate::ln::types::ChannelId; use crate::prelude::*; +use bitcoin::secp256k1::PublicKey; use bitcoin::transaction::Transaction; +/// Represents the class of transaction being broadcast. +/// +/// This is used to provide context about the type of transaction being broadcast, which may be +/// useful for logging, filtering, or prioritization purposes. +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub enum TransactionType { + /// A funding transaction establishing a new channel. + /// + /// If we initiated the channel the transaction given to + /// [`ChannelManager::funding_transaction_generated`] will be broadcast with this type. + /// + /// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated + Funding { + /// The counterparty node IDs and channel IDs of the channels being funded. 
+ /// + /// A single funding transaction may establish multiple channels when using batch funding. + channels: Vec<(PublicKey, ChannelId)>, + }, + /// A transaction cooperatively closing a channel. + /// + /// A transaction of this type will be broadcast when cooperatively closing a channel via + /// [`ChannelManager::close_channel`] or if the counterparty closes the channel. + /// + /// [`ChannelManager::close_channel`]: crate::ln::channelmanager::ChannelManager::close_channel + CooperativeClose { + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, + /// The ID of the channel being closed. + channel_id: ChannelId, + }, + /// A transaction being broadcast to force-close the channel. + /// + /// A transaction of this type will be broadcast when unilaterally closing a channel via + /// [`ChannelManager::force_close_broadcasting_latest_txn`] or if the counterparty force-closes + /// the channel. + /// + /// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn + UnilateralClose { + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, + /// The ID of the channel being force-closed. + channel_id: ChannelId, + }, + /// An anchor bumping transaction used for CPFP fee-bumping a closing transaction. + /// + /// This will be broadcast after an anchor channel has been closed. See + /// [`BumpTransactionEvent`] for more information. + /// + /// [`BumpTransactionEvent`]: crate::events::bump_transaction::BumpTransactionEvent + AnchorBump { + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, + /// The ID of the channel whose closing transaction is being fee-bumped. + channel_id: ChannelId, + }, + /// A transaction which is resolving an output spendable by both us and our counterparty. 
+ /// + /// When a channel closes via the unilateral close path, there may be transaction outputs which + /// are spendable by either our counterparty or us and represent some lightning state. In order + /// to resolve that state, the [`ChannelMonitor`] will spend any such outputs, ensuring funds + /// are only available to us prior to generating an [`Event::SpendableOutputs`]. This + /// transaction is one such transaction - resolving in-flight HTLCs or punishing our + /// counterparty if they broadcasted an outdated state. + /// + /// [`ChannelMonitor`]: crate::chain::ChannelMonitor + /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs + Claim { + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, + /// The ID of the channel from which outputs are being claimed. + channel_id: ChannelId, + }, + /// A transaction generated by the [`OutputSweeper`], sweeping [`SpendableOutputDescriptor`]s + /// to the user's wallet. + /// + /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper + /// [`SpendableOutputDescriptor`]: crate::sign::SpendableOutputDescriptor + Sweep { + /// The counterparty node IDs and channel IDs from which outputs are being swept, if known. + /// + /// A single sweep transaction may aggregate outputs from multiple channels. + channels: Vec<(PublicKey, ChannelId)>, + }, + /// A splice transaction modifying an existing channel's funding. + /// + /// A transaction of this type will be broadcast as a result of a [`ChannelManager::splice_channel`] operation. + /// + /// [`ChannelManager::splice_channel`]: crate::ln::channelmanager::ChannelManager::splice_channel + Splice { + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, + /// The ID of the channel being spliced. + channel_id: ChannelId, + }, +} + // TODO: Define typed abstraction over feerates to handle their conversions. 
pub(crate) fn compute_feerate_sat_per_1000_weight(fee_sat: u64, weight: u64) -> u32 { (fee_sat * 1000 / weight).try_into().unwrap_or(u32::max_value()) @@ -45,7 +143,16 @@ pub trait BroadcasterInterface { /// /// Bitcoin transaction packages are defined in BIP 331 and here: /// - fn broadcast_transactions(&self, txs: &[&Transaction]); + /// + /// Each transaction is paired with a [`TransactionType`] indicating the class of transaction + /// being broadcast, which may be useful for logging, filtering, or prioritization. + fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]); +} + +impl> BroadcasterInterface for B { + fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]) { + self.deref().broadcast_transactions(txs) + } } /// An enum that represents the priority at which we want a transaction to confirm used for feerate @@ -181,6 +288,12 @@ pub trait FeeEstimator { fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32; } +impl> FeeEstimator for F { + fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 { + self.deref().get_est_sat_per_1000_weight(confirmation_target) + } +} + /// Minimum relay fee as required by bitcoin network mempool policy. pub const INCREMENTAL_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 253; /// Minimum feerate that takes a sane approach to bitcoind weight-to-vbytes rounding. @@ -188,19 +301,14 @@ pub const INCREMENTAL_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 253; /// pub const FEERATE_FLOOR_SATS_PER_KW: u32 = 253; -/// Wraps a `Deref` to a `FeeEstimator` so that any fee estimations provided by it -/// are bounded below by `FEERATE_FLOOR_SATS_PER_KW` (253 sats/KW). +/// Wraps a [`FeeEstimator`] so that any fee estimations provided by it are bounded below by +/// `FEERATE_FLOOR_SATS_PER_KW` (253 sats/KW). /// /// Note that this does *not* implement [`FeeEstimator`] to make it harder to accidentally mix the /// two. 
-pub(crate) struct LowerBoundedFeeEstimator(pub F) -where - F::Target: FeeEstimator; - -impl LowerBoundedFeeEstimator -where - F::Target: FeeEstimator, -{ +pub(crate) struct LowerBoundedFeeEstimator(pub F); + +impl LowerBoundedFeeEstimator { /// Creates a new `LowerBoundedFeeEstimator` which wraps the provided fee_estimator pub fn new(fee_estimator: F) -> Self { LowerBoundedFeeEstimator(fee_estimator) diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index f4a1edff038..7db1b697c2b 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -37,7 +37,7 @@ use crate::chain::channelmonitor::{ WithChannelMonitor, }; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Filter, WatchedOutput}; +use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, WatchedOutput}; use crate::events::{self, Event, EventHandler, ReplayEvent}; use crate::ln::channel_state::ChannelDetails; #[cfg(peer_storage)] @@ -256,41 +256,27 @@ impl Deref for LockedChannelMonitor<'_, Chann /// /// This is not exported to bindings users as async is not supported outside of Rust. 
pub struct AsyncPersister< - K: Deref + MaybeSend + MaybeSync + 'static, + K: KVStore + MaybeSend + MaybeSync + 'static, S: FutureSpawner, - L: Deref + MaybeSend + MaybeSync + 'static, - ES: Deref + MaybeSend + MaybeSync + 'static, - SP: Deref + MaybeSend + MaybeSync + 'static, - BI: Deref + MaybeSend + MaybeSync + 'static, - FE: Deref + MaybeSend + MaybeSync + 'static, -> where - K::Target: KVStore + MaybeSync, - L::Target: Logger, - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator, -{ + L: Logger + MaybeSend + MaybeSync + 'static, + ES: EntropySource + MaybeSend + MaybeSync + 'static, + SP: SignerProvider + MaybeSend + MaybeSync + 'static, + BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, + FE: FeeEstimator + MaybeSend + MaybeSync + 'static, +> { persister: MonitorUpdatingPersisterAsync, event_notifier: Arc, } impl< - K: Deref + MaybeSend + MaybeSync + 'static, + K: KVStore + MaybeSend + MaybeSync + 'static, S: FutureSpawner, - L: Deref + MaybeSend + MaybeSync + 'static, - ES: Deref + MaybeSend + MaybeSync + 'static, - SP: Deref + MaybeSend + MaybeSync + 'static, - BI: Deref + MaybeSend + MaybeSync + 'static, - FE: Deref + MaybeSend + MaybeSync + 'static, + L: Logger + MaybeSend + MaybeSync + 'static, + ES: EntropySource + MaybeSend + MaybeSync + 'static, + SP: SignerProvider + MaybeSend + MaybeSync + 'static, + BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, + FE: FeeEstimator + MaybeSend + MaybeSync + 'static, > Deref for AsyncPersister -where - K::Target: KVStore + MaybeSync, - L::Target: Logger, - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator, { type Target = Self; fn deref(&self) -> &Self { @@ -299,26 +285,19 @@ where } impl< - K: Deref + MaybeSend + MaybeSync + 'static, + K: KVStore + MaybeSend + MaybeSync + 'static, S: FutureSpawner, - L: Deref + MaybeSend + 
MaybeSync + 'static, - ES: Deref + MaybeSend + MaybeSync + 'static, - SP: Deref + MaybeSend + MaybeSync + 'static, - BI: Deref + MaybeSend + MaybeSync + 'static, - FE: Deref + MaybeSend + MaybeSync + 'static, - > Persist<::EcdsaSigner> for AsyncPersister + L: Logger + MaybeSend + MaybeSync + 'static, + ES: EntropySource + MaybeSend + MaybeSync + 'static, + SP: SignerProvider + MaybeSend + MaybeSync + 'static, + BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, + FE: FeeEstimator + MaybeSend + MaybeSync + 'static, + > Persist for AsyncPersister where - K::Target: KVStore + MaybeSync, - L::Target: Logger, - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator, - ::EcdsaSigner: MaybeSend + 'static, + SP::EcdsaSigner: MaybeSend + 'static, { fn persist_new_channel( - &self, monitor_name: MonitorName, - monitor: &ChannelMonitor<::EcdsaSigner>, + &self, monitor_name: MonitorName, monitor: &ChannelMonitor, ) -> ChannelMonitorUpdateStatus { let notifier = Arc::clone(&self.event_notifier); self.persister.spawn_async_persist_new_channel(monitor_name, monitor, notifier); @@ -327,7 +306,7 @@ where fn update_persisted_channel( &self, monitor_name: MonitorName, monitor_update: Option<&ChannelMonitorUpdate>, - monitor: &ChannelMonitor<::EcdsaSigner>, + monitor: &ChannelMonitor, ) -> ChannelMonitorUpdateStatus { let notifier = Arc::clone(&self.event_notifier); self.persister.spawn_async_update_channel(monitor_name, monitor_update, monitor, notifier); @@ -361,19 +340,14 @@ where /// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims pub struct ChainMonitor< ChannelSigner: EcdsaChannelSigner, - C: Deref, - T: Deref, - F: Deref, - L: Deref, + C: chain::Filter, + T: BroadcasterInterface, + F: FeeEstimator, + L: Logger, P: Deref, - ES: Deref, + ES: EntropySource, > where - C::Target: chain::Filter, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, 
P::Target: Persist, - ES::Target: EntropySource, { monitors: RwLock>>, chain_source: Option, @@ -400,32 +374,17 @@ pub struct ChainMonitor< } impl< - K: Deref + MaybeSend + MaybeSync + 'static, + K: KVStore + MaybeSend + MaybeSync + 'static, S: FutureSpawner, - SP: Deref + MaybeSend + MaybeSync + 'static, - C: Deref, - T: Deref + MaybeSend + MaybeSync + 'static, - F: Deref + MaybeSend + MaybeSync + 'static, - L: Deref + MaybeSend + MaybeSync + 'static, - ES: Deref + MaybeSend + MaybeSync + 'static, - > - ChainMonitor< - ::EcdsaSigner, - C, - T, - F, - L, - AsyncPersister, - ES, - > where - K::Target: KVStore + MaybeSync, - SP::Target: SignerProvider + Sized, - C::Target: chain::Filter, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - ES::Target: EntropySource + Sized, - ::EcdsaSigner: MaybeSend + 'static, + SP: SignerProvider + MaybeSend + MaybeSync + 'static, + C: chain::Filter, + T: BroadcasterInterface + MaybeSend + MaybeSync + 'static, + F: FeeEstimator + MaybeSend + MaybeSync + 'static, + L: Logger + MaybeSend + MaybeSync + 'static, + ES: EntropySource + MaybeSend + MaybeSync + 'static, + > ChainMonitor, ES> +where + SP::EcdsaSigner: MaybeSend + 'static, { /// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels. /// @@ -461,20 +420,15 @@ impl< impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, - T: Deref, - F: Deref, - L: Deref, + C: chain::Filter, + T: BroadcasterInterface, + F: FeeEstimator, + L: Logger, P: Deref, - ES: Deref, + ES: EntropySource, > ChainMonitor where - C::Target: chain::Filter, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { /// Dispatches to per-channel monitors, which are responsible for updating their on-chain view /// of a channel and reacting accordingly based on transactions in the given chain data. 
See @@ -895,8 +849,8 @@ where let monitors = self.monitors.read().unwrap(); for (_, monitor_holder) in &*monitors { monitor_holder.monitor.rebroadcast_pending_claims( - &*self.broadcaster, - &*self.fee_estimator, + &self.broadcaster, + &self.fee_estimator, &self.logger, ) } @@ -911,16 +865,16 @@ where if let Some(channel_id) = monitor_opt { if let Some(monitor_holder) = monitors.get(&channel_id) { monitor_holder.monitor.signer_unblocked( - &*self.broadcaster, - &*self.fee_estimator, + &self.broadcaster, + &self.fee_estimator, &self.logger, ) } } else { for (_, monitor_holder) in &*monitors { monitor_holder.monitor.signer_unblocked( - &*self.broadcaster, - &*self.fee_estimator, + &self.broadcaster, + &self.fee_estimator, &self.logger, ) } @@ -1108,20 +1062,15 @@ where impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, - T: Deref, - F: Deref, - L: Deref, + C: chain::Filter, + T: BroadcasterInterface, + F: FeeEstimator, + L: Logger, P: Deref, - ES: Deref, + ES: EntropySource, > BaseMessageHandler for ChainMonitor where - C::Target: chain::Filter, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { fn get_and_clear_pending_msg_events(&self) -> Vec { let mut pending_events = self.pending_send_only_events.lock().unwrap(); @@ -1147,39 +1096,29 @@ where impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, - T: Deref, - F: Deref, - L: Deref, + C: chain::Filter, + T: BroadcasterInterface, + F: FeeEstimator, + L: Logger, P: Deref, - ES: Deref, + ES: EntropySource, > SendOnlyMessageHandler for ChainMonitor where - C::Target: chain::Filter, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { } impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, - T: Deref, - F: Deref, - L: Deref, + C: chain::Filter, + T: BroadcasterInterface, + F: FeeEstimator, + L: Logger, P: Deref, - ES: Deref, + ES: EntropySource, > 
chain::Listen for ChainMonitor where - C::Target: chain::Filter, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { log_debug!( @@ -1193,8 +1132,8 @@ where header, txdata, height, - &*self.broadcaster, - &*self.fee_estimator, + &self.broadcaster, + &self.fee_estimator, &self.logger, ) }); @@ -1220,8 +1159,8 @@ where for monitor_state in monitor_states.values() { monitor_state.monitor.blocks_disconnected( fork_point, - &*self.broadcaster, - &*self.fee_estimator, + &self.broadcaster, + &self.fee_estimator, &self.logger, ); } @@ -1230,20 +1169,15 @@ where impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, - T: Deref, - F: Deref, - L: Deref, + C: chain::Filter, + T: BroadcasterInterface, + F: FeeEstimator, + L: Logger, P: Deref, - ES: Deref, + ES: EntropySource, > chain::Confirm for ChainMonitor where - C::Target: chain::Filter, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { log_debug!( @@ -1258,8 +1192,8 @@ where header, txdata, height, - &*self.broadcaster, - &*self.fee_estimator, + &self.broadcaster, + &self.fee_estimator, &self.logger, ) }); @@ -1273,8 +1207,8 @@ where for monitor_state in monitor_states.values() { monitor_state.monitor.transaction_unconfirmed( txid, - &*self.broadcaster, - &*self.fee_estimator, + &self.broadcaster, + &self.fee_estimator, &self.logger, ); } @@ -1294,8 +1228,8 @@ where monitor.best_block_updated( header, height, - &*self.broadcaster, - &*self.fee_estimator, + &self.broadcaster, + &self.fee_estimator, &self.logger, ) }); @@ -1325,20 +1259,15 @@ where impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, - T: Deref, - F: Deref, - L: Deref, + C: chain::Filter, + T: BroadcasterInterface, 
+ F: FeeEstimator, + L: Logger, P: Deref, - ES: Deref, + ES: EntropySource, > chain::Watch for ChainMonitor where - C::Target: chain::Filter, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { fn watch_channel( &self, channel_id: ChannelId, monitor: ChannelMonitor, @@ -1521,20 +1450,15 @@ where impl< ChannelSigner: EcdsaChannelSigner, - C: Deref, - T: Deref, - F: Deref, - L: Deref, + C: chain::Filter, + T: BroadcasterInterface, + F: FeeEstimator, + L: Logger, P: Deref, - ES: Deref, + ES: EntropySource, > events::EventsProvider for ChainMonitor where - C::Target: chain::Filter, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, P::Target: Persist, - ES::Target: EntropySource, { /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity. /// @@ -1564,11 +1488,70 @@ where } } +/// A trivial trait which describes any [`ChainMonitor`]. +/// +/// This is not exported to bindings users as general cover traits aren't useful in other +/// languages. +pub trait AChainMonitor { + /// A type implementing [`EcdsaChannelSigner`]. + type Signer: EcdsaChannelSigner + Sized; + /// A type implementing [`chain::Filter`]. + type Filter: chain::Filter; + /// A type implementing [`BroadcasterInterface`]. + type Broadcaster: BroadcasterInterface; + /// A type implementing [`FeeEstimator`]. + type FeeEstimator: FeeEstimator; + /// A type implementing [`Logger`]. + type Logger: Logger; + /// A type that derefs to [`Persist`]. + type Persister: Deref; + /// The target of [`Self::Persister`]. + type PersisterTarget: Persist + ?Sized; + /// A type implementing [`EntropySource`]. + type EntropySource: EntropySource; + /// Returns a reference to the actual [`ChainMonitor`] object. 
+ fn get_cm( + &self, + ) -> &ChainMonitor< + Self::Signer, + Self::Filter, + Self::Broadcaster, + Self::FeeEstimator, + Self::Logger, + Self::Persister, + Self::EntropySource, + >; +} + +impl< + ChannelSigner: EcdsaChannelSigner, + C: chain::Filter, + T: BroadcasterInterface, + F: FeeEstimator, + L: Logger, + P: Deref, + ES: EntropySource, + > AChainMonitor for ChainMonitor +where + P::Target: Persist, +{ + type Signer = ChannelSigner; + type Filter = C; + type Broadcaster = T; + type FeeEstimator = F; + type Logger = L; + type Persister = P; + type PersisterTarget = P::Target; + type EntropySource = ES; + fn get_cm(&self) -> &ChainMonitor { + self + } +} + #[cfg(test)] mod tests { use crate::chain::channelmonitor::ANTI_REORG_DELAY; use crate::chain::{ChannelMonitorUpdateStatus, Watch}; - use crate::check_added_monitors; use crate::events::{ClosureReason, Event}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; @@ -1601,9 +1584,9 @@ mod tests { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.claim_funds(payment_preimage_2); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone(); @@ -1666,14 +1649,14 @@ mod tests { nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_first_raa, as_first_update) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); - 
check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut bs_2nd_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_first_update); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0] @@ -1683,28 +1666,33 @@ mod tests { nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &bs_2nd_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); expect_payment_path_successful!(nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_second_raa, as_second_update) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_second_update); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); expect_payment_path_successful!(nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } #[test] fn test_chainsync_triggers_distributed_monitor_persistence() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = 
nodes[0].node.get_our_node_id(); diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 10e5049682e..37351460634 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -42,7 +42,6 @@ use crate::chain::package::{ HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedHTLCOutput, RevokedOutput, }; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::chain::Filter; use crate::chain::{BestBlock, WatchedOutput}; use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent}; use crate::events::{ClosureReason, Event, EventHandler, ReplayEvent}; @@ -66,7 +65,7 @@ use crate::sign::{ use crate::types::features::ChannelTypeFeatures; use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::byte_utils; -use crate::util::logger::{Logger, Record}; +use crate::util::logger::{Logger, WithContext}; use crate::util::persist::MonitorName; use crate::util::ser::{ MaybeReadable, Readable, ReadableArgs, RequiredWrapper, UpgradableRequired, Writeable, Writer, @@ -1257,18 +1256,19 @@ pub(crate) struct ChannelMonitorImpl { // deserialization current_holder_commitment_number: u64, - /// The set of payment hashes from inbound payments for which we know the preimage. Payment - /// preimages that are not included in any unrevoked local commitment transaction or unrevoked - /// remote commitment transactions are automatically removed when commitment transactions are - /// revoked. Note that this happens one revocation after it theoretically could, leaving - /// preimages present here for the previous state even when the channel is "at rest". This is a - /// good safety buffer, but also is important as it ensures we retain payment preimages for the - /// previous local commitment transaction, which may have been broadcast already when we see - /// the revocation (in setups with redundant monitors). 
+ /// The set of payment hashes from inbound payments and forwards for which we know the preimage. + /// Payment preimages that are not included in any unrevoked local commitment transaction or + /// unrevoked remote commitment transactions are automatically removed when commitment + /// transactions are revoked. Note that this happens one revocation after it theoretically could, + /// leaving preimages present here for the previous state even when the channel is "at rest". + /// This is a good safety buffer, but also is important as it ensures we retain payment preimages + /// for the previous local commitment transaction, which may have been broadcast already when we + /// see the revocation (in setups with redundant monitors). /// /// We also store [`PaymentClaimDetails`] here, tracking the payment information(s) for this /// preimage for inbound payments. This allows us to rebuild the inbound payment information on - /// startup even if we lost our `ChannelManager`. + /// startup even if we lost our `ChannelManager`. For forwardeds, the list of + /// [`PaymentClaimDetails`] is empty. payment_preimages: HashMap)>, // Note that `MonitorEvent`s MUST NOT be generated during update processing, only generated @@ -1825,45 +1825,21 @@ macro_rules! 
_process_events_body { } pub(super) use _process_events_body as process_events_body; -pub(crate) struct WithChannelMonitor<'a, L: Deref> -where - L::Target: Logger, -{ - logger: &'a L, - peer_id: Option, - channel_id: Option, - payment_hash: Option, -} +pub(crate) struct WithChannelMonitor; -impl<'a, L: Deref> Logger for WithChannelMonitor<'a, L> -where - L::Target: Logger, -{ - fn log(&self, mut record: Record) { - record.peer_id = self.peer_id; - record.channel_id = self.channel_id; - record.payment_hash = self.payment_hash; - self.logger.log(record) - } -} - -impl<'a, L: Deref> WithChannelMonitor<'a, L> -where - L::Target: Logger, -{ - pub(crate) fn from( +impl WithChannelMonitor { + pub(crate) fn from<'a, L: Logger, S: EcdsaChannelSigner>( logger: &'a L, monitor: &ChannelMonitor, payment_hash: Option, - ) -> Self { + ) -> WithContext<'a, L> { Self::from_impl(logger, &*monitor.inner.lock().unwrap(), payment_hash) } - #[rustfmt::skip] - pub(crate) fn from_impl(logger: &'a L, monitor_impl: &ChannelMonitorImpl, payment_hash: Option) -> Self { + pub(crate) fn from_impl<'a, L: Logger, S: EcdsaChannelSigner>( + logger: &'a L, monitor_impl: &ChannelMonitorImpl, payment_hash: Option, + ) -> WithContext<'a, L> { let peer_id = Some(monitor_impl.counterparty_node_id); let channel_id = Some(monitor_impl.channel_id()); - WithChannelMonitor { - logger, peer_id, channel_id, payment_hash, - } + WithContext::from(logger, peer_id, channel_id, payment_hash) } } @@ -1904,8 +1880,9 @@ impl ChannelMonitor { initial_holder_commitment_tx.trust().commitment_number(); let onchain_tx_handler = OnchainTxHandler::new( - channel_parameters.channel_value_satoshis, channel_keys_id, destination_script.into(), - keys, channel_parameters.clone(), initial_holder_commitment_tx.clone(), secp_ctx + channel_id, counterparty_node_id, channel_parameters.channel_value_satoshis, + channel_keys_id, destination_script.into(), keys, channel_parameters.clone(), + initial_holder_commitment_tx.clone(), 
secp_ctx, ); let funding_outpoint = channel_parameters.funding_outpoint @@ -2076,18 +2053,14 @@ impl ChannelMonitor { /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager #[rustfmt::skip] - pub(crate) fn provide_payment_preimage_unsafe_legacy( + pub(crate) fn provide_payment_preimage_unsafe_legacy( &self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) { let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, Some(*payment_hash)); // Note that we don't pass any MPP claim parts here. This is generally not okay but in this @@ -2101,14 +2074,9 @@ impl ChannelMonitor { /// itself. /// /// panics if the given update is not the next update by update_id. - pub fn update_monitor( + pub fn update_monitor( &self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L, - ) -> Result<(), ()> - where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result<(), ()> { let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); inner.update_monitor(updates, broadcaster, fee_estimator, &logger) @@ -2158,10 +2126,7 @@ impl ChannelMonitor { /// calling `chain::Filter::register_output` and `chain::Filter::register_tx` until all outputs /// have been registered. 
#[rustfmt::skip] - pub fn load_outputs_to_watch(&self, filter: &F, logger: &L) - where - F::Target: chain::Filter, L::Target: Logger, - { + pub fn load_outputs_to_watch(&self, filter: &F, logger: &L) { let lock = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*lock, None); for funding in core::iter::once(&lock.funding).chain(&lock.pending_funding) { @@ -2205,12 +2170,11 @@ impl ChannelMonitor { /// /// [`SpendableOutputs`]: crate::events::Event::SpendableOutputs /// [`BumpTransaction`]: crate::events::Event::BumpTransaction - pub fn process_pending_events( + pub fn process_pending_events( &self, handler: &H, logger: &L, ) -> Result<(), ReplayEvent> where H::Target: EventHandler, - L::Target: Logger, { let mut ev; process_events_body!(Some(self), logger, ev, handler.handle_event(ev)) @@ -2222,13 +2186,10 @@ impl ChannelMonitor { pub async fn process_pending_events_async< Future: core::future::Future>, H: Fn(Event) -> Future, - L: Deref, + L: Logger, >( &self, handler: &H, logger: &L, - ) -> Result<(), ReplayEvent> - where - L::Target: Logger, - { + ) -> Result<(), ReplayEvent> { let mut ev; process_events_body!(Some(self), logger, ev, { handler(ev).await }) } @@ -2356,15 +2317,15 @@ impl ChannelMonitor { /// transactions that cannot be confirmed until the funding transaction is visible. 
/// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction - pub fn broadcast_latest_holder_commitment_txn( + pub fn broadcast_latest_holder_commitment_txn< + B: BroadcasterInterface, + F: FeeEstimator, + L: Logger, + >( &self, broadcaster: &B, fee_estimator: &F, logger: &L, - ) where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) { let mut inner = self.inner.lock().unwrap(); - let fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator); + let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); inner.queue_latest_holder_commitment_txn_for_broadcast( @@ -2379,10 +2340,9 @@ impl ChannelMonitor { /// to bypass HolderCommitmentTransaction state update lockdown after signature and generate /// revoked commitment transaction. #[cfg(any(test, feature = "_test_utils", feature = "unsafe_revoked_tx_signing"))] - pub fn unsafe_get_latest_holder_commitment_txn(&self, logger: &L) -> Vec - where - L::Target: Logger, - { + pub fn unsafe_get_latest_holder_commitment_txn( + &self, logger: &L, + ) -> Vec { let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); inner.unsafe_get_latest_holder_commitment_txn(&logger) @@ -2400,7 +2360,7 @@ impl ChannelMonitor { /// /// [`get_outputs_to_watch`]: #method.get_outputs_to_watch #[rustfmt::skip] - pub fn block_connected( + pub fn block_connected( &self, header: &Header, txdata: &TransactionData, @@ -2408,12 +2368,7 @@ impl ChannelMonitor { broadcaster: B, fee_estimator: F, logger: &L, - ) -> Vec - where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Vec { let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); inner.block_connected( @@ -2422,13 +2377,9 @@ impl ChannelMonitor { /// Determines if the disconnected block contained any transactions of 
interest and updates /// appropriately. - pub fn blocks_disconnected( + pub fn blocks_disconnected( &self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &L, - ) where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) { let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); inner.blocks_disconnected(fork_point, broadcaster, fee_estimator, &logger) @@ -2442,7 +2393,7 @@ impl ChannelMonitor { /// /// [`block_connected`]: Self::block_connected #[rustfmt::skip] - pub fn transactions_confirmed( + pub fn transactions_confirmed( &self, header: &Header, txdata: &TransactionData, @@ -2450,12 +2401,7 @@ impl ChannelMonitor { broadcaster: B, fee_estimator: F, logger: &L, - ) -> Vec - where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Vec { let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); @@ -2470,17 +2416,13 @@ impl ChannelMonitor { /// /// [`blocks_disconnected`]: Self::blocks_disconnected #[rustfmt::skip] - pub fn transaction_unconfirmed( + pub fn transaction_unconfirmed( &self, txid: &Txid, broadcaster: B, fee_estimator: F, logger: &L, - ) where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) { let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); @@ -2497,19 +2439,14 @@ impl ChannelMonitor { /// /// [`block_connected`]: Self::block_connected #[rustfmt::skip] - pub fn best_block_updated( + pub fn best_block_updated( &self, header: &Header, height: u32, broadcaster: B, fee_estimator: F, logger: &L, - ) -> Vec - where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> 
Vec { let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let mut inner = self.inner.lock().unwrap(); let logger = WithChannelMonitor::from_impl(logger, &*inner, None); @@ -2544,14 +2481,9 @@ impl ChannelMonitor { /// invoking this every 30 seconds, or lower if running in an environment with spotty /// connections, like on mobile. #[rustfmt::skip] - pub fn rebroadcast_pending_claims( + pub fn rebroadcast_pending_claims( &self, broadcaster: B, fee_estimator: F, logger: &L, - ) - where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) { let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let mut lock = self.inner.lock().unwrap(); let inner = &mut *lock; @@ -2572,14 +2504,9 @@ impl ChannelMonitor { /// Triggers rebroadcasts of pending claims from a force-closed channel after a transaction /// signature generation failure. #[rustfmt::skip] - pub fn signer_unblocked( + pub fn signer_unblocked( &self, broadcaster: B, fee_estimator: F, logger: &L, - ) - where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) { let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); let mut lock = self.inner.lock().unwrap(); let inner = &mut *lock; @@ -3826,14 +3753,11 @@ impl ChannelMonitorImpl { /// /// Note that this is often called multiple times for the same payment and must be idempotent. 
#[rustfmt::skip] - fn provide_payment_preimage( + fn provide_payment_preimage( &mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, payment_info: &Option, broadcaster: &B, - fee_estimator: &LowerBoundedFeeEstimator, logger: &WithChannelMonitor) - where B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext + ) { self.payment_preimages.entry(payment_hash.clone()) .and_modify(|(_, payment_infos)| { if let Some(payment_info) = payment_info { @@ -4005,15 +3929,10 @@ impl ChannelMonitorImpl { /// See also [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]. /// /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]: crate::chain::channelmonitor::ChannelMonitor::broadcast_latest_holder_commitment_txn - pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast( - &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithChannelMonitor, + pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast( + &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, require_funding_seen: bool, - ) - where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) { let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message: "ChannelMonitor-initiated commitment transaction broadcast".to_owned(), @@ -4033,15 +3952,11 @@ impl ChannelMonitorImpl { ); } - fn renegotiated_funding( - &mut self, logger: &WithChannelMonitor, - channel_parameters: &ChannelTransactionParameters, + fn renegotiated_funding( + &mut self, logger: &WithContext, channel_parameters: &ChannelTransactionParameters, alternative_holder_commitment_tx: &HolderCommitmentTransaction, alternative_counterparty_commitment_tx: &CommitmentTransaction, - ) -> Result<(), ()> - where - L::Target: Logger, - { + ) -> Result<(), ()> { let 
alternative_counterparty_commitment_txid = alternative_counterparty_commitment_tx.trust().txid(); @@ -4209,13 +4124,9 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn update_monitor( - &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithChannelMonitor - ) -> Result<(), ()> - where B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + fn update_monitor( + &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithContext + ) -> Result<(), ()> { if self.latest_update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID && updates.update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID { log_info!(logger, "Applying pre-0.1 post-force-closed update to monitor {} with {} change(s).", log_funding_info!(self), updates.updates.len()); @@ -4256,7 +4167,7 @@ impl ChannelMonitorImpl { } } let mut ret = Ok(()); - let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator); + let bounded_fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator); for update in updates.updates.iter() { match update { ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, claimed_htlcs, nondust_htlc_sources } => { @@ -4677,9 +4588,9 @@ impl ChannelMonitorImpl { /// Returns packages to claim the revoked output(s) and general information about the output that /// is to the counterparty in the commitment transaction. 
#[rustfmt::skip] - fn check_spend_counterparty_transaction(&mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) + fn check_spend_counterparty_transaction(&mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) -> (Vec, CommitmentTxCounterpartyOutputInfo) - where L::Target: Logger { + { // Most secp and related errors trying to create keys means we have no hope of constructing // a spend transaction...so we return no transactions to broadcast let mut claimable_outpoints = Vec::new(); @@ -4967,9 +4878,9 @@ impl ChannelMonitorImpl { /// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key #[rustfmt::skip] - fn check_spend_counterparty_htlc( + fn check_spend_counterparty_htlc( &mut self, tx: &Transaction, commitment_number: u64, commitment_txid: &Txid, height: u32, logger: &L - ) -> (Vec, Option) where L::Target: Logger { + ) -> (Vec, Option) { let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return (Vec::new(), None); }; let per_commitment_key = match SecretKey::from_slice(&secret) { Ok(key) => key, @@ -5110,13 +5021,10 @@ impl ChannelMonitorImpl { /// revoked using data in holder_claimable_outpoints. /// Should not be used if check_spend_revoked_transaction succeeds. /// Returns None unless the transaction is definitely one of our commitment transactions. 
- fn check_spend_holder_transaction( + fn check_spend_holder_transaction( &mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L, - ) -> Option<(Vec, TransactionOutputs)> - where - L::Target: Logger, - { + ) -> Option<(Vec, TransactionOutputs)> { let funding_spent = get_confirmed_funding_scope!(self); // HTLCs set may differ between last and previous holder commitment txn, in case of one them hitting chain, ensure we cancel all HTLCs backward @@ -5179,9 +5087,9 @@ impl ChannelMonitorImpl { /// Cancels any existing pending claims for a commitment that previously confirmed and has now /// been replaced by another. #[rustfmt::skip] - pub fn cancel_prev_commitment_claims( + pub fn cancel_prev_commitment_claims( &mut self, logger: &L, confirmed_commitment_txid: &Txid - ) where L::Target: Logger { + ) { for (counterparty_commitment_txid, _) in &self.counterparty_commitment_txn_on_chain { // Cancel any pending claims for counterparty commitments we've seen confirm. if counterparty_commitment_txid == confirmed_commitment_txid { @@ -5253,9 +5161,9 @@ impl ChannelMonitorImpl { #[cfg(any(test, feature = "_test_utils", feature = "unsafe_revoked_tx_signing"))] /// Note that this includes possibly-locktimed-in-the-future transactions! 
#[rustfmt::skip] - fn unsafe_get_latest_holder_commitment_txn( - &mut self, logger: &WithChannelMonitor - ) -> Vec where L::Target: Logger { + fn unsafe_get_latest_holder_commitment_txn( + &mut self, logger: &WithContext + ) -> Vec { log_debug!(logger, "Getting signed copy of latest holder commitment transaction!"); let commitment_tx = { let sig = self.onchain_tx_handler.signer.unsafe_sign_holder_commitment( @@ -5305,14 +5213,10 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn block_connected( + fn block_connected( &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, - fee_estimator: F, logger: &WithChannelMonitor, - ) -> Vec - where B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + fee_estimator: F, logger: &WithContext, + ) -> Vec { let block_hash = header.block_hash(); self.best_block = BestBlock::new(block_hash, height); @@ -5321,19 +5225,14 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn best_block_updated( + fn best_block_updated( &mut self, header: &Header, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, - ) -> Vec - where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + logger: &WithContext, + ) -> Vec { let block_hash = header.block_hash(); if height > self.best_block.height { @@ -5353,20 +5252,15 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn transactions_confirmed( + fn transactions_confirmed( &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, - ) -> Vec - where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + logger: &WithContext, + ) -> Vec { let funding_seen_before = self.funding_seen_onchain; let txn_matched = self.filter_block(txdata); @@ -5638,7 +5532,7 @@ impl ChannelMonitorImpl { /// `conf_height` should be set to the height at 
which any new transaction(s)/block(s) were /// confirmed at, even if it is not the current best height. #[rustfmt::skip] - fn block_confirmed( + fn block_confirmed( &mut self, conf_height: u32, conf_hash: BlockHash, @@ -5647,13 +5541,8 @@ impl ChannelMonitorImpl { mut claimable_outpoints: Vec, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, - ) -> Vec - where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + logger: &WithContext, + ) -> Vec { log_trace!(logger, "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height); debug_assert!(self.best_block.height >= conf_height); @@ -5731,6 +5620,7 @@ impl ChannelMonitorImpl { self.pending_events.push(Event::SpendableOutputs { outputs: vec![descriptor], channel_id: Some(self.channel_id()), + counterparty_node_id: Some(self.counterparty_node_id), }); self.spendable_txids_confirmed.push(entry.txid); }, @@ -5866,12 +5756,9 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn blocks_disconnected( - &mut self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &WithChannelMonitor - ) where B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + fn blocks_disconnected( + &mut self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &WithContext + ) { let new_height = fork_point.height; log_trace!(logger, "Block(s) disconnected to height {}", new_height); assert!(self.best_block.height > fork_point.height, @@ -5915,17 +5802,13 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn transaction_unconfirmed( + fn transaction_unconfirmed( &mut self, txid: &Txid, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, - ) where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - { + logger: &WithContext, + ) { let mut removed_height = None; for entry in 
self.onchain_events_awaiting_threshold_conf.iter() { if entry.txid == *txid { @@ -6030,9 +5913,9 @@ impl ChannelMonitorImpl { } #[rustfmt::skip] - fn should_broadcast_holder_commitment_txn( - &self, logger: &WithChannelMonitor - ) -> Option where L::Target: Logger { + fn should_broadcast_holder_commitment_txn( + &self, logger: &WithContext + ) -> Option { // There's no need to broadcast our commitment transaction if we've seen one confirmed (even // with 1 confirmation) as it'll be rejected as duplicate/conflicting. if self.funding_spend_confirmed.is_some() || @@ -6097,9 +5980,9 @@ impl ChannelMonitorImpl { /// Check if any transaction broadcasted is resolving HTLC output by a success or timeout on a holder /// or counterparty commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC #[rustfmt::skip] - fn is_resolving_htlc_output( - &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor, - ) where L::Target: Logger { + fn is_resolving_htlc_output( + &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithContext, + ) { let funding_spent = get_confirmed_funding_scope!(self); 'outer_loop: for input in &tx.input { @@ -6354,9 +6237,9 @@ impl ChannelMonitorImpl { /// Checks if the confirmed transaction is paying funds back to some address we can assume to /// own. 
#[rustfmt::skip] - fn check_tx_and_push_spendable_outputs( - &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor, - ) where L::Target: Logger { + fn check_tx_and_push_spendable_outputs( + &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithContext, + ) { let funding_spent = get_confirmed_funding_scope!(self); for spendable_output in self.get_spendable_outputs(funding_spent, tx) { let entry = OnchainEventEntry { @@ -6376,39 +6259,33 @@ impl ChannelMonitorImpl { } } -impl chain::Listen +impl chain::Listen for (ChannelMonitor, T, F, L) -where - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { - self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &self.3); + self.0.block_connected(header, txdata, height, &self.1, &self.2, &self.3); } fn blocks_disconnected(&self, fork_point: BestBlock) { - self.0.blocks_disconnected(fork_point, &*self.1, &*self.2, &self.3); + self.0.blocks_disconnected(fork_point, &self.1, &self.2, &self.3); } } -impl chain::Confirm for (M, T, F, L) +impl + chain::Confirm for (M, T, F, L) where M: Deref>, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, { fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { - self.0.transactions_confirmed(header, txdata, height, &*self.1, &*self.2, &self.3); + self.0.transactions_confirmed(header, txdata, height, &self.1, &self.2, &self.3); } fn transaction_unconfirmed(&self, txid: &Txid) { - self.0.transaction_unconfirmed(txid, &*self.1, &*self.2, &self.3); + self.0.transaction_unconfirmed(txid, &self.1, &self.2, &self.3); } fn best_block_updated(&self, header: &Header, height: u32) { - self.0.best_block_updated(header, height, &*self.1, &*self.2, &self.3); + self.0.best_block_updated(header, height, &self.1, &self.2, &self.3); } fn 
get_relevant_txids(&self) -> Vec<(Txid, u32, Option)> { @@ -6617,7 +6494,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP return Err(DecodeError::InvalidValue); } } - let onchain_tx_handler: OnchainTxHandler = ReadableArgs::read( + let mut onchain_tx_handler: OnchainTxHandler = ReadableArgs::read( reader, (entropy_source, signer_provider, channel_value_satoshis, channel_keys_id) )?; @@ -6713,6 +6590,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP } let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)); + onchain_tx_handler.set_channel_id(channel_id); let (current_holder_commitment_tx, current_holder_htlc_data) = { let holder_commitment_tx = onchain_tx_handler.current_holder_commitment_tx(); @@ -6767,6 +6645,8 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP }; let dummy_node_id = PublicKey::from_slice(&[2; 33]).unwrap(); + onchain_tx_handler + .set_counterparty_node_id(counterparty_node_id.unwrap_or(dummy_node_id)); let monitor = ChannelMonitor::from_impl(ChannelMonitorImpl { funding: FundingScope { channel_parameters, @@ -6900,8 +6780,9 @@ mod tests { DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey, }; - use crate::ln::channelmanager::{HTLCSource, PaymentId, RecipientOnionFields}; + use crate::ln::channelmanager::{HTLCSource, PaymentId}; use crate::ln::functional_test_utils::*; + use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::script::ShutdownScript; use crate::ln::types::ChannelId; use crate::sign::{ChannelSigner, InMemorySigner}; @@ -6911,10 +6792,7 @@ mod tests { use crate::util::logger::Logger; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::test_utils::{TestBroadcaster, TestFeeEstimator, TestLogger}; - use crate::{ - check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, - get_route_and_payment_hash, - }; + use 
crate::{check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash}; #[allow(unused_imports)] use crate::prelude::*; @@ -6936,7 +6814,8 @@ mod tests { // updates is handled correctly in such conditions. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let channel = create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); @@ -6973,7 +6852,7 @@ mod tests { nodes[1].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Build a new ChannelMonitorUpdate which contains both the failing commitment tx update // and provides the claim preimages for the two pending HTLCs. 
The first update generates diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index b4cc6a302ae..bc47f1b1db6 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -20,11 +20,12 @@ use bitcoin::secp256k1::PublicKey; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::impl_writeable_tlv_based; use crate::ln::types::ChannelId; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::HTLCDescriptor; +use core::ops::Deref; + #[allow(unused_imports)] use crate::prelude::*; @@ -346,6 +347,28 @@ pub trait Watch { ) -> Vec<(OutPoint, ChannelId, Vec, PublicKey)>; } +impl + ?Sized, W: Deref> + Watch for W +{ + fn watch_channel( + &self, channel_id: ChannelId, monitor: ChannelMonitor, + ) -> Result { + self.deref().watch_channel(channel_id, monitor) + } + + fn update_channel( + &self, channel_id: ChannelId, update: &ChannelMonitorUpdate, + ) -> ChannelMonitorUpdateStatus { + self.deref().update_channel(channel_id, update) + } + + fn release_pending_monitor_events( + &self, + ) -> Vec<(OutPoint, ChannelId, Vec, PublicKey)> { + self.deref().release_pending_monitor_events() + } +} + /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to /// channels. /// @@ -388,6 +411,16 @@ pub trait Filter { fn register_output(&self, output: WatchedOutput); } +impl> Filter for F { + fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { + self.deref().register_tx(txid, script_pubkey) + } + + fn register_output(&self, output: WatchedOutput) { + self.deref().register_output(output) + } +} + /// A transaction output watched by a [`ChannelMonitor`] for spends on-chain. /// /// Used to convey to a [`Filter`] such an output with a given spending condition. 
Any transaction diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index fb65aa0f157..3eb6d64f3a2 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -18,12 +18,14 @@ use bitcoin::hashes::Hash; use bitcoin::locktime::absolute::LockTime; use bitcoin::script::{Script, ScriptBuf}; use bitcoin::secp256k1; -use bitcoin::secp256k1::{ecdsa::Signature, Secp256k1}; +use bitcoin::secp256k1::{ecdsa::Signature, PublicKey, Secp256k1}; use bitcoin::transaction::OutPoint as BitcoinOutPoint; use bitcoin::transaction::Transaction; use crate::chain::chaininterface::ConfirmationTarget; -use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator}; +use crate::chain::chaininterface::{ + BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator, TransactionType, +}; use crate::chain::channelmonitor::ANTI_REORG_DELAY; use crate::chain::package::{PackageSolvingData, PackageTemplate}; use crate::chain::transaction::MaybeSignedTransaction; @@ -33,6 +35,7 @@ use crate::ln::chan_utils::{ HTLCOutputInCommitment, HolderCommitmentTransaction, }; use crate::ln::msgs::DecodeError; +use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, HTLCDescriptor, SignerProvider}; use crate::util::logger::Logger; use crate::util::ser::{ @@ -45,7 +48,6 @@ use alloc::collections::BTreeMap; use core::cmp; use core::mem::replace; use core::mem::swap; -use core::ops::Deref; const MAX_ALLOC_SIZE: usize = 64 * 1024; @@ -221,6 +223,8 @@ pub(crate) enum FeerateStrategy { /// do RBF bumping if possible. #[derive(Clone)] pub struct OnchainTxHandler { + channel_id: ChannelId, + counterparty_node_id: PublicKey, channel_value_satoshis: u64, // Deprecated as of 0.2. channel_keys_id: [u8; 32], // Deprecated as of 0.2. destination_script: ScriptBuf, // Deprecated as of 0.2. 
@@ -283,7 +287,9 @@ impl PartialEq for OnchainTxHandler bool { // `signer`, `secp_ctx`, and `pending_claim_events` are excluded on purpose. - self.channel_value_satoshis == other.channel_value_satoshis && + self.channel_id == other.channel_id && + self.counterparty_node_id == other.counterparty_node_id && + self.channel_value_satoshis == other.channel_value_satoshis && self.channel_keys_id == other.channel_keys_id && self.destination_script == other.destination_script && self.holder_commitment == other.holder_commitment && @@ -346,6 +352,22 @@ impl OnchainTxHandler { write_tlv_fields!(writer, {}); Ok(()) } + + // `ChannelMonitor`s already track the `channel_id`, however, due to the derserialization order + // there we can't make use of `ReadableArgs` to hand it into `OnchainTxHandler`'s + // deserialization logic directly. Instead we opt to initialize it with 0s and override it + // after reading the respective field via this method. + pub(crate) fn set_channel_id(&mut self, channel_id: ChannelId) { + self.channel_id = channel_id; + } + + // `ChannelMonitor`s already track the `counterparty_node_id`, however, due to the + // deserialization order there we can't make use of `ReadableArgs` to hand it into + // `OnchainTxHandler`'s deserialization logic directly. Instead we opt to initialize it with a + // dummy key and override it after reading the respective field via this method. 
+ pub(crate) fn set_counterparty_node_id(&mut self, counterparty_node_id: PublicKey) { + self.counterparty_node_id = counterparty_node_id; + } } impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP, u64, [u8; 32])> @@ -367,7 +389,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let prev_holder_commitment = Readable::read(reader)?; let _prev_holder_htlc_sigs: Option>> = Readable::read(reader)?; - let channel_parameters = ReadableArgs::>::read(reader, Some(channel_value_satoshis))?; + let channel_parameters: ChannelTransactionParameters = ReadableArgs::>::read(reader, Some(channel_value_satoshis))?; // Read the serialized signer bytes, but don't deserialize them, as we'll obtain our signer // by re-deriving the private key material. @@ -421,10 +443,20 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP read_tlv_fields!(reader, {}); + // `ChannelMonitor`s already track the `channel_id` and `counterparty_node_id`, however, due + // to the deserialization order there we can't make use of `ReadableArgs` to hand them in + // directly. Instead we opt to initialize them with dummy values and override them after + // reading the respective fields via `OnchainTxHandler::set_channel_id` and + // `OnchainTxHandler::set_counterparty_node_id`. 
+ let channel_id = ChannelId([0u8; 32]); + let counterparty_node_id = PublicKey::from_slice(&[2; 33]).unwrap(); + let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); Ok(OnchainTxHandler { + channel_id, + counterparty_node_id, channel_value_satoshis, channel_keys_id, destination_script, @@ -444,11 +476,14 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP impl OnchainTxHandler { pub(crate) fn new( - channel_value_satoshis: u64, channel_keys_id: [u8; 32], destination_script: ScriptBuf, - signer: ChannelSigner, channel_parameters: ChannelTransactionParameters, + channel_id: ChannelId, counterparty_node_id: PublicKey, channel_value_satoshis: u64, + channel_keys_id: [u8; 32], destination_script: ScriptBuf, signer: ChannelSigner, + channel_parameters: ChannelTransactionParameters, holder_commitment: HolderCommitmentTransaction, secp_ctx: Secp256k1, ) -> Self { OnchainTxHandler { + channel_id, + counterparty_node_id, channel_value_satoshis, channel_keys_id, destination_script, @@ -485,15 +520,11 @@ impl OnchainTxHandler { /// invoking this every 30 seconds, or lower if running in an environment with spotty /// connections, like on mobile. 
#[rustfmt::skip] - pub(super) fn rebroadcast_pending_claims( + pub(super) fn rebroadcast_pending_claims( &mut self, current_height: u32, feerate_strategy: FeerateStrategy, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) - where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - { + ) { let mut bump_requests = Vec::with_capacity(self.pending_claim_requests.len()); for (claim_id, request) in self.pending_claim_requests.iter() { let inputs = request.outpoints(); @@ -516,7 +547,7 @@ impl OnchainTxHandler { if tx.is_fully_signed() { let log_start = if feerate_was_bumped { "Broadcasting RBF-bumped" } else { "Rebroadcasting" }; log_info!(logger, "{} onchain {}", log_start, log_tx!(tx.0)); - broadcaster.broadcast_transactions(&[&tx.0]); + broadcaster.broadcast_transactions(&[(&tx.0, TransactionType::Claim { counterparty_node_id: self.counterparty_node_id, channel_id: self.channel_id })]); } else { log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", tx.0.compute_txid()); } @@ -554,13 +585,11 @@ impl OnchainTxHandler { /// Panics if there are signing errors, because signing operations in reaction to on-chain /// events are not expected to fail, and if they do, we may lose funds. #[rustfmt::skip] - fn generate_claim( + fn generate_claim( &mut self, cur_height: u32, cached_request: &PackageTemplate, feerate_strategy: &FeerateStrategy, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Option<(u32, u64, OnchainClaim)> - where F::Target: FeeEstimator, - { + ) -> Option<(u32, u64, OnchainClaim)> { let request_outpoints = cached_request.outpoints(); if request_outpoints.is_empty() { // Don't prune pending claiming request yet, we may have to resurrect HTLCs. 
Untractable @@ -761,14 +790,11 @@ impl OnchainTxHandler { /// does not need to equal the current blockchain tip height, which should be provided via /// `cur_height`, however it must never be higher than `cur_height`. #[rustfmt::skip] - pub(super) fn update_claims_view_from_requests( + pub(super) fn update_claims_view_from_requests( &mut self, mut requests: Vec, conf_height: u32, cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - { + fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + ) { if !requests.is_empty() { log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len()); } @@ -863,7 +889,7 @@ impl OnchainTxHandler { OnchainClaim::Tx(tx) => { if tx.is_fully_signed() { log_info!(logger, "Broadcasting onchain {}", log_tx!(tx.0)); - broadcaster.broadcast_transactions(&[&tx.0]); + broadcaster.broadcast_transactions(&[(&tx.0, TransactionType::Claim { counterparty_node_id: self.counterparty_node_id, channel_id: self.channel_id })]); } else { log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", tx.0.compute_txid()); } @@ -912,14 +938,11 @@ impl OnchainTxHandler { /// confirmed. This does not need to equal the current blockchain tip height, which should be /// provided via `cur_height`, however it must never be higher than `cur_height`. 
#[rustfmt::skip] - pub(super) fn update_claims_view_from_matched_txn( + pub(super) fn update_claims_view_from_matched_txn( &mut self, txn_matched: &[&Transaction], conf_height: u32, conf_hash: BlockHash, cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, - destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - { + destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + ) { let mut have_logged_intro = false; let mut maybe_log_intro = || { if !have_logged_intro { @@ -1084,7 +1107,7 @@ impl OnchainTxHandler { OnchainClaim::Tx(bump_tx) => { if bump_tx.is_fully_signed() { log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx.0)); - broadcaster.broadcast_transactions(&[&bump_tx.0]); + broadcaster.broadcast_transactions(&[(&bump_tx.0, TransactionType::Claim { counterparty_node_id: self.counterparty_node_id, channel_id: self.channel_id })]); } else { log_info!(logger, "Waiting for signature of RBF-bumped unsigned onchain transaction {}", bump_tx.0.compute_txid()); @@ -1110,7 +1133,7 @@ impl OnchainTxHandler { } #[rustfmt::skip] - pub(super) fn transaction_unconfirmed( + pub(super) fn transaction_unconfirmed( &mut self, txid: &Txid, broadcaster: &B, @@ -1118,10 +1141,7 @@ impl OnchainTxHandler { destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) where - B::Target: BroadcasterInterface, - F::Target: FeeEstimator, - { + ) { let mut height = None; for entry in self.onchain_events_awaiting_threshold_conf.iter() { if entry.txid == *txid { @@ -1138,13 +1158,10 @@ impl OnchainTxHandler { } #[rustfmt::skip] - pub(super) fn blocks_disconnected( + pub(super) fn blocks_disconnected( &mut self, new_best_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) - where B::Target: 
BroadcasterInterface, - F::Target: FeeEstimator, - { + ) { let mut bump_candidates = new_hash_map(); let onchain_events_awaiting_threshold_conf = self.onchain_events_awaiting_threshold_conf.drain(..).collect::>(); @@ -1187,7 +1204,7 @@ impl OnchainTxHandler { OnchainClaim::Tx(bump_tx) => { if bump_tx.is_fully_signed() { log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx.0)); - broadcaster.broadcast_transactions(&[&bump_tx.0]); + broadcaster.broadcast_transactions(&[(&bump_tx.0, TransactionType::Claim { counterparty_node_id: self.counterparty_node_id, channel_id: self.channel_id })]); } else { log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", bump_tx.0.compute_txid()); } @@ -1281,6 +1298,7 @@ mod tests { }; use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint}; use crate::ln::functional_test_utils::create_dummy_block; + use crate::ln::types::ChannelId; use crate::sign::{ChannelDerivationParameters, ChannelSigner, HTLCDescriptor, InMemorySigner}; use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::test_utils::{TestBroadcaster, TestFeeEstimator, TestLogger}; @@ -1364,7 +1382,10 @@ mod tests { } let holder_commit = HolderCommitmentTransaction::dummy(1000000, funding_outpoint, nondust_htlcs); let destination_script = ScriptBuf::new(); + let counterparty_node_id = PublicKey::from_slice(&[2; 33]).unwrap(); let mut tx_handler = OnchainTxHandler::new( + ChannelId::from_bytes([0; 32]), + counterparty_node_id, 1000000, [0; 32], destination_script.clone(), diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index db46f3be60d..0ef8855242b 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -46,7 +46,6 @@ use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, Writeable, Write use crate::io; use core::cmp; -use core::ops::Deref; #[allow(unused_imports)] use crate::prelude::*; @@ -184,7 +183,7 @@ 
impl_writeable_tlv_based!(RevokedOutput, { (12, on_counterparty_tx_csv, required), // Unused since 0.1, this setting causes downgrades to before 0.1 to refuse to // aggregate `RevokedOutput` claims, which is the more conservative stance. - (14, is_counterparty_balance_on_anchors, (legacy, (), |_| Some(()))), + (14, is_counterparty_balance_on_anchors, (legacy, (), |_| Ok(()), |_| Some(()))), (15, channel_parameters, (option: ReadableArgs, None)), // Added in 0.2. }); @@ -1512,12 +1511,10 @@ impl PackageTemplate { /// which was used to generate the value. Will not return less than `dust_limit_sats` for the /// value. #[rustfmt::skip] - pub(crate) fn compute_package_output( + pub(crate) fn compute_package_output( &self, predicted_weight: u64, dust_limit_sats: u64, feerate_strategy: &FeerateStrategy, conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Option<(u64, u64)> - where F::Target: FeeEstimator, - { + ) -> Option<(u64, u64)> { debug_assert!(matches!(self.malleability, PackageMalleability::Malleable(..)), "The package output is fixed for non-malleable packages"); let input_amounts = self.package_amount(); @@ -1540,10 +1537,10 @@ impl PackageTemplate { /// Computes a feerate based on the given confirmation target and feerate strategy. #[rustfmt::skip] - pub(crate) fn compute_package_feerate( + pub(crate) fn compute_package_feerate( &self, fee_estimator: &LowerBoundedFeeEstimator, conf_target: ConfirmationTarget, feerate_strategy: &FeerateStrategy, - ) -> u32 where F::Target: FeeEstimator { + ) -> u32 { let feerate_estimate = fee_estimator.bounded_sat_per_1000_weight(conf_target); if self.feerate_previous != 0 { let previous_feerate = self.feerate_previous.try_into().unwrap_or(u32::max_value()); @@ -1675,11 +1672,9 @@ impl Readable for PackageTemplate { /// fee and the corresponding updated feerate. If fee is under [`FEERATE_FLOOR_SATS_PER_KW`], /// we return nothing. 
#[rustfmt::skip] -fn compute_fee_from_spent_amounts( +fn compute_fee_from_spent_amounts( input_amounts: u64, predicted_weight: u64, conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L -) -> Option<(u64, u64)> - where F::Target: FeeEstimator, -{ +) -> Option<(u64, u64)> { let sweep_feerate = fee_estimator.bounded_sat_per_1000_weight(conf_target); let fee_rate = cmp::min(sweep_feerate, compute_feerate_sat_per_1000_weight(input_amounts / 2, predicted_weight)); let fee = fee_rate as u64 * (predicted_weight) / 1000; @@ -1701,14 +1696,11 @@ fn compute_fee_from_spent_amounts( /// respect BIP125 rules 3) and 4) and if required adjust the new fee to meet the RBF policy /// requirement. #[rustfmt::skip] -fn feerate_bump( +fn feerate_bump( predicted_weight: u64, input_amounts: u64, dust_limit_sats: u64, previous_feerate: u64, feerate_strategy: &FeerateStrategy, conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, -) -> Option<(u64, u64)> -where - F::Target: FeeEstimator, -{ +) -> Option<(u64, u64)> { let previous_fee = previous_feerate * predicted_weight / 1000; // If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee... diff --git a/lightning/src/crypto/chacha20.rs b/lightning/src/crypto/chacha20.rs index 5b0c16c933f..67f9e93c480 100644 --- a/lightning/src/crypto/chacha20.rs +++ b/lightning/src/crypto/chacha20.rs @@ -321,6 +321,7 @@ mod fuzzy_chacha { ) { debug_assert_eq!(dest.len(), src.len()); debug_assert!(dest.len() <= 32); + dest.copy_from_slice(src); } pub fn encrypt_single_block_in_place( diff --git a/lightning/src/crypto/utils.rs b/lightning/src/crypto/utils.rs index b59cc6002d9..1570b3a0b2f 100644 --- a/lightning/src/crypto/utils.rs +++ b/lightning/src/crypto/utils.rs @@ -5,8 +5,6 @@ use bitcoin::secp256k1::{ecdsa::Signature, Message, Secp256k1, SecretKey, Signin use crate::sign::EntropySource; -use core::ops::Deref; - macro_rules! 
hkdf_extract_expand { ($salt: expr, $ikm: expr) => {{ let mut hmac = HmacEngine::::new($salt); @@ -72,12 +70,9 @@ pub fn sign(ctx: &Secp256k1, msg: &Message, sk: &SecretKey) -> Si #[inline] #[allow(unused_variables)] -pub fn sign_with_aux_rand( +pub fn sign_with_aux_rand( ctx: &Secp256k1, msg: &Message, sk: &SecretKey, entropy_source: &ES, -) -> Signature -where - ES::Target: EntropySource, -{ +) -> Signature { #[cfg(feature = "grind_signatures")] let sig = loop { let sig = ctx.sign_ecdsa_with_noncedata(msg, sk, &entropy_source.get_secure_random_bytes()); diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 3d9beb82c07..ff034176385 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -14,10 +14,11 @@ pub mod sync; use alloc::collections::BTreeMap; +use core::future::Future; use core::ops::Deref; use crate::chain::chaininterface::{ - compute_feerate_sat_per_1000_weight, fee_for_weight, BroadcasterInterface, + compute_feerate_sat_per_1000_weight, fee_for_weight, BroadcasterInterface, TransactionType, }; use crate::chain::ClaimId; use crate::io_extras::sink; @@ -33,15 +34,17 @@ use crate::ln::types::ChannelId; use crate::prelude::*; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::{ - ChannelDerivationParameters, HTLCDescriptor, SignerProvider, P2WPKH_WITNESS_WEIGHT, + ChannelDerivationParameters, HTLCDescriptor, SignerProvider, P2TR_KEY_PATH_WITNESS_WEIGHT, + P2WPKH_WITNESS_WEIGHT, }; use crate::sync::Mutex; -use crate::util::async_poll::{AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{MaybeSend, MaybeSync}; use crate::util::logger::Logger; use bitcoin::amount::Amount; use bitcoin::consensus::Encodable; use bitcoin::constants::WITNESS_SCALE_FACTOR; +use bitcoin::key::TweakedPublicKey; use bitcoin::locktime::absolute::LockTime; use bitcoin::policy::MAX_STANDARD_TX_WEIGHT; use bitcoin::secp256k1; @@ -331,6 +334,17 @@ impl 
Utxo { satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT + P2WPKH_WITNESS_WEIGHT, } } + + /// Returns a `Utxo` with the `satisfaction_weight` estimate for a keypath spend of a SegWit v1 P2TR output. + pub fn new_v1_p2tr( + outpoint: OutPoint, value: Amount, tweaked_public_key: TweakedPublicKey, + ) -> Self { + Self { + outpoint, + output: TxOut { value, script_pubkey: ScriptBuf::new_p2tr_tweaked(tweaked_public_key) }, + satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT + P2TR_KEY_PATH_WITNESS_WEIGHT, + } + } } /// The result of a successful coin selection attempt for a transaction requiring additional UTXOs @@ -394,13 +408,15 @@ pub trait CoinSelectionSource { fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()>; + ) -> impl Future> + MaybeSend + 'a; /// Signs and provides the full witness for all inputs within the transaction known to the /// trait (i.e., any provided via [`CoinSelectionSource::select_confirmed_utxos`]). /// /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the /// unsigned transaction and then sign it with your wallet. - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a; } /// An alternative to [`CoinSelectionSource`] that can be implemented and used along [`Wallet`] to @@ -412,17 +428,23 @@ pub trait CoinSelectionSource { // Note that updates to documentation on this trait should be copied to the synchronous version. pub trait WalletSource { /// Returns all UTXOs, with at least 1 confirmation each, that are available to spend. 
- fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()>; + fn list_confirmed_utxos<'a>( + &'a self, + ) -> impl Future, ()>> + MaybeSend + 'a; /// Returns a script to use for change above dust resulting from a successful coin selection /// attempt. - fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()>; + fn get_change_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a; /// Signs and provides the full [`TxIn::script_sig`] and [`TxIn::witness`] for all inputs within /// the transaction known to the wallet (i.e., any provided via /// [`WalletSource::list_confirmed_utxos`]). /// /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the /// unsigned transaction and then sign it with your wallet. - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a; } /// A wrapper over [`WalletSource`] that implements [`CoinSelectionSource`] by preferring UTXOs @@ -433,10 +455,9 @@ pub trait WalletSource { /// /// This is not exported to bindings users as async is only supported in Rust. // Note that updates to documentation on this struct should be copied to the synchronous version. -pub struct Wallet +pub struct Wallet where W::Target: WalletSource + MaybeSend, - L::Target: Logger + MaybeSend, { source: W, logger: L, @@ -446,10 +467,9 @@ where locked_utxos: Mutex>, } -impl Wallet +impl Wallet where W::Target: WalletSource + MaybeSend, - L::Target: Logger + MaybeSend, { /// Returns a new instance backed by the given [`WalletSource`] that serves as an implementation /// of [`CoinSelectionSource`]. 
@@ -608,17 +628,16 @@ where } } -impl CoinSelectionSource +impl CoinSelectionSource for Wallet where W::Target: WalletSource + MaybeSend + MaybeSync, - L::Target: Logger + MaybeSend + MaybeSync, { fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()> { - Box::pin(async move { + ) -> impl Future> + MaybeSend + 'a { + async move { let utxos = self.source.list_confirmed_utxos().await?; // TODO: Use fee estimation utils when we upgrade to bitcoin v0.30.0. let total_output_size: u64 = must_pay_to @@ -665,10 +684,12 @@ where } } Err(()) - }) + } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { self.source.sign_psbt(psbt) } } @@ -683,12 +704,13 @@ where /// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction // Note that updates to documentation on this struct should be copied to the synchronous version. -pub struct BumpTransactionEventHandler -where - B::Target: BroadcasterInterface, +pub struct BumpTransactionEventHandler< + B: BroadcasterInterface, + C: Deref, + SP: SignerProvider, + L: Logger, +> where C::Target: CoinSelectionSource, - SP::Target: SignerProvider, - L::Target: Logger, { broadcaster: B, utxo_source: C, @@ -697,12 +719,10 @@ where secp: Secp256k1, } -impl BumpTransactionEventHandler +impl + BumpTransactionEventHandler where - B::Target: BroadcasterInterface, C::Target: CoinSelectionSource, - SP::Target: SignerProvider, - L::Target: Logger, { /// Returns a new instance capable of handling [`Event::BumpTransaction`] events. /// @@ -754,9 +774,9 @@ where /// transaction spending an anchor output of the commitment transaction to bump its fee and /// broadcasts them to the network as a package. 
async fn handle_channel_close( - &self, claim_id: ClaimId, package_target_feerate_sat_per_1000_weight: u32, - commitment_tx: &Transaction, commitment_tx_fee_sat: u64, - anchor_descriptor: &AnchorDescriptor, + &self, channel_id: ChannelId, counterparty_node_id: PublicKey, claim_id: ClaimId, + package_target_feerate_sat_per_1000_weight: u32, commitment_tx: &Transaction, + commitment_tx_fee_sat: u64, anchor_descriptor: &AnchorDescriptor, ) -> Result<(), ()> { let channel_type = &anchor_descriptor .channel_derivation_parameters @@ -777,7 +797,10 @@ where log_debug!(self.logger, "Pre-signed commitment {} already has feerate {} sat/kW above required {} sat/kW, broadcasting.", commitment_tx.compute_txid(), commitment_tx_feerate_sat_per_1000_weight, package_target_feerate_sat_per_1000_weight); - self.broadcaster.broadcast_transactions(&[&commitment_tx]); + self.broadcaster.broadcast_transactions(&[( + &commitment_tx, + TransactionType::UnilateralClose { counterparty_node_id, channel_id }, + )]); return Ok(()); } @@ -944,7 +967,13 @@ where anchor_txid, commitment_tx.compute_txid() ); - self.broadcaster.broadcast_transactions(&[&commitment_tx, &anchor_tx]); + self.broadcaster.broadcast_transactions(&[ + ( + &commitment_tx, + TransactionType::UnilateralClose { counterparty_node_id, channel_id }, + ), + (&anchor_tx, TransactionType::AnchorBump { counterparty_node_id, channel_id }), + ]); return Ok(()); } } @@ -952,8 +981,9 @@ where /// Handles a [`BumpTransactionEvent::HTLCResolution`] event variant by producing a /// fully-signed, fee-bumped HTLC transaction that is broadcast to the network. 
async fn handle_htlc_resolution( - &self, claim_id: ClaimId, target_feerate_sat_per_1000_weight: u32, - htlc_descriptors: &[HTLCDescriptor], tx_lock_time: LockTime, + &self, channel_id: ChannelId, counterparty_node_id: PublicKey, claim_id: ClaimId, + target_feerate_sat_per_1000_weight: u32, htlc_descriptors: &[HTLCDescriptor], + tx_lock_time: LockTime, ) -> Result<(), ()> { let channel_type = &htlc_descriptors[0] .channel_derivation_parameters @@ -1177,7 +1207,10 @@ where } log_info!(self.logger, "Broadcasting {}", log_tx!(htlc_tx)); - self.broadcaster.broadcast_transactions(&[&htlc_tx]); + self.broadcaster.broadcast_transactions(&[( + &htlc_tx, + TransactionType::UnilateralClose { counterparty_node_id, channel_id }, + )]); } Ok(()) @@ -1187,6 +1220,8 @@ where pub async fn handle_event(&self, event: &BumpTransactionEvent) { match event { BumpTransactionEvent::ChannelClose { + channel_id, + counterparty_node_id, claim_id, package_target_feerate_sat_per_1000_weight, commitment_tx, @@ -1201,6 +1236,8 @@ where commitment_tx.compute_txid() ); self.handle_channel_close( + *channel_id, + *counterparty_node_id, *claim_id, *package_target_feerate_sat_per_1000_weight, commitment_tx, @@ -1217,6 +1254,8 @@ where }); }, BumpTransactionEvent::HTLCResolution { + channel_id, + counterparty_node_id, claim_id, target_feerate_sat_per_1000_weight, htlc_descriptors, @@ -1230,6 +1269,8 @@ where log_iter!(htlc_descriptors.iter().map(|d| d.outpoint())) ); self.handle_htlc_resolution( + *channel_id, + *counterparty_node_id, *claim_id, *target_feerate_sat_per_1000_weight, htlc_descriptors, @@ -1265,7 +1306,9 @@ mod tests { use bitcoin::hashes::Hash; use bitcoin::hex::FromHex; - use bitcoin::{Network, ScriptBuf, Transaction, Txid}; + use bitcoin::{ + Network, ScriptBuf, Transaction, Txid, WitnessProgram, WitnessVersion, XOnlyPublicKey, + }; struct TestCoinSelectionSource { // (commitment + anchor value, commitment + input weight, target feerate, result) @@ -1382,4 +1425,30 @@ mod tests { 
pending_htlcs: Vec::new(), }); } + + #[test] + fn test_utxo_new_v1_p2tr() { + // Transaction 33e794d097969002ee05d336686fc03c9e15a597c1b9827669460fac98799036 + let p2tr_tx: Transaction = bitcoin::consensus::deserialize(&>::from_hex("01000000000101d1f1c1f8cdf6759167b90f52c9ad358a369f95284e841d7a2536cef31c0549580100000000fdffffff020000000000000000316a2f49206c696b65205363686e6f7272207369677320616e6420492063616e6e6f74206c69652e204062697462756734329e06010000000000225120a37c3903c8d0db6512e2b40b0dffa05e5a3ab73603ce8c9c4b7771e5412328f90140a60c383f71bac0ec919b1d7dbc3eb72dd56e7aa99583615564f9f99b8ae4e837b758773a5b2e4c51348854c8389f008e05029db7f464a5ff2e01d5e6e626174affd30a00").unwrap()).unwrap(); + + let script_pubkey = &p2tr_tx.output[1].script_pubkey; + assert_eq!(script_pubkey.witness_version(), Some(WitnessVersion::V1)); + let witness_bytes = &script_pubkey.as_bytes()[2..]; + let witness_program = WitnessProgram::new(WitnessVersion::V1, witness_bytes).unwrap(); + let tweaked_key = TweakedPublicKey::dangerous_assume_tweaked( + XOnlyPublicKey::from_slice(&witness_program.program().as_bytes()).unwrap(), + ); + + let utxo = Utxo::new_v1_p2tr( + OutPoint { txid: p2tr_tx.compute_txid(), vout: 1 }, + p2tr_tx.output[1].value, + tweaked_key, + ); + assert_eq!(utxo.output, p2tr_tx.output[1]); + assert_eq!( + utxo.satisfaction_weight, + 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64 + + 1 /* witness items */ + 1 /* schnorr sig len */ + 64 /* schnorr sig */ + ); + } } diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index 653710a3358..f4245cd5194 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -11,13 +11,14 @@ use core::future::Future; use core::ops::Deref; +use core::pin::pin; use core::task; use crate::chain::chaininterface::BroadcasterInterface; use crate::chain::ClaimId; use crate::prelude::*; use crate::sign::SignerProvider; -use 
crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{dummy_waker, MaybeSend, MaybeSync}; use crate::util::logger::Logger; use bitcoin::{Psbt, ScriptBuf, Transaction, TxOut}; @@ -71,19 +72,25 @@ impl WalletSource for WalletSourceSyncWrapper where T::Target: WalletSourceSync, { - fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()> { + fn list_confirmed_utxos<'a>( + &'a self, + ) -> impl Future, ()>> + MaybeSend + 'a { let utxos = self.0.list_confirmed_utxos(); - Box::pin(async move { utxos }) + async move { utxos } } - fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()> { + fn get_change_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a { let script = self.0.get_change_script(); - Box::pin(async move { script }) + async move { script } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { let signed_psbt = self.0.sign_psbt(psbt); - Box::pin(async move { signed_psbt }) + async move { signed_psbt } } } @@ -93,18 +100,16 @@ where /// /// For an asynchronous version of this wrapper, see [`Wallet`]. // Note that updates to documentation on this struct should be copied to the asynchronous version. -pub struct WalletSync +pub struct WalletSync where W::Target: WalletSourceSync + MaybeSend, - L::Target: Logger + MaybeSend, { wallet: Wallet, L>, } -impl WalletSync +impl WalletSync where W::Target: WalletSourceSync + MaybeSend, - L::Target: Logger + MaybeSend, { /// Constructs a new [`WalletSync`] instance. 
pub fn new(source: W, logger: L) -> Self { @@ -112,17 +117,16 @@ where } } -impl CoinSelectionSourceSync +impl CoinSelectionSourceSync for WalletSync where W::Target: WalletSourceSync + MaybeSend + MaybeSync, - L::Target: Logger + MaybeSend + MaybeSync, { fn select_confirmed_utxos( &self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &[TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, ) -> Result { - let mut fut = self.wallet.select_confirmed_utxos( + let fut = self.wallet.select_confirmed_utxos( claim_id, must_spend, must_pay_to, @@ -131,7 +135,7 @@ where ); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match fut.as_mut().poll(&mut ctx) { + match pin!(fut).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { unreachable!( @@ -142,10 +146,10 @@ where } fn sign_psbt(&self, psbt: Psbt) -> Result { - let mut fut = self.wallet.sign_psbt(psbt); + let fut = self.wallet.sign_psbt(psbt); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match fut.as_mut().poll(&mut ctx) { + match pin!(fut).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { unreachable!("Wallet::sign_psbt should not be pending in a sync context"); @@ -233,7 +237,7 @@ where fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()> { + ) -> impl Future> + MaybeSend + 'a { let coins = self.0.select_confirmed_utxos( claim_id, must_spend, @@ -241,12 +245,14 @@ where target_feerate_sat_per_1000_weight, max_tx_weight, ); - Box::pin(async move { coins }) + async move { coins } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { let psbt = self.0.sign_psbt(psbt); - Box::pin(async move { psbt }) + async move { 
psbt } } } @@ -258,23 +264,22 @@ where /// /// [`Event::BumpTransaction`]: crate::events::Event::BumpTransaction // Note that updates to documentation on this struct should be copied to the synchronous version. -pub struct BumpTransactionEventHandlerSync -where - B::Target: BroadcasterInterface, +pub struct BumpTransactionEventHandlerSync< + B: BroadcasterInterface, + C: Deref, + SP: SignerProvider, + L: Logger, +> where C::Target: CoinSelectionSourceSync, - SP::Target: SignerProvider, - L::Target: Logger, { bump_transaction_event_handler: BumpTransactionEventHandler, SP, L>, } -impl BumpTransactionEventHandlerSync +impl + BumpTransactionEventHandlerSync where - B::Target: BroadcasterInterface, C::Target: CoinSelectionSourceSync, - SP::Target: SignerProvider, - L::Target: Logger, { /// Constructs a new instance of [`BumpTransactionEventHandlerSync`]. pub fn new(broadcaster: B, utxo_source: C, signer_provider: SP, logger: L) -> Self { @@ -289,7 +294,7 @@ where /// Handles all variants of [`BumpTransactionEvent`]. 
pub fn handle_event(&self, event: &BumpTransactionEvent) { - let mut fut = Box::pin(self.bump_transaction_event_handler.handle_event(event)); + let mut fut = pin!(self.bump_transaction_event_handler.handle_event(event)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); match fut.as_mut().poll(&mut ctx) { diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index b9c4b1ca1ef..3dfed10d5c8 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -24,9 +24,11 @@ use crate::blinded_path::payment::{ }; use crate::chain::transaction; use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS; -use crate::ln::channelmanager::{InterceptId, PaymentId, RecipientOnionFields}; +use crate::ln::channelmanager::{InterceptId, PaymentId}; +use crate::ln::msgs; +use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::types::ChannelId; -use crate::ln::{msgs, LocalHTLCFailureReason}; use crate::offers::invoice::Bolt12Invoice; use crate::offers::invoice_request::InvoiceRequest; use crate::offers::static_invoice::StaticInvoice; @@ -243,9 +245,7 @@ pub struct ClaimedHTLC { pub channel_id: ChannelId, /// The `user_channel_id` of the channel over which the HTLC was received. This is the value /// passed in to [`ChannelManager::create_channel`] for outbound channels, or to - /// [`ChannelManager::accept_inbound_channel`] for inbound channels if - /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise - /// `user_channel_id` will be randomized for an inbound channel. + /// [`ChannelManager::accept_inbound_channel`] for inbound channels. /// /// This field will be zero for a payment that was serialized prior to LDK version 0.0.117. 
(This /// should only happen in the case that a payment was claimable prior to LDK version 0.0.117, but @@ -253,7 +253,6 @@ pub struct ClaimedHTLC { /// /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels pub user_channel_id: u128, /// The block height at which this HTLC expires. pub cltv_expiry: u32, @@ -661,7 +660,7 @@ pub enum PaymentFailureReason { #[cfg_attr(feature = "std", doc = "")] #[cfg_attr( feature = "std", - doc = "[`Retry::Timeout`]: crate::ln::channelmanager::Retry::Timeout" + doc = "[`Retry::Timeout`]: crate::ln::outbound_payment::Retry::Timeout" )] RetriesExhausted, /// Either the BOLT 12 invoice was expired by the time we received it or the payment expired while @@ -763,14 +762,11 @@ pub enum Event { /// The script which should be used in the transaction output. output_script: ScriptBuf, /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound - /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if - /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise - /// `user_channel_id` will be randomized for an inbound channel. This may be zero for objects - /// serialized with LDK versions prior to 0.0.113. + /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels. + /// This may be zero for objects serialized with LDK versions prior to 0.0.113. 
/// /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels user_channel_id: u128, }, /// Used to indicate that the counterparty node has provided the signature(s) required to @@ -1081,7 +1077,7 @@ pub enum Event { /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. /// - /// [`Retry`]: crate::ln::channelmanager::Retry + /// [`Retry`]: crate::ln::outbound_payment::Retry /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment PaymentFailed { /// The `payment_id` passed to [`ChannelManager::send_payment`]. @@ -1249,28 +1245,29 @@ pub enum Event { short_channel_id: Option, }, /// Used to indicate that we've intercepted an HTLC forward. This event will only be generated if - /// you've encoded an intercept scid in the receiver's invoice route hints using - /// [`ChannelManager::get_intercept_scid`] and have set [`UserConfig::accept_intercept_htlcs`]. + /// you've set some flags on [`UserConfig::htlc_interception_flags`]. /// /// [`ChannelManager::forward_intercepted_htlc`] or - /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to this event. See - /// their docs for more information. + /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to this event in a + /// timely manner (i.e. within some number of seconds, not minutes). See their docs for more + /// information. /// /// # Failure Behavior and Persistence /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. 
/// - /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid - /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs + /// [`UserConfig::htlc_interception_flags`]: crate::util::config::UserConfig::htlc_interception_flags /// [`ChannelManager::forward_intercepted_htlc`]: crate::ln::channelmanager::ChannelManager::forward_intercepted_htlc /// [`ChannelManager::fail_intercepted_htlc`]: crate::ln::channelmanager::ChannelManager::fail_intercepted_htlc HTLCIntercepted { /// An id to help LDK identify which HTLC is being forwarded or failed. intercept_id: InterceptId, - /// The fake scid that was programmed as the next hop's scid, generated using - /// [`ChannelManager::get_intercept_scid`]. + /// The SCID which was selected by the sender as the next hop. It may point to one of our + /// channels, an intercept SCID generated with [`ChannelManager::get_intercept_scid`], or + /// an unknown SCID if [`HTLCInterceptionFlags::ToUnknownSCIDs`] was selected. /// /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid + /// [`HTLCInterceptionFlags::ToUnknownSCIDs`]: crate::util::config::HTLCInterceptionFlags::ToUnknownSCIDs requested_next_hop_scid: u64, /// The payment hash used for this HTLC. payment_hash: PaymentHash, @@ -1281,9 +1278,17 @@ pub enum Event { /// Forwarding less than this amount may break compatibility with LDK versions prior to 0.0.116. /// /// Note that LDK will NOT check that expected fees were factored into this value. You MUST - /// check that whatever fee you want has been included here or subtract it as required. Further, + /// check that whatever fee you want has been included here (by comparing with + /// [`Self::HTLCIntercepted::inbound_amount_msat`]) or subtract it as required. Further, /// LDK will not stop you from forwarding more than you received. 
expected_outbound_amount_msat: u64, + /// The block height at which the forwarded HTLC sent to our peer will time out. In + /// practice, LDK will refuse to forward an HTLC several blocks before this height (as if + /// we attempted to forward an HTLC at this height we'd run some risk that our peer + /// force-closes the channel immediately). + /// + /// This will only be `None` for events generated or serialized by LDK 0.2 or prior. + outgoing_htlc_expiry_block_height: Option, }, /// Used to indicate that an output which you should know how to spend was confirmed on chain /// and is now spendable. @@ -1307,6 +1312,10 @@ pub enum Event { /// /// This will always be `Some` for events generated by LDK versions 0.0.117 and above. channel_id: Option, + /// The `node_id` of the channel counterparty. + /// + /// This will always be `Some` for events generated by LDK versions 0.3 and above. + counterparty_node_id: Option, }, /// This event is generated when a payment has been successfully forwarded through us and a /// forwarding fee earned. @@ -1393,13 +1402,10 @@ pub enum Event { /// The `channel_id` of the channel that is pending confirmation. channel_id: ChannelId, /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound - /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if - /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise - /// `user_channel_id` will be randomized for an inbound channel. + /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels. 
/// /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels user_channel_id: u128, /// The `temporary_channel_id` this channel used to be known by during channel establishment. /// @@ -1433,13 +1439,10 @@ pub enum Event { /// The `channel_id` of the channel that is ready. channel_id: ChannelId, /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound - /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if - /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise - /// `user_channel_id` will be randomized for an inbound channel. + /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels. /// /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels user_channel_id: u128, /// The `node_id` of the channel counterparty. counterparty_node_id: PublicKey, @@ -1455,11 +1458,10 @@ pub enum Event { /// process of closure. This includes previously opened channels, and channels that time out from not being funded. /// /// Note that this event is only triggered for accepted channels: if the - /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true and the channel is - /// rejected, no `ChannelClosed` event will be sent. + /// [`Event::OpenChannelRequest`] was rejected, no `ChannelClosed` event will be sent. 
/// /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels + /// [`Event::OpenChannelRequest`]: Event::OpenChannelRequest /// /// # Failure Behavior and Persistence /// This event will eventually be replayed after failures-to-handle (i.e., the event handler @@ -1469,15 +1471,12 @@ pub enum Event { /// resolving the channel are likely still awaiting confirmation. channel_id: ChannelId, /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound - /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if - /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise - /// `user_channel_id` will be randomized for inbound channels. + /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels. /// This may be zero for inbound channels serialized prior to 0.0.113 and will always be /// zero for objects serialized with LDK versions prior to 0.0.102. /// /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels user_channel_id: u128, /// The reason the channel was closed. reason: ClosureReason, @@ -1521,13 +1520,10 @@ pub enum Event { /// The `channel_id` of the channel that has a pending splice funding transaction. channel_id: ChannelId, /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound - /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if - /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. 
Otherwise - /// `user_channel_id` will be randomized for an inbound channel. + /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels. /// /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels user_channel_id: u128, /// The `node_id` of the channel counterparty. counterparty_node_id: PublicKey, @@ -1554,13 +1550,10 @@ pub enum Event { /// The `channel_id` of the channel for which the splice failed. channel_id: ChannelId, /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound - /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if - /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise - /// `user_channel_id` will be randomized for an inbound channel. + /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels. /// /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels user_channel_id: u128, /// The `node_id` of the channel counterparty. counterparty_node_id: PublicKey, @@ -1593,14 +1586,12 @@ pub enum Event { }, /// Indicates a request to open a new channel by a peer. /// + /// This event is triggered for all inbound requests to open a new channel. /// To accept the request (and in the case of a dual-funded channel, not contribute funds), /// call [`ChannelManager::accept_inbound_channel`]. 
/// To reject the request, call [`ChannelManager::force_close_broadcasting_latest_txn`]. /// Note that a [`ChannelClosed`] event will _not_ be triggered if the channel is rejected. /// - /// The event is only triggered when a new open channel request is received and the - /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. - /// /// # Failure Behavior and Persistence /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and won't be persisted across restarts. @@ -1608,7 +1599,6 @@ pub enum Event { /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel /// [`ChannelClosed`]: Event::ChannelClosed /// [`ChannelManager::force_close_broadcasting_latest_txn`]: crate::ln::channelmanager::ChannelManager::force_close_broadcasting_latest_txn - /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels OpenChannelRequest { /// The temporary channel ID of the channel requested to be opened. /// @@ -1850,11 +1840,11 @@ pub enum Event { /// /// [`ChannelManager::funding_transaction_signed`]: crate::ln::channelmanager::ChannelManager::funding_transaction_signed counterparty_node_id: PublicKey, - /// The `user_channel_id` value passed in for outbound channels, or for inbound channels if - /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise - /// `user_channel_id` will be randomized for inbound channels. + /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound + /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels. 
/// - /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels + /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel + /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel user_channel_id: u128, /// The unsigned transaction to be signed and passed back to /// [`ChannelManager::funding_transaction_signed`]. @@ -2001,11 +1991,12 @@ impl Writeable for Event { }); }, // 4u8 used to be `PendingHTLCsForwardable` - &Event::SpendableOutputs { ref outputs, channel_id } => { + &Event::SpendableOutputs { ref outputs, channel_id, counterparty_node_id } => { 5u8.write(writer)?; write_tlv_fields!(writer, { (0, WithoutLength(outputs), required), (1, channel_id, option), + (3, counterparty_node_id, option), }); }, &Event::HTLCIntercepted { @@ -2014,11 +2005,13 @@ impl Writeable for Event { inbound_amount_msat, expected_outbound_amount_msat, intercept_id, + outgoing_htlc_expiry_block_height, } => { 6u8.write(writer)?; let intercept_scid = InterceptNextHop::FakeScid { requested_next_hop_scid }; write_tlv_fields!(writer, { (0, intercept_id, required), + (1, outgoing_htlc_expiry_block_height, option), (2, intercept_scid, required), (4, payment_hash, required), (6, inbound_amount_msat, required), @@ -2508,11 +2501,17 @@ impl MaybeReadable for Event { let mut f = || { let mut outputs = WithoutLength(Vec::new()); let mut channel_id: Option = None; + let mut counterparty_node_id: Option = None; read_tlv_fields!(reader, { (0, outputs, required), (1, channel_id, option), + (3, counterparty_node_id, option), }); - Ok(Some(Event::SpendableOutputs { outputs: outputs.0, channel_id })) + Ok(Some(Event::SpendableOutputs { + outputs: outputs.0, + channel_id, + counterparty_node_id, + })) }; f() }, @@ -2523,8 +2522,10 @@ impl MaybeReadable for Event { InterceptNextHop::FakeScid { requested_next_hop_scid: 0 }; let mut 
inbound_amount_msat = 0; let mut expected_outbound_amount_msat = 0; + let mut outgoing_htlc_expiry_block_height = None; read_tlv_fields!(reader, { (0, intercept_id, required), + (1, outgoing_htlc_expiry_block_height, option), (2, requested_next_hop_scid, required), (4, payment_hash, required), (6, inbound_amount_msat, required), @@ -2539,6 +2540,7 @@ impl MaybeReadable for Event { inbound_amount_msat, expected_outbound_amount_msat, intercept_id, + outgoing_htlc_expiry_block_height, })) }, 7u8 => { diff --git a/lightning/src/ln/accountable_tests.rs b/lightning/src/ln/accountable_tests.rs new file mode 100644 index 00000000000..16ca1425817 --- /dev/null +++ b/lightning/src/ln/accountable_tests.rs @@ -0,0 +1,101 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Tests for verifying the correct relay of accountable signals between nodes. 
+ +use crate::ln::channelmanager::{HTLCForwardInfo, PaymentId, PendingAddHTLCInfo, PendingHTLCInfo}; +use crate::ln::functional_test_utils::*; +use crate::ln::msgs::ChannelMessageHandler; +use crate::ln::outbound_payment::{RecipientOnionFields, Retry}; +use crate::routing::router::{PaymentParameters, RouteParameters}; + +fn test_accountable_forwarding_with_override( + override_accountable: Option, expected_forwarded: bool, +) { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let _chan_ab = create_announced_chan_between_nodes(&nodes, 0, 1); + let _chan_bc = create_announced_chan_between_nodes(&nodes, 1, 2); + + let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); + let route_params = RouteParameters::from_payment_params_and_value( + PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV), + 100_000, + ); + let onion_fields = RecipientOnionFields::secret_only(payment_secret); + let payment_id = PaymentId(payment_hash.0); + nodes[0] + .node + .send_payment(payment_hash, onion_fields, payment_id, route_params, Retry::Attempts(0)) + .unwrap(); + check_added_monitors(&nodes[0], 1); + + let updates_ab = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + assert_eq!(updates_ab.update_add_htlcs.len(), 1); + let mut htlc_ab = updates_ab.update_add_htlcs[0].clone(); + assert_eq!(htlc_ab.accountable, Some(false)); + + // Override accountable value if requested + if let Some(override_value) = override_accountable { + htlc_ab.accountable = Some(override_value); + } + + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &htlc_ab); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_ab.commitment_signed, false, false); + expect_and_process_pending_htlcs(&nodes[1], false); + 
check_added_monitors(&nodes[1], 1); + + let updates_bc = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); + assert_eq!(updates_bc.update_add_htlcs.len(), 1); + let htlc_bc = &updates_bc.update_add_htlcs[0]; + assert_eq!( + htlc_bc.accountable, + Some(expected_forwarded), + "B -> C should have accountable = {:?}", + expected_forwarded + ); + + nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), htlc_bc); + do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_bc.commitment_signed, false, false); + + // Accountable signal is not surfaced in PaymentClaimable, so we do our next-best and check + // that the received htlcs that will be processed has the signal set as we expect. We manually + // process pending update adds so that we can access the htlc in forward_htlcs. + nodes[2].node.test_process_pending_update_add_htlcs(); + { + let fwds_lock = nodes[2].node.forward_htlcs.lock().unwrap(); + let recvs = fwds_lock.get(&0).unwrap(); + assert_eq!(recvs.len(), 1); + match recvs[0] { + HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + forward_info: PendingHTLCInfo { incoming_accountable, .. }, + .. + }) => { + assert_eq!(incoming_accountable, expected_forwarded) + }, + _ => panic!("Unexpected forward"), + } + } + + expect_and_process_pending_htlcs(&nodes[2], false); + check_added_monitors(&nodes[2], 0); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, 100_000); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); +} + +#[test] +fn test_accountable_signal() { + // Tests forwarding of accountable signal for various incoming signal values. 
+ test_accountable_forwarding_with_override(None, false); + test_accountable_forwarding_with_override(Some(true), true); + test_accountable_forwarding_with_override(Some(false), false); +} diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 8e7fbdf94fd..8a991b1d98d 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -10,18 +10,15 @@ use crate::blinded_path::message::{ BlindedMessagePath, MessageContext, NextMessageHop, OffersContext, }; -use crate::blinded_path::payment::PaymentContext; use crate::blinded_path::payment::{AsyncBolt12OfferContext, BlindedPaymentTlvs}; +use crate::blinded_path::payment::{DummyTlvs, PaymentContext}; use crate::chain::channelmonitor::{HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::events::{ Event, EventsProvider, HTLCHandlingFailureReason, HTLCHandlingFailureType, PaidBolt12Invoice, PaymentFailureReason, PaymentPurpose, }; use crate::ln::blinded_payment_tests::{fail_blinded_htlc_backwards, get_blinded_route_parameters}; -use crate::ln::channelmanager::{ - Bolt12PaymentError, OptionalOfferPaymentParams, PaymentId, RecipientOnionFields, - MIN_CLTV_EXPIRY_DELTA, -}; +use crate::ln::channelmanager::{OptionalOfferPaymentParams, PaymentId, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::functional_test_utils::*; use crate::ln::inbound_payment; use crate::ln::msgs; @@ -30,6 +27,7 @@ use crate::ln::msgs::{ }; use crate::ln::offers_tests; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::{Bolt12PaymentError, RecipientOnionFields}; use crate::ln::outbound_payment::{ PendingOutboundPayment, Retry, TEST_ASYNC_PAYMENT_TIMEOUT_RELATIVE_EXPIRY, }; @@ -55,12 +53,12 @@ use crate::onion_message::messenger::{ use crate::onion_message::offers::OffersMessage; use crate::onion_message::packet::ParsedOnionMessageContents; use crate::prelude::*; -use crate::routing::router::{Payee, PaymentParameters}; +use 
crate::routing::router::{Payee, PaymentParameters, DEFAULT_PAYMENT_DUMMY_HOPS}; use crate::sign::NodeSigner; use crate::sync::Mutex; use crate::types::features::Bolt12InvoiceFeatures; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; -use crate::util::config::UserConfig; +use crate::util::config::{HTLCInterceptionFlags, UserConfig}; use crate::util::ser::Writeable; use bitcoin::constants::ChainHash; use bitcoin::network::Network; @@ -981,10 +979,11 @@ fn ignore_duplicate_invoice() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&always_online_node_id, &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[always_online_node, async_recipient]]; - let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let (res, _) = @@ -1060,10 +1059,11 @@ fn ignore_duplicate_invoice() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&always_online_node_id, &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) - .without_clearing_recipient_events(); + .without_clearing_recipient_events() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); let payment_preimage = match get_event!(async_recipient, Event::PaymentClaimable) { @@ -1129,7 +1129,7 @@ fn async_receive_flow_success() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); let payment_hash = 
extract_payment_hash(&ev); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Receiving a duplicate release_htlc message doesn't result in duplicate payment. nodes[0] @@ -1138,7 +1138,8 @@ fn async_receive_flow_success() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; - let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let (res, _) = @@ -1375,11 +1376,13 @@ fn async_receive_mpp() { }; let args = PassAlongPathArgs::new(&nodes[0], expected_route[0], amt_msat, payment_hash, ev) - .without_claimable_event(); + .without_claimable_event() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); - let args = PassAlongPathArgs::new(&nodes[0], expected_route[1], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], expected_route[1], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = match claimable_ev { Event::PaymentClaimable { @@ -1497,7 +1500,8 @@ fn amount_doesnt_match_invreq() { let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .without_claimable_event() - .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); // Modify the 
invoice request stored in our outbounds to be the correct one, to make sure the @@ -1519,9 +1523,10 @@ fn amount_doesnt_match_invreq() { let mut ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); assert!(matches!( ev, MessageSendEvent::UpdateHTLCs { ref updates, .. } if updates.update_add_htlcs.len() == 1)); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[2], &nodes[3]]]; - let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, keysend_preimage)); @@ -1712,7 +1717,8 @@ fn invalid_async_receive_with_retry( let payment_hash = extract_payment_hash(&ev); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; - let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); // Fail the HTLC backwards to enable us to more easily modify the now-Retryable outbound to test @@ -1723,7 +1729,7 @@ fn invalid_async_receive_with_retry( &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[2].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], true); // Trigger a retry and make sure it fails after calling the closure that induces recipient @@ -1735,11 +1741,12 @@ fn invalid_async_receive_with_retry( let mut ev = 
remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); assert!(matches!( ev, MessageSendEvent::UpdateHTLCs { ref updates, .. } if updates.update_add_htlcs.len() == 1)); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .without_claimable_event() - .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], true); @@ -1749,9 +1756,10 @@ fn invalid_async_receive_with_retry( let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; - let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, keysend_preimage)); @@ -1858,6 +1866,13 @@ fn expired_static_invoice_payment_path() { blinded_path .advance_path_by_one(&nodes[1].keys_manager, &nodes[1].node, &secp_ctx) .unwrap(); + + for _ in 0..DEFAULT_PAYMENT_DUMMY_HOPS { + blinded_path + .advance_path_by_one(&nodes[2].keys_manager, &nodes[2].node, &secp_ctx) + .unwrap(); + } + match blinded_path.decrypt_intro_payload(&nodes[2].keys_manager).unwrap().0 { 
BlindedPaymentTlvs::Receive(tlvs) => tlvs.payment_constraints.max_cltv_expiry, _ => panic!(), @@ -1915,12 +1930,13 @@ fn expired_static_invoice_payment_path() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .without_claimable_event() - .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], false); nodes[2].logger.assert_log_contains( @@ -2360,10 +2376,11 @@ fn refresh_static_invoices_for_used_offers() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&server.node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[server, recipient]]; - let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let res = claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage)); @@ -2694,10 +2711,11 @@ fn invoice_server_is_not_channel_peer() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&forwarding_node.node.get_our_node_id(), &mut events); let payment_hash = extract_payment_hash(&ev); - 
check_added_monitors!(sender, 1); + check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[forwarding_node, recipient]]; - let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let res = claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage)); @@ -2933,10 +2951,11 @@ fn async_payment_e2e() { let mut events = sender_lsp.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&invoice_server.node.get_our_node_id(), &mut events); - check_added_monitors!(sender_lsp, 1); + check_added_monitors(&sender_lsp, 1); let path: &[&Node] = &[invoice_server, recipient]; - let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let route: &[&[&Node]] = &[&[sender_lsp, invoice_server, recipient]]; @@ -3038,11 +3057,10 @@ fn intercepted_hold_htlc() { let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); let (sender_cfg, mut recipient_cfg) = (often_offline_node_cfg(), often_offline_node_cfg()); - recipient_cfg.manually_accept_inbound_channels = true; recipient_cfg.channel_handshake_limits.force_announced_channel_preference = false; let mut lsp_cfg = test_default_channel_config(); - lsp_cfg.accept_intercept_htlcs = true; + lsp_cfg.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; lsp_cfg.accept_forwards_to_priv_channels = true; lsp_cfg.enable_htlc_hold = true; @@ -3170,10 +3188,11 @@ fn 
intercepted_hold_htlc() { let mut events = lsp.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); - check_added_monitors!(lsp, 1); + check_added_monitors(&lsp, 1); let path: &[&Node] = &[recipient]; - let args = PassAlongPathArgs::new(lsp, path, amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(lsp, path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let route: &[&[&Node]] = &[&[lsp, recipient]]; @@ -3271,20 +3290,22 @@ fn async_payment_mpp() { let expected_path: &[&Node] = &[recipient]; lsp_a.node.process_pending_htlc_forwards(); - check_added_monitors!(lsp_a, 1); + check_added_monitors(&lsp_a, 1); let mut events = lsp_a.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); let args = PassAlongPathArgs::new(lsp_a, expected_path, amt_msat, payment_hash, ev) - .without_claimable_event(); + .without_claimable_event() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); lsp_b.node.process_pending_htlc_forwards(); - check_added_monitors!(lsp_b, 1); + check_added_monitors(&lsp_b, 1); let mut events = lsp_b.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); - let args = PassAlongPathArgs::new(lsp_b, expected_path, amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(lsp_b, expected_path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = match claimable_ev { @@ -3417,10 +3438,11 @@ fn release_htlc_races_htlc_onion_decode() { let mut events = 
sender_lsp.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&invoice_server.node.get_our_node_id(), &mut events); - check_added_monitors!(sender_lsp, 1); + check_added_monitors(&sender_lsp, 1); let path: &[&Node] = &[invoice_server, recipient]; - let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let route: &[&[&Node]] = &[&[sender_lsp, invoice_server, recipient]]; diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 0c7a467fde7..b81279c10ac 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -10,9 +10,13 @@ //! Tests for asynchronous signing. These tests verify that the channel state machine behaves //! properly with a signer implementation that asynchronously derives signatures. 
+use crate::events::bump_transaction::sync::WalletSourceSync; +use crate::ln::funding::SpliceContribution; +use crate::ln::splicing_tests::negotiate_splice_tx; use crate::prelude::*; use crate::util::ser::Writeable; use bitcoin::secp256k1::Secp256k1; +use bitcoin::{Amount, TxOut}; use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS; use crate::chain::ChannelMonitorUpdateStatus; @@ -20,8 +24,9 @@ use crate::events::{ClosureReason, Event}; use crate::ln::chan_utils::ClosingTransaction; use crate::ln::channel::DISCONNECT_PEER_AWAITING_RESPONSE_TICKS; use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState}; -use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder}; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent}; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::{functional_test_utils::*, msgs}; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::SignerProvider; @@ -36,12 +41,9 @@ fn test_open_channel() { fn do_test_open_channel(zero_conf: bool) { // Simulate acquiring the commitment point for `open_channel` and `accept_channel` asynchronously. - let mut manually_accept_config = test_default_channel_config(); - manually_accept_config.manually_accept_inbound_channels = zero_conf; - let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); @@ -68,9 +70,9 @@ fn do_test_open_channel(zero_conf: bool) { // Handle an inbound channel simulating an async signer. 
nodes[1].disable_next_channel_signer_op(SignerOp::GetPerCommitmentPoint); - nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); if zero_conf { + nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1, "Expected one event, got {}", events.len()); match &events[0] { @@ -88,8 +90,7 @@ fn do_test_open_channel(zero_conf: bool) { ev => panic!("Expected OpenChannelRequest, not {:?}", ev), } } else { - let msgs = nodes[1].node.get_and_clear_pending_msg_events(); - assert!(msgs.is_empty(), "Expected no message events; got {:?}", msgs); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan_msg); } let channel_id_1 = { @@ -130,7 +131,7 @@ fn do_test_funding_created(signer_ops: Vec) { // nodes[0] --- open_channel --> nodes[1] let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan_msg); // nodes[0] <-- accept_channel --- nodes[1] nodes[0].node.handle_accept_channel( @@ -207,7 +208,7 @@ fn do_test_funding_signed(signer_ops: Vec) { // nodes[0] --- open_channel --> nodes[1] let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan_msg); // nodes[0] <-- accept_channel --- nodes[1] nodes[0].node.handle_accept_channel( @@ -301,7 +302,7 @@ fn do_test_async_commitment_signature_for_commitment_signed_revoke_and_ack( src.node .send_payment_with_route(route, our_payment_hash, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(src, 1); + check_added_monitors(&src, 1); // Pass the payment along the route. 
let payment_event = { @@ -363,12 +364,9 @@ fn test_funding_signed_0conf() { fn do_test_funding_signed_0conf(signer_ops: Vec) { // Simulate acquiring the signature for `funding_signed` asynchronously for a zero-conf channel. - let mut manually_accept_config = test_default_channel_config(); - manually_accept_config.manually_accept_inbound_channels = true; - let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); @@ -528,7 +526,7 @@ fn do_test_async_raa_peer_disconnect( src.node .send_payment_with_route(route, our_payment_hash, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(src, 1); + check_added_monitors(&src, 1); // Pass the payment along the route. let payment_event = { @@ -593,7 +591,7 @@ fn do_test_async_raa_peer_disconnect( (latest_update, _) = channel_map.get(&chan_id).unwrap().clone(); } dst.chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); - check_added_monitors!(dst, 0); + check_added_monitors(&dst, 0); } // Expect the RAA @@ -677,7 +675,7 @@ fn do_test_async_commitment_signature_peer_disconnect( src.node .send_payment_with_route(route, our_payment_hash, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(src, 1); + check_added_monitors(&src, 1); // Pass the payment along the route. 
let payment_event = { @@ -743,7 +741,7 @@ fn do_test_async_commitment_signature_peer_disconnect( (latest_update, _) = channel_map.get(&chan_id).unwrap().clone(); } dst.chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); - check_added_monitors!(dst, 0); + check_added_monitors(&dst, 0); } // Expect the RAA @@ -813,14 +811,14 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { .node .send_payment_with_route(route, payment_hash_2, recipient_fields, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); get_htlc_update_msgs(&nodes[0], &node_b_id); // Send back update_fulfill_htlc + commitment_signed for the first payment. nodes[1].node.claim_funds(payment_preimage_1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Handle the update_fulfill_htlc, but fail to persist the monitor update when handling the // commitment_signed. 
@@ -844,7 +842,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); } // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -893,7 +891,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { (latest_update, _) = channel_map.get(&chan_id).unwrap().clone(); } nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); } // Make sure that on signer_unblocked we have the same behavior (even though RAA is ready, @@ -946,18 +944,18 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { nodes[1].node.handle_revoke_and_ack(node_a_id, as_resp.1.as_ref().unwrap()); let (bs_revoke_and_ack, bs_second_commitment_signed) = get_revoke_commit_msgs(&nodes[1], &node_a_id); - check_added_monitors!(nodes[1], 2); + check_added_monitors(&nodes[1], 2); // The rest of this is boilerplate for resolving the previous state. 
nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); let as_commitment_signed = get_htlc_update_msgs(&nodes[0], &node_b_id); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment_signed); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1] .node @@ -965,15 +963,15 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_and_process_pending_htlcs(&nodes[1], false); @@ -998,7 +996,6 @@ fn do_test_async_holder_signatures(keyed_anchors: bool, p2a_anchor: bool, remote let mut config = test_default_channel_config(); config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); @@ -1549,3 +1546,105 @@ fn test_async_force_close_on_invalid_secret_for_stale_state() { 
check_closed_broadcast(&nodes[1], 1, true); check_closed_event(&nodes[1], 1, closure_reason, &[node_id_0], 100_000); } + +#[test] +fn test_async_splice_initial_commit_sig() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; + send_payment(&nodes[0], &[&nodes[1]], 1_000); + + let (initiator, acceptor) = (&nodes[0], &nodes[1]); + let initiator_node_id = initiator.node.get_our_node_id(); + let acceptor_node_id = acceptor.node.get_our_node_id(); + + initiator.disable_channel_signer_op( + &acceptor_node_id, + &channel_id, + SignerOp::SignCounterpartyCommitment, + ); + acceptor.disable_channel_signer_op( + &initiator_node_id, + &channel_id, + SignerOp::SignCounterpartyCommitment, + ); + + // Negotiate a splice up until the signature exchange. + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); + negotiate_splice_tx(initiator, acceptor, channel_id, contribution); + + assert!(initiator.node.get_and_clear_pending_msg_events().is_empty()); + assert!(acceptor.node.get_and_clear_pending_msg_events().is_empty()); + + // Have the initiator sign the funding transaction. We won't see their initial commitment signed + // go out until their signer returns. + let event = get_event!(initiator, Event::FundingTransactionReadyForSigning); + if let Event::FundingTransactionReadyForSigning { unsigned_transaction, .. 
} = event { + let partially_signed_tx = initiator.wallet_source.sign_tx(unsigned_transaction).unwrap(); + initiator + .node + .funding_transaction_signed(&channel_id, &acceptor_node_id, partially_signed_tx) + .unwrap(); + } + + assert!(initiator.node.get_and_clear_pending_msg_events().is_empty()); + assert!(acceptor.node.get_and_clear_pending_msg_events().is_empty()); + + initiator.enable_channel_signer_op( + &acceptor_node_id, + &channel_id, + SignerOp::SignCounterpartyCommitment, + ); + initiator.node.signer_unblocked(None); + + // Have the acceptor process the message. They should be able to send their `tx_signatures` as + // they go first, but it is held back as their initial `commitment_signed` is not ready yet. + let initiator_commit_sig = get_htlc_update_msgs(initiator, &acceptor_node_id); + acceptor + .node + .handle_commitment_signed(initiator_node_id, &initiator_commit_sig.commitment_signed[0]); + check_added_monitors(acceptor, 1); + assert!(acceptor.node.get_and_clear_pending_msg_events().is_empty()); + + // Reestablish the channel to make sure the acceptor doesn't attempt to retransmit any messages + // that are not ready yet. + initiator.node.peer_disconnected(acceptor_node_id); + acceptor.node.peer_disconnected(initiator_node_id); + reconnect_nodes(ReconnectArgs::new(initiator, acceptor)); + + // Re-enable the acceptor's signer. We should see both their initial `commitment_signed` and + // `tx_signatures` go out. + acceptor.enable_channel_signer_op( + &initiator_node_id, + &channel_id, + SignerOp::SignCounterpartyCommitment, + ); + acceptor.node.signer_unblocked(None); + + let msg_events = acceptor.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 2, "{msg_events:?}"); + if let MessageSendEvent::UpdateHTLCs { updates, .. 
} = &msg_events[0] { + initiator.node.handle_commitment_signed(acceptor_node_id, &updates.commitment_signed[0]); + check_added_monitors(initiator, 1); + } else { + panic!("Unexpected event"); + } + if let MessageSendEvent::SendTxSignatures { msg, .. } = &msg_events[1] { + initiator.node.handle_tx_signatures(acceptor_node_id, &msg); + } else { + panic!("Unexpected event"); + } + + let tx_signatures = + get_event_msg!(initiator, MessageSendEvent::SendTxSignatures, acceptor_node_id); + acceptor.node.handle_tx_signatures(initiator_node_id, &tx_signatures); + + let _ = get_event!(initiator, Event::SplicePending); + let _ = get_event!(acceptor, Event::SplicePending); +} diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index a902cfebd12..d78b9dfa4f2 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -8,13 +8,13 @@ // licenses. use crate::blinded_path::payment::{ - BlindedPaymentPath, Bolt12RefundContext, ForwardTlvs, PaymentConstraints, PaymentContext, - PaymentForwardNode, PaymentRelay, ReceiveTlvs, PAYMENT_PADDING_ROUND_OFF, + BlindedPaymentPath, Bolt12RefundContext, DummyTlvs, ForwardTlvs, PaymentConstraints, + PaymentContext, PaymentForwardNode, PaymentRelay, ReceiveTlvs, PAYMENT_PADDING_ROUND_OFF, }; use crate::blinded_path::utils::is_padded; use crate::blinded_path::{self, BlindedHop}; use crate::events::{Event, HTLCHandlingFailureType, PaymentFailureReason}; -use crate::ln::channelmanager::{self, HTLCFailureMsg, PaymentId, RecipientOnionFields}; +use crate::ln::channelmanager::{self, HTLCFailureMsg, PaymentId}; use crate::ln::functional_test_utils::*; use crate::ln::inbound_payment::ExpandedKey; use crate::ln::msgs::{ @@ -22,7 +22,9 @@ use crate::ln::msgs::{ }; use crate::ln::onion_payment; use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; -use crate::ln::outbound_payment::{Retry, IDEMPOTENCY_TIMEOUT_TICKS}; +use crate::ln::outbound_payment::{ + 
RecipientCustomTlvs, RecipientOnionFields, Retry, IDEMPOTENCY_TIMEOUT_TICKS, +}; use crate::ln::types::ChannelId; use crate::offers::invoice::UnsignedBolt12Invoice; use crate::prelude::*; @@ -32,7 +34,7 @@ use crate::routing::router::{ use crate::sign::{NodeSigner, PeerStorageKey, ReceiveAuthKey, Recipient}; use crate::types::features::{BlindedHopFeatures, ChannelFeatures, NodeFeatures}; use crate::types::payment::{PaymentHash, PaymentSecret}; -use crate::util::config::UserConfig; +use crate::util::config::{HTLCInterceptionFlags, UserConfig}; use crate::util::ser::{WithoutLength, Writeable}; use crate::util::test_utils::{self, bytes_from_hex, pubkey_from_hex, secret_from_hex}; use bitcoin::hex::DisplayHex; @@ -196,6 +198,72 @@ fn do_one_hop_blinded_path(success: bool) { } } +#[test] +fn one_hop_blinded_path_with_dummy_hops() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let chan_upd = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0).0.contents; + + let amt_msat = 5000; + let (payment_preimage, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[1], Some(amt_msat), None); + let payee_tlvs = ReceiveTlvs { + payment_secret, + payment_constraints: PaymentConstraints { + max_cltv_expiry: u32::max_value(), + htlc_minimum_msat: chan_upd.htlc_minimum_msat, + }, + payment_context: PaymentContext::Bolt12Refund(Bolt12RefundContext {}), + }; + let receive_auth_key = chanmon_cfgs[1].keys_manager.get_receive_auth_key(); + let dummy_tlvs = [DummyTlvs::default(); 2]; + + let mut secp_ctx = Secp256k1::new(); + let blinded_path = BlindedPaymentPath::new_with_dummy_hops( + &[], + nodes[1].node.get_our_node_id(), + &dummy_tlvs, + receive_auth_key, + payee_tlvs, + u64::MAX, + TEST_FINAL_CLTV as u16, + &chanmon_cfgs[1].keys_manager, + &secp_ctx, + ) + 
.unwrap(); + + let route_params = RouteParameters::from_payment_params_and_value( + PaymentParameters::blinded(vec![blinded_path]), + amt_msat, + ); + nodes[0] + .node + .send_payment( + payment_hash, + RecipientOnionFields::spontaneous_empty(), + PaymentId(payment_hash.0), + route_params, + Retry::Attempts(0), + ) + .unwrap(); + check_added_monitors(&nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + + let path = &[&nodes[1]]; + let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&dummy_tlvs) + .with_payment_secret(payment_secret); + + do_pass_along_path(args); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); +} + #[test] #[rustfmt::skip] fn mpp_to_one_hop_blinded_path() { @@ -438,11 +506,11 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { } nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if intro_fails { let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -476,7 +544,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { cause_error!(2, 3, update_add); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); expect_and_process_pending_htlcs(&nodes[2], false); @@ -488,7 +556,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: 
bool) { expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), core::slice::from_ref(&failed_destination) ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; @@ -535,10 +603,10 @@ fn failed_backwards_to_intro_node() { let mut payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -548,7 +616,7 @@ fn failed_backwards_to_intro_node() { // Ensure the final node fails to handle the HTLC. payment_event.msgs[0].onion_routing_packet.hop_data[0] ^= 1; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); @@ -621,7 +689,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, let mut payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); macro_rules! 
cause_error { @@ -645,7 +713,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, _ => panic!("Unexpected event {:?}", events), } check_closed_broadcast(&$curr_node, 1, true); - check_added_monitors!($curr_node, 1); + check_added_monitors(&$curr_node, 1); $curr_node.node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), @@ -657,22 +725,22 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, if intro_fails { cause_error!(nodes[0], nodes[1], nodes[2], chan_id_1_2, chan_upd_1_2.short_channel_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1]], false); return } expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_1_2 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); let mut update_add = &mut updates_1_2.update_add_htlcs[0]; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); cause_error!(nodes[1], nodes[2], nodes[3], chan_id_2_3, chan_upd_2_3.short_channel_id); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; @@ -703,7 +771,8 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + 
intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); @@ -751,7 +820,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1]], false); return } @@ -860,7 +929,7 @@ fn three_hop_blinded_path_fail() { nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2], &nodes[3]], false); } @@ -962,10 +1031,10 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { SendEvent::from_event(ev) }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut payment_event_1_2 = { let mut events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -977,7 +1046,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { match check { ReceiveCheckFail::RecipientFail => { 
nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); check_payment_claimable( @@ -989,7 +1058,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[2].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::OnionDecodeFail => { let session_priv = SecretKey::from_slice(&session_priv).unwrap(); @@ -1013,7 +1082,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { &payment_hash ).unwrap(); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); @@ -1023,7 +1092,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { let update_add = &mut payment_event_1_2.msgs[0]; update_add.amount_msat -= 1; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); @@ -1037,7 +1106,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { 
nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event_1_2.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); nodes[2].node.handle_shutdown(nodes[1].node.get_our_node_id(), &node_1_shutdown); assert!(commitment_signed_dance_through_cp_raa(&nodes[2], &nodes[1], false, false).is_none()); @@ -1048,15 +1117,15 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { ReceiveCheckFail::ProcessPendingHTLCsCheck => { assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as u32 + TEST_FINAL_CLTV); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], true); expect_htlc_failure_conditions(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::PaymentConstraints => { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[2], false); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); @@ -1152,7 +1221,7 @@ fn blinded_path_retries() { nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); - 
check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); let updates = get_htlc_update_msgs(&nodes[3], &$intro_node.node.get_our_node_id()); assert_eq!(updates.update_fail_malformed_htlcs.len(), 1); @@ -1183,7 +1252,7 @@ fn blinded_path_retries() { fail_payment_back!(nodes[1]); // Pass the retry along. - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], amt_msat, payment_hash, Some(payment_secret), msg_events.pop().unwrap(), true, None); @@ -1263,7 +1332,7 @@ fn min_htlc() { SendEvent::from_event(ev) }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[1], false); expect_htlc_handling_failed_destinations!( @@ -1364,8 +1433,7 @@ fn custom_tlvs_to_blinded_path() { ); let recipient_onion_fields = RecipientOnionFields::spontaneous_empty() - .with_custom_tlvs(vec![((1 << 16) + 1, vec![42, 42])]) - .unwrap(); + .with_custom_tlvs(RecipientCustomTlvs::new(vec![((1 << 16) + 1, vec![42, 42])]).unwrap()); nodes[0].node.send_payment(payment_hash, recipient_onion_fields.clone(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap(); check_added_monitors(&nodes[0], 1); @@ -1461,7 +1529,7 @@ fn fails_receive_tlvs_authentication() { do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, true, true); expect_and_process_pending_htlcs(&nodes[1], false); nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), 
&[HTLCHandlingFailureType::InvalidOnion]); let mut update_fail = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -1526,6 +1594,7 @@ fn update_add_msg( skimmed_fee_msat: None, blinding_point, hold_htlc: None, + accountable: None, } } @@ -2098,7 +2167,7 @@ fn test_trampoline_forward_payload_encoded_as_receive() { }; nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let replacement_onion = { // create a substitute onion where the last Trampoline hop is a forward @@ -2263,7 +2332,7 @@ fn do_test_trampoline_single_hop_receive(success: bool) { }; nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], amt_msat, payment_hash, payment_secret); if success { @@ -2586,7 +2655,7 @@ fn do_test_trampoline_relay(blinded: bool, test_case: TrampolineTestCase) { ) .unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2765,7 +2834,7 @@ fn test_trampoline_forward_rejection() { nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); diff --git a/lightning/src/ln/bolt11_payment_tests.rs b/lightning/src/ln/bolt11_payment_tests.rs index 63c5576e333..8c2ac155ce7 100644 --- a/lightning/src/ln/bolt11_payment_tests.rs +++ b/lightning/src/ln/bolt11_payment_tests.rs @@ -10,14 +10,11 @@ //! 
Tests for verifying the correct end-to-end handling of BOLT11 payments, including metadata propagation. use crate::events::Event; -use crate::ln::channelmanager::{PaymentId, Retry}; +use crate::ln::channelmanager::{OptionalBolt11PaymentParams, PaymentId}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::ChannelMessageHandler; use crate::ln::outbound_payment::Bolt11PaymentError; -use crate::routing::router::RouteParametersConfig; use crate::sign::{NodeSigner, Recipient}; -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; use lightning_invoice::{Bolt11Invoice, Currency, InvoiceBuilder}; use std::time::SystemTime; @@ -39,7 +36,7 @@ fn payment_metadata_end_to_end_for_invoice_with_amount() { let timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); let invoice = InvoiceBuilder::new(Currency::Bitcoin) .description("test".into()) - .payment_hash(Sha256::from_slice(&payment_hash.0).unwrap()) + .payment_hash(payment_hash) .payment_secret(payment_secret) .duration_since_epoch(timestamp) .min_final_cltv_expiry_delta(144) @@ -55,8 +52,7 @@ fn payment_metadata_end_to_end_for_invoice_with_amount() { &invoice, PaymentId(payment_hash.0), Some(100), - RouteParametersConfig::default(), - Retry::Attempts(0), + OptionalBolt11PaymentParams::default(), ) { Err(Bolt11PaymentError::InvalidAmount) => (), _ => panic!("Unexpected result"), @@ -68,8 +64,7 @@ fn payment_metadata_end_to_end_for_invoice_with_amount() { &invoice, PaymentId(payment_hash.0), None, - RouteParametersConfig::default(), - Retry::Attempts(0), + OptionalBolt11PaymentParams::default(), ) .unwrap(); @@ -108,7 +103,7 @@ fn payment_metadata_end_to_end_for_invoice_with_no_amount() { let timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); let invoice = InvoiceBuilder::new(Currency::Bitcoin) .description("test".into()) - .payment_hash(Sha256::from_slice(&payment_hash.0).unwrap()) + .payment_hash(payment_hash) .payment_secret(payment_secret) 
.duration_since_epoch(timestamp) .min_final_cltv_expiry_delta(144) @@ -123,8 +118,7 @@ fn payment_metadata_end_to_end_for_invoice_with_no_amount() { &invoice, PaymentId(payment_hash.0), None, - RouteParametersConfig::default(), - Retry::Attempts(0), + OptionalBolt11PaymentParams::default(), ) { Err(Bolt11PaymentError::InvalidAmount) => (), _ => panic!("Unexpected result"), @@ -136,8 +130,7 @@ fn payment_metadata_end_to_end_for_invoice_with_no_amount() { &invoice, PaymentId(payment_hash.0), Some(50_000), - RouteParametersConfig::default(), - Retry::Attempts(0), + OptionalBolt11PaymentParams::default(), ) .unwrap(); diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index 431fdd2859c..4bb8ffac9ef 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -320,12 +320,9 @@ pub(crate) fn htlc_tx_fees_sat(feerate_per_kw: u32, num_accepted_htlcs: usize, n /// Returns a fee estimate for the commitment transaction that we would ideally like to set, /// depending on channel type. -pub(super) fn selected_commitment_sat_per_1000_weight( +pub(super) fn selected_commitment_sat_per_1000_weight( fee_estimator: &LowerBoundedFeeEstimator, channel_type: &ChannelTypeFeatures, -) -> u32 -where - F::Target: FeeEstimator, -{ +) -> u32 { if channel_type.supports_anchor_zero_fee_commitments() { 0 } else if channel_type.supports_anchors_zero_fee_htlc_tx() { @@ -1452,13 +1449,10 @@ impl BuiltCommitmentTransaction { } /// Signs the holder commitment transaction because we are about to broadcast it. 
- pub fn sign_holder_commitment( + pub fn sign_holder_commitment( &self, funding_key: &SecretKey, funding_redeemscript: &Script, channel_value_satoshis: u64, entropy_source: &ES, secp_ctx: &Secp256k1, - ) -> Signature - where - ES::Target: EntropySource, - { + ) -> Signature { let sighash = self.get_sighash_all(funding_redeemscript, channel_value_satoshis); sign_with_aux_rand(secp_ctx, &sighash, funding_key, entropy_source) } @@ -2139,10 +2133,10 @@ impl<'a> TrustedCommitmentTransaction<'a> { /// /// This function is only valid in the holder commitment context, it always uses EcdsaSighashType::All. #[rustfmt::skip] - pub fn get_htlc_sigs( + pub fn get_htlc_sigs( &self, htlc_base_key: &SecretKey, channel_parameters: &DirectedChannelTransactionParameters, entropy_source: &ES, secp_ctx: &Secp256k1, - ) -> Result, ()> where ES::Target: EntropySource { + ) -> Result, ()> { let inner = self.inner; let keys = &inner.keys; let txid = inner.built.txid; diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index e79e8becc66..b421114e911 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -19,11 +19,12 @@ use crate::chain::transaction::OutPoint; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose}; use crate::ln::channel::AnnouncementSigsState; -use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields, Retry}; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder}; use crate::ln::msgs; use crate::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, RoutingMessageHandler, }; +use crate::ln::outbound_payment::{RecipientOnionFields, Retry}; use crate::ln::types::ChannelId; use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::sign::NodeSigner; @@ -48,13 +49,6 @@ use crate::prelude::*; use 
crate::sync::{Arc, Mutex}; use bitcoin::hashes::Hash; -fn get_latest_mon_update_id<'a, 'b, 'c>( - node: &Node<'a, 'b, 'c>, channel_id: ChannelId, -) -> (u64, u64) { - let monitor_id_state = node.chain_monitor.latest_monitor_update_id.lock().unwrap(); - monitor_id_state.get(&channel_id).unwrap().clone() -} - #[test] fn test_monitor_and_persister_update_fail() { // Test that if both updating the `ChannelMonitor` and persisting the updated @@ -86,6 +80,7 @@ fn test_monitor_and_persister_update_fail() { let persister = test_utils::TestPersister::new(); let tx_broadcaster = TestBroadcaster { txn_broadcasted: Mutex::new(Vec::new()), + txn_types: Mutex::new(Vec::new()), // Because we will connect a block at height 200 below, we need the TestBroadcaster to know // that we are at height 200 so that it doesn't think we're violating the time lock // requirements of transactions broadcasted at that point. @@ -123,7 +118,7 @@ fn test_monitor_and_persister_update_fail() { // Try to update ChannelMonitor nodes[1].node.claim_funds(preimage); expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -169,7 +164,7 @@ fn test_monitor_and_persister_update_fail() { } } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_sent(&nodes[0], preimage, None, false, false); } @@ -195,7 +190,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -211,9 
+206,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); @@ -262,7 +257,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -281,7 +276,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { message: message.clone(), }; nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, message).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); // TODO: Once we hit the chain with the failure transaction we should check that we get a @@ -338,7 +333,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -347,7 
+342,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1] // but nodes[0] won't respond since it is frozen. nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -387,7 +382,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } nodes[0].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -403,9 +398,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Now fix monitor updating... chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); macro_rules! 
disconnect_reconnect_peers { () => {{ @@ -454,10 +449,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert_eq!(reestablish_2.len(), 1); nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); assert!(as_resp.0.is_none()); @@ -501,7 +496,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); as_resp.1 = Some(as_resp_raa); bs_resp.2 = None; @@ -544,7 +539,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if disconnect_count & !disconnect_flags > 2 { let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); @@ -568,7 +563,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(as_commitment_update.update_fail_htlcs.is_empty()); assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty()); assert!(as_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }; } @@ -581,7 +576,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { 
assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }; } @@ -645,7 +640,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { ); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1] .node @@ -653,15 +648,15 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); expect_and_process_pending_htlcs(&nodes[1], false); @@ -743,7 +738,7 @@ fn test_monitor_update_fail_cs() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -752,13 +747,13 @@ fn test_monitor_update_fail_cs() { 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(responses.len(), 2); @@ -766,7 +761,7 @@ fn test_monitor_update_fail_cs() { MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => { assert_eq!(*node_id, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -784,20 +779,20 @@ fn test_monitor_update_fail_cs() { .node .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); }, _ => panic!("Unexpected event"), } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + 
check_added_monitors(&nodes[0], 0); let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &final_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); @@ -851,7 +846,7 @@ fn test_monitor_update_fail_no_rebroadcast() { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -864,13 +859,13 @@ fn test_monitor_update_fail_no_rebroadcast() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); expect_and_process_pending_htlcs(&nodes[1], false); let events = nodes[1].node.get_and_clear_pending_events(); @@ -906,7 +901,7 @@ fn test_monitor_update_raa_while_paused() { let id = PaymentId(our_payment_hash_1.0); nodes[0].node.send_payment_with_route(route, our_payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event_1 = 
SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -916,13 +911,13 @@ fn test_monitor_update_raa_while_paused() { let id_2 = PaymentId(our_payment_hash_2.0); nodes[1].node.send_payment_with_route(route, our_payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event_1.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event_1.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); @@ -930,37 +925,37 @@ fn test_monitor_update_raa_while_paused() { nodes[0].node.handle_update_add_htlc(node_b_id, &send_event_2.msgs[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_event_2.commitment_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let as_update_raa = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_update_raa.0); - 
check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_cs = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_update_raa.1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_and_process_pending_htlcs(&nodes[0], false); expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000); @@ -994,7 +989,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }], ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let updates = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(updates.update_add_htlcs.is_empty()); @@ -1007,7 +1002,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let commitment = updates.commitment_signed; let bs_revoke_and_ack = commitment_signed_dance_return_raa(&nodes[1], &nodes[2], &commitment, false); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); // While the second channel is AwaitingRAA, forward a second payment to get it into the // holding cell. 
@@ -1016,7 +1011,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -1024,7 +1019,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Now fail monitor updating. @@ -1033,7 +1028,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Forward a third payment which will also be added to the holding cell, despite the channel // being paused waiting a monitor update. 
@@ -1042,18 +1037,18 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); let id_3 = PaymentId(payment_hash_3.0); nodes[0].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, true); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell // and not forwarded. expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs { @@ -1063,13 +1058,13 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let onion_4 = RecipientOnionFields::secret_only(payment_secret_4); let id_4 = PaymentId(payment_hash_4.0); nodes[2].node.send_payment_with_route(route, payment_hash_4, onion_4, id_4).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_c_id, &send_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &send_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); 
(Some(payment_preimage_4), Some(payment_hash_4)) } else { @@ -1079,14 +1074,14 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Restore monitor updating, ensuring we immediately get a fail-back update and a // update_add update. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_2.2); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_2.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); expect_and_process_pending_htlcs_and_htlc_handling_failed( &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); if test_ignore_second_cs { @@ -1138,11 +1133,11 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let as_cs; if test_ignore_second_cs { nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_event_b.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &raa.unwrap()); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); assert!(bs_cs.update_add_htlcs.is_empty()); assert!(bs_cs.update_fail_htlcs.is_empty()); @@ -1151,14 +1146,14 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(bs_cs.update_fee.is_none()); nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); as_cs = get_htlc_update_msgs(&nodes[1], &node_c_id); 
nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } else { nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_event_b.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events(); // As both messages are for nodes[1], they're in order. @@ -1167,7 +1162,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { assert_eq!(*node_id, node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), } @@ -1185,7 +1180,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[1] .node .handle_commitment_signed_batch_test(node_c_id, &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), } @@ -1200,23 +1195,23 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[2].node.handle_update_add_htlc(node_b_id, &as_cs.update_add_htlcs[0]); nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &as_cs.commitment_signed); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let bs_second_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); 
nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_second_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_second_raa); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); expect_and_process_pending_htlcs(&nodes[2], false); @@ -1238,7 +1233,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { if test_ignore_second_cs { expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); send_event = SendEvent::from_node(&nodes[1]); assert_eq!(send_event.node_id, node_a_id); @@ -1292,7 +1287,7 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.peer_disconnected(node_b_id); nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -1303,7 +1298,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -1328,7 +1323,7 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(as_channel_upd.contents.channel_flags & 2, 0); nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell - check_added_monitors!(nodes[1], 1); + 
check_added_monitors(&nodes[1], 1); nodes[1].node.peer_disconnected(node_a_id); nodes[0].node.peer_disconnected(node_b_id); @@ -1346,16 +1341,16 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(as_channel_upd.contents.channel_flags & 2, 0); nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); // The "disabled" bit should be unset as we just reconnected let bs_channel_upd = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); assert_eq!(bs_channel_upd.contents.channel_flags & 2, 0); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(updates.update_add_htlcs.is_empty()); @@ -1399,28 +1394,28 @@ fn raa_no_response_awaiting_raa_state() { let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); let id_1 = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion_1, id_1).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); 
nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from @@ -1431,17 +1426,17 @@ fn raa_no_response_awaiting_raa_state() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); // nodes[1] should be AwaitingRAA here! 
- check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); @@ -1452,39 +1447,39 @@ fn raa_no_response_awaiting_raa_state() { let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); let id_3 = PaymentId(payment_hash_3.0); nodes[0].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // Finally deliver the RAA to nodes[1] which results in a CS response to the last update nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); let bs_update = get_htlc_update_msgs(&nodes[1], &node_a_id); 
nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_update.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000); @@ -1519,7 +1514,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[1].node.peer_disconnected(node_a_id); nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let init_msg = msgs::Init { @@ -1544,7 +1539,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[1].node.handle_channel_reestablish(node_a_id, &as_reconnect); let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Send a second payment from A to B, resulting in a commitment update that gets swallowed with @@ -1554,12 +1549,12 @@ fn claim_while_disconnected_monitor_update_fail() { let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); 
nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC // until we've channel_monitor_update'd and updated for the new commitment transaction. @@ -1567,9 +1562,9 @@ fn claim_while_disconnected_monitor_update_fail() { // Now un-fail the monitor, which will result in B sending its original commitment update, // receiving the commitment update from A, and the resulting commitment dances. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let mut bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(bs_msgs.len(), 2); @@ -1583,11 +1578,11 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), } @@ -1596,7 +1591,7 @@ fn claim_while_disconnected_monitor_update_fail() { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { assert_eq!(*node_id, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => 
panic!("Unexpected event"), } @@ -1605,20 +1600,20 @@ fn claim_while_disconnected_monitor_update_fail() { let bs_commitment = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_commitment.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); @@ -1661,7 +1656,7 @@ fn monitor_failed_no_reestablish_response() { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1670,7 +1665,7 @@ fn monitor_failed_no_reestablish_response() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - 
check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1] // is still failing to update monitors. @@ -1696,19 +1691,19 @@ fn monitor_failed_no_reestablish_response() { get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); @@ -1745,7 +1740,7 @@ fn first_message_on_recv_ordering() { let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); let id_1 = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion_1, id_1).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1753,13 +1748,13 @@ fn first_message_on_recv_ordering() { 
assert_eq!(payment_event.node_id, node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); @@ -1770,7 +1765,7 @@ fn first_message_on_recv_ordering() { let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -1783,20 +1778,20 @@ fn first_message_on_recv_ordering() { // to the next message also tests resetting the delivery order. nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an // RAA/CS response, which should be generated when we call channel_monitor_update (with the // appropriate HTLC acceptance). 
nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); expect_and_process_pending_htlcs(&nodes[1], false); @@ -1804,13 +1799,13 @@ fn first_message_on_recv_ordering() { let bs_responses = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); @@ -1850,7 +1845,7 @@ fn test_monitor_update_fail_claim() { nodes[1].node.claim_funds(payment_preimage_1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Note that at this point there is a pending commitment 
transaction update for A being held by // B. Even when we go to send the payment from C through B to A, B will not update this @@ -1862,7 +1857,7 @@ fn test_monitor_update_fail_claim() { let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[2].node.send_payment_with_route(route.clone(), payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be // paused, so forward shouldn't succeed until we call channel_monitor_updated(). @@ -1881,7 +1876,7 @@ fn test_monitor_update_fail_claim() { let id_3 = PaymentId(payment_hash_3.0); let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); nodes[2].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1893,10 +1888,10 @@ fn test_monitor_update_fail_claim() { // Now restore monitor updating on the 0<->1 channel and claim the funds on B. let channel_id = chan_1.2; - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let mut bs_fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_fulfill.update_fulfill_htlcs.remove(0)); @@ -1905,7 +1900,7 @@ fn test_monitor_update_fail_claim() { // Get the payment forwards, note that they were batched into one commitment update. 
nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_forward_update = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[0]); nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[1]); @@ -1994,7 +1989,7 @@ fn test_monitor_update_on_pending_forwards() { &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }], ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let cs_fail_update = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fail_htlc(node_c_id, &cs_fail_update.update_fail_htlcs[0]); @@ -2006,7 +2001,7 @@ fn test_monitor_update_on_pending_forwards() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[2].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2019,12 +2014,12 @@ fn test_monitor_update_on_pending_forwards() { &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); @@ -2077,7 
+2072,7 @@ fn monitor_update_claim_fail_no_response() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2088,19 +2083,19 @@ fn monitor_update_claim_fail_no_response() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); @@ -2128,10 +2123,8 @@ fn do_during_funding_monitor_fail( let node_b_id = nodes[1].node.get_our_node_id(); nodes[0].node.create_channel(node_b_id, 100000, 10001, 43, None, None).unwrap(); - nodes[1].node.handle_open_channel( - node_a_id, - &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id), - ); + let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + 
handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_msg); nodes[0].node.handle_accept_channel( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), @@ -2144,7 +2137,7 @@ fn do_during_funding_monitor_fail( .node .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let funding_created_msg = @@ -2154,20 +2147,20 @@ fn do_during_funding_monitor_fail( funding_created_msg.funding_output_index, ); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id), ); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); expect_channel_pending_event(&nodes[0], &node_b_id); let events = nodes[0].node.get_and_clear_pending_events(); @@ -2220,9 +2213,9 @@ fn do_during_funding_monitor_fail( } chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = 
nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first { if !restore_b_before_lock { @@ -2326,7 +2319,7 @@ fn test_path_paused_mpp() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // Pass the first HTLC of the payment along to nodes[3]. @@ -2338,7 +2331,7 @@ fn test_path_paused_mpp() { // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], chan_2_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(chan_2_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2_id, latest_update); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -2382,7 +2375,7 @@ fn test_pending_update_fee_ack_on_reconnect() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_initial_send_msgs = get_htlc_update_msgs(&nodes[1], &node_a_id); // bs_initial_send_msgs are not delivered until they are re-generated after reconnect @@ -2391,7 +2384,7 @@ fn test_pending_update_fee_ack_on_reconnect() { *feerate_lock *= 2; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + 
check_added_monitors(&nodes[0], 1); let as_update_fee_msgs = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(as_update_fee_msgs.update_fee.is_some()); @@ -2399,7 +2392,7 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[1] .node .handle_commitment_signed_batch_test(node_a_id, &as_update_fee_msgs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // bs_first_raa is not delivered until it is re-generated after reconnect @@ -2441,33 +2434,33 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &bs_initial_send_msgs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack( node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_cs = get_htlc_update_msgs(&nodes[1], &node_a_id).commitment_signed; nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_commitment_signed_batch_test( node_a_id, &get_htlc_update_msgs(&nodes[0], &node_b_id).commitment_signed, ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack( node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); - check_added_monitors!(nodes[1], 1); + 
check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[0], false); expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000); @@ -2504,13 +2497,13 @@ fn test_fail_htlc_on_broadcast_after_claim() { assert_eq!(bs_txn.len(), 1); nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 2000); let mut cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fulfill_htlc(node_c_id, cs_updates.update_fulfill_htlcs.remove(0)); let mut bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); mine_transaction(&nodes[1], &bs_txn[0]); @@ -2518,7 +2511,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_closed_event(&nodes[1], 1, reason, &[node_c_id], 100000); check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs_and_htlc_handling_failed( &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }], @@ -2550,7 +2543,7 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { *feerate_lock += 20; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_msgs = get_htlc_update_msgs(&nodes[0], &node_b_id); assert!(update_msgs.update_fee.is_some()); if deliver_update { @@ -2602,38 +2595,38 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { nodes[1] .node .handle_commitment_signed_batch_test(node_a_id, &update_msgs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_first_raa, bs_first_cs) = 
get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_update = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_update_fee(node_a_id, as_second_update.update_fee.as_ref().unwrap()); nodes[1] .node .handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); let bs_second_cs = get_htlc_update_msgs(&nodes[1], &node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0] .node .handle_commitment_signed_batch_test(node_b_id, &bs_second_cs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } else { let commitment = &update_msgs.commitment_signed; do_commitment_signed_dance(&nodes[1], &nodes[0], commitment, false, false); @@ -2697,29 +2690,29 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); let id_1 = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route.clone(), 
payment_hash_1, onion_1, id_1).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send = SendEvent::from_node(&nodes[0]); assert_eq!(send.msgs.len(), 1); let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); let id_2 = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.claim_funds(payment_preimage_0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &send.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (raa, cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); if disconnect { // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just @@ -2751,7 +2744,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); @@ -2784,22 +2777,18 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { } // If we finish updating the monitor, we should free the holding cell right away (this did - // not occur prior 
to #756). + // not occur prior to #756). This should result in a new monitor update. chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (mon_id, _) = get_latest_mon_update_id(&nodes[0], chan_id); + let (mon_id, _) = nodes[0].chain_monitor.get_latest_mon_update_id(chan_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, mon_id); expect_payment_claimed!(nodes[0], payment_hash_0, 100_000); - - // New outbound messages should be generated immediately upon a call to - // get_and_clear_pending_msg_events (but not before). - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - check_added_monitors!(nodes[0], 1); assert_eq!(events.len(), 1); // Deliver the pending in-flight CS nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let commitment_msg = match events.pop().unwrap() { MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, mut updates } => { @@ -2819,13 +2808,13 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { }; nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(commitment_signed_dance_through_cp_raa(&nodes[1], &nodes[0], false, false).is_none()); let events = nodes[1].node.get_and_clear_pending_events(); @@ -2885,19 +2874,19 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let onion_2 
= RecipientOnionFields::secret_only(second_payment_secret); let id_2 = PaymentId(second_payment_hash.0); nodes[0].node.send_payment_with_route(route, second_payment_hash, onion_2, id_2).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id)); } @@ -2914,13 +2903,13 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash }], ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); get_htlc_update_msgs(&nodes[2], &node_b_id); // Note that we don't populate fulfill_msg.attribution_data here, which will lead to hold times being // unavailable. 
} else { nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 100_000); let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -2937,7 +2926,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f } nodes[1].node.handle_update_fulfill_htlc(node_c_id, fulfill_msg); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut bs_updates = None; if htlc_status != HTLCStatusAtDupClaim::HoldingCell { @@ -2976,7 +2965,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f if htlc_status == HTLCStatusAtDupClaim::HoldingCell { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa.unwrap()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); // We finally receive the second payment, but don't claim it bs_updates = Some(get_htlc_update_msgs(&nodes[1], &node_a_id)); @@ -3029,20 +3018,20 @@ fn test_temporary_error_during_shutdown() { node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_shutdown( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id), ); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); + let (latest_update, _) = nodes[0].chain_monitor.get_latest_mon_update_id(channel_id); 
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[1].node.handle_closing_signed( node_a_id, @@ -3052,7 +3041,7 @@ fn test_temporary_error_during_shutdown() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[0].node.handle_closing_signed( @@ -3097,20 +3086,20 @@ fn double_temp_error() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // `claim_funds` results in a ChannelMonitorUpdate. nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); - let (latest_update_1, _) = get_latest_mon_update_id(&nodes[1], channel_id); + check_added_monitors(&nodes[1], 1); + let (latest_update_1, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, // which had some asserts that prevented it from being called twice. 
nodes[1].node.claim_funds(payment_preimage_2); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update_2, _) = get_latest_mon_update_id(&nodes[1], channel_id); + let (latest_update_2, _) = nodes[1].chain_monitor.get_latest_mon_update_id(channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_2); // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions @@ -3160,18 +3149,18 @@ fn double_temp_error() { }; assert_eq!(node_id, node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_1); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed_b1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.process_pending_htlc_forwards(); let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs(&nodes[0], &node_b_id); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_a1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed_a1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Complete the second HTLC. 
let ((update_fulfill_2, commitment_signed_b2), raa_b2) = { @@ -3200,11 +3189,11 @@ fn double_temp_error() { ) }; nodes[0].node.handle_revoke_and_ack(node_b_id, &raa_b2); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_2); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); do_commitment_signed_dance(&nodes[0], &nodes[1], &commitment_signed_b2, false, false); expect_payment_sent!(nodes[0], payment_preimage_2); @@ -3221,7 +3210,6 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { let new_chain_monitor; let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; chan_config.channel_handshake_limits.trust_own_funding_0conf = true; let node_chanmgrs = @@ -3267,12 +3255,12 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { .node .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &node_a_id); let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events(); @@ -3282,7 +3270,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -3331,7 +3319,6 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: 
bool, lock_commitment: boo let new_chain_monitor; let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; chan_config.channel_handshake_limits.trust_own_funding_0conf = true; let node_chanmgrs = @@ -3377,13 +3364,13 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo .node .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // nodes[1] happily sends its funding_signed even though its awaiting the persistence of the // initial ChannelMonitor, but it will decline to send its channel_ready even if the funding @@ -3392,7 +3379,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_channel_pending_event(&nodes[0], &node_b_id); let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -3514,7 +3501,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode reconnect_nodes(a_b_reconnect); reconnect_nodes(ReconnectArgs::new(&nodes[2], &nodes[1])); } else if completion_mode == BlockedUpdateComplMode::Async { - let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_id_2); + let (latest_update, _) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_2); nodes[1] .chain_monitor .chain_monitor @@ -3532,8 +3519,9 @@ fn 
do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode .node .handle_commitment_signed_batch_test(node_a_id, &as_htlc_fulfill.commitment_signed); check_added_monitors(&nodes[1], 1); - let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + let (a, raa, holding_cell) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); assert!(a.is_none()); + assert!(holding_cell.is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); check_added_monitors(&nodes[1], 1); @@ -3556,12 +3544,10 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode } // The event processing should release the last RAA update. - check_added_monitors(&nodes[1], 1); - - // When we fetch the next update the message getter will generate the next update for nodes[2], - // generating a further monitor update. + // It should also generate the next update for nodes[2]. + check_added_monitors(&nodes[1], 2); let mut bs_htlc_fulfill = get_htlc_update_msgs(&nodes[1], &node_c_id); - check_added_monitors(&nodes[1], 1); + check_added_monitors(&nodes[1], 0); nodes[2] .node @@ -3694,7 +3680,7 @@ fn do_test_inverted_mon_completion_order( // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating // process. - let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab); + let (_, ab_update_id) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_ab); nodes[1] .chain_monitor .chain_monitor @@ -3727,7 +3713,7 @@ fn do_test_inverted_mon_completion_order( // ChannelMonitorUpdate hasn't yet completed. 
reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab); + let (_, ab_update_id) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_ab); nodes[1] .chain_monitor .chain_monitor @@ -3788,7 +3774,12 @@ fn do_test_durable_preimages_on_closed_channel( let chain_mon; let node_b_reload; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), None], + ); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3940,7 +3931,7 @@ fn do_test_durable_preimages_on_closed_channel( // Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending // `PaymentForwarded` event will finally be released. - let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab); + let (_, ab_update_id) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_ab); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id_ab, ab_update_id); // If the A<->B channel was closed before we reload, we'll replay the claim against it on @@ -3948,7 +3939,12 @@ fn do_test_durable_preimages_on_closed_channel( let evs = nodes[1].node.get_and_clear_pending_events(); assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 }); for ev in evs { - if let Event::PaymentForwarded { .. } = ev { + if let Event::PaymentForwarded { claim_from_onchain_tx, next_user_channel_id, .. } = ev { + if !claim_from_onchain_tx { + // If the outbound channel is still open, the `next_user_channel_id` should be available. + // This was previously broken. 
+ assert!(next_user_channel_id.is_some()) + } } else { panic!(); } @@ -3983,7 +3979,8 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { let chain_mon; let node_b_reload; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(legacy_cfg), None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -4045,14 +4042,14 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { message: msg.clone(), }; nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, msg).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]); } - let (_, bc_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_bc); + let (_, bc_update_id) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_bc); let mut events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), if close_during_reload { 2 } else { 1 }); expect_payment_forwarded( @@ -4077,7 +4074,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // Once we run event processing the monitor should free, check that it was indeed the B<->C // channel which was updated. 
check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 }); - let (_, post_ev_bc_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_bc); + let (_, post_ev_bc_update_id) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_bc); assert!(bc_update_id != post_ev_bc_update_id); // Finally, check that there's nothing left to do on B<->C reconnect and the channel operates @@ -4167,7 +4164,7 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { // ...but once we complete the A<->B channel preimage persistence, the B<->C channel // unlocks and we send both peers commitment updates. - let (ab_update_id, _) = get_latest_mon_update_id(&nodes[1], chan_id_ab); + let (ab_update_id, _) = nodes[1].chain_monitor.get_latest_mon_update_id(chan_id_ab); assert!(nodes[1] .chain_monitor .chain_monitor @@ -4471,7 +4468,9 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { // This tests that behavior. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(3, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg), None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4492,7 +4491,7 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { .node .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, message.clone()) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, a_reason, &[node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); @@ -4502,20 +4501,20 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { mine_transaction(&nodes[1], &as_commit_tx[0]); 
check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let b_reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, b_reason, &[node_a_id], 1000000); // Now that B has a pending forwarded payment across it with the inbound edge on-chain, claim // the payment on C and give B the preimage for it. nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); // At this point nodes[1] has the preimage and is waiting for the `ChannelMonitorUpdate` for @@ -4530,13 +4529,13 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { // background events (via `get_and_clear_pending_msg_events`), the final `ChannelMonitorUpdate` // will fly and we'll drop the preimage from channel B's `ChannelMonitor`. We'll also release // the `Event::PaymentForwarded`. - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(!get_monitor!(nodes[1], chan_b.2) .get_all_current_outbound_htlcs() .iter() @@ -4552,7 +4551,8 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { // This tests that behavior. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4569,7 +4569,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { .node .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, message.clone()) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, a_reason, &[node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); @@ -4579,7 +4579,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let b_reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, b_reason, &[node_a_id], 1000000); @@ -4588,7 +4588,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { // `Event::PaymentClaimed` from being generated. 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); // Once we complete the `ChannelMonitorUpdate` the `Event::PaymentClaimed` will become @@ -4720,6 +4720,9 @@ fn test_single_channel_multiple_mpp() { } have_event = true; } + if !have_event { + std::thread::yield_now(); + } } }); @@ -5127,7 +5130,7 @@ fn test_mpp_claim_to_holding_cell() { check_added_monitors(&nodes[3], 2); // Complete the B <-> D monitor update, freeing the first fulfill. - let (latest_id, _) = get_latest_mon_update_id(&nodes[3], chan_3_id); + let (latest_id, _) = nodes[3].chain_monitor.get_latest_mon_update_id(chan_3_id); nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(chan_3_id, latest_id).unwrap(); let mut b_claim = get_htlc_update_msgs(&nodes[3], &node_b_id); @@ -5138,17 +5141,16 @@ fn test_mpp_claim_to_holding_cell() { // Finally, complete the C <-> D monitor update. Previously, this unlock failed to be processed // due to the existence of the blocked RAA update above. - let (latest_id, _) = get_latest_mon_update_id(&nodes[3], chan_4_id); + let (latest_id, _) = nodes[3].chain_monitor.get_latest_mon_update_id(chan_4_id); nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(chan_4_id, latest_id).unwrap(); // Once we process monitor events (in this case by checking for the `PaymentClaimed` event, the // RAA monitor update blocked above will be released. + // At the same time, the RAA monitor update completion will allow the C <-> D channel to + // generate its fulfill update. expect_payment_claimed!(nodes[3], paymnt_hash_1, 500_000); - check_added_monitors(&nodes[3], 1); - - // After the RAA monitor update completes, the C <-> D channel will be able to generate its - // fulfill updates as well. 
+ check_added_monitors(&nodes[3], 2); let mut c_claim = get_htlc_update_msgs(&nodes[3], &node_c_id); - check_added_monitors(&nodes[3], 1); + check_added_monitors(&nodes[3], 0); // Finally, clear all the pending payments. let path = [&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]]; diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 5b4ac4c0aa5..48b52992953 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -28,7 +28,7 @@ use bitcoin::{secp256k1, sighash, FeeRate, Sequence, TxIn}; use crate::blinded_path::message::BlindedMessagePath; use crate::chain::chaininterface::{ - fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, + fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, TransactionType, }; use crate::chain::channelmonitor::{ ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, CommitmentHTLCData, @@ -50,10 +50,10 @@ use crate::ln::channel_state::{ OutboundHTLCDetails, OutboundHTLCStateDetails, }; use crate::ln::channelmanager::{ - self, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, HTLCSource, - OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, PendingHTLCStatus, - RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT, - MIN_CLTV_EXPIRY_DELTA, + self, BlindedFailure, ChannelReadyOrder, FundingConfirmedMessage, HTLCFailureMsg, + HTLCPreviousHopData, HTLCSource, OpenChannelMessage, PaymentClaimDetails, PendingHTLCInfo, + PendingHTLCStatus, RAACommitmentOrder, SentHTLCId, BREAKDOWN_TIMEOUT, + MAX_LOCAL_BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::interactivetxs::{ @@ -85,6 +85,7 @@ use crate::util::errors::APIError; use crate::util::logger::{Logger, Record, WithContext}; use crate::util::scid_utils::{block_from_scid, scid_from_parts}; use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, Writeable, Writer}; +use 
crate::{impl_readable_for_vec, impl_writeable_for_vec}; use alloc::collections::{btree_map, BTreeMap}; @@ -93,7 +94,6 @@ use crate::prelude::*; use crate::sign::type_resolver::ChannelSignerType; #[cfg(any(test, fuzzing, debug_assertions))] use crate::sync::Mutex; -use core::ops::Deref; use core::time::Duration; use core::{cmp, fmt, mem}; @@ -211,7 +211,14 @@ enum InboundHTLCState { /// channel (before it can then get forwarded and/or removed). /// Implies AwaitingRemoteRevoke. AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution), - Committed, + /// An HTLC irrevocably committed in the latest commitment transaction, ready to be forwarded or + /// removed. + Committed { + /// Used to rebuild `ChannelManager` HTLC state on restart. Previously the manager would track + /// and persist all HTLC forwards and receives itself, but newer LDK versions avoid relying on + /// its persistence and instead reconstruct state based on `Channel` and `ChannelMonitor` data. + update_add_htlc: InboundUpdateAdd, + }, /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we /// created it we would have put it in the holding cell instead). When they next revoke_and_ack /// we'll drop it. @@ -235,7 +242,7 @@ impl From<&InboundHTLCState> for Option { InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => { Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd) }, - InboundHTLCState::Committed => Some(InboundHTLCStateDetails::Committed), + InboundHTLCState::Committed { .. 
} => Some(InboundHTLCStateDetails::Committed), InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) => { Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail) }, @@ -256,7 +263,7 @@ impl fmt::Display for InboundHTLCState { InboundHTLCState::RemoteAnnounced(_) => write!(f, "RemoteAnnounced"), InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => write!(f, "AwaitingRemoteRevokeToAnnounce"), InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => write!(f, "AwaitingAnnouncedRemoteRevoke"), - InboundHTLCState::Committed => write!(f, "Committed"), + InboundHTLCState::Committed { .. } => write!(f, "Committed"), InboundHTLCState::LocalRemoved(_) => write!(f, "LocalRemoved"), } } @@ -268,7 +275,7 @@ impl InboundHTLCState { InboundHTLCState::RemoteAnnounced(_) => !generated_by_local, InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => !generated_by_local, InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => true, - InboundHTLCState::Committed => true, + InboundHTLCState::Committed { .. } => true, InboundHTLCState::LocalRemoved(_) => !generated_by_local, } } @@ -296,11 +303,81 @@ impl InboundHTLCState { }, InboundHTLCResolution::Resolved { .. } => false, }, - InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => false, + InboundHTLCState::Committed { .. } | InboundHTLCState::LocalRemoved(_) => false, } } } +/// Information about the outbound hop for a forwarded HTLC. Useful for generating an accurate +/// [`Event::PaymentForwarded`] if we need to claim this HTLC post-restart. +/// +/// [`Event::PaymentForwarded`]: crate::events::Event::PaymentForwarded +#[derive(Debug, Copy, Clone)] +pub(super) struct OutboundHop { + /// The amount forwarded outbound. + pub(super) amt_msat: u64, + /// The outbound channel this HTLC was forwarded over. + pub(super) channel_id: ChannelId, + /// The next-hop recipient of this HTLC. + pub(super) node_id: PublicKey, + /// The outbound channel's funding outpoint. 
+ pub(super) funding_txo: OutPoint, + /// The outbound channel's user channel ID. + pub(super) user_channel_id: u128, +} + +impl_writeable_tlv_based!(OutboundHop, { + (0, amt_msat, required), + (2, channel_id, required), + (4, node_id, required), + (6, funding_txo, required), + (8, user_channel_id, required), +}); + +/// A field of `InboundHTLCState::Committed` containing the HTLC's `update_add_htlc` message. If +/// the HTLC is a forward and gets irrevocably committed to the outbound edge, we convert to +/// `InboundUpdateAdd::Forwarded`, thus pruning the onion and not persisting it on every +/// `ChannelManager` persist. +/// +/// Useful for reconstructing the pending HTLC set on startup. +#[derive(Debug, Clone)] +enum InboundUpdateAdd { + /// The inbound committed HTLC's update_add_htlc message. + WithOnion { update_add_htlc: msgs::UpdateAddHTLC }, + /// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing + /// its onion to be pruned and no longer persisted. + /// + /// Contains data that is useful if we need to fail or claim this HTLC backwards after a restart + /// and it's missing in the outbound edge. + Forwarded { + incoming_packet_shared_secret: [u8; 32], + phantom_shared_secret: Option<[u8; 32]>, + trampoline_shared_secret: Option<[u8; 32]>, + blinded_failure: Option, + outbound_hop: OutboundHop, + }, + /// This HTLC was received pre-LDK 0.3, before we started persisting the onion for inbound + /// committed HTLCs. 
+ Legacy, +} + +impl_writeable_tlv_based_enum_upgradable!(InboundUpdateAdd, + (0, WithOnion) => { + (0, update_add_htlc, required), + }, + (2, Legacy) => {}, + (4, Forwarded) => { + (0, incoming_packet_shared_secret, required), + (2, outbound_hop, required), + (4, phantom_shared_secret, option), + (6, trampoline_shared_secret, option), + (8, blinded_failure, option), + }, +); + +impl_writeable_for_vec!(&InboundUpdateAdd); +impl_readable_for_vec!(InboundUpdateAdd); + #[cfg_attr(test, derive(Debug))] struct InboundHTLCOutput { htlc_id: u64, @@ -444,6 +521,7 @@ struct OutboundHTLCOutput { skimmed_fee_msat: Option, send_timestamp: Option, hold_htlc: Option<()>, + accountable: bool, } /// See AwaitingRemoteRevoke ChannelState for more info @@ -462,6 +540,7 @@ enum HTLCUpdateAwaitingACK { skimmed_fee_msat: Option, blinding_point: Option, hold_htlc: Option<()>, + accountable: bool, }, ClaimHTLC { payment_preimage: PaymentPreimage, @@ -976,20 +1055,14 @@ impl ChannelError { } } -pub(super) struct WithChannelContext<'a, L: Deref> -where - L::Target: Logger, -{ +pub(super) struct WithChannelContext<'a, L: Logger> { pub logger: &'a L, pub peer_id: Option, pub channel_id: Option, pub payment_hash: Option, } -impl<'a, L: Deref> Logger for WithChannelContext<'a, L> -where - L::Target: Logger, -{ +impl<'a, L: Logger> Logger for WithChannelContext<'a, L> { fn log(&self, mut record: Record) { record.peer_id = self.peer_id; record.channel_id = self.channel_id; @@ -998,16 +1071,10 @@ where } } -impl<'a, 'b, L: Deref> WithChannelContext<'a, L> -where - L::Target: Logger, -{ - pub(super) fn from( +impl<'a, 'b, L: Logger> WithChannelContext<'a, L> { + pub(super) fn from( logger: &'a L, context: &'b ChannelContext, payment_hash: Option, - ) -> Self - where - S::Target: SignerProvider, - { + ) -> Self { WithChannelContext { logger, peer_id: Some(context.counterparty_node_id), @@ -1141,17 +1208,24 @@ pub enum UpdateFulfillCommitFetch { /// The return value of `monitor_updating_restored` 
pub(super) struct MonitorRestoreUpdates { pub raa: Option, + /// A `CommitmentUpdate` to be sent to our channel peer. pub commitment_update: Option, pub commitment_order: RAACommitmentOrder, pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>, pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, pub finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, + /// Inbound update_adds that are now irrevocably committed to this channel and are ready for the + /// onion to be processed in order to forward or receive the HTLC. pub pending_update_adds: Vec, pub funding_broadcastable: Option, pub channel_ready: Option, pub channel_ready_order: ChannelReadyOrder, pub announcement_sigs: Option, pub tx_signatures: Option, + /// The sources of outbound HTLCs that were forwarded and irrevocably committed on this channel + /// (the outbound edge), along with their outbound amounts. Useful to store in the inbound HTLC + /// to ensure it gets resolved. + pub committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, } /// The return value of `signer_maybe_unblocked` @@ -1162,6 +1236,8 @@ pub(super) struct SignerResumeUpdates { pub accept_channel: Option, pub funding_created: Option, pub funding_signed: Option, + pub funding_commit_sig: Option, + pub tx_signatures: Option, pub channel_ready: Option, pub order: RAACommitmentOrder, pub closing_signed: Option, @@ -1242,9 +1318,7 @@ struct HolderCommitmentPoint { impl HolderCommitmentPoint { #[rustfmt::skip] - pub fn new(signer: &ChannelSignerType, secp_ctx: &Secp256k1) -> Option - where SP::Target: SignerProvider - { + pub fn new(signer: &ChannelSignerType, secp_ctx: &Secp256k1) -> Option { Some(HolderCommitmentPoint { next_transaction_number: INITIAL_COMMITMENT_NUMBER, previous_revoked_point: None, @@ -1285,12 +1359,9 @@ impl HolderCommitmentPoint { /// If we are pending advancing the next commitment point, this method tries asking the signer /// again. 
- pub fn try_resolve_pending( + pub fn try_resolve_pending( &mut self, signer: &ChannelSignerType, secp_ctx: &Secp256k1, logger: &L, - ) where - SP::Target: SignerProvider, - L::Target: Logger, - { + ) { if !self.can_advance() { let pending_next_point = signer .as_ref() @@ -1322,13 +1393,9 @@ impl HolderCommitmentPoint { /// /// If our signer is ready to provide the next commitment point, the next call to `advance` will /// succeed. - pub fn advance( + pub fn advance( &mut self, signer: &ChannelSignerType, secp_ctx: &Secp256k1, logger: &L, - ) -> Result<(), ()> - where - SP::Target: SignerProvider, - L::Target: Logger, - { + ) -> Result<(), ()> { if let Some(next_point) = self.pending_next_point { *self = Self { next_transaction_number: self.next_transaction_number - 1, @@ -1444,19 +1511,13 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, { /// A payment channel with a counterparty throughout its life-cycle, encapsulating negotiation and /// funding phases. -pub(super) struct Channel -where - SP::Target: SignerProvider, -{ +pub(super) struct Channel { phase: ChannelPhase, } /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of /// its variants containing an appropriate channel struct. 
-enum ChannelPhase -where - SP::Target: SignerProvider, -{ +enum ChannelPhase { Undefined, UnfundedOutboundV1(OutboundV1Channel), UnfundedInboundV1(InboundV1Channel), @@ -1464,10 +1525,9 @@ where Funded(FundedChannel), } -impl Channel +impl Channel where - SP::Target: SignerProvider, - ::EcdsaSigner: ChannelSigner, + SP::EcdsaSigner: ChannelSigner, { pub fn context(&self) -> &ChannelContext { match &self.phase { @@ -1610,9 +1670,9 @@ where } #[rustfmt::skip] - pub fn signer_maybe_unblocked( + pub fn signer_maybe_unblocked( &mut self, chain_hash: ChainHash, logger: &L, path_for_release_htlc: CBP - ) -> Result, ChannelError> where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { + ) -> Result, ChannelError> where CBP: Fn(u64) -> BlindedMessagePath { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(chan) => chan.signer_maybe_unblocked(logger, path_for_release_htlc).map(|r| Some(r)), @@ -1625,6 +1685,8 @@ where accept_channel: None, funding_created, funding_signed: None, + funding_commit_sig: None, + tx_signatures: None, channel_ready: None, order: chan.context.resend_order.clone(), closing_signed: None, @@ -1641,6 +1703,8 @@ where accept_channel, funding_created: None, funding_signed: None, + funding_commit_sig: None, + tx_signatures: None, channel_ready: None, order: chan.context.resend_order.clone(), closing_signed: None, @@ -1655,10 +1719,7 @@ where /// Should be called when the peer is disconnected. Returns true if the channel can be resumed /// when the peer reconnects (via [`Self::peer_connected_get_handshake`]). If not, the channel /// must be immediately closed. 
- pub fn peer_disconnected_is_resumable(&mut self, logger: &L) -> DisconnectResult - where - L::Target: Logger, - { + pub fn peer_disconnected_is_resumable(&mut self, logger: &L) -> DisconnectResult { let is_resumable = match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(chan) => { @@ -1712,9 +1773,9 @@ where /// Should be called when the peer re-connects, returning an initial message which we should /// send our peer to begin the channel reconnection process. #[rustfmt::skip] - pub fn peer_connected_get_handshake( + pub fn peer_connected_get_handshake( &mut self, chain_hash: ChainHash, logger: &L, - ) -> ReconnectionMsg where L::Target: Logger { + ) -> ReconnectionMsg { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(chan) => @@ -1748,14 +1809,10 @@ where } #[rustfmt::skip] - pub fn maybe_handle_error_without_close( + pub fn maybe_handle_error_without_close( &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, user_config: &UserConfig, their_features: &InitFeatures, - ) -> Result, ()> - where - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result, ()> { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(_) => Ok(None), @@ -1788,12 +1845,9 @@ where } } - fn fail_interactive_tx_negotiation( + fn fail_interactive_tx_negotiation( &mut self, reason: AbortReason, logger: &L, - ) -> (ChannelError, Option) - where - L::Target: Logger, - { + ) -> (ChannelError, Option) { let logger = WithChannelContext::from(logger, &self.context(), None); log_info!(logger, "Failed interactive transaction negotiation: {reason}"); @@ -1817,12 +1871,9 @@ where (ChannelError::Abort(reason), splice_funding_failed) } - pub fn tx_add_input( + pub fn tx_add_input( &mut self, msg: &msgs::TxAddInput, logger: &L, - ) -> Result)> - where - L::Target: Logger, - { + ) -> Result)> { match self.interactive_tx_constructor_mut() { 
Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_add_input(msg) @@ -1836,12 +1887,9 @@ where } } - pub fn tx_add_output( + pub fn tx_add_output( &mut self, msg: &msgs::TxAddOutput, logger: &L, - ) -> Result)> - where - L::Target: Logger, - { + ) -> Result)> { match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_add_output(msg) @@ -1855,12 +1903,9 @@ where } } - pub fn tx_remove_input( + pub fn tx_remove_input( &mut self, msg: &msgs::TxRemoveInput, logger: &L, - ) -> Result)> - where - L::Target: Logger, - { + ) -> Result)> { match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_remove_input(msg) @@ -1874,12 +1919,9 @@ where } } - pub fn tx_remove_output( + pub fn tx_remove_output( &mut self, msg: &msgs::TxRemoveOutput, logger: &L, - ) -> Result)> - where - L::Target: Logger, - { + ) -> Result)> { match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_remove_output(msg) @@ -1893,27 +1935,16 @@ where } } - pub fn tx_complete( - &mut self, msg: &msgs::TxComplete, logger: &L, - ) -> Result< - (Option, Option), - (ChannelError, Option), - > - where - L::Target: Logger, - { + pub fn tx_complete( + &mut self, msg: &msgs::TxComplete, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + ) -> Result)> { let tx_complete_action = match self.interactive_tx_constructor_mut() { Some(interactive_tx_constructor) => interactive_tx_constructor .handle_tx_complete(msg) .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger))?, None => { - return Err(( - ChannelError::WarnAndDisconnect( - "Received unexpected interactive transaction negotiation message" - .to_owned(), - ), - None, - )) + let err = "Received unexpected interactive transaction negotiation message"; + return Err((ChannelError::WarnAndDisconnect(err.to_owned()), None)); }, }; @@ 
-1930,21 +1961,50 @@ where let funding_outpoint = if let Some(funding_outpoint) = negotiation_complete { funding_outpoint } else { - return Ok((interactive_tx_msg_send, None)); + return Ok(TxCompleteResult { + interactive_tx_msg_send, + event_unsigned_tx: None, + funding_tx_signed: None, + }); }; - let commitment_signed = self - .funding_tx_constructed(funding_outpoint, logger) + self.funding_tx_constructed(funding_outpoint) .map_err(|abort_reason| self.fail_interactive_tx_negotiation(abort_reason, logger))?; - Ok((interactive_tx_msg_send, Some(commitment_signed))) + + let signing_session = self + .context() + .interactive_tx_signing_session + .as_ref() + .expect("The signing session must have been initialized in funding_tx_constructed"); + let has_local_contribution = signing_session.has_local_contribution(); + + let event_unsigned_tx = + has_local_contribution.then(|| signing_session.unsigned_tx().tx().clone()); + + let funding_tx_signed = if !has_local_contribution { + let funding_txid = signing_session.unsigned_tx().tx().compute_txid(); + self.funding_transaction_signed(funding_txid, vec![], 0, fee_estimator, logger) + .map(Some) + .map_err(|err| { + log_error!( + logger, + "Failed signing funding transaction without local contribution: {err:?}" + ); + self.fail_interactive_tx_negotiation( + AbortReason::InternalError("Signing failed"), + logger, + ) + })? 
+ } else { + None + }; + + Ok(TxCompleteResult { interactive_tx_msg_send, event_unsigned_tx, funding_tx_signed }) } - pub fn tx_abort( + pub fn tx_abort( &mut self, msg: &msgs::TxAbort, logger: &L, - ) -> Result<(Option, Option), ChannelError> - where - L::Target: Logger, - { + ) -> Result<(Option, Option), ChannelError> { // If we have not sent a `tx_abort` message for this negotiation previously, we need to echo // back a tx_abort message according to the spec: // https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L560-L561 @@ -2011,12 +2071,9 @@ where } #[rustfmt::skip] - pub fn funding_signed( + pub fn funding_signed( &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(&mut FundedChannel, ChannelMonitor<::EcdsaSigner>), ChannelError> - where - L::Target: Logger - { + ) -> Result<(&mut FundedChannel, ChannelMonitor), ChannelError> { let phase = core::mem::replace(&mut self.phase, ChannelPhase::Undefined); let result = if let ChannelPhase::UnfundedOutboundV1(chan) = phase { let channel_state = chan.context.channel_state; @@ -2042,14 +2099,8 @@ where result.map(|monitor| (self.as_funded_mut().expect("Channel should be funded"), monitor)) } - fn funding_tx_constructed( - &mut self, funding_outpoint: OutPoint, logger: &L, - ) -> Result - where - L::Target: Logger, - { - let logger = WithChannelContext::from(logger, self.context(), None); - let (interactive_tx_constructor, commitment_signed) = match &mut self.phase { + fn funding_tx_constructed(&mut self, funding_outpoint: OutPoint) -> Result<(), AbortReason> { + let interactive_tx_constructor = match &mut self.phase { ChannelPhase::UnfundedV2(chan) => { debug_assert_eq!( chan.context.channel_state, @@ -2068,77 +2119,35 @@ where chan.funding.channel_transaction_parameters.funding_outpoint = Some(funding_outpoint); - let interactive_tx_constructor = chan - .interactive_tx_constructor + chan.interactive_tx_constructor .take() - 
.expect("PendingV2Channel::interactive_tx_constructor should be set"); - - let commitment_signed = - chan.context.get_initial_commitment_signed_v2(&chan.funding, &&logger); - let commitment_signed = match commitment_signed { - Some(commitment_signed) => commitment_signed, - // TODO(dual_funding): Support async signing - None => { - return Err(AbortReason::InternalError( - "Failed to compute commitment_signed signatures", - )); - }, - }; - - (interactive_tx_constructor, commitment_signed) + .expect("PendingV2Channel::interactive_tx_constructor should be set") }, ChannelPhase::Funded(chan) => { if let Some(pending_splice) = chan.pending_splice.as_mut() { - pending_splice - .funding_negotiation - .take() - .and_then(|funding_negotiation| { - if let FundingNegotiation::ConstructingTransaction { + let funding_negotiation = pending_splice.funding_negotiation.take(); + if let Some(FundingNegotiation::ConstructingTransaction { + mut funding, + interactive_tx_constructor, + }) = funding_negotiation + { + let is_initiator = interactive_tx_constructor.is_initiator(); + funding.channel_transaction_parameters.funding_outpoint = + Some(funding_outpoint); + pending_splice.funding_negotiation = + Some(FundingNegotiation::AwaitingSignatures { + is_initiator, funding, - interactive_tx_constructor, - } = funding_negotiation - { - let is_initiator = interactive_tx_constructor.is_initiator(); - Some((is_initiator, funding, interactive_tx_constructor)) - } else { - // Replace the taken state for later error handling - pending_splice.funding_negotiation = Some(funding_negotiation); - None - } - }) - .ok_or_else(|| { - AbortReason::InternalError( - "Got a tx_complete message in an invalid state", - ) - }) - .and_then(|(is_initiator, mut funding, interactive_tx_constructor)| { - funding.channel_transaction_parameters.funding_outpoint = - Some(funding_outpoint); - match chan.context.get_initial_commitment_signed_v2(&funding, &&logger) - { - Some(commitment_signed) => { - // Advance the 
state - pending_splice.funding_negotiation = - Some(FundingNegotiation::AwaitingSignatures { - is_initiator, - funding, - }); - Ok((interactive_tx_constructor, commitment_signed)) - }, - // TODO(splicing): Support async signing - None => { - // Restore the taken state for later error handling - pending_splice.funding_negotiation = - Some(FundingNegotiation::ConstructingTransaction { - funding, - interactive_tx_constructor, - }); - Err(AbortReason::InternalError( - "Failed to compute commitment_signed signatures", - )) - }, - } - })? + initial_commitment_signed_from_counterparty: None, + }); + interactive_tx_constructor + } else { + // Replace the taken state for later error handling + pending_splice.funding_negotiation = funding_negotiation; + return Err(AbortReason::InternalError( + "Got a tx_complete message in an invalid state", + )); + } } else { return Err(AbortReason::InternalError( "Got a tx_complete message in an invalid state", @@ -2155,7 +2164,187 @@ where let signing_session = interactive_tx_constructor.into_signing_session(); self.context_mut().interactive_tx_signing_session = Some(signing_session); - Ok(commitment_signed) + Ok(()) + } + + pub fn funding_transaction_signed( + &mut self, funding_txid_signed: Txid, witnesses: Vec, best_block_height: u32, + fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + ) -> Result { + let (context, funding, pending_splice) = match &mut self.phase { + ChannelPhase::Undefined => unreachable!(), + ChannelPhase::UnfundedV2(channel) => (&mut channel.context, &channel.funding, None), + ChannelPhase::Funded(channel) => { + (&mut channel.context, &channel.funding, channel.pending_splice.as_ref()) + }, + _ => { + return Err(APIError::APIMisuseError { + err: format!( + "Channel with id {} not expecting funding signatures", + self.context().channel_id + ), + }); + }, + }; + + let channel_id = context.channel_id; + let counterparty_node_id = context.counterparty_node_id; + + let signing_session = if let Some(signing_session) 
= + context.interactive_tx_signing_session.as_mut() + { + if let Some(pending_splice) = pending_splice.as_ref() { + debug_assert!(pending_splice + .funding_negotiation + .as_ref() + .map(|funding_negotiation| matches!( + funding_negotiation, + FundingNegotiation::AwaitingSignatures { .. } + )) + .unwrap_or(false)); + } + + if signing_session.holder_tx_signatures().is_some() { + // Our `tx_signatures` either should've been the first time we processed them, + // or we're waiting for our counterparty to send theirs first. + return Ok(FundingTxSigned { + commitment_signed: None, + counterparty_initial_commitment_signed_result: None, + tx_signatures: None, + funding_tx: None, + splice_negotiated: None, + splice_locked: None, + }); + } + + signing_session + } else { + if Some(funding_txid_signed) == funding.get_funding_txid() { + // We may be handling a duplicate call and the funding was already locked so we + // no longer have the signing session present. + return Ok(FundingTxSigned { + commitment_signed: None, + counterparty_initial_commitment_signed_result: None, + tx_signatures: None, + funding_tx: None, + splice_negotiated: None, + splice_locked: None, + }); + } + let err = format!("Channel {} not expecting funding signatures", context.channel_id); + return Err(APIError::APIMisuseError { err }); + }; + + let tx = signing_session.unsigned_tx().tx(); + if funding_txid_signed != tx.compute_txid() { + return Err(APIError::APIMisuseError { + err: "Transaction was malleated prior to signing".to_owned(), + }); + } + + let shared_input_signature = + if let Some(splice_input_index) = signing_session.unsigned_tx().shared_input_index() { + let sig = match &context.holder_signer { + ChannelSignerType::Ecdsa(signer) => signer.sign_splice_shared_input( + &funding.channel_transaction_parameters, + tx, + splice_input_index as usize, + &context.secp_ctx, + ), + #[cfg(taproot)] + ChannelSignerType::Taproot(_) => todo!(), + }; + Some(sig) + } else { + None + }; + 
debug_assert_eq!(pending_splice.is_some(), shared_input_signature.is_some()); + + let tx_signatures = msgs::TxSignatures { + channel_id: context.channel_id, + tx_hash: funding_txid_signed, + witnesses, + shared_input_signature, + }; + let (tx_signatures, funding_tx) = signing_session + .provide_holder_witnesses(tx_signatures, &context.secp_ctx) + .map_err(|err| APIError::APIMisuseError { err })?; + + let logger = WithChannelContext::from(logger, &context, None); + if tx_signatures.is_some() { + log_info!( + logger, + "Sending tx_signatures for interactive funding transaction {funding_txid_signed}" + ); + } + + let funding = pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.funding_negotiation.as_ref()) + .and_then(|funding_negotiation| funding_negotiation.as_funding()) + .unwrap_or(funding); + let commitment_signed = context.get_initial_commitment_signed_v2(funding, &&logger); + + // For zero conf channels, we don't expect the funding transaction to be ready for broadcast + // yet as, according to the spec, our counterparty shouldn't have sent their `tx_signatures` + // without us having sent our initial commitment signed to them first. However, in the event + // they do, we choose to handle it anyway. Note that because of this behavior not being + // spec-compliant, we're not able to test this without custom logic. 
+ let (splice_negotiated, splice_locked) = if let Some(funding_tx) = funding_tx.clone() { + debug_assert!(tx_signatures.is_some()); + let funded_channel = self.as_funded_mut().expect( + "Funding transactions ready for broadcast can only exist for funded channels", + ); + funded_channel.on_tx_signatures_exchange(funding_tx, best_block_height, &logger) + } else { + (None, None) + }; + + let funding_tx = funding_tx.map(|tx| { + let tx_type = if splice_negotiated.is_some() { + TransactionType::Splice { counterparty_node_id, channel_id } + } else { + TransactionType::Funding { channels: vec![(counterparty_node_id, channel_id)] } + }; + (tx, tx_type) + }); + + // If we have a pending splice with a buffered initial commitment signed from our + // counterparty, process it now that we have provided our signatures. + let counterparty_initial_commitment_signed_result = + self.as_funded_mut().and_then(|funded_channel| { + funded_channel + .pending_splice + .as_mut() + .and_then(|pending_splice| pending_splice.funding_negotiation.as_mut()) + .and_then(|funding_negotiation| { + if let FundingNegotiation::AwaitingSignatures { + ref mut initial_commitment_signed_from_counterparty, + .. 
+ } = funding_negotiation + { + initial_commitment_signed_from_counterparty.take() + } else { + None + } + }) + .map(|commit_sig| { + funded_channel.splice_initial_commitment_signed( + &commit_sig, + fee_estimator, + &&logger, + ) + }) + }); + + Ok(FundingTxSigned { + commitment_signed, + counterparty_initial_commitment_signed_result, + tx_signatures, + funding_tx, + splice_negotiated, + splice_locked, + }) } pub fn force_shutdown(&mut self, closure_reason: ClosureReason) -> ShutdownResult { @@ -2164,13 +2353,9 @@ where } #[rustfmt::skip] - pub fn commitment_signed( + pub fn commitment_signed( &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) -> Result<(Option::EcdsaSigner>>, Option), ChannelError> - where - F::Target: FeeEstimator, - L::Target: Logger - { + ) -> Result<(Option>, Option), ChannelError> { let phase = core::mem::replace(&mut self.phase, ChannelPhase::Undefined); match phase { ChannelPhase::UnfundedV2(chan) => { @@ -2219,9 +2404,33 @@ where // which must always come after the initial commitment signed is sent. .unwrap_or(true); let res = if has_negotiated_pending_splice && !session_received_commitment_signed { - funded_channel - .splice_initial_commitment_signed(msg, fee_estimator, logger) - .map(|monitor_update_opt| (None, monitor_update_opt)) + let has_holder_tx_signatures = funded_channel + .context + .interactive_tx_signing_session + .as_ref() + .map(|session| session.holder_tx_signatures().is_some()) + .unwrap_or(false); + + // We delay processing this until the user manually approves the splice via + // [`Channel::funding_transaction_signed`], as otherwise, there would be a + // [`ChannelMonitorUpdateStep::RenegotiatedFunding`] committed that we would + // need to undo if they no longer wish to proceed. 
+ if has_holder_tx_signatures { + funded_channel + .splice_initial_commitment_signed(msg, fee_estimator, logger) + .map(|monitor_update_opt| (None, monitor_update_opt)) + } else { + let pending_splice = funded_channel.pending_splice.as_mut() + .expect("We have a pending splice negotiated"); + let funding_negotiation = pending_splice.funding_negotiation.as_mut() + .expect("We have a pending splice negotiated"); + if let FundingNegotiation::AwaitingSignatures { + ref mut initial_commitment_signed_from_counterparty, .. + } = funding_negotiation { + *initial_commitment_signed_from_counterparty = Some(msg.clone()); + } + Ok((None, None)) + } } else { funded_channel.commitment_signed(msg, fee_estimator, logger) .map(|monitor_update_opt| (None, monitor_update_opt)) @@ -2242,12 +2451,9 @@ where /// Doesn't bother handling the /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC /// corner case properly. - pub fn get_available_balances( + pub fn get_available_balances( &self, fee_estimator: &LowerBoundedFeeEstimator, - ) -> AvailableBalances - where - F::Target: FeeEstimator, - { + ) -> AvailableBalances { match &self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::Funded(chan) => chan.get_available_balances(fee_estimator), @@ -2268,40 +2474,36 @@ where } } -impl From> for Channel +impl From> for Channel where - SP::Target: SignerProvider, - ::EcdsaSigner: ChannelSigner, + SP::EcdsaSigner: ChannelSigner, { fn from(channel: OutboundV1Channel) -> Self { Channel { phase: ChannelPhase::UnfundedOutboundV1(channel) } } } -impl From> for Channel +impl From> for Channel where - SP::Target: SignerProvider, - ::EcdsaSigner: ChannelSigner, + SP::EcdsaSigner: ChannelSigner, { fn from(channel: InboundV1Channel) -> Self { Channel { phase: ChannelPhase::UnfundedInboundV1(channel) } } } -impl From> for Channel +impl From> for Channel where - SP::Target: SignerProvider, - ::EcdsaSigner: ChannelSigner, + SP::EcdsaSigner: ChannelSigner, 
{ fn from(channel: PendingV2Channel) -> Self { Channel { phase: ChannelPhase::UnfundedV2(channel) } } } -impl From> for Channel +impl From> for Channel where - SP::Target: SignerProvider, - ::EcdsaSigner: ChannelSigner, + SP::EcdsaSigner: ChannelSigner, { fn from(channel: FundedChannel) -> Self { Channel { phase: ChannelPhase::Funded(channel) } @@ -2553,16 +2755,13 @@ impl FundingScope { } /// Constructs a `FundingScope` for splicing a channel. - fn for_splice( + fn for_splice( prev_funding: &Self, context: &ChannelContext, our_funding_contribution: SignedAmount, their_funding_contribution: SignedAmount, counterparty_funding_pubkey: PublicKey, our_new_holder_keys: ChannelPublicKeys, - ) -> Self - where - SP::Target: SignerProvider, - { - debug_assert!(our_funding_contribution.abs() <= SignedAmount::MAX_MONEY); - debug_assert!(their_funding_contribution.abs() <= SignedAmount::MAX_MONEY); + ) -> Self { + debug_assert!(our_funding_contribution.unsigned_abs() <= Amount::MAX_MONEY); + debug_assert!(their_funding_contribution.unsigned_abs() <= Amount::MAX_MONEY); let post_channel_value = prev_funding.compute_post_splice_value( our_funding_contribution.to_sat(), @@ -2705,6 +2904,17 @@ enum FundingNegotiation { AwaitingSignatures { funding: FundingScope, is_initiator: bool, + /// The initial [`msgs::CommitmentSigned`] message received for the [`FundingScope`] above. + /// We delay processing this until the user manually approves the splice via + /// [`Channel::funding_transaction_signed`], as otherwise, there would be a + /// [`ChannelMonitorUpdateStep::RenegotiatedFunding`] committed that we would need to undo + /// if they no longer wish to proceed. + /// + /// Note that this doesn't need to be done with dual-funded channels as there is no + /// equivalent monitor update for them, and we can just force close the channel. + /// + /// This field is not persisted as the message should be resent on reconnections. 
+ initial_commitment_signed_from_counterparty: Option, }, } @@ -2712,6 +2922,7 @@ impl_writeable_tlv_based_enum_upgradable!(FundingNegotiation, (0, AwaitingSignatures) => { (1, funding, required), (3, is_initiator, required), + (_unused, initial_commitment_signed_from_counterparty, (static_value, None)), }, unread_variants: AwaitingAck, ConstructingTransaction ); @@ -2737,12 +2948,9 @@ impl FundingNegotiation { } impl PendingFunding { - fn check_get_splice_locked( + fn check_get_splice_locked( &mut self, context: &ChannelContext, confirmed_funding_index: usize, height: u32, - ) -> Option - where - SP::Target: SignerProvider, - { + ) -> Option { debug_assert!(confirmed_funding_index < self.negotiated_candidates.len()); let funding = &self.negotiated_candidates[confirmed_funding_index]; @@ -2848,10 +3056,7 @@ impl<'a> From<&'a Transaction> for ConfirmedTransaction<'a> { /// Contains everything about the channel including state, and various flags. #[cfg_attr(test, derive(Debug))] -pub(super) struct ChannelContext -where - SP::Target: SignerProvider, -{ +pub(super) struct ChannelContext { config: LegacyChannelConfig, // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were @@ -2907,6 +3112,7 @@ where /// send it first. resend_order: RAACommitmentOrder, + monitor_pending_tx_signatures: bool, monitor_pending_channel_ready: bool, monitor_pending_revoke_and_ack: bool, monitor_pending_commitment_signed: bool, @@ -2936,8 +3142,9 @@ where /// setting it again as a side-effect of [`FundedChannel::channel_reestablish`]. signer_pending_commitment_update: bool, /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a - /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is - /// outbound or inbound. 
+ /// [`msgs::FundingCreated`] for an outbound V1 channel, [`msgs::FundingSigned`] for an inbound + /// V1 channel, or [`msgs::CommitmentSigned`] for a V2 channel (dual-funded) or a funded channel + /// with a pending splice. signer_pending_funding: bool, /// If we attempted to sign a cooperative close transaction but the signer wasn't ready, then this /// will be set to `true`. @@ -3139,10 +3346,7 @@ where /// A channel struct implementing this trait can receive an initial counterparty commitment /// transaction signature. -trait InitialRemoteCommitmentReceiver -where - SP::Target: SignerProvider, -{ +trait InitialRemoteCommitmentReceiver { fn context(&self) -> &ChannelContext; fn context_mut(&mut self) -> &mut ChannelContext; @@ -3154,9 +3358,9 @@ where fn received_msg(&self) -> &'static str; #[rustfmt::skip] - fn check_counterparty_commitment_signature( + fn check_counterparty_commitment_signature( &self, sig: &Signature, holder_commitment_point: &HolderCommitmentPoint, logger: &L - ) -> Result where L::Target: Logger { + ) -> Result { let funding_script = self.funding().get_funding_redeemscript(); let commitment_data = self.context().build_commitment_transaction(self.funding(), @@ -3177,13 +3381,10 @@ where } #[rustfmt::skip] - fn initial_commitment_signed( + fn initial_commitment_signed( &mut self, channel_id: ChannelId, counterparty_signature: Signature, holder_commitment_point: &mut HolderCommitmentPoint, best_block: BestBlock, signer_provider: &SP, logger: &L, - ) -> Result<(ChannelMonitor<::EcdsaSigner>, CommitmentTransaction), ChannelError> - where - L::Target: Logger - { + ) -> Result<(ChannelMonitor, CommitmentTransaction), ChannelError> { let initial_commitment_tx = match self.check_counterparty_commitment_signature(&counterparty_signature, holder_commitment_point, logger) { Ok(res) => res, Err(ChannelError::Close(e)) => { @@ -3271,10 +3472,7 @@ where fn is_v2_established(&self) -> bool; } -impl InitialRemoteCommitmentReceiver for OutboundV1Channel 
-where - SP::Target: SignerProvider, -{ +impl InitialRemoteCommitmentReceiver for OutboundV1Channel { fn context(&self) -> &ChannelContext { &self.context } @@ -3300,10 +3498,7 @@ where } } -impl InitialRemoteCommitmentReceiver for InboundV1Channel -where - SP::Target: SignerProvider, -{ +impl InitialRemoteCommitmentReceiver for InboundV1Channel { fn context(&self) -> &ChannelContext { &self.context } @@ -3329,10 +3524,7 @@ where } } -impl InitialRemoteCommitmentReceiver for FundedChannel -where - SP::Target: SignerProvider, -{ +impl InitialRemoteCommitmentReceiver for FundedChannel { fn context(&self) -> &ChannelContext { &self.context } @@ -3367,12 +3559,9 @@ where } } -impl ChannelContext -where - SP::Target: SignerProvider, -{ +impl ChannelContext { #[rustfmt::skip] - fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>( + fn new_for_inbound_channel<'a, ES: EntropySource, F: FeeEstimator, L: Logger>( fee_estimator: &'a LowerBoundedFeeEstimator, entropy_source: &'a ES, signer_provider: &'a SP, @@ -3390,13 +3579,7 @@ where msg_channel_reserve_satoshis: u64, msg_push_msat: u64, open_channel_fields: msgs::CommonOpenChannelFields, - ) -> Result<(FundingScope, ChannelContext), ChannelError> - where - ES::Target: EntropySource, - F::Target: FeeEstimator, - L::Target: Logger, - SP::Target: SignerProvider, - { + ) -> Result<(FundingScope, ChannelContext), ChannelError> { let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id), None); let announce_for_forwarding = if (open_channel_fields.channel_flags & 1) == 1 { true } else { false }; @@ -3409,13 +3592,6 @@ where return Err(ChannelError::close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. 
It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT))); } - // Check sanity of message fields: - if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis { - return Err(ChannelError::close(format!( - "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}", - config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis, - open_channel_fields.funding_satoshis, our_funding_satoshis))); - } if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { return Err(ChannelError::close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis))); } @@ -3638,6 +3814,7 @@ where resend_order: RAACommitmentOrder::CommitmentFirst, + monitor_pending_tx_signatures: false, monitor_pending_channel_ready: false, monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, @@ -3716,7 +3893,7 @@ where } #[rustfmt::skip] - fn new_for_outbound_channel<'a, ES: Deref, F: Deref, L: Deref>( + fn new_for_outbound_channel<'a, ES: EntropySource, F: FeeEstimator, L: Logger>( fee_estimator: &'a LowerBoundedFeeEstimator, entropy_source: &'a ES, signer_provider: &'a SP, @@ -3731,15 +3908,9 @@ where temporary_channel_id_fn: Option ChannelId>, holder_selected_channel_reserve_satoshis: u64, channel_keys_id: [u8; 32], - holder_signer: ::EcdsaSigner, + holder_signer: SP::EcdsaSigner, _logger: L, - ) -> Result<(FundingScope, ChannelContext), APIError> - where - ES::Target: EntropySource, - F::Target: FeeEstimator, - SP::Target: SignerProvider, - L::Target: Logger, - { + ) -> Result<(FundingScope, ChannelContext), APIError> { // This will be updated with the counterparty contribution if this is a dual-funded channel let channel_value_satoshis = funding_satoshis; @@ -3877,6 +4048,7 @@ where resend_order: RAACommitmentOrder::CommitmentFirst, + monitor_pending_tx_signatures: false, monitor_pending_channel_ready: false, 
monitor_pending_revoke_and_ack: false, monitor_pending_commitment_signed: false, @@ -4102,7 +4274,7 @@ where if self.pending_inbound_htlcs.iter() .any(|htlc| match htlc.state { - InboundHTLCState::Committed => false, + InboundHTLCState::Committed { .. } => false, // An HTLC removal from the local node is pending on the remote commitment. InboundHTLCState::LocalRemoved(_) => true, // An HTLC add from the remote node is pending on the local commitment. @@ -4346,12 +4518,9 @@ where /// Returns a maximum "sane" fee rate used to reason about our dust exposure. /// Will be Some if the `channel_type`'s dust exposure depends on its commitment fee rate, and /// None otherwise. - fn get_dust_exposure_limiting_feerate( + fn get_dust_exposure_limiting_feerate( &self, fee_estimator: &LowerBoundedFeeEstimator, channel_type: &ChannelTypeFeatures, - ) -> Option - where - F::Target: FeeEstimator, - { + ) -> Option { if channel_type.supports_anchor_zero_fee_commitments() { None } else { @@ -4531,7 +4700,7 @@ where (InboundHTLCState::RemoteAnnounced(..), _) => true, (InboundHTLCState::AwaitingRemoteRevokeToAnnounce(..), _) => true, (InboundHTLCState::AwaitingAnnouncedRemoteRevoke(..), _) => true, - (InboundHTLCState::Committed, _) => true, + (InboundHTLCState::Committed { .. 
}, _) => true, (InboundHTLCState::LocalRemoved(..), true) => true, (InboundHTLCState::LocalRemoved(..), false) => false, }) @@ -4748,13 +4917,10 @@ where Ok(ret) } - fn validate_update_add_htlc( + fn validate_update_add_htlc( &self, funding: &FundingScope, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator, - ) -> Result<(), ChannelError> - where - F::Target: FeeEstimator, - { + ) -> Result<(), ChannelError> { if msg.amount_msat > funding.get_value_satoshis() * 1000 { return Err(ChannelError::close( "Remote side tried to send more than the total value of the channel".to_owned(), @@ -4866,13 +5032,10 @@ where Ok(()) } - fn validate_update_fee( + fn validate_update_fee( &self, funding: &FundingScope, fee_estimator: &LowerBoundedFeeEstimator, new_feerate_per_kw: u32, - ) -> Result<(), ChannelError> - where - F::Target: FeeEstimator, - { + ) -> Result<(), ChannelError> { // Check that we won't be pushed over our dust exposure limit by the feerate increase. let dust_exposure_limiting_feerate = self.get_dust_exposure_limiting_feerate(&fee_estimator, funding.get_channel_type()); @@ -4944,17 +5107,13 @@ where Ok(()) } - fn validate_commitment_signed( + fn validate_commitment_signed( &self, funding: &FundingScope, transaction_number: u64, commitment_point: PublicKey, msg: &msgs::CommitmentSigned, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result< (HolderCommitmentTransaction, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>), ChannelError, - > - where - F::Target: FeeEstimator, - L::Target: Logger, - { + > { let funding_script = funding.get_funding_redeemscript(); let commitment_data = self.build_commitment_transaction( @@ -5076,14 +5235,10 @@ where Ok((holder_commitment_tx, commitment_data.htlcs_included)) } - fn can_send_update_fee( + fn can_send_update_fee( &self, funding: &FundingScope, feerate_per_kw: u32, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> bool - where - F::Target: FeeEstimator, - L::Target: Logger, - { + ) 
-> bool { // Before proposing a feerate update, check that we can actually afford the new fee. let dust_exposure_limiting_feerate = self.get_dust_exposure_limiting_feerate(&fee_estimator, funding.get_channel_type()); @@ -5158,12 +5313,9 @@ where return true; } - fn can_accept_incoming_htlc( + fn can_accept_incoming_htlc( &self, funding: &FundingScope, dust_exposure_limiting_feerate: Option, logger: &L, - ) -> Result<(), LocalHTLCFailureReason> - where - L::Target: Logger, - { + ) -> Result<(), LocalHTLCFailureReason> { // The fee spike buffer (an additional nondust HTLC) we keep for the remote if the channel // is not zero fee. This deviates from the spec because the fee spike buffer requirement // doesn't exist on the receiver's side, only on the sender's. @@ -5289,9 +5441,7 @@ where /// which peer generated this transaction and "to whom" this transaction flows. #[inline] #[rustfmt::skip] - fn build_commitment_transaction(&self, funding: &FundingScope, commitment_number: u64, per_commitment_point: &PublicKey, local: bool, generated_by_local: bool, logger: &L) -> CommitmentData<'_> - where L::Target: Logger - { + fn build_commitment_transaction(&self, funding: &FundingScope, commitment_number: u64, per_commitment_point: &PublicKey, local: bool, generated_by_local: bool, logger: &L) -> CommitmentData<'_> { let broadcaster_dust_limit_sat = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis }; let feerate_per_kw = self.get_commitment_feerate(funding, generated_by_local); @@ -5663,12 +5813,9 @@ where } #[rustfmt::skip] - fn get_available_balances_for_scope( + fn get_available_balances_for_scope( &self, funding: &FundingScope, fee_estimator: &LowerBoundedFeeEstimator, - ) -> AvailableBalances - where - F::Target: FeeEstimator, - { + ) -> AvailableBalances { let context = &self; // Note that we have to handle overflow due to the case mentioned in the docs in general // here. 
@@ -6147,10 +6294,10 @@ where /// Only allowed after [`FundingScope::channel_transaction_parameters`] is set. #[rustfmt::skip] - fn get_funding_signed_msg( + fn get_funding_signed_msg( &mut self, channel_parameters: &ChannelTransactionParameters, logger: &L, counterparty_initial_commitment_tx: CommitmentTransaction, - ) -> Option where L::Target: Logger { + ) -> Option { let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", @@ -6187,13 +6334,10 @@ where /// of the channel type we tried, not of our ability to open any channel at all. We can see if a /// downgrade of channel features would be possible so that we can still open the channel. #[rustfmt::skip] - pub(crate) fn maybe_downgrade_channel_features( + pub(crate) fn maybe_downgrade_channel_features( &mut self, funding: &mut FundingScope, fee_estimator: &LowerBoundedFeeEstimator, user_config: &UserConfig, their_features: &InitFeatures, - ) -> Result<(), ()> - where - F::Target: FeeEstimator - { + ) -> Result<(), ()> { if !funding.is_outbound() || !matches!( self.channel_state, ChannelState::NegotiatingFunding(flags) @@ -6255,13 +6399,9 @@ where } } - fn get_initial_counterparty_commitment_signatures( + fn get_initial_counterparty_commitment_signatures( &self, funding: &FundingScope, logger: &L, - ) -> Option<(Signature, Vec)> - where - SP::Target: SignerProvider, - L::Target: Logger, - { + ) -> Option<(Signature, Vec)> { let mut commitment_number = self.counterparty_next_commitment_transaction_number; let mut commitment_point = self.counterparty_next_commitment_point.unwrap(); @@ -6300,13 +6440,9 @@ where } } - fn get_initial_commitment_signed_v2( - &self, funding: &FundingScope, logger: &L, - ) -> Option - where - SP::Target: SignerProvider, - L::Target: Logger, - { + fn get_initial_commitment_signed_v2( + &mut self, funding: 
&FundingScope, logger: &L, + ) -> Option { let signatures = self.get_initial_counterparty_commitment_signatures(funding, logger); if let Some((signature, htlc_signatures)) = signatures { log_info!(logger, "Generated commitment_signed for peer",); @@ -6314,6 +6450,7 @@ where // We shouldn't expect any HTLCs before `ChannelReady`. debug_assert!(htlc_signatures.is_empty()); } + self.signer_pending_funding = false; Some(msgs::CommitmentSigned { channel_id: self.channel_id, htlc_signatures, @@ -6323,7 +6460,11 @@ where partial_signature_with_nonce: None, }) } else { - // TODO(splicing): Support async signing + log_debug!( + logger, + "Initial counterparty commitment signature not available, waiting on async signer" + ); + self.signer_pending_funding = true; None } } @@ -6352,13 +6493,10 @@ where } #[rustfmt::skip] - fn check_for_funding_tx_confirmed( + fn check_for_funding_tx_confirmed( &mut self, funding: &mut FundingScope, block_hash: &BlockHash, height: u32, index_in_block: usize, tx: &mut ConfirmedTransaction, logger: &L, - ) -> Result - where - L::Target: Logger, - { + ) -> Result { let funding_txo = match funding.get_funding_txo() { Some(funding_txo) => funding_txo, None => { @@ -6497,8 +6635,7 @@ fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satos fn check_splice_contribution_sufficient( contribution: &SpliceContribution, is_initiator: bool, funding_feerate: FeeRate, ) -> Result { - let contribution_amount = contribution.value(); - if contribution_amount < SignedAmount::ZERO { + if contribution.inputs().is_empty() { let estimated_fee = Amount::from_sat(estimate_v2_funding_transaction_fee( contribution.inputs(), contribution.outputs(), @@ -6507,20 +6644,25 @@ fn check_splice_contribution_sufficient( funding_feerate.to_sat_per_kwu() as u32, )); + let contribution_amount = contribution.net_value(); contribution_amount .checked_sub( estimated_fee.to_signed().expect("fees should never exceed Amount::MAX_MONEY"), ) - .ok_or(format!("Our 
{contribution_amount} contribution plus the fee estimate exceeds the total bitcoin supply")) + .ok_or(format!( + "{estimated_fee} splice-out amount plus {} fee estimate exceeds the total bitcoin supply", + contribution_amount.unsigned_abs(), + )) } else { check_v2_funding_inputs_sufficient( - contribution_amount.to_sat(), + contribution.value_added(), contribution.inputs(), + contribution.outputs(), is_initiator, true, funding_feerate.to_sat_per_kwu() as u32, ) - .map(|_| contribution_amount) + .map(|_| contribution.net_value()) } } @@ -6579,16 +6721,16 @@ fn estimate_v2_funding_transaction_fee( /// Returns estimated (partial) fees as additional information #[rustfmt::skip] fn check_v2_funding_inputs_sufficient( - contribution_amount: i64, funding_inputs: &[FundingTxInput], is_initiator: bool, - is_splice: bool, funding_feerate_sat_per_1000_weight: u32, -) -> Result { - let estimated_fee = estimate_v2_funding_transaction_fee( - funding_inputs, &[], is_initiator, is_splice, funding_feerate_sat_per_1000_weight, - ); - - let mut total_input_sats = 0u64; + contributed_input_value: Amount, funding_inputs: &[FundingTxInput], outputs: &[TxOut], + is_initiator: bool, is_splice: bool, funding_feerate_sat_per_1000_weight: u32, +) -> Result { + let estimated_fee = Amount::from_sat(estimate_v2_funding_transaction_fee( + funding_inputs, outputs, is_initiator, is_splice, funding_feerate_sat_per_1000_weight, + )); + + let mut total_input_value = Amount::ZERO; for FundingTxInput { utxo, .. } in funding_inputs.iter() { - total_input_sats = total_input_sats.checked_add(utxo.output.value.to_sat()) + total_input_value = total_input_value.checked_add(utxo.output.value) .ok_or("Sum of input values is greater than the total bitcoin supply")?; } @@ -6603,13 +6745,11 @@ fn check_v2_funding_inputs_sufficient( // TODO(splicing): refine check including the fact wether a change will be added or not. // Can be done once dual funding preparation is included. 
- let minimal_input_amount_needed = contribution_amount.checked_add(estimated_fee as i64) - .ok_or(format!("Our {contribution_amount} contribution plus the fee estimate exceeds the total bitcoin supply"))?; - if i64::try_from(total_input_sats).map_err(|_| "Sum of input values is greater than the total bitcoin supply")? - < minimal_input_amount_needed - { + let minimal_input_amount_needed = contributed_input_value.checked_add(estimated_fee) + .ok_or(format!("{contributed_input_value} contribution plus {estimated_fee} fee estimate exceeds the total bitcoin supply"))?; + if total_input_value < minimal_input_amount_needed { Err(format!( - "Total input amount {total_input_sats} is lower than needed for contribution {contribution_amount}, considering fees of {estimated_fee}. Need more inputs.", + "Total input amount {total_input_value} is lower than needed for splice-in contribution {contributed_input_value}, considering fees of {estimated_fee}. Need more inputs.", )) } else { Ok(estimated_fee) @@ -6647,14 +6787,10 @@ pub(super) struct FundingNegotiationContext { impl FundingNegotiationContext { /// Prepare and start interactive transaction negotiation. /// If error occurs, it is caused by our side, not the counterparty. 
- fn into_interactive_tx_constructor( + fn into_interactive_tx_constructor( mut self, context: &ChannelContext, funding: &FundingScope, signer_provider: &SP, entropy_source: &ES, holder_node_id: PublicKey, - ) -> Result - where - SP::Target: SignerProvider, - ES::Target: EntropySource, - { + ) -> Result { debug_assert_eq!( self.shared_funding_input.is_some(), funding.channel_transaction_parameters.splice_parent_funding_txid.is_some(), @@ -6675,7 +6811,7 @@ impl FundingNegotiationContext { }; // Optionally add change output - let change_value_opt = if self.our_funding_contribution > SignedAmount::ZERO { + let change_value_opt = if !self.our_funding_inputs.is_empty() { match calculate_change_output_value( &self, self.shared_funding_input.is_some(), @@ -6703,12 +6839,12 @@ impl FundingNegotiationContext { }, } }; - let mut change_output = - TxOut { value: Amount::from_sat(change_value), script_pubkey: change_script }; + let mut change_output = TxOut { value: change_value, script_pubkey: change_script }; let change_output_weight = get_output_weight(&change_output.script_pubkey).to_wu(); let change_output_fee = fee_for_weight(self.funding_feerate_sat_per_1000_weight, change_output_weight); - let change_value_decreased_with_fee = change_value.saturating_sub(change_output_fee); + let change_value_decreased_with_fee = + change_value.to_sat().saturating_sub(change_output_fee); // Check dust limit again if change_value_decreased_with_fee > context.holder_dust_limit_satoshis { change_output.value = Amount::from_sat(change_value_decreased_with_fee); @@ -6758,10 +6894,7 @@ impl FundingNegotiationContext { // Holder designates channel data owned for the benefit of the user client. // Counterparty designates channel data owned by the another channel participant entity. 
#[cfg_attr(test, derive(Debug))] -pub(super) struct FundedChannel -where - SP::Target: SignerProvider, -{ +pub(super) struct FundedChannel { pub funding: FundingScope, pub context: ChannelContext, holder_commitment_point: HolderCommitmentPoint, @@ -6854,13 +6987,35 @@ type BestBlockUpdatedRes = ( Option, ); +/// The result of handling a `tx_complete` message during interactive transaction construction. +pub(super) struct TxCompleteResult { + /// The message to send to the counterparty, if any. + pub interactive_tx_msg_send: Option, + + /// If the negotiation completed and the holder has local contributions, this contains the + /// unsigned funding transaction for the `FundingTransactionReadyForSigning` event. + pub event_unsigned_tx: Option, + + /// If the negotiation completed and the holder has no local contributions, this contains + /// the result of automatically calling `funding_transaction_signed` with empty witnesses. + pub funding_tx_signed: Option, +} + /// The result of signing a funding transaction negotiated using the interactive-tx protocol. -pub struct FundingTxSigned { +pub(super) struct FundingTxSigned { + /// The initial `commitment_signed` message to send to the counterparty, if necessary. + pub commitment_signed: Option, + + /// The result of processing a buffered initial commitment signed from our counterparty, + /// if any. + pub counterparty_initial_commitment_signed_result: + Option, ChannelError>>, + /// Signatures that should be sent to the counterparty, if necessary. pub tx_signatures: Option, - /// The fully-signed funding transaction to be broadcast. - pub funding_tx: Option, + /// The fully-signed funding transaction to be broadcast, along with the transaction type. + pub funding_tx: Option<(Transaction, TransactionType)>, /// Information about the completed funding negotiation. 
pub splice_negotiated: Option, @@ -6944,10 +7099,9 @@ pub struct SpliceFundingPromotion { pub discarded_funding: Vec, } -impl FundedChannel +impl FundedChannel where - SP::Target: SignerProvider, - ::EcdsaSigner: EcdsaChannelSigner, + SP::EcdsaSigner: EcdsaChannelSigner, { pub fn context(&self) -> &ChannelContext { &self.context @@ -7114,11 +7268,10 @@ where } #[rustfmt::skip] - fn check_remote_fee( + fn check_remote_fee( channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator, feerate_per_kw: u32, cur_feerate_per_kw: Option, logger: &L - ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger, - { + ) -> Result<(), ChannelError> { if channel_type.supports_anchor_zero_fee_commitments() { if feerate_per_kw != 0 { let err = "Zero Fee Channels must never attempt to use a fee".to_owned(); @@ -7268,11 +7421,9 @@ where /// /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is /// disconnected). - pub fn claim_htlc_while_disconnected_dropping_mon_update_legacy( + pub fn claim_htlc_while_disconnected_dropping_mon_update_legacy( &mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L, - ) where - L::Target: Logger, - { + ) { // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc` // (see equivalent if condition there). assert!(!self.context.channel_state.can_generate_new_commitment()); @@ -7285,14 +7436,11 @@ where } } - fn get_update_fulfill_htlc( + fn get_update_fulfill_htlc( &mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, payment_info: Option, attribution_data: Option, logger: &L, - ) -> UpdateFulfillFetch - where - L::Target: Logger, - { + ) -> UpdateFulfillFetch { // Either ChannelReady got set (which means it won't be unset) or there is no way any // caller thought we could have something claimed (cause we wouldn't have accepted in an // incoming HTLC anyway). 
If we got to ShutdownComplete, callers aren't allowed to call us, @@ -7320,7 +7468,7 @@ where payment_preimage_arg ); match htlc.state { - InboundHTLCState::Committed => {}, + InboundHTLCState::Committed { .. } => {}, InboundHTLCState::LocalRemoved(ref reason) => { if let &InboundHTLCRemovalReason::Fulfill { .. } = reason { } else { @@ -7413,7 +7561,7 @@ where { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; - if let InboundHTLCState::Committed = htlc.state { + if let InboundHTLCState::Committed { .. } = htlc.state { } else { debug_assert!( false, @@ -7439,14 +7587,11 @@ where UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, update_blocked: false } } - pub fn get_update_fulfill_htlc_and_commit( + pub fn get_update_fulfill_htlc_and_commit( &mut self, htlc_id: u64, payment_preimage: PaymentPreimage, payment_info: Option, attribution_data: Option, logger: &L, - ) -> UpdateFulfillCommitFetch - where - L::Target: Logger, - { + ) -> UpdateFulfillCommitFetch { let release_cs_monitor = self.context.blocked_monitor_updates.is_empty(); match self.get_update_fulfill_htlc( htlc_id, @@ -7496,6 +7641,7 @@ where Vec::new(), Vec::new(), Vec::new(), + logger, ); UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } }, @@ -7505,12 +7651,9 @@ where /// Returns `Err` (always with [`ChannelError::Ignore`]) if the HTLC could not be failed (e.g. /// if it was already resolved). Otherwise returns `Ok`. - pub fn queue_fail_htlc( + pub fn queue_fail_htlc( &mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L, - ) -> Result<(), ChannelError> - where - L::Target: Logger, - { + ) -> Result<(), ChannelError> { self.fail_htlc(htlc_id_arg, err_packet, true, logger) .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?")) } @@ -7519,12 +7662,9 @@ where /// want to fail blinded HTLCs where we are not the intro node. /// /// See [`Self::queue_fail_htlc`] for more info. 
- pub fn queue_fail_malformed_htlc( + pub fn queue_fail_malformed_htlc( &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L, - ) -> Result<(), ChannelError> - where - L::Target: Logger, - { + ) -> Result<(), ChannelError> { self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger) .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?")) } @@ -7532,10 +7672,10 @@ where /// Returns `Err` (always with [`ChannelError::Ignore`]) if the HTLC could not be failed (e.g. /// if it was already resolved). Otherwise returns `Ok`. #[rustfmt::skip] - fn fail_htlc( + fn fail_htlc( &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool, logger: &L - ) -> Result, ChannelError> where L::Target: Logger { + ) -> Result, ChannelError> { if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { panic!("Was asked to fail an HTLC when channel was not in an operational state"); } @@ -7548,7 +7688,7 @@ where for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { if htlc.htlc_id == htlc_id_arg { match htlc.state { - InboundHTLCState::Committed => {}, + InboundHTLCState::Committed { .. } => {}, InboundHTLCState::LocalRemoved(_) => { return Err(ChannelError::Ignore(format!("HTLC {} was already resolved", htlc.htlc_id))); }, @@ -7642,14 +7782,10 @@ where /// and the channel is now usable (and public), this may generate an announcement_signatures to /// reply with. 
#[rustfmt::skip] - pub fn channel_ready( + pub fn channel_ready( &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock, logger: &L - ) -> Result, ChannelError> - where - NS::Target: NodeSigner, - L::Target: Logger - { + ) -> Result, ChannelError> { if self.context.channel_state.is_peer_disconnected() { self.context.workaround_lnd_bug_4006 = Some(msg.clone()); return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned())); @@ -7724,9 +7860,9 @@ where } #[rustfmt::skip] - pub fn update_add_htlc( + pub fn update_add_htlc( &mut self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator, - ) -> Result<(), ChannelError> where F::Target: FeeEstimator { + ) -> Result<(), ChannelError> { if self.context.channel_state.is_remote_stfu_sent() || self.context.channel_state.is_quiescent() { return Err(ChannelError::WarnAndDisconnect("Got add HTLC message while quiescent".to_owned())); } @@ -7771,6 +7907,159 @@ where Ok(()) } + /// Returns true if any committed inbound HTLCs were received pre-LDK 0.3 and cannot be used + /// during `ChannelManager` deserialization to reconstruct the set of pending HTLCs. + pub(super) fn has_legacy_inbound_htlcs(&self) -> bool { + self.context.pending_inbound_htlcs.iter().any(|htlc| { + matches!( + &htlc.state, + InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy } + ) + }) + } + + /// Returns committed inbound HTLCs whose onion has not yet been decoded and processed. Useful + /// for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. 
+ pub(super) fn inbound_htlcs_pending_decode( + &self, + ) -> impl Iterator + '_ { + self.context.pending_inbound_htlcs.iter().filter_map(|htlc| match &htlc.state { + InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc }, + } => Some(update_add_htlc.clone()), + _ => None, + }) + } + + /// Returns committed inbound HTLCs that have been forwarded but not yet fully resolved. Useful + /// when reconstructing the set of pending HTLCs when deserializing the `ChannelManager`. + pub(super) fn inbound_forwarded_htlcs( + &self, + ) -> impl Iterator + '_ { + // We don't want to return an HTLC as needing processing if it already has a resolution that's + // pending in the holding cell. + let htlc_resolution_in_holding_cell = |id: u64| -> bool { + self.context.holding_cell_htlc_updates.iter().any(|holding_cell_htlc| { + match holding_cell_htlc { + HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => *htlc_id == id, + HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => *htlc_id == id, + HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => *htlc_id == id, + HTLCUpdateAwaitingACK::AddHTLC { .. } => false, + } + }) + }; + + let prev_outbound_scid_alias = self.context.outbound_scid_alias(); + let user_channel_id = self.context.get_user_id(); + let channel_id = self.context.channel_id(); + let outpoint = self.funding_outpoint(); + let counterparty_node_id = self.context.get_counterparty_node_id(); + + self.context.pending_inbound_htlcs.iter().filter_map(move |htlc| match &htlc.state { + InboundHTLCState::Committed { + update_add_htlc: + InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret, + phantom_shared_secret, + trampoline_shared_secret, + blinded_failure, + outbound_hop, + }, + } => { + if htlc_resolution_in_holding_cell(htlc.htlc_id) { + return None; + } + // The reconstructed `HTLCPreviousHopData` is used to fail or claim the HTLC backwards + // post-restart, if it is missing in the outbound edge. 
+ let prev_hop_data = HTLCPreviousHopData { + prev_outbound_scid_alias, + user_channel_id: Some(user_channel_id), + htlc_id: htlc.htlc_id, + incoming_packet_shared_secret: *incoming_packet_shared_secret, + phantom_shared_secret: *phantom_shared_secret, + trampoline_shared_secret: *trampoline_shared_secret, + blinded_failure: *blinded_failure, + channel_id, + outpoint, + counterparty_node_id: Some(counterparty_node_id), + cltv_expiry: Some(htlc.cltv_expiry), + }; + Some((htlc.payment_hash, prev_hop_data, *outbound_hop)) + }, + _ => None, + }) + } + + /// Useful when reconstructing the set of pending HTLC forwards when deserializing the + /// `ChannelManager`. We don't want to cache an HTLC as needing to be forwarded if it's already + /// present in the outbound edge, or else we'll double-forward. + pub(super) fn outbound_htlc_forwards( + &self, + ) -> impl Iterator + '_ { + let holding_cell_outbounds = + self.context.holding_cell_htlc_updates.iter().filter_map(|htlc| match htlc { + HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => match source { + HTLCSource::PreviousHopData(prev_hop_data) => { + Some((*payment_hash, prev_hop_data.clone())) + }, + _ => None, + }, + _ => None, + }); + let committed_outbounds = + self.context.pending_outbound_htlcs.iter().filter_map(|htlc| match &htlc.source { + HTLCSource::PreviousHopData(prev_hop_data) => { + Some((htlc.payment_hash, prev_hop_data.clone())) + }, + _ => None, + }); + holding_cell_outbounds.chain(committed_outbounds) + } + + #[cfg(test)] + pub(super) fn test_holding_cell_outbound_htlc_forwards_count(&self) -> usize { + self.context + .holding_cell_htlc_updates + .iter() + .filter_map(|htlc| match htlc { + HTLCUpdateAwaitingACK::AddHTLC { source, .. 
} => match source { + HTLCSource::PreviousHopData(prev_hop_data) => Some(prev_hop_data.clone()), + _ => None, + }, + _ => None, + }) + .count() + } + + /// This inbound HTLC was irrevocably forwarded to the outbound edge, so we no longer need to + /// persist its onion. + pub(super) fn prune_inbound_htlc_onion( + &mut self, htlc_id: u64, prev_hop_data: &HTLCPreviousHopData, + outbound_hop_data: OutboundHop, + ) { + for htlc in self.context.pending_inbound_htlcs.iter_mut() { + if htlc.htlc_id == htlc_id { + if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { + *update_add_htlc = InboundUpdateAdd::Forwarded { + incoming_packet_shared_secret: prev_hop_data.incoming_packet_shared_secret, + phantom_shared_secret: prev_hop_data.phantom_shared_secret, + trampoline_shared_secret: prev_hop_data.trampoline_shared_secret, + blinded_failure: prev_hop_data.blinded_failure, + outbound_hop: outbound_hop_data, + }; + return; + } + } + } + debug_assert!(false, "If we go to prune an inbound HTLC it should be present") + } + + /// Useful for testing crash scenarios where the holding cell is not persisted. 
+ #[cfg(test)] + pub(super) fn test_clear_holding_cell(&mut self) { + self.context.holding_cell_htlc_updates.clear() + } + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed( @@ -7864,12 +8153,10 @@ where Ok(()) } - #[rustfmt::skip] - pub fn initial_commitment_signed_v2( - &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result::EcdsaSigner>, ChannelError> - where L::Target: Logger - { + pub fn initial_commitment_signed_v2( + &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, + logger: &L, + ) -> Result, ChannelError> { if let Some(signing_session) = self.context.interactive_tx_signing_session.as_ref() { if signing_session.has_received_tx_signatures() { let msg = "Received initial commitment_signed after peer's tx_signatures received!"; @@ -7883,16 +8170,41 @@ where }; let holder_commitment_point = &mut self.holder_commitment_point.clone(); - self.context.assert_no_commitment_advancement(holder_commitment_point.next_transaction_number(), "initial commitment_signed"); + self.context.assert_no_commitment_advancement( + holder_commitment_point.next_transaction_number(), + "initial commitment_signed", + ); let (channel_monitor, _) = self.initial_commitment_signed( - self.context.channel_id(), msg.signature, holder_commitment_point, best_block, signer_provider, logger)?; + self.context.channel_id(), + msg.signature, + holder_commitment_point, + best_block, + signer_provider, + logger, + )?; self.holder_commitment_point = *holder_commitment_point; - log_info!(logger, "Received initial commitment_signed from peer for channel {}", &self.context.channel_id()); + log_info!( + logger, + "Received initial commitment_signed from peer for channel {}", + &self.context.channel_id() + ); - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); - 
self.context.interactive_tx_signing_session.as_mut().expect("signing session should be present").received_commitment_signed(); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); + self.context + .interactive_tx_signing_session + .as_mut() + .expect("signing session should be present") + .received_commitment_signed(); Ok(channel_monitor) } @@ -7908,14 +8220,10 @@ where /// Note that our `commitment_signed` send did not include a monitor update. This is due to: /// 1. Updates cannot be made since the state machine is paused until `tx_signatures`. /// 2. We're still able to abort negotiation until `tx_signatures`. - fn splice_initial_commitment_signed( + fn splice_initial_commitment_signed( &mut self, msg: &msgs::CommitmentSigned, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result, ChannelError> - where - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result, ChannelError> { debug_assert!(self .context .interactive_tx_signing_session @@ -7995,7 +8303,16 @@ where .as_mut() .expect("Signing session must exist for negotiated pending splice") .received_commitment_signed(); - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); + self.context.monitor_pending_tx_signatures = true; Ok(self.push_ret_blockable_mon_update(monitor_update)) } @@ -8017,14 +8334,10 @@ where (nondust_htlc_sources, dust_htlcs) } - pub fn commitment_signed( + pub fn commitment_signed( &mut self, msg: &msgs::CommitmentSigned, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result, ChannelError> - where - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result, ChannelError> { self.commitment_signed_check_state()?; if !self.pending_funding().is_empty() { @@ -8061,14 +8374,10 @@ where self.commitment_signed_update_monitor(update, logger) } - pub 
fn commitment_signed_batch( + pub fn commitment_signed_batch( &mut self, batch: Vec, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result, ChannelError> - where - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result, ChannelError> { self.commitment_signed_check_state()?; let mut messages = BTreeMap::new(); @@ -8166,12 +8475,9 @@ where Ok(()) } - fn commitment_signed_update_monitor( + fn commitment_signed_update_monitor( &mut self, mut update: ChannelMonitorUpdateStep, logger: &L, - ) -> Result, ChannelError> - where - L::Target: Logger, - { + ) -> Result, ChannelError> { if self .holder_commitment_point .advance(&self.context.holder_signer, &self.context.secp_ctx, logger) @@ -8307,6 +8613,7 @@ where Vec::new(), Vec::new(), Vec::new(), + logger, ); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8314,13 +8621,9 @@ where /// Public version of the below, checking relevant preconditions first. /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and /// returns `(None, Vec::new())`. - pub fn maybe_free_holding_cell_htlcs( + pub fn maybe_free_holding_cell_htlcs( &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> (Option, Vec<(HTLCSource, PaymentHash)>) - where - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> (Option, Vec<(HTLCSource, PaymentHash)>) { if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() { @@ -8332,13 +8635,9 @@ where /// Frees any pending commitment updates in the holding cell, generating the relevant messages /// for our counterparty. 
- fn free_holding_cell_htlcs( + fn free_holding_cell_htlcs( &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> (Option, Vec<(HTLCSource, PaymentHash)>) - where - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> (Option, Vec<(HTLCSource, PaymentHash)>) { assert!(matches!(self.context.channel_state, ChannelState::ChannelReady(_))); assert!(!self.context.channel_state.is_monitor_update_in_progress()); assert!(!self.context.channel_state.is_quiescent()); @@ -8384,7 +8683,7 @@ where skimmed_fee_msat, blinding_point, hold_htlc, - .. + accountable, } => { match self.send_htlc( amount_msat, @@ -8396,6 +8695,7 @@ where skimmed_fee_msat, blinding_point, hold_htlc.is_some(), + accountable, fee_estimator, logger, ) { @@ -8512,7 +8812,15 @@ where if update_fee.is_some() { "a fee update, " } else { "" }, update_add_count, update_fulfill_count, update_fail_count); - self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + true, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -8532,7 +8840,7 @@ where /// /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc - pub fn revoke_and_ack( + pub fn revoke_and_ack( &mut self, msg: &msgs::RevokeAndACK, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, hold_mon_update: bool, ) -> Result< @@ -8542,11 +8850,7 @@ where Option, ), ChannelError, - > - where - F::Target: FeeEstimator, - L::Target: Logger, - { + > { if self.context.channel_state.is_quiescent() { return Err(ChannelError::WarnAndDisconnect( "Got revoke_and_ack message while quiescent".to_owned(), @@ -8716,7 +9020,8 @@ where false }; if swap { - let mut state = InboundHTLCState::Committed; + let mut state = + InboundHTLCState::Committed { update_add_htlc: 
InboundUpdateAdd::Legacy }; mem::swap(&mut state, &mut htlc.state); if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state { @@ -8755,14 +9060,22 @@ where PendingHTLCStatus::Forward(forward_info) => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash); to_forward_infos.push((forward_info, htlc.htlc_id)); - htlc.state = InboundHTLCState::Committed; + htlc.state = InboundHTLCState::Committed { + // HTLCs will only be in state `InboundHTLCResolution::Resolved` if they were + // received on LDK 0.1-. + update_add_htlc: InboundUpdateAdd::Legacy, + }; }, } }, InboundHTLCResolution::Pending { update_add_htlc } => { log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); - pending_update_adds.push(update_add_htlc); - htlc.state = InboundHTLCState::Committed; + pending_update_adds.push(update_add_htlc.clone()); + htlc.state = InboundHTLCState::Committed { + update_add_htlc: InboundUpdateAdd::WithOnion { + update_add_htlc, + }, + }; }, } } @@ -8883,6 +9196,7 @@ where to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, + logger, ); return_with_htlcs_to_fail!(htlcs_to_fail); }, @@ -8910,7 +9224,6 @@ where update_fail_htlcs.len() + update_fail_malformed_htlcs.len(), &self.context.channel_id); } else { - debug_assert!(htlcs_to_fail.is_empty()); let reason = if self.context.channel_state.is_local_stfu_sent() { "exits quiescence" } else if self.context.channel_state.is_monitor_update_in_progress() { @@ -8928,11 +9241,12 @@ where to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, + logger, ); return_with_htlcs_to_fail!(htlcs_to_fail); } else { - log_debug!(logger, "Received a valid revoke_and_ack with no reply necessary. {} monitor update.", - release_state_str); + log_debug!(logger, "Received a valid revoke_and_ack with no reply necessary. 
{} monitor update {}.", + release_state_str, monitor_update.update_id); self.monitor_updating_paused( false, @@ -8941,6 +9255,7 @@ where to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, + logger, ); return_with_htlcs_to_fail!(htlcs_to_fail); } @@ -8948,13 +9263,10 @@ where } } - fn on_tx_signatures_exchange<'a, L: Deref>( + fn on_tx_signatures_exchange<'a, L: Logger>( &mut self, funding_tx: Transaction, best_block_height: u32, logger: &WithChannelContext<'a, L>, - ) -> (Option, Option) - where - L::Target: Logger, - { + ) -> (Option, Option) { debug_assert!(!self.context.channel_state.is_monitor_update_in_progress()); debug_assert!(!self.context.channel_state.is_awaiting_remote_revoke()); @@ -9006,113 +9318,9 @@ where } } - pub fn funding_transaction_signed( - &mut self, funding_txid_signed: Txid, witnesses: Vec, best_block_height: u32, - logger: &L, - ) -> Result - where - L::Target: Logger, - { - let signing_session = - if let Some(signing_session) = self.context.interactive_tx_signing_session.as_mut() { - if let Some(pending_splice) = self.pending_splice.as_ref() { - debug_assert!(pending_splice - .funding_negotiation - .as_ref() - .map(|funding_negotiation| matches!( - funding_negotiation, - FundingNegotiation::AwaitingSignatures { .. } - )) - .unwrap_or(false)); - } - - if signing_session.holder_tx_signatures().is_some() { - // Our `tx_signatures` either should've been the first time we processed them, - // or we're waiting for our counterparty to send theirs first. - return Ok(FundingTxSigned { - tx_signatures: None, - funding_tx: None, - splice_negotiated: None, - splice_locked: None, - }); - } - - signing_session - } else { - if Some(funding_txid_signed) == self.funding.get_funding_txid() { - // We may be handling a duplicate call and the funding was already locked so we - // no longer have the signing session present. 
- return Ok(FundingTxSigned { - tx_signatures: None, - funding_tx: None, - splice_negotiated: None, - splice_locked: None, - }); - } - let err = - format!("Channel {} not expecting funding signatures", self.context.channel_id); - return Err(APIError::APIMisuseError { err }); - }; - - let tx = signing_session.unsigned_tx().tx(); - if funding_txid_signed != tx.compute_txid() { - return Err(APIError::APIMisuseError { - err: "Transaction was malleated prior to signing".to_owned(), - }); - } - - let shared_input_signature = - if let Some(splice_input_index) = signing_session.unsigned_tx().shared_input_index() { - let sig = match &self.context.holder_signer { - ChannelSignerType::Ecdsa(signer) => signer.sign_splice_shared_input( - &self.funding.channel_transaction_parameters, - tx, - splice_input_index as usize, - &self.context.secp_ctx, - ), - #[cfg(taproot)] - ChannelSignerType::Taproot(_) => todo!(), - }; - Some(sig) - } else { - None - }; - debug_assert_eq!(self.pending_splice.is_some(), shared_input_signature.is_some()); - - let tx_signatures = msgs::TxSignatures { - channel_id: self.context.channel_id, - tx_hash: funding_txid_signed, - witnesses, - shared_input_signature, - }; - let (tx_signatures, funding_tx) = signing_session - .provide_holder_witnesses(tx_signatures, &self.context.secp_ctx) - .map_err(|err| APIError::APIMisuseError { err })?; - - let logger = WithChannelContext::from(logger, &self.context, None); - if tx_signatures.is_some() { - log_info!( - logger, - "Sending tx_signatures for interactive funding transaction {funding_txid_signed}" - ); - } - - let (splice_negotiated, splice_locked) = if let Some(funding_tx) = funding_tx.clone() { - debug_assert!(tx_signatures.is_some()); - self.on_tx_signatures_exchange(funding_tx, best_block_height, &logger) - } else { - (None, None) - }; - - Ok(FundingTxSigned { tx_signatures, funding_tx, splice_negotiated, splice_locked }) - } - - pub fn tx_signatures( + pub fn tx_signatures( &mut self, msg: 
&msgs::TxSignatures, best_block_height: u32, logger: &L, - ) -> Result - where - L::Target: Logger, - { + ) -> Result { let signing_session = if let Some(signing_session) = self.context.interactive_tx_signing_session.as_mut() { @@ -9171,7 +9379,23 @@ where (None, None) }; + let funding_tx = funding_tx.map(|tx| { + let tx_type = if splice_negotiated.is_some() { + TransactionType::Splice { + counterparty_node_id: self.context.counterparty_node_id, + channel_id: self.context.channel_id, + } + } else { + TransactionType::Funding { + channels: vec![(self.context.counterparty_node_id, self.context.channel_id)], + } + }; + (tx, tx_type) + }); + Ok(FundingTxSigned { + commitment_signed: None, + counterparty_initial_commitment_signed_result: None, tx_signatures: holder_tx_signatures, funding_tx, splice_negotiated, @@ -9182,12 +9406,9 @@ where /// Queues up an outbound update fee by placing it in the holding cell. You should call /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the /// commitment update. - pub fn queue_update_fee( + pub fn queue_update_fee( &mut self, feerate_per_kw: u32, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) where - F::Target: FeeEstimator, - L::Target: Logger, - { + ) { let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger); assert!(msg_opt.is_none(), "We forced holding cell?"); } @@ -9200,12 +9421,10 @@ where /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this /// [`FundedChannel`] if `force_holding_cell` is false. #[rustfmt::skip] - fn send_update_fee( + fn send_update_fee( &mut self, feerate_per_kw: u32, mut force_holding_cell: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L - ) -> Option - where F::Target: FeeEstimator, L::Target: Logger - { + ) -> Option { if !self.funding.is_outbound() { panic!("Cannot send fee from inbound channel"); } @@ -9255,7 +9474,7 @@ where /// completed. 
/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately. #[rustfmt::skip] - fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger { + fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) -> Result<(), ()> { assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete)); if !self.context.can_resume_on_reconnect() { return Err(()) @@ -9297,7 +9516,7 @@ where // in response to it yet, so don't touch it. true }, - InboundHTLCState::Committed => true, + InboundHTLCState::Committed { .. } => true, InboundHTLCState::LocalRemoved(_) => { // We (hopefully) sent a commitment_signed updating this HTLC (which we can // re-transmit if needed) and they may have even sent a revoke_and_ack back @@ -9341,20 +9560,20 @@ where /// [`ChannelManager`]: super::channelmanager::ChannelManager /// [`chain::Watch`]: crate::chain::Watch /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress - fn monitor_updating_paused( + fn monitor_updating_paused( &mut self, resend_raa: bool, resend_commitment: bool, resend_channel_ready: bool, - mut pending_forwards: Vec<(PendingHTLCInfo, u64)>, - mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, - mut pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, + pending_forwards: Vec<(PendingHTLCInfo, u64)>, + pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, logger: &L, ) { + log_trace!(logger, "Pausing channel monitor updates"); + self.context.monitor_pending_revoke_and_ack |= resend_raa; self.context.monitor_pending_commitment_signed |= resend_commitment; self.context.monitor_pending_channel_ready |= resend_channel_ready; - self.context.monitor_pending_forwards.append(&mut pending_forwards); - self.context.monitor_pending_failures.append(&mut pending_fails); - self.context - 
.monitor_pending_finalized_fulfills - .append(&mut pending_finalized_claimed_htlcs); + self.context.monitor_pending_forwards.extend(pending_forwards); + self.context.monitor_pending_failures.extend(pending_fails); + self.context.monitor_pending_finalized_fulfills.extend(pending_finalized_claimed_htlcs); self.context.channel_state.set_monitor_update_in_progress(); } @@ -9362,19 +9581,41 @@ where /// successfully and we should restore normal operation. Returns messages which should be sent /// to the remote side. #[rustfmt::skip] - pub fn monitor_updating_restored( + pub fn monitor_updating_restored( &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block_height: u32, path_for_release_htlc: CBP ) -> MonitorRestoreUpdates where - L::Target: Logger, - NS::Target: NodeSigner, CBP: Fn(u64) -> BlindedMessagePath { assert!(self.context.channel_state.is_monitor_update_in_progress()); self.context.channel_state.clear_monitor_update_in_progress(); assert_eq!(self.blocked_monitor_updates_pending(), 0); + let mut tx_signatures = self + .context + .monitor_pending_tx_signatures + .then(|| ()) + .and_then(|_| self.context.interactive_tx_signing_session.as_ref()) + .and_then(|signing_session| signing_session.holder_tx_signatures().clone()); + if tx_signatures.is_some() { + // We want to clear that the monitor update for our `tx_signatures` has completed, but + // we may still need to hold back the message until it's ready to be sent. 
+ self.context.monitor_pending_tx_signatures = false; + + if self.context.signer_pending_funding { + tx_signatures.take(); + } + + let signing_session = self.context.interactive_tx_signing_session.as_ref() + .expect("We have a tx_signatures message so we must have a valid signing session"); + if !signing_session.holder_sends_tx_signatures_first() + && !signing_session.has_received_tx_signatures() + { + tx_signatures.take(); + } + } + // If we're past (or at) the AwaitingChannelReady stage on an outbound (or V2-established) channel, // try to (re-)broadcast the funding transaction as we may have declined to broadcast it when we // first received the funding_signed. @@ -9425,6 +9666,14 @@ where mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills); let mut pending_update_adds = Vec::new(); mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds); + let committed_outbound_htlc_sources = self.context.pending_outbound_htlcs.iter().filter_map(|htlc| { + if let &OutboundHTLCState::LocalAnnounced(_) = &htlc.state { + if let HTLCSource::PreviousHopData(prev_hop_data) = &htlc.source { + return Some((prev_hop_data.clone(), htlc.amount_msat)) + } + } + None + }).collect(); if self.context.channel_state.is_peer_disconnected() { self.context.monitor_pending_revoke_and_ack = false; @@ -9433,7 +9682,7 @@ where raa: None, commitment_update: None, commitment_order: RAACommitmentOrder::RevokeAndACKFirst, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, - channel_ready_order, + channel_ready_order, committed_outbound_htlc_sources }; } @@ -9463,8 +9712,8 @@ where match commitment_order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { raa, commitment_update, commitment_order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, - 
pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures: None, - channel_ready_order, + pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs, tx_signatures, + channel_ready_order, committed_outbound_htlc_sources } } @@ -9497,9 +9746,7 @@ where } #[rustfmt::skip] - pub fn update_fee(&mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError> - where F::Target: FeeEstimator, L::Target: Logger - { + pub fn update_fee(&mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError> { if self.funding.is_outbound() { return Err(ChannelError::close("Non-funding remote tried to update channel fee".to_owned())); } @@ -9525,9 +9772,9 @@ where /// Indicates that the signer may have some signatures for us, so we should retry if we're /// blocked. #[rustfmt::skip] - pub fn signer_maybe_unblocked( + pub fn signer_maybe_unblocked( &mut self, logger: &L, path_for_release_htlc: CBP - ) -> Result where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { + ) -> Result where CBP: Fn(u64) -> BlindedMessagePath { if let Some((commitment_number, commitment_secret)) = self.context.signer_pending_stale_state_verification.clone() { if let Ok(expected_point) = self.context.holder_signer.as_ref() .get_per_commitment_point(commitment_number, &self.context.secp_ctx) @@ -9543,7 +9790,12 @@ where log_trace!(logger, "Attempting to update holder per-commitment point..."); self.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger); } - let funding_signed = if self.context.signer_pending_funding && !self.funding.is_outbound() { + + let funding_signed = if self.context.signer_pending_funding + && !self.is_v2_established() + && !self.funding.is_outbound() + && self.pending_splice.is_none() + { let commitment_data = self.context.build_commitment_transaction(&self.funding, // The 
previous transaction number (i.e., when adding 1) is used because this field // is advanced when handling funding_created, but the point is not advanced until @@ -9553,6 +9805,43 @@ where let counterparty_initial_commitment_tx = commitment_data.tx; self.context.get_funding_signed_msg(&self.funding.channel_transaction_parameters, logger, counterparty_initial_commitment_tx) } else { None }; + + let funding_commit_sig = if self.context.signer_pending_funding + && (self.is_v2_established() || self.pending_splice.is_some()) + { + log_debug!(logger, "Attempting to generate pending initial commitment_signed..."); + let funding = self + .pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.funding_negotiation.as_ref()) + .and_then(|funding_negotiation| { + debug_assert!(matches!( + funding_negotiation, + FundingNegotiation::AwaitingSignatures { .. } + )); + funding_negotiation.as_funding() + }) + .unwrap_or(&self.funding); + self.context.get_initial_commitment_signed_v2(funding, logger) + } else { + None + }; + + let tx_signatures = if funding_commit_sig.is_some() { + if let Some(signing_session) = self.context.interactive_tx_signing_session.as_ref() { + let should_send_tx_signatures = signing_session.holder_sends_tx_signatures_first() + || signing_session.has_received_tx_signatures(); + should_send_tx_signatures + .then(|| ()) + .and_then(|_| signing_session.holder_tx_signatures().clone()) + } else { + debug_assert!(false); + None + } + } else { + None + }; + // Provide a `channel_ready` message if we need to, but only if we're _not_ still pending // funding. 
let channel_ready = if self.context.signer_pending_channel_ready && !self.context.signer_pending_funding { @@ -9611,12 +9900,14 @@ where } else { (None, None, None) } } else { (None, None, None) }; - log_trace!(logger, "Signer unblocked with {} commitment_update, {} revoke_and_ack, with resend order {:?}, {} funding_signed, {} channel_ready, - {} closing_signed, {} signed_closing_tx, and {} shutdown result", + log_trace!(logger, "Signer unblocked with {} commitment_update, {} revoke_and_ack, with resend order {:?}, {} funding_signed, \ + {} funding commit_sig, {} tx_signatures, {} channel_ready, {} closing_signed, {} signed_closing_tx, and {} shutdown result", if commitment_update.is_some() { "a" } else { "no" }, if revoke_and_ack.is_some() { "a" } else { "no" }, self.context.resend_order, if funding_signed.is_some() { "a" } else { "no" }, + if funding_commit_sig.is_some() { "a" } else { "no" }, + if tx_signatures.is_some() { "a" } else { "no" }, if channel_ready.is_some() { "a" } else { "no" }, if closing_signed.is_some() { "a" } else { "no" }, if signed_closing_tx.is_some() { "a" } else { "no" }, @@ -9629,6 +9920,8 @@ where accept_channel: None, funding_created: None, funding_signed, + funding_commit_sig, + tx_signatures, channel_ready, order: self.context.resend_order.clone(), closing_signed, @@ -9637,11 +9930,10 @@ where }) } - fn get_last_revoke_and_ack( + fn get_last_revoke_and_ack( &mut self, path_for_release_htlc: CBP, logger: &L, ) -> Option where - L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath, { debug_assert!( @@ -9696,12 +9988,9 @@ where } /// Gets the last commitment update for immediate sending to our peer. 
- fn get_last_commitment_update_for_send( + fn get_last_commitment_update_for_send( &mut self, logger: &L, - ) -> Result - where - L::Target: Logger, - { + ) -> Result { let mut update_add_htlcs = Vec::new(); let mut update_fulfill_htlcs = Vec::new(); let mut update_fail_htlcs = Vec::new(); @@ -9719,6 +10008,7 @@ where skimmed_fee_msat: htlc.skimmed_fee_msat, blinding_point: htlc.blinding_point, hold_htlc: htlc.hold_htlc, + accountable: Some(htlc.accountable), }); } } @@ -9812,10 +10102,7 @@ where } } - fn panic_on_stale_state(logger: &L) - where - L::Target: Logger, - { + fn panic_on_stale_state(logger: &L) { macro_rules! log_and_panic { ($err_msg: expr) => { log_error!(logger, $err_msg); @@ -9834,14 +10121,12 @@ where /// May panic if some calls other than message-handling calls (which will all Err immediately) /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call. #[rustfmt::skip] - pub fn channel_reestablish( + pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock, path_for_release_htlc: CBP, ) -> Result where - L::Target: Logger, - NS::Target: NodeSigner, CBP: Fn(u64) -> BlindedMessagePath { if !self.context.channel_state.is_peer_disconnected() { @@ -9934,6 +10219,7 @@ where // A receiving node: // - if the `next_funding` TLV is set: + let mut retransmit_funding_commit_sig = None; if let Some(next_funding) = &msg.next_funding { // - if `next_funding_txid` matches the latest interactive funding transaction // or the current channel funding transaction: @@ -9956,49 +10242,7 @@ where && next_funding.should_retransmit(msgs::NextFundingFlag::CommitmentSigned) { // - MUST retransmit its `commitment_signed` for that funding transaction. 
- let funding = self - .pending_splice - .as_ref() - .and_then(|pending_splice| pending_splice.funding_negotiation.as_ref()) - .and_then(|funding_negotiation| { - if let FundingNegotiation::AwaitingSignatures { funding, .. } = &funding_negotiation { - Some(funding) - } else { - None - } - }) - .or_else(|| Some(&self.funding)) - .filter(|funding| funding.get_funding_txid() == Some(next_funding.txid)) - .ok_or_else(|| { - let message = "Failed to find funding for new commitment_signed".to_owned(); - ChannelError::Close( - ( - message.clone(), - ClosureReason::HolderForceClosed { message, broadcasted_latest_txn: Some(false) }, - ) - ) - })?; - - let commitment_signed = self.context.get_initial_commitment_signed_v2(&funding, logger) - // TODO(splicing): Support async signing - .ok_or_else(|| { - let message = "Failed to get signatures for new commitment_signed".to_owned(); - ChannelError::Close( - ( - message.clone(), - ClosureReason::HolderForceClosed { message, broadcasted_latest_txn: Some(false) }, - ) - ) - })?; - - commitment_update = Some(msgs::CommitmentUpdate { - commitment_signed: vec![commitment_signed], - update_add_htlcs: vec![], - update_fulfill_htlcs: vec![], - update_fail_htlcs: vec![], - update_fail_malformed_htlcs: vec![], - update_fee: None, - }); + retransmit_funding_commit_sig = Some(next_funding.txid); } // - if it has already received `commitment_signed` and it should sign first @@ -10030,6 +10274,47 @@ where "No active signing session. The associated funding transaction may have already been broadcast.".as_bytes().to_vec() }); } } + if let Some(funding_txid) = retransmit_funding_commit_sig { + let funding = self + .pending_splice + .as_ref() + .and_then(|pending_splice| pending_splice.funding_negotiation.as_ref()) + .and_then(|funding_negotiation| { + if let FundingNegotiation::AwaitingSignatures { funding, .. 
} = &funding_negotiation { + Some(funding) + } else { + None + } + }) + .or_else(|| Some(&self.funding)) + .filter(|funding| funding.get_funding_txid() == Some(funding_txid)) + .ok_or_else(|| { + let message = "Failed to find funding for new commitment_signed".to_owned(); + ChannelError::Close( + ( + message.clone(), + ClosureReason::HolderForceClosed { message, broadcasted_latest_txn: Some(false) }, + ) + ) + })?; + + commitment_update = self + .context + .get_initial_commitment_signed_v2(&funding, logger) + .map(|commitment_signed| + msgs::CommitmentUpdate { + commitment_signed: vec![commitment_signed], + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, + } + ); + if commitment_update.is_none() { + tx_signatures.take(); + } + } if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) { // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's. @@ -10220,12 +10505,9 @@ where /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart, /// at which point they will be recalculated. 
- fn calculate_closing_fee_limits( + fn calculate_closing_fee_limits( &mut self, fee_estimator: &LowerBoundedFeeEstimator, - ) -> (u64, u64) - where - F::Target: FeeEstimator, - { + ) -> (u64, u64) { if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); } @@ -10312,12 +10594,9 @@ where Ok(()) } - pub fn maybe_propose_closing_signed( + pub fn maybe_propose_closing_signed( &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result<(Option, Option<(Transaction, ShutdownResult)>), ChannelError> - where - F::Target: FeeEstimator, - L::Target: Logger, { // If we're waiting on a monitor persistence, that implies we're also waiting to send some // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't @@ -10397,8 +10676,9 @@ where } } - pub fn shutdown( - &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown, + pub fn shutdown( + &mut self, logger: &L, signer_provider: &SP, their_features: &InitFeatures, + msg: &msgs::Shutdown, ) -> Result< (Option, Option, Vec<(HTLCSource, PaymentHash)>), ChannelError, @@ -10507,7 +10787,15 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -10566,13 +10854,10 @@ where tx } - fn get_closing_signed_msg( + fn get_closing_signed_msg( &mut self, closing_tx: &ClosingTransaction, skip_remote_output: bool, fee_satoshis: u64, min_fee_satoshis: u64, max_fee_satoshis: u64, logger: &L, - ) -> Option - where - L::Target: Logger, - { + ) -> Option { let sig = match &self.context.holder_signer { ChannelSignerType::Ecdsa(ecdsa) => ecdsa .sign_closing_transaction( @@ -10627,13 +10912,10 @@ where } } - pub fn closing_signed( + pub fn closing_signed( &mut self, 
fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::ClosingSigned, logger: &L, ) -> Result<(Option, Option<(Transaction, ShutdownResult)>), ChannelError> - where - F::Target: FeeEstimator, - L::Target: Logger, { if self.is_shutdown_pending_signature() { return Err(ChannelError::Warn(String::from("Remote end sent us a closing_signed while fully shutdown and just waiting on the final closing signature"))); @@ -10877,13 +11159,9 @@ where /// When this function is called, the HTLC is already irrevocably committed to the channel; /// this function determines whether to fail the HTLC, or forward / claim it. #[rustfmt::skip] - pub fn can_accept_incoming_htlc( + pub fn can_accept_incoming_htlc( &self, fee_estimator: &LowerBoundedFeeEstimator, logger: L - ) -> Result<(), LocalHTLCFailureReason> - where - F::Target: FeeEstimator, - L::Target: Logger - { + ) -> Result<(), LocalHTLCFailureReason> { if self.context.channel_state.is_local_shutdown_sent() { return Err(LocalHTLCFailureReason::ChannelClosed) } @@ -11094,9 +11372,7 @@ where } #[rustfmt::skip] - fn check_get_channel_ready(&mut self, height: u32, logger: &L) -> Option - where L::Target: Logger - { + fn check_get_channel_ready(&mut self, height: u32, logger: &L) -> Option { // Called: // * always when a new block/transactions are confirmed with the new height // * when funding is signed with a height of 0 @@ -11153,9 +11429,9 @@ where } #[rustfmt::skip] - fn get_channel_ready( + fn get_channel_ready( &mut self, logger: &L - ) -> Option where L::Target: Logger { + ) -> Option { if self.holder_commitment_point.can_advance() { self.context.signer_pending_channel_ready = false; Some(msgs::ChannelReady { @@ -11175,14 +11451,10 @@ where } /// Returns `Some` if a splice [`FundingScope`] was promoted. 
- fn maybe_promote_splice_funding( + fn maybe_promote_splice_funding( &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, block_height: u32, logger: &L, - ) -> Option - where - NS::Target: NodeSigner, - L::Target: Logger, - { + ) -> Option { debug_assert!(self.pending_splice.is_some()); let pending_splice = self.pending_splice.as_mut().unwrap(); @@ -11264,7 +11536,15 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); let monitor_update = self.push_ret_blockable_mon_update(monitor_update); let announcement_sigs = @@ -11288,14 +11568,10 @@ where /// In the first case, we store the confirmation height and calculating the short channel id. /// In the second, we simply return an Err indicating we need to be force-closed now. #[rustfmt::skip] - pub fn transactions_confirmed( + pub fn transactions_confirmed( &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData, chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L - ) -> Result<(Option, Option), ClosureReason> - where - NS::Target: NodeSigner, - L::Target: Logger - { + ) -> Result<(Option, Option), ClosureReason> { for &(index_in_block, tx) in txdata.iter() { let mut confirmed_tx = ConfirmedTransaction::from(tx); @@ -11386,14 +11662,10 @@ where /// /// May return some HTLCs (and their payment_hash) which have timed out and should be failed /// back. 
- pub fn best_block_updated( + pub fn best_block_updated( &mut self, height: u32, highest_header_time: Option, chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L, - ) -> Result - where - NS::Target: NodeSigner, - L::Target: Logger, - { + ) -> Result { self.do_best_block_updated( height, highest_header_time, @@ -11403,14 +11675,10 @@ where } #[rustfmt::skip] - fn do_best_block_updated( + fn do_best_block_updated( &mut self, height: u32, highest_header_time: Option, chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L - ) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason> - where - NS::Target: NodeSigner, - L::Target: Logger - { + ) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason> { let mut timed_out_htlcs = Vec::new(); // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to // forward an HTLC when our counterparty should almost certainly just fail it for expiring @@ -11433,9 +11701,13 @@ where } // Check if the funding transaction was unconfirmed + let original_scid = self.funding.short_channel_id; + let was_confirmed = self.funding.funding_tx_confirmed_in.is_some(); let funding_tx_confirmations = self.funding.get_funding_tx_confirmations(height); if funding_tx_confirmations == 0 { self.funding.funding_tx_confirmation_height = 0; + self.funding.short_channel_id = None; + self.funding.funding_tx_confirmed_in = None; } if let Some(channel_ready) = self.check_get_channel_ready(height, logger) { @@ -11450,18 +11722,33 @@ where self.context.channel_state.is_our_channel_ready() { // If we've sent channel_ready (or have both sent and received channel_ready), and - // the funding transaction has become unconfirmed, - // close the channel and hope we can get the latest state on chain (because presumably - // the funding transaction is at least still in the mempool of most nodes). 
+ // the funding transaction has become unconfirmed, we'll probably get a new SCID when + // it re-confirms. + // + // Worse, if the funding has un-confirmed we could have accepted some HTLC(s) over it + // and are now at risk of double-spend. While its possible, even likely, that this is + // just a trivial reorg and we should wait to see the new block connected in the next + // call, its also possible we've been double-spent. To avoid further loss of funds, we + // need some kind of method to freeze the channel and avoid accepting further HTLCs, + // but absent such a method, we just force-close. // - // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or - // 0-conf channel, but not doing so may lead to the - // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have - // to. - if funding_tx_confirmations == 0 && self.funding.funding_tx_confirmed_in.is_some() { - let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.", - self.context.minimum_depth.unwrap(), funding_tx_confirmations); - return Err(ClosureReason::ProcessingError { err: err_reason }); + // The one exception we make is for 0-conf channels, which we decided to trust anyway, + // in which case we simply track the previous SCID as a `historical_scids` the same as + // after a channel is spliced. + if funding_tx_confirmations == 0 && was_confirmed { + if let Some(scid) = original_scid { + self.context.historical_scids.push(scid); + } else { + debug_assert!(false); + } + if self.context.minimum_depth(&self.funding).expect("set for a ready channel") > 0 { + // Reset the original short_channel_id so that we'll generate a closure + // `channel_update` broadcast event. 
+ self.funding.short_channel_id = original_scid; + let err_reason = format!("Funding transaction was un-confirmed, originally locked at {} confs.", + self.context.minimum_depth.unwrap()); + return Err(ClosureReason::ProcessingError { err: err_reason }); + } } } else if !self.funding.is_outbound() && self.funding.funding_tx_confirmed_in.is_none() && height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { @@ -11567,12 +11854,9 @@ where /// before the channel has reached channel_ready or splice_locked, and we can just wait for more /// blocks. #[rustfmt::skip] - pub fn transaction_unconfirmed( + pub fn transaction_unconfirmed( &mut self, txid: &Txid, logger: &L, - ) -> Result<(), ClosureReason> - where - L::Target: Logger, - { + ) -> Result<(), ClosureReason> { let unconfirmed_funding = self .funding_and_pending_funding_iter_mut() .find(|funding| funding.get_funding_txid() == Some(*txid)); @@ -11617,9 +11901,9 @@ where /// /// [`ChannelReady`]: crate::ln::msgs::ChannelReady #[rustfmt::skip] - fn get_channel_announcement( + fn get_channel_announcement( &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, - ) -> Result where NS::Target: NodeSigner { + ) -> Result { if !self.context.config.announce_for_forwarding { return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned())); } @@ -11649,14 +11933,10 @@ where } #[rustfmt::skip] - fn get_announcement_sigs( + fn get_announcement_sigs( &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block_height: u32, logger: &L - ) -> Option - where - NS::Target: NodeSigner, - L::Target: Logger - { + ) -> Option { if self.funding.funding_tx_confirmation_height == 0 || self.funding.funding_tx_confirmation_height + 5 > best_block_height { return None; } @@ -11723,9 +12003,9 @@ where /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are /// available. 
#[rustfmt::skip] - fn sign_channel_announcement( + fn sign_channel_announcement( &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement - ) -> Result where NS::Target: NodeSigner { + ) -> Result { if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs { let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node) .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?); @@ -11760,10 +12040,10 @@ where /// channel_announcement message which we can broadcast and storing our counterparty's /// signatures for later reconstruction/rebroadcast of the channel_announcement. #[rustfmt::skip] - pub fn announcement_signatures( + pub fn announcement_signatures( &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, msg: &msgs::AnnouncementSignatures, user_config: &UserConfig - ) -> Result where NS::Target: NodeSigner { + ) -> Result { let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?; let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]); @@ -11791,9 +12071,9 @@ where /// Gets a signed channel_announcement for this channel, if we previously received an /// announcement_signatures from our counterparty. 
#[rustfmt::skip] - pub fn get_signed_channel_announcement( + pub fn get_signed_channel_announcement( &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig - ) -> Option where NS::Target: NodeSigner { + ) -> Option { if self.funding.funding_tx_confirmation_height == 0 || self.funding.funding_tx_confirmation_height + 5 > best_block_height { return None; } @@ -11863,7 +12143,7 @@ where /// May panic if called on a channel that wasn't immediately-previously /// self.remove_uncommitted_htlcs_and_mark_paused()'d #[rustfmt::skip] - fn get_channel_reestablish(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger { + fn get_channel_reestablish(&mut self, logger: &L) -> msgs::ChannelReestablish { assert!(self.context.channel_state.is_peer_disconnected()); assert_ne!(self.context.counterparty_next_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); // This is generally the first function which gets called on any given channel once we're @@ -11920,13 +12200,10 @@ where /// Includes the witness weight for this input (e.g. P2WPKH_WITNESS_WEIGHT=109 for typical P2WPKH inputs). /// - `change_script`: an option change output script. If `None` and needed, one will be /// generated by `SignerProvider::get_destination_script`. 
- pub fn splice_channel( + pub fn splice_channel( &mut self, contribution: SpliceContribution, funding_feerate_per_kw: u32, locktime: u32, logger: &L, - ) -> Result, APIError> - where - L::Target: Logger, - { + ) -> Result, APIError> { if self.holder_commitment_point.current_point().is_none() { return Err(APIError::APIMisuseError { err: format!( @@ -11956,7 +12233,7 @@ where }); } - let our_funding_contribution = contribution.value(); + let our_funding_contribution = contribution.net_value(); if our_funding_contribution == SignedAmount::ZERO { return Err(APIError::APIMisuseError { err: format!( @@ -12187,7 +12464,7 @@ where fn validate_splice_contributions( &self, our_funding_contribution: SignedAmount, their_funding_contribution: SignedAmount, ) -> Result<(), String> { - if our_funding_contribution.abs() > SignedAmount::MAX_MONEY { + if our_funding_contribution.unsigned_abs() > Amount::MAX_MONEY { return Err(format!( "Channel {} cannot be spliced; our {} contribution exceeds the total bitcoin supply", self.context.channel_id(), @@ -12195,7 +12472,7 @@ where )); } - if their_funding_contribution.abs() > SignedAmount::MAX_MONEY { + if their_funding_contribution.unsigned_abs() > Amount::MAX_MONEY { return Err(format!( "Channel {} cannot be spliced; their {} contribution exceeds the total bitcoin supply", self.context.channel_id(), @@ -12269,14 +12546,10 @@ where Ok(()) } - pub(crate) fn splice_init( + pub(crate) fn splice_init( &mut self, msg: &msgs::SpliceInit, our_funding_contribution_satoshis: i64, signer_provider: &SP, entropy_source: &ES, holder_node_id: &PublicKey, logger: &L, - ) -> Result - where - ES::Target: EntropySource, - L::Target: Logger, - { + ) -> Result { let our_funding_contribution = SignedAmount::from_sat(our_funding_contribution_satoshis); let splice_funding = self.validate_splice_init(msg, our_funding_contribution)?; @@ -12340,14 +12613,10 @@ where }) } - pub(crate) fn splice_ack( + pub(crate) fn splice_ack( &mut self, msg: &msgs::SpliceAck, 
signer_provider: &SP, entropy_source: &ES, holder_node_id: &PublicKey, logger: &L, - ) -> Result, ChannelError> - where - ES::Target: EntropySource, - L::Target: Logger, - { + ) -> Result, ChannelError> { let splice_funding = self.validate_splice_ack(msg)?; log_info!( @@ -12494,14 +12763,10 @@ where Ok((holder_balance_floor, counterparty_balance_floor)) } - pub fn splice_locked( + pub fn splice_locked( &mut self, msg: &msgs::SpliceLocked, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, block_height: u32, logger: &L, - ) -> Result, ChannelError> - where - NS::Target: NodeSigner, - L::Target: Logger, - { + ) -> Result, ChannelError> { log_info!(logger, "Received splice_locked txid {} from our peer", msg.splice_txid,); let pending_splice = match self.pending_splice.as_mut() { @@ -12542,15 +12807,12 @@ where /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the /// commitment update. - pub fn queue_add_htlc( + pub fn queue_add_htlc( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option, - blinding_point: Option, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result<(), (LocalHTLCFailureReason, String)> - where - F::Target: FeeEstimator, - L::Target: Logger, - { + blinding_point: Option, accountable: bool, + fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + ) -> Result<(), (LocalHTLCFailureReason, String)> { self.send_htlc( amount_msat, payment_hash, @@ -12562,6 +12824,7 @@ where blinding_point, // This method is only called for forwarded HTLCs, which are never held at the next hop false, + accountable, fee_estimator, logger, ) @@ -12589,16 +12852,12 @@ where /// on this [`FundedChannel`] if `force_holding_cell` is false. /// /// `Err`'s will always be temporary channel failures. 
- fn send_htlc( + fn send_htlc( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, skimmed_fee_msat: Option, blinding_point: Option, hold_htlc: bool, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result - where - F::Target: FeeEstimator, - L::Target: Logger, - { + accountable: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + ) -> Result { if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) || self.context.channel_state.is_local_shutdown_sent() || self.context.channel_state.is_remote_shutdown_sent() @@ -12675,6 +12934,7 @@ where skimmed_fee_msat, blinding_point, hold_htlc: hold_htlc.then(|| ()), + accountable, }); return Ok(false); } @@ -12697,6 +12957,7 @@ where skimmed_fee_msat, send_timestamp, hold_htlc: hold_htlc.then(|| ()), + accountable, }); self.context.next_holder_htlc_id += 1; @@ -12704,12 +12965,9 @@ where } #[rustfmt::skip] - pub(super) fn get_available_balances( + pub(super) fn get_available_balances( &self, fee_estimator: &LowerBoundedFeeEstimator, - ) -> AvailableBalances - where - F::Target: FeeEstimator, - { + ) -> AvailableBalances { core::iter::once(&self.funding) .chain(self.pending_funding().iter()) .map(|funding| self.context.get_available_balances_for_scope(funding, fee_estimator)) @@ -12724,10 +12982,7 @@ where .expect("At least one FundingScope is always provided") } - fn build_commitment_no_status_check(&mut self, logger: &L) -> ChannelMonitorUpdate - where - L::Target: Logger, - { + fn build_commitment_no_status_check(&mut self, logger: &L) -> ChannelMonitorUpdate { log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed..."); // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we // fail to generate this, we still are at least at a position where upgrading their status @@ -12845,12 +13100,9 @@ where } #[rustfmt::skip] - fn 
build_commitment_no_state_update( + fn build_commitment_no_state_update( &self, funding: &FundingScope, logger: &L, - ) -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction) - where - L::Target: Logger, - { + ) -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction) { let commitment_data = self.context.build_commitment_transaction( funding, self.context.counterparty_next_commitment_transaction_number, &self.context.counterparty_next_commitment_point.unwrap(), false, true, logger, @@ -12862,12 +13114,9 @@ where /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed /// generation when we shouldn't change HTLC/channel state. - fn send_commitment_no_state_update( + fn send_commitment_no_state_update( &self, logger: &L, - ) -> Result, ChannelError> - where - L::Target: Logger, - { + ) -> Result, ChannelError> { core::iter::once(&self.funding) .chain(self.pending_funding().iter()) .map(|funding| self.send_commitment_no_state_update_for_funding(funding, logger)) @@ -12875,12 +13124,9 @@ where } #[rustfmt::skip] - fn send_commitment_no_state_update_for_funding( + fn send_commitment_no_state_update_for_funding( &self, funding: &FundingScope, logger: &L, - ) -> Result - where - L::Target: Logger, - { + ) -> Result { // Get the fee tests from `build_commitment_no_state_update` #[cfg(any(test, fuzzing))] self.build_commitment_no_state_update(funding, logger); @@ -12943,15 +13189,12 @@ where /// /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info. 
- pub fn send_htlc_and_commit( + pub fn send_htlc_and_commit( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option, - hold_htlc: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, - ) -> Result, ChannelError> - where - F::Target: FeeEstimator, - L::Target: Logger, - { + hold_htlc: bool, accountable: bool, fee_estimator: &LowerBoundedFeeEstimator, + logger: &L, + ) -> Result, ChannelError> { let send_res = self.send_htlc( amount_msat, payment_hash, @@ -12962,6 +13205,7 @@ where skimmed_fee_msat, None, hold_htlc, + accountable, fee_estimator, logger, ); @@ -12969,7 +13213,15 @@ where let can_add_htlc = send_res.map_err(|(_, msg)| ChannelError::Ignore(msg))?; if can_add_htlc { let monitor_update = self.build_commitment_no_status_check(logger); - self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + true, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); Ok(self.push_ret_blockable_mon_update(monitor_update)) } else { Ok(None) @@ -12995,13 +13247,16 @@ where /// Begins the shutdown process, getting a message for the remote peer and returning all /// holding cell HTLCs for payment failure. 
- pub fn get_shutdown( + pub fn get_shutdown( &mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option, override_shutdown_script: Option, + logger: &L, ) -> Result< (msgs::Shutdown, Option, Vec<(HTLCSource, PaymentHash)>), APIError, > { + let logger = WithChannelContext::from(logger, &self.context, None); + if self.context.channel_state.is_local_stfu_sent() || self.context.channel_state.is_remote_stfu_sent() || self.context.channel_state.is_quiescent() @@ -13086,7 +13341,15 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + &&logger, + ); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -13143,12 +13406,9 @@ where } #[rustfmt::skip] - pub fn propose_quiescence( + pub fn propose_quiescence( &mut self, logger: &L, action: QuiescentAction, - ) -> Result, &'static str> - where - L::Target: Logger, - { + ) -> Result, &'static str> { log_debug!(logger, "Attempting to initiate quiescence"); if !self.context.is_usable() { @@ -13184,10 +13444,7 @@ where // Assumes we are either awaiting quiescence or our counterparty has requested quiescence. 
#[rustfmt::skip] - pub fn send_stfu(&mut self, logger: &L) -> Result - where - L::Target: Logger, - { + pub fn send_stfu(&mut self, logger: &L) -> Result { debug_assert!(!self.context.channel_state.is_local_stfu_sent()); debug_assert!( self.context.channel_state.is_awaiting_quiescence() @@ -13222,9 +13479,9 @@ where } #[rustfmt::skip] - pub fn stfu( + pub fn stfu( &mut self, msg: &msgs::Stfu, logger: &L - ) -> Result, ChannelError> where L::Target: Logger { + ) -> Result, ChannelError> { if self.context.channel_state.is_quiescent() { return Err(ChannelError::Warn("Channel is already quiescent".to_owned())); } @@ -13325,12 +13582,9 @@ where Ok(None) } - pub fn try_send_stfu( + pub fn try_send_stfu( &mut self, logger: &L, - ) -> Result, ChannelError> - where - L::Target: Logger, - { + ) -> Result, ChannelError> { // We must never see both stfu flags set, we always set the quiescent flag instead. debug_assert!( !(self.context.channel_state.is_local_stfu_sent() @@ -13401,10 +13655,7 @@ where } /// A not-yet-funded outbound (from holder) channel using V1 channel establishment. -pub(super) struct OutboundV1Channel -where - SP::Target: SignerProvider, -{ +pub(super) struct OutboundV1Channel { pub funding: FundingScope, pub context: ChannelContext, pub unfunded_context: UnfundedChannelContext, @@ -13414,25 +13665,18 @@ where pub signer_pending_open_channel: bool, } -impl OutboundV1Channel -where - SP::Target: SignerProvider, -{ +impl OutboundV1Channel { pub fn abandon_unfunded_chan(&mut self, closure_reason: ClosureReason) -> ShutdownResult { self.context.force_shutdown(&self.funding, closure_reason) } #[allow(dead_code)] // TODO(dual_funding): Remove once opending V2 channels is enabled. 
#[rustfmt::skip] - pub fn new( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64, temporary_channel_id: Option, logger: L - ) -> Result, APIError> - where ES::Target: EntropySource, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result, APIError> { let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { // Protocol level safety check in place, although it should never happen because @@ -13479,7 +13723,7 @@ where /// Only allowed after [`FundingScope::channel_transaction_parameters`] is set. #[rustfmt::skip] - fn get_funding_created_msg(&mut self, logger: &L) -> Option where L::Target: Logger { + fn get_funding_created_msg(&mut self, logger: &L) -> Option { let commitment_data = self.context.build_commitment_transaction(&self.funding, self.context.counterparty_next_commitment_transaction_number, &self.context.counterparty_next_commitment_point.unwrap(), false, false, logger); @@ -13524,8 +13768,8 @@ where /// Do NOT broadcast the funding transaction until after a successful funding_signed call! /// If an Err is returned, it is a ChannelError::Close. 
#[rustfmt::skip] - pub fn get_funding_created(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L) - -> Result, (Self, ChannelError)> where L::Target: Logger { + pub fn get_funding_created(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L) + -> Result, (Self, ChannelError)> { if !self.funding.is_outbound() { panic!("Tried to create outbound funding_created message on an inbound channel!"); } @@ -13564,14 +13808,10 @@ where /// not of our ability to open any channel at all. Thus, on error, we should first call this /// and see if we get a new `OpenChannel` message, otherwise the channel is failed. #[rustfmt::skip] - pub(crate) fn maybe_handle_error_without_close( + pub(crate) fn maybe_handle_error_without_close( &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, user_config: &UserConfig, their_features: &InitFeatures, - ) -> Result - where - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result { self.context.maybe_downgrade_channel_features( &mut self.funding, fee_estimator, user_config, their_features, )?; @@ -13585,9 +13825,9 @@ where } #[rustfmt::skip] - pub fn get_open_channel( + pub fn get_open_channel( &mut self, chain_hash: ChainHash, _logger: &L - ) -> Option where L::Target: Logger { + ) -> Option { if !self.funding.is_outbound() { panic!("Tried to open a channel for an inbound channel?"); } @@ -13657,34 +13897,50 @@ where /// Handles a funding_signed message from the remote end. /// If this call is successful, broadcast the funding transaction (and not before!) 
- #[rustfmt::skip] - pub fn funding_signed( - mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(FundedChannel, ChannelMonitor<::EcdsaSigner>), (OutboundV1Channel, ChannelError)> - where - L::Target: Logger - { + pub fn funding_signed( + mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, + logger: &L, + ) -> Result< + (FundedChannel, ChannelMonitor), + (OutboundV1Channel, ChannelError), + > { if !self.funding.is_outbound() { - return Err((self, ChannelError::close("Received funding_signed for an inbound channel?".to_owned()))); + let err = "Received funding_signed for an inbound channel?"; + return Err((self, ChannelError::close(err.to_owned()))); } if !matches!(self.context.channel_state, ChannelState::FundingNegotiated(_)) { - return Err((self, ChannelError::close("Received funding_signed in strange state!".to_owned()))); + let err = "Received funding_signed in strange state!"; + return Err((self, ChannelError::close(err.to_owned()))); } let mut holder_commitment_point = match self.unfunded_context.holder_commitment_point { Some(point) => point, - None => return Err((self, ChannelError::close("Received funding_signed before our first commitment point was available".to_owned()))), + None => { + let err = "Received funding_signed before our first commitment point was available"; + return Err((self, ChannelError::close(err.to_owned()))); + }, }; - self.context.assert_no_commitment_advancement(holder_commitment_point.next_transaction_number(), "funding_signed"); + self.context.assert_no_commitment_advancement( + holder_commitment_point.next_transaction_number(), + "funding_signed", + ); let (channel_monitor, _) = match self.initial_commitment_signed( - self.context.channel_id(), msg.signature, - &mut holder_commitment_point, best_block, signer_provider, logger + self.context.channel_id(), + msg.signature, + &mut holder_commitment_point, + best_block, + signer_provider, + logger, ) 
{ Ok(channel_monitor) => channel_monitor, Err(err) => return Err((self, err)), }; - log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); + log_info!( + logger, + "Received funding_signed from peer for channel {}", + &self.context.channel_id() + ); let mut channel = FundedChannel { funding: self.funding, @@ -13696,16 +13952,24 @@ where let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some() || channel.context.signer_pending_channel_ready; - channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + channel.monitor_updating_paused( + false, + false, + need_channel_ready, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); Ok((channel, channel_monitor)) } /// Indicates that the signer may have some signatures for us, so we should retry if we're /// blocked. #[rustfmt::skip] - pub fn signer_maybe_unblocked( + pub fn signer_maybe_unblocked( &mut self, chain_hash: ChainHash, logger: &L - ) -> (Option, Option) where L::Target: Logger { + ) -> (Option, Option) { // If we were pending a commitment point, retry the signer and advance to an // available state. if self.unfunded_context.holder_commitment_point.is_none() { @@ -13741,10 +14005,7 @@ where } /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment. -pub(super) struct InboundV1Channel -where - SP::Target: SignerProvider, -{ +pub(super) struct InboundV1Channel { pub funding: FundingScope, pub context: ChannelContext, pub unfunded_context: UnfundedChannelContext, @@ -13782,23 +14043,16 @@ pub(super) fn channel_type_from_open_channel( Ok(channel_type.clone()) } -impl InboundV1Channel -where - SP::Target: SignerProvider, -{ +impl InboundV1Channel { /// Creates a new channel from a remote sides' request for one. /// Assumes chain_hash has already been checked and corresponds with what we expect! 
#[rustfmt::skip] - pub fn new( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig, current_chain_height: u32, logger: &L, is_0conf: bool, - ) -> Result, ChannelError> - where ES::Target: EntropySource, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result, ChannelError> { let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id), None); // First check the channel type is known, failing before we do anything else if we don't @@ -13846,10 +14100,7 @@ where /// should be sent back to the counterparty node. /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - pub fn accept_inbound_channel(&mut self, logger: &L) -> Option - where - L::Target: Logger, - { + pub fn accept_inbound_channel(&mut self, logger: &L) -> Option { if self.funding.is_outbound() { panic!("Tried to send accept_channel for an outbound channel?"); } @@ -13872,9 +14123,9 @@ where /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel #[rustfmt::skip] - fn generate_accept_channel_message( + fn generate_accept_channel_message( &mut self, _logger: &L - ) -> Option where L::Target: Logger { + ) -> Option { let first_per_commitment_point = match self.unfunded_context.holder_commitment_point { Some(holder_commitment_point) if holder_commitment_point.can_advance() => { self.signer_pending_accept_channel = false; @@ -13920,24 +14171,22 @@ where /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel #[cfg(test)] - pub fn get_accept_channel_message( + pub fn get_accept_channel_message( &mut self, logger: &L, - ) -> Option - where - L::Target: Logger, - { + ) -> Option { self.generate_accept_channel_message(logger) } - #[rustfmt::skip] - pub fn funding_created( - mut self, msg: &msgs::FundingCreated, 
best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(FundedChannel, Option, ChannelMonitor<::EcdsaSigner>), (Self, ChannelError)> - where - L::Target: Logger - { + pub fn funding_created( + mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, + logger: &L, + ) -> Result< + (FundedChannel, Option, ChannelMonitor), + (Self, ChannelError), + > { if self.funding.is_outbound() { - return Err((self, ChannelError::close("Received funding_created for an outbound channel?".to_owned()))); + let err = "Received funding_created for an outbound channel?"; + return Err((self, ChannelError::close(err.to_owned()))); } if !matches!( self.context.channel_state, ChannelState::NegotiatingFunding(flags) @@ -13946,31 +14195,50 @@ where // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT // remember the channel, so it's safe to just send an error_message here and drop the // channel. - return Err((self, ChannelError::close("Received funding_created after we got the channel!".to_owned()))); + let err = "Received funding_created after we got the channel!"; + return Err((self, ChannelError::close(err.to_owned()))); } let mut holder_commitment_point = match self.unfunded_context.holder_commitment_point { Some(point) => point, - None => return Err((self, ChannelError::close("Received funding_created before our first commitment point was available".to_owned()))), + None => { + let err = + "Received funding_created before our first commitment point was available"; + return Err((self, ChannelError::close(err.to_owned()))); + }, }; - self.context.assert_no_commitment_advancement(holder_commitment_point.next_transaction_number(), "funding_created"); + self.context.assert_no_commitment_advancement( + holder_commitment_point.next_transaction_number(), + "funding_created", + ); let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index }; self.funding.channel_transaction_parameters.funding_outpoint 
= Some(funding_txo); - let (channel_monitor, counterparty_initial_commitment_tx) = match self.initial_commitment_signed( - ChannelId::v1_from_funding_outpoint(funding_txo), msg.signature, - &mut holder_commitment_point, best_block, signer_provider, logger - ) { + let (channel_monitor, counterparty_initial_commitment_tx) = match self + .initial_commitment_signed( + ChannelId::v1_from_funding_outpoint(funding_txo), + msg.signature, + &mut holder_commitment_point, + best_block, + signer_provider, + logger, + ) { Ok(channel_monitor) => channel_monitor, Err(err) => return Err((self, err)), }; let funding_signed = self.context.get_funding_signed_msg( - &self.funding.channel_transaction_parameters, logger, counterparty_initial_commitment_tx + &self.funding.channel_transaction_parameters, + logger, + counterparty_initial_commitment_tx, ); - log_info!(logger, "{} funding_signed for peer for channel {}", - if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id()); + log_info!( + logger, + "{} funding_signed for peer for channel {}", + if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, + &self.context.channel_id() + ); // Promote the channel to a full-fledged one now that we have updated the state and have a // `ChannelMonitor`. @@ -13983,7 +14251,15 @@ where }; let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some() || channel.context.signer_pending_channel_ready; - channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + channel.monitor_updating_paused( + false, + false, + need_channel_ready, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); Ok((channel, funding_signed, channel_monitor)) } @@ -13991,9 +14267,9 @@ where /// Indicates that the signer may have some signatures for us, so we should retry if we're /// blocked. 
#[rustfmt::skip] - pub fn signer_maybe_unblocked( + pub fn signer_maybe_unblocked( &mut self, logger: &L - ) -> Option where L::Target: Logger { + ) -> Option { if self.unfunded_context.holder_commitment_point.is_none() { self.unfunded_context.holder_commitment_point = HolderCommitmentPoint::new(&self.context.holder_signer, &self.context.secp_ctx); } @@ -14010,10 +14286,7 @@ where } // A not-yet-funded channel using V2 channel establishment. -pub(super) struct PendingV2Channel -where - SP::Target: SignerProvider, -{ +pub(super) struct PendingV2Channel { pub funding: FundingScope, pub context: ChannelContext, pub unfunded_context: UnfundedChannelContext, @@ -14022,23 +14295,16 @@ where pub interactive_tx_constructor: Option, } -impl PendingV2Channel -where - SP::Target: SignerProvider, -{ +impl PendingV2Channel { #[allow(dead_code)] // TODO(dual_funding): Remove once creating V2 channels is enabled. #[rustfmt::skip] - pub fn new_outbound( + pub fn new_outbound( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64, funding_inputs: Vec, user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64, funding_confirmation_target: ConfirmationTarget, logger: L, - ) -> Result - where ES::Target: EntropySource, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result { let channel_keys_id = signer_provider.generate_channel_keys_id(false, user_id); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); @@ -14101,13 +14367,10 @@ where /// If we receive an error message, it may only be a rejection of the channel type we tried, /// not of our ability to open any channel at all. Thus, on error, we should first call this /// and see if we get a new `OpenChannelV2` message, otherwise the channel is failed. 
- pub(crate) fn maybe_handle_error_without_close( + pub(crate) fn maybe_handle_error_without_close( &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator, user_config: &UserConfig, their_features: &InitFeatures, - ) -> Result - where - F::Target: FeeEstimator, - { + ) -> Result { self.context.maybe_downgrade_channel_features( &mut self.funding, fee_estimator, @@ -14177,16 +14440,12 @@ where /// TODO(dual_funding): Allow contributions, pass intended amount and inputs #[allow(dead_code)] // TODO(dual_funding): Remove once V2 channels is enabled. #[rustfmt::skip] - pub fn new_inbound( + pub fn new_inbound( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, holder_node_id: PublicKey, counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, their_features: &InitFeatures, msg: &msgs::OpenChannelV2, user_id: u128, config: &UserConfig, current_chain_height: u32, logger: &L, - ) -> Result - where ES::Target: EntropySource, - F::Target: FeeEstimator, - L::Target: Logger, - { + ) -> Result { // TODO(dual_funding): Take these as input once supported let (our_funding_contribution, our_funding_contribution_sats) = (SignedAmount::ZERO, 0u64); let our_funding_inputs = Vec::new(); @@ -14443,10 +14702,7 @@ impl Readable for AnnouncementSigsState { } } -impl Writeable for FundedChannel -where - SP::Target: SignerProvider, -{ +impl Writeable for FundedChannel { fn write(&self, writer: &mut W) -> Result<(), io::Error> { // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been // called. 
@@ -14518,6 +14774,7 @@ where } } let mut removed_htlc_attribution_data: Vec<&Option> = Vec::new(); + let mut inbound_committed_update_adds: Vec<&InboundUpdateAdd> = Vec::new(); (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { @@ -14537,8 +14794,9 @@ where 2u8.write(writer)?; htlc_resolution.write(writer)?; }, - &InboundHTLCState::Committed => { + &InboundHTLCState::Committed { ref update_add_htlc } => { 3u8.write(writer)?; + inbound_committed_update_adds.push(update_add_htlc); }, &InboundHTLCState::LocalRemoved(ref removal_reason) => { 4u8.write(writer)?; @@ -14575,6 +14833,7 @@ where let mut pending_outbound_skimmed_fees: Vec> = Vec::new(); let mut pending_outbound_blinding_points: Vec> = Vec::new(); let mut pending_outbound_held_htlc_flags: Vec> = Vec::new(); + let mut pending_outbound_accountable: Vec = Vec::new(); (self.context.pending_outbound_htlcs.len() as u64).write(writer)?; for htlc in self.context.pending_outbound_htlcs.iter() { @@ -14618,6 +14877,7 @@ where pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat); pending_outbound_blinding_points.push(htlc.blinding_point); pending_outbound_held_htlc_flags.push(htlc.hold_htlc); + pending_outbound_accountable.push(htlc.accountable); } let holding_cell_htlc_update_count = self.context.holding_cell_htlc_updates.len(); @@ -14629,6 +14889,8 @@ where Vec::with_capacity(holding_cell_htlc_update_count); let mut holding_cell_held_htlc_flags: Vec> = Vec::with_capacity(holding_cell_htlc_update_count); + let mut holding_cell_accountable_flags: Vec = + Vec::with_capacity(holding_cell_htlc_update_count); // Vec of (htlc_id, failure_code, sha256_of_onion) let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new(); (holding_cell_htlc_update_count as u64).write(writer)?; @@ -14643,6 +14905,7 @@ where blinding_point, skimmed_fee_msat, hold_htlc, + accountable, 
} => { 0u8.write(writer)?; amount_msat.write(writer)?; @@ -14654,6 +14917,7 @@ where holding_cell_skimmed_fees.push(skimmed_fee_msat); holding_cell_blinding_points.push(blinding_point); holding_cell_held_htlc_flags.push(hold_htlc); + holding_cell_accountable_flags.push(accountable); }, &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, @@ -14860,6 +15124,9 @@ where let pending_splice = self.pending_splice.as_ref().filter(|_| !self.should_reset_pending_splice_state(false)); + let monitor_pending_tx_signatures = + self.context.monitor_pending_tx_signatures.then_some(()); + write_tlv_fields!(writer, { (0, self.context.announcement_sigs, option), // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a @@ -14879,6 +15146,7 @@ where (9, self.context.target_closing_feerate_sats_per_kw, option), (10, monitor_pending_update_adds, option), // Added in 0.0.122 (11, self.context.monitor_pending_finalized_fulfills, required_vec), + (12, monitor_pending_tx_signatures, option), // Added in 0.3 (13, self.context.channel_creation_height, required), (15, preimages, required_vec), (17, self.context.announcement_sigs_state, required), @@ -14914,17 +15182,17 @@ where (69, holding_cell_held_htlc_flags, optional_vec), // Added in 0.2 (71, holder_commitment_point_previous_revoked, option), // Added in 0.3 (73, holder_commitment_point_last_revoked, option), // Added in 0.3 + (75, inbound_committed_update_adds, optional_vec), + (77, holding_cell_accountable_flags, optional_vec), // Added in 0.3 + (79, pending_outbound_accountable, optional_vec), // Added in 0.3 }); Ok(()) } } -impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, &'c ChannelTypeFeatures)> - for FundedChannel -where - ES::Target: EntropySource, - SP::Target: SignerProvider, +impl<'a, 'b, 'c, ES: EntropySource, SP: SignerProvider> + ReadableArgs<(&'a ES, &'b SP, &'c ChannelTypeFeatures)> for FundedChannel { fn read( reader: &mut R, args: (&'a ES, &'b SP, &'c 
ChannelTypeFeatures), @@ -14997,7 +15265,7 @@ where }; InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) }, - 3 => InboundHTLCState::Committed, + 3 => InboundHTLCState::Committed { update_add_htlc: InboundUpdateAdd::Legacy }, 4 => { let reason = match ::read(reader)? { 0 => InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { @@ -15081,6 +15349,7 @@ where blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); } @@ -15100,6 +15369,7 @@ where skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: false, }, 1 => HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: Readable::read(reader)?, @@ -15301,6 +15571,11 @@ where let mut pending_outbound_held_htlc_flags_opt: Option>> = None; let mut holding_cell_held_htlc_flags_opt: Option>> = None; + let mut inbound_committed_update_adds_opt: Option> = None; + let mut holding_cell_accountable: Option> = None; + let mut pending_outbound_accountable: Option> = None; + + let mut monitor_pending_tx_signatures: Option<()> = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -15315,6 +15590,7 @@ where (9, target_closing_feerate_sats_per_kw, option), (10, monitor_pending_update_adds, option), // Added in 0.0.122 (11, monitor_pending_finalized_fulfills, optional_vec), + (12, monitor_pending_tx_signatures, option), // Added in 0.3 (13, channel_creation_height, required), (15, preimages, required_vec), // The preimages transitioned from optional to required in 0.2 (17, announcement_sigs_state, required), @@ -15350,6 +15626,9 @@ where (69, holding_cell_held_htlc_flags_opt, optional_vec), // Added in 0.2 (71, holder_commitment_point_previous_revoked_opt, option), // Added in 0.3 (73, holder_commitment_point_last_revoked_opt, option), // Added in 0.3 + (75, inbound_committed_update_adds_opt, optional_vec), + (77, holding_cell_accountable, optional_vec), // Added in 0.3 + (79, pending_outbound_accountable, optional_vec), // Added in 0.3 }); let 
holder_signer = signer_provider.derive_channel_signer(channel_keys_id); @@ -15473,7 +15752,40 @@ where return Err(DecodeError::InvalidValue); } } + if let Some(update_adds) = inbound_committed_update_adds_opt { + let mut iter = update_adds.into_iter(); + for htlc in pending_inbound_htlcs.iter_mut() { + if let InboundHTLCState::Committed { ref mut update_add_htlc } = htlc.state { + *update_add_htlc = iter.next().ok_or(DecodeError::InvalidValue)?; + } + } + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } + } + if let Some(accountable_htlcs) = holding_cell_accountable { + let mut iter = accountable_htlcs.into_iter(); + for htlc in holding_cell_htlc_updates.iter_mut() { + if let HTLCUpdateAwaitingACK::AddHTLC { ref mut accountable, .. } = htlc { + *accountable = iter.next().ok_or(DecodeError::InvalidValue)?; + } + } + // We expect all accountable HTLC signals to be consumed above + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } + } + if let Some(accountable_htlcs) = pending_outbound_accountable { + let mut iter = accountable_htlcs.into_iter(); + for htlc in pending_outbound_htlcs.iter_mut() { + htlc.accountable = iter.next().ok_or(DecodeError::InvalidValue)?; + } + // We expect all accountable HTLC signals to be consumed above + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } + } if let Some(attribution_data_list) = removed_htlc_attribution_data { let mut removed_htlcs = pending_inbound_htlcs.iter_mut().filter_map(|status| { if let InboundHTLCState::LocalRemoved(reason) = &mut status.state { @@ -15691,6 +16003,7 @@ where resend_order, + monitor_pending_tx_signatures: monitor_pending_tx_signatures.is_some(), monitor_pending_channel_ready, monitor_pending_revoke_and_ack, monitor_pending_commitment_signed, @@ -15811,12 +16124,9 @@ mod tests { use crate::ln::chan_utils::{self, commit_tx_fee_sat, ChannelTransactionParameters}; use crate::ln::channel::{ AwaitingChannelReadyFlags, ChannelState, 
FundedChannel, HTLCCandidate, HTLCInitiator, - HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundV1Channel, - OutboundHTLCOutput, OutboundHTLCState, OutboundV1Channel, - }; - use crate::ln::channel::{ - MAX_FUNDING_SATOSHIS_NO_WUMBO, MIN_THEIR_CHAN_RESERVE_SATOSHIS, - TOTAL_BITCOIN_SUPPLY_SATOSHIS, + HTLCUpdateAwaitingACK, InboundHTLCOutput, InboundHTLCState, InboundUpdateAdd, + InboundV1Channel, OutboundHTLCOutput, OutboundHTLCState, OutboundV1Channel, + MIN_THEIR_CHAN_RESERVE_SATOSHIS, }; use crate::ln::channel_keys::{RevocationBasepoint, RevocationKey}; use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; @@ -15855,6 +16165,10 @@ mod tests { use bitcoin::{ScriptBuf, WPubkeyHash, WitnessProgram, WitnessVersion}; use std::cmp; + fn dummy_inbound_update_add() -> InboundUpdateAdd { + InboundUpdateAdd::Legacy + } + #[test] #[rustfmt::skip] fn test_channel_state_order() { @@ -15869,15 +16183,6 @@ mod tests { assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete); } - #[test] - fn test_max_funding_satoshis_no_wumbo() { - assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000); - assert!( - MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS, - "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence" - ); - } - #[cfg(ldk_test_vectors)] struct Keys { signer: crate::sign::InMemorySigner, @@ -16022,7 +16327,8 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); - let config = UserConfig::default(); + let mut config = UserConfig::default(); + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); 
// Create Node B's channel by receiving Node A's open_channel message @@ -16057,7 +16363,7 @@ mod tests { amount_msat: htlc_amount_msat, payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()), cltv_expiry: 300000000, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput { @@ -16077,6 +16383,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass @@ -16111,7 +16418,8 @@ mod tests { let logger = TestLogger::new(); let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); - let config = UserConfig::default(); + let mut config = UserConfig::default(); + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); let commitment_tx_fee_0_htlcs = commit_tx_fee_sat(chan.context.feerate_per_kw, 0, chan.funding.get_channel_type()) * 1000; @@ -16532,6 +16840,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }; let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10]; for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() { @@ -16558,6 +16867,7 @@ mod tests { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: false, }; let dummy_holding_cell_claim_htlc = |attribution_data| HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: PaymentPreimage([42; 32]), @@ -16903,7 +17213,7 @@ mod tests { amount_msat: 1000000, cltv_expiry: 500, payment_hash: PaymentHash::from(payment_preimage_0), - state: 
InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); let payment_preimage_1 = @@ -16913,7 +17223,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); let payment_preimage_2 = @@ -16929,6 +17239,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); let payment_preimage_3 = @@ -16944,6 +17255,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); let payment_preimage_4 = @@ -16953,7 +17265,7 @@ mod tests { amount_msat: 4000000, cltv_expiry: 504, payment_hash: PaymentHash::from(payment_preimage_4), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); // commitment tx with all five HTLCs untrimmed (minimum feerate) @@ -17342,7 +17654,7 @@ mod tests { amount_msat: 2000000, cltv_expiry: 501, payment_hash: PaymentHash::from(payment_preimage_1), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); chan.context.pending_outbound_htlcs.clear(); @@ -17359,6 +17671,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput { @@ -17372,6 +17685,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8", @@ -17505,7 +17819,6 @@ mod tests { // Node id for alice and bob doesn't matter to our test vectors. 
let bob_node_id = crate::util::test_utils::pubkey(2); let mut config = UserConfig::default(); - config.manually_accept_inbound_channels = true; config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; let mut chan = OutboundV1Channel::<&Keys>::new( @@ -17593,7 +17906,7 @@ mod tests { amount_msat: 5000000, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, })); chan.context.pending_outbound_htlcs.extend( @@ -17613,6 +17926,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }), ); @@ -17656,7 +17970,7 @@ mod tests { amount_msat, cltv_expiry: 920150, payment_hash: PaymentHash::from(htlc_in_preimage), - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -17676,6 +17990,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }), ); @@ -17722,7 +18037,7 @@ mod tests { amount_msat: 100000, cltv_expiry: 920125, payment_hash: htlc_0_in_hash, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); let htlc_1_in_preimage = @@ -17740,7 +18055,7 @@ mod tests { amount_msat: 49900000, cltv_expiry: 920125, payment_hash: htlc_1_in_hash, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }); chan.context.pending_outbound_htlcs.extend( @@ -17758,6 +18073,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, } }), ); @@ -17792,7 +18108,7 @@ mod tests { amount_msat: 30000, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -17814,6 +18130,7 @@ mod 
tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }), ); @@ -17833,7 +18150,7 @@ mod tests { amount_msat: 29525, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -17850,6 +18167,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }, ), ); @@ -17870,7 +18188,7 @@ mod tests { amount_msat: 29525, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -17887,6 +18205,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }, ), ); @@ -17907,7 +18226,7 @@ mod tests { amount_msat: 29753, payment_hash, cltv_expiry: 920125, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }, )); @@ -17924,6 +18243,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }, ), ); @@ -17959,7 +18279,7 @@ mod tests { amount_msat, cltv_expiry, payment_hash, - state: InboundHTLCState::Committed, + state: InboundHTLCState::Committed { update_add_htlc: dummy_inbound_update_add() }, }), ); @@ -17984,6 +18304,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }), ); @@ -18254,6 +18575,13 @@ mod tests { FundingTxInput::new_p2wpkh(prevtx, 0).unwrap() } + fn funding_output_sats(output_value_sats: u64) -> TxOut { + TxOut { + value: Amount::from_sat(output_value_sats), + script_pubkey: ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()), + } + } + #[test] #[rustfmt::skip] fn test_check_v2_funding_inputs_sufficient() { @@ -18264,16 +18592,83 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 2278 } else { 2284 }; assert_eq!( check_v2_funding_inputs_sufficient( 
- 220_000, + Amount::from_sat(220_000), + &[ + funding_input_sats(200_000), + funding_input_sats(100_000), + ], + &[], + true, + true, + 2000, + ).unwrap(), + Amount::from_sat(expected_fee), + ); + } + + // Net splice-in + { + let expected_fee = if cfg!(feature = "grind_signatures") { 2526 } else { 2532 }; + assert_eq!( + check_v2_funding_inputs_sufficient( + Amount::from_sat(220_000), + &[ + funding_input_sats(200_000), + funding_input_sats(100_000), + ], + &[ + funding_output_sats(200_000), + ], + true, + true, + 2000, + ).unwrap(), + Amount::from_sat(expected_fee), + ); + } + + // Net splice-out + { + let expected_fee = if cfg!(feature = "grind_signatures") { 2526 } else { 2532 }; + assert_eq!( + check_v2_funding_inputs_sufficient( + Amount::from_sat(220_000), &[ funding_input_sats(200_000), funding_input_sats(100_000), ], + &[ + funding_output_sats(400_000), + ], true, true, 2000, ).unwrap(), - expected_fee, + Amount::from_sat(expected_fee), + ); + } + + // Net splice-out, inputs insufficient to cover fees + { + let expected_fee = if cfg!(feature = "grind_signatures") { 113670 } else { 113940 }; + assert_eq!( + check_v2_funding_inputs_sufficient( + Amount::from_sat(220_000), + &[ + funding_input_sats(200_000), + funding_input_sats(100_000), + ], + &[ + funding_output_sats(400_000), + ], + true, + true, + 90000, + ), + Err(format!( + "Total input amount 0.00300000 BTC is lower than needed for splice-in contribution 0.00220000 BTC, considering fees of {}. Need more inputs.", + Amount::from_sat(expected_fee), + )), ); } @@ -18282,17 +18677,18 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 1736 } else { 1740 }; assert_eq!( check_v2_funding_inputs_sufficient( - 220_000, + Amount::from_sat(220_000), &[ funding_input_sats(100_000), ], + &[], true, true, 2000, ), Err(format!( - "Total input amount 100000 is lower than needed for contribution 220000, considering fees of {}. 
Need more inputs.", - expected_fee, + "Total input amount 0.00100000 BTC is lower than needed for splice-in contribution 0.00220000 BTC, considering fees of {}. Need more inputs.", + Amount::from_sat(expected_fee), )), ); } @@ -18302,16 +18698,17 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 2278 } else { 2284 }; assert_eq!( check_v2_funding_inputs_sufficient( - (300_000 - expected_fee - 20) as i64, + Amount::from_sat(300_000 - expected_fee - 20), &[ funding_input_sats(200_000), funding_input_sats(100_000), ], + &[], true, true, 2000, ).unwrap(), - expected_fee, + Amount::from_sat(expected_fee), ); } @@ -18320,18 +18717,19 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 2506 } else { 2513 }; assert_eq!( check_v2_funding_inputs_sufficient( - 298032, + Amount::from_sat(298032), &[ funding_input_sats(200_000), funding_input_sats(100_000), ], + &[], true, true, 2200, ), Err(format!( - "Total input amount 300000 is lower than needed for contribution 298032, considering fees of {}. Need more inputs.", - expected_fee + "Total input amount 0.00300000 BTC is lower than needed for splice-in contribution 0.00298032 BTC, considering fees of {}. 
Need more inputs.", + Amount::from_sat(expected_fee), )), ); } @@ -18341,16 +18739,17 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 1084 } else { 1088 }; assert_eq!( check_v2_funding_inputs_sufficient( - (300_000 - expected_fee - 20) as i64, + Amount::from_sat(300_000 - expected_fee - 20), &[ funding_input_sats(200_000), funding_input_sats(100_000), ], + &[], false, false, 2000, ).unwrap(), - expected_fee, + Amount::from_sat(expected_fee), ); } } diff --git a/lightning/src/ln/channel_open_tests.rs b/lightning/src/ln/channel_open_tests.rs index 3a9c266aacd..08cabc053c5 100644 --- a/lightning/src/ln/channel_open_tests.rs +++ b/lightning/src/ln/channel_open_tests.rs @@ -62,7 +62,7 @@ fn test_outbound_chans_unlimited() { let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b); for _ in 0..MAX_UNFUNDED_CHANS_PER_PEER { - nodes[1].node.handle_open_channel(node_a, &open_channel_msg); + handle_and_accept_open_channel(&nodes[1], node_a, &open_channel_msg); get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a); open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); @@ -90,13 +90,10 @@ fn test_outbound_chans_unlimited() { #[test] fn test_0conf_limiting() { - // Tests that we properly limit inbound channels when we have the manual-channel-acceptance - // flag set and (sometimes) accept channels as 0conf. + // Tests that we properly limit inbound channels but accept channels as 0conf. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let mut settings = test_default_channel_config(); - settings.manually_accept_inbound_channels = true; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); // Note that create_network connects the nodes together for us @@ -110,24 +107,14 @@ fn test_0conf_limiting() { }; // First, get us up to MAX_UNFUNDED_CHANNEL_PEERS so we can test at the edge - for _ in 0..MAX_UNFUNDED_CHANNEL_PEERS - 1 { + for _ in 0..MAX_UNFUNDED_CHANNEL_PEERS { let random_pk = PublicKey::from_secret_key( &nodes[0].node.secp_ctx, &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap(), ); nodes[1].node.peer_connected(random_pk, init_msg, true).unwrap(); - nodes[1].node.handle_open_channel(random_pk, &open_channel_msg); - let events = nodes[1].node.get_and_clear_pending_events(); - match events[0] { - Event::OpenChannelRequest { temporary_channel_id, .. 
} => { - nodes[1] - .node - .accept_inbound_channel(&temporary_channel_id, &random_pk, 23, None) - .unwrap(); - }, - _ => panic!("Unexpected event"), - } + handle_and_accept_open_channel(&nodes[1], random_pk, &open_channel_msg); get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk); open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); @@ -185,13 +172,12 @@ fn test_0conf_limiting() { #[test] fn test_inbound_anchors_manual_acceptance() { - let mut anchors_cfg = test_default_channel_config(); - anchors_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + let anchors_cfg = test_default_channel_config(); do_test_manual_inbound_accept_with_override(anchors_cfg, None); } #[test] -fn test_inbound_anchors_manual_acceptance_overridden() { +fn test_inbound_anchors_config_overridden() { let overrides = ChannelConfigOverrides { handshake_overrides: Some(ChannelHandshakeConfigUpdate { max_inbound_htlc_value_in_flight_percent_of_channel: Some(5), @@ -205,8 +191,6 @@ fn test_inbound_anchors_manual_acceptance_overridden() { }; let mut anchors_cfg = test_default_channel_config(); - anchors_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - let accept_message = do_test_manual_inbound_accept_with_override(anchors_cfg, Some(overrides)); assert_eq!(accept_message.common_fields.max_htlc_value_in_flight_msat, 5_000_000); assert_eq!(accept_message.common_fields.htlc_minimum_msat, 1_000); @@ -226,15 +210,12 @@ fn test_inbound_zero_fee_commitments_manual_acceptance() { fn do_test_manual_inbound_accept_with_override( start_cfg: UserConfig, config_overrides: Option, ) -> AcceptChannel { - let mut mannual_accept_cfg = start_cfg.clone(); - mannual_accept_cfg.manually_accept_inbound_channels = true; - let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs( 3, &node_cfgs, - 
&[Some(start_cfg.clone()), Some(start_cfg.clone()), Some(mannual_accept_cfg.clone())], + &[Some(start_cfg.clone()), Some(start_cfg.clone()), Some(start_cfg)], ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -243,22 +224,6 @@ fn do_test_manual_inbound_accept_with_override( nodes[0].node.create_channel(node_b, 100_000, 0, 42, None, None).unwrap(); let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b); - nodes[1].node.handle_open_channel(node_a, &open_channel_msg); - assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); - match &msg_events[0] { - MessageSendEvent::HandleError { node_id, action } => { - assert_eq!(*node_id, node_a); - match action { - ErrorAction::SendErrorMessage { msg } => { - assert_eq!(msg.data, "No channels with anchor outputs accepted".to_owned()) - }, - _ => panic!("Unexpected error action"), - } - }, - _ => panic!("Unexpected event"), - } - nodes[2].node.handle_open_channel(node_a, &open_channel_msg); let events = nodes[2].node.get_and_clear_pending_events(); match events[0] { @@ -281,7 +246,6 @@ fn test_anchors_zero_fee_htlc_tx_downgrade() { let mut receiver_cfg = test_default_channel_config(); receiver_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - receiver_cfg.manually_accept_inbound_channels = true; let start_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(); let end_type = ChannelTypeFeatures::only_static_remote_key(); @@ -303,7 +267,6 @@ fn test_scid_privacy_downgrade() { receiver_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; receiver_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; receiver_cfg.channel_handshake_config.negotiate_scid_privacy = true; - receiver_cfg.manually_accept_inbound_channels = true; let mut start_type = ChannelTypeFeatures::anchors_zero_fee_commitments(); 
start_type.set_scid_privacy_required(); @@ -328,7 +291,6 @@ fn test_zero_fee_commitments_downgrade() { let mut receiver_cfg = test_default_channel_config(); receiver_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; receiver_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - receiver_cfg.manually_accept_inbound_channels = true; let start_type = ChannelTypeFeatures::anchors_zero_fee_commitments(); let downgrade_types = vec![ @@ -344,11 +306,9 @@ fn test_zero_fee_commitments_downgrade_to_static_remote() { // are supported (but not accepted), but not legacy anchors. let mut initiator_cfg = test_default_channel_config(); initiator_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; - initiator_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - let mut receiver_cfg = test_default_channel_config(); + let mut receiver_cfg = test_legacy_channel_config(); receiver_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; - receiver_cfg.manually_accept_inbound_channels = true; let start_type = ChannelTypeFeatures::anchors_zero_fee_commitments(); let end_type = ChannelTypeFeatures::only_static_remote_key(); @@ -406,10 +366,8 @@ fn do_test_channel_type_downgrade( fn test_no_channel_downgrade() { // Tests that the local node will not retry when a `option_static_remote` channel is // rejected by a peer that advertises support for the feature. 
- let initiator_cfg = test_default_channel_config(); + let initiator_cfg = test_legacy_channel_config(); let mut receiver_cfg = test_default_channel_config(); - receiver_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - receiver_cfg.manually_accept_inbound_channels = true; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); @@ -469,7 +427,7 @@ fn test_channel_resumption_fail_post_funding() { nodes[0].node.create_channel(node_b_id, 1_000_000, 0, 42, None, None).unwrap(); let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_chan); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan); let accept_chan = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_chan); @@ -499,11 +457,10 @@ fn test_channel_resumption_fail_post_funding() { pub fn test_insane_channel_opens() { // Stand up a network of 2 nodes use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS; - let mut cfg = UserConfig::default(); - cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1; + let legacy_cfg = test_legacy_channel_config(); let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg.clone())]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(legacy_cfg.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -513,7 +470,7 @@ pub fn test_insane_channel_opens() { // funding satoshis let channel_value_sat = 31337; // same as funding satoshis let channel_reserve_satoshis = - get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg); + get_holder_selected_channel_reserve_satoshis(channel_value_sat, &legacy_cfg); let 
push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000; // Have node0 initiate a channel to node1 with aforementioned parameters @@ -529,6 +486,22 @@ pub fn test_insane_channel_opens() { |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| { let open_channel_mutated = message_mutator(open_channel_message.clone()); nodes[1].node.handle_open_channel(node_a_id, &open_channel_mutated); + let events = nodes[1].node.get_and_clear_pending_events(); + match events[0] { + Event::OpenChannelRequest { + temporary_channel_id, counterparty_node_id, .. + } => match nodes[1].node.accept_inbound_channel( + &temporary_channel_id, + &counterparty_node_id, + 42, + None, + ) { + Err(_) => {}, + _ => panic!(), + }, + _ => panic!("Unexpected event"), + } + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); let expected_regex = regex::Regex::new(expected_error_str).unwrap(); @@ -550,19 +523,6 @@ pub fn test_insane_channel_opens() { use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT; - // Test all mutations that would make the channel open message insane - insane_open_helper( - format!( - "Per our config, funding must be at most {}. It was {}", - TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, - TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2 - ) - .as_str(), - |mut msg| { - msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; - msg - }, - ); insane_open_helper( format!( "Funding must be smaller than the total bitcoin supply. 
It was {}", @@ -625,7 +585,6 @@ pub fn test_insane_channel_opens() { #[xtest(feature = "_externalize_tests")] fn test_insane_zero_fee_channel_open() { let mut cfg = UserConfig::default(); - cfg.manually_accept_inbound_channels = true; cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; let chanmon_cfgs = create_chanmon_cfgs(2); @@ -746,7 +705,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { if steps & 0x0f == 1 { return; } - nodes[1].node.handle_open_channel(node_a_id, &open_channel); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel); let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); if steps & 0x0f == 2 { @@ -925,17 +884,24 @@ pub fn bolt2_open_channel_sane_dust_limit() { node0_to_1_send_open_channel.channel_reserve_satoshis = 100001; nodes[1].node.handle_open_channel(node_a_id, &node0_to_1_send_open_channel); - let events = nodes[1].node.get_and_clear_pending_msg_events(); - let err_msg = match events[0] { - MessageSendEvent::HandleError { - action: ErrorAction::SendErrorMessage { ref msg }, .. - } => msg.clone(), + let events = nodes[1].node.get_and_clear_pending_events(); + match events[0] { + Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => match nodes + [1] + .node + .accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, 42, None) + { + Err(APIError::ChannelUnavailable { err }) => assert_eq!( + err, + "dust_limit_satoshis (547) is greater than the implementation limit (546)" + ), + _ => panic!(), + }, _ => panic!("Unexpected event"), - }; - assert_eq!( - err_msg.data, - "dust_limit_satoshis (547) is greater than the implementation limit (546)" - ); + } + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], MessageSendEvent::HandleError { .. 
})); } #[xtest(feature = "_externalize_tests")] @@ -1022,7 +988,7 @@ pub fn test_user_configurable_csv_delay() { // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Chanel::accept_channel() nodes[0].node.create_channel(node_b_id, 1000000, 1000000, 42, None, None).unwrap(); let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_channel); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel); let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); @@ -1078,24 +1044,20 @@ pub fn test_user_configurable_csv_delay() { } #[xtest(feature = "_externalize_tests")] -pub fn test_manually_accept_inbound_channel_request() { - let mut manually_accept_conf = UserConfig::default(); - manually_accept_conf.manually_accept_inbound_channels = true; - manually_accept_conf.channel_handshake_config.minimum_depth = 1; +pub fn test_accept_inbound_channel_config_override() { + let mut conf = UserConfig::default(); + conf.channel_handshake_config.minimum_depth = 1; + conf.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(conf.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - nodes[0] - .node - .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)) - .unwrap(); + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, Some(conf)).unwrap(); let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &res); @@ -1203,28 
+1165,19 @@ pub fn test_manually_accept_inbound_channel_request() { } #[xtest(feature = "_externalize_tests")] -pub fn test_manually_reject_inbound_channel_request() { - let mut manually_accept_conf = UserConfig::default(); - manually_accept_conf.manually_accept_inbound_channels = true; +pub fn test_reject_inbound_channel_request() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - nodes[0] - .node - .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)) - .unwrap(); + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &res); - - // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before - // rejecting the inbound channel request. 
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); let err = "Channel force-closed".to_string(); let events = nodes[1].node.get_and_clear_pending_events(); @@ -1254,29 +1207,19 @@ pub fn test_manually_reject_inbound_channel_request() { #[xtest(feature = "_externalize_tests")] pub fn test_can_not_accept_inbound_channel_twice() { - let mut manually_accept_conf = UserConfig::default(); - manually_accept_conf.manually_accept_inbound_channels = true; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - nodes[0] - .node - .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)) - .unwrap(); + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &res); - - // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before - // accepting the inbound channel request. assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { @@ -1359,7 +1302,7 @@ pub fn test_duplicate_temporary_channel_id_from_different_peers() { // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same // `temporary_channel_id` as they are from different peers. 
- nodes[0].node.handle_open_channel(node_b_id, &open_chan_msg_chan_1_0); + handle_and_accept_open_channel(&nodes[0], node_b_id, &open_chan_msg_chan_1_0); { let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1375,7 +1318,7 @@ pub fn test_duplicate_temporary_channel_id_from_different_peers() { } } - nodes[0].node.handle_open_channel(node_c_id, &open_chan_msg_chan_2_0); + handle_and_accept_open_channel(&nodes[0], node_c_id, &open_chan_msg_chan_2_0); { let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1416,7 +1359,8 @@ pub fn test_duplicate_funding_err_in_funding() { let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, node_b_id); let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id; open_chan_msg.common_fields.temporary_channel_id = real_channel_id; - nodes[1].node.handle_open_channel(node_c_id, &open_chan_msg); + handle_and_accept_open_channel(&nodes[1], node_c_id, &open_chan_msg); + let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_c_id); accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id; @@ -1461,7 +1405,7 @@ pub fn test_duplicate_chan_id() { // Create an initial channel nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan_msg); nodes[0].node.handle_accept_channel( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), @@ -1548,7 +1492,7 @@ pub fn test_duplicate_chan_id() { // Now try to create a second channel which has a duplicate funding output. 
nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_chan_2_msg); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan_2_msg); nodes[0].node.handle_accept_channel( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), @@ -1652,10 +1596,8 @@ pub fn test_invalid_funding_tx() { let node_b_id = nodes[1].node.get_our_node_id(); nodes[0].node.create_channel(node_b_id, 100_000, 10_000, 42, None, None).unwrap(); - nodes[1].node.handle_open_channel( - node_a_id, - &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id), - ); + let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_msg); nodes[0].node.handle_accept_channel( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), @@ -1699,7 +1641,7 @@ pub fn test_invalid_funding_tx() { assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); - nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[0].tx_broadcaster.clear(); let expected_err = "funding tx had wrong script/value or output index"; confirm_transaction_at(&nodes[1], &tx, 1); @@ -1769,10 +1711,9 @@ pub fn test_coinbase_funding_tx() { nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel); - nodes[1].node.handle_open_channel(node_a_id, &open_channel); let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); - nodes[0].node.handle_accept_channel(node_b_id, 
&accept_channel); // Create the coinbase funding transaction. @@ -1831,7 +1772,8 @@ pub fn test_non_final_funding_tx() { nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_message); + let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); @@ -1890,7 +1832,7 @@ pub fn test_non_final_funding_tx_within_headroom() { nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_message); let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); @@ -2364,7 +2306,6 @@ pub fn test_accept_inbound_channel_errors_queued() { let mut config0 = test_default_channel_config(); let mut config1 = config0.clone(); config1.channel_handshake_limits.their_to_self_delay = 1000; - config1.manually_accept_inbound_channels = true; config0.channel_handshake_config.our_to_self_delay = 2000; let chanmon_cfgs = create_chanmon_cfgs(2); @@ -2410,8 +2351,8 @@ pub fn test_manual_funding_abandon() { assert!(nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).is_ok()); let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel); - nodes[1].node.handle_open_channel(node_a_id, &open_channel); let accept_channel = get_event_msg!(nodes[1], 
MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); @@ -2459,10 +2400,9 @@ pub fn test_funding_signed_event() { assert!(nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).is_ok()); let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel); - nodes[1].node.handle_open_channel(node_a_id, &open_channel); let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); - nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); let (temp_channel_id, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); diff --git a/lightning/src/ln/channel_state.rs b/lightning/src/ln/channel_state.rs index d10327b259a..c7277d18e3b 100644 --- a/lightning/src/ln/channel_state.rs +++ b/lightning/src/ln/channel_state.rs @@ -22,8 +22,6 @@ use crate::types::features::{ChannelTypeFeatures, InitFeatures}; use crate::types::payment::PaymentHash; use crate::util::config::ChannelConfig; -use core::ops::Deref; - /// Exposes the state of pending inbound HTLCs. /// /// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes @@ -368,14 +366,11 @@ pub struct ChannelDetails { /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat pub unspendable_punishment_reserve: Option, /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound - /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if - /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise - /// `user_channel_id` will be randomized for an inbound channel. This may be zero for objects - /// serialized with LDK versions prior to 0.0.113. + /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels. 
+ /// This may be zero for objects serialized with LDK versions prior to 0.0.113. /// /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel - /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels pub user_channel_id: u128, /// The currently negotiated fee rate denominated in satoshi per 1000 weight units, /// which is applied to commitment and HTLC transactions. @@ -524,14 +519,10 @@ impl ChannelDetails { } } - pub(super) fn from_channel( + pub(super) fn from_channel( channel: &Channel, best_block_height: u32, latest_features: InitFeatures, fee_estimator: &LowerBoundedFeeEstimator, - ) -> Self - where - SP::Target: SignerProvider, - F::Target: FeeEstimator, - { + ) -> Self { let context = channel.context(); let funding = channel.funding(); let balance = channel.get_available_balances(fee_estimator); @@ -613,9 +604,9 @@ impl_writeable_tlv_based!(ChannelDetails, { (10, channel_value_satoshis, required), (12, unspendable_punishment_reserve, option), // Note that _user_channel_id_low is used below, but rustc warns anyway - (14, _user_channel_id_low, (legacy, u64, + (14, _user_channel_id_low, (legacy, u64, |_| Ok(()), |us: &ChannelDetails| Some(us.user_channel_id as u64))), - (16, _balance_msat, (legacy, u64, |us: &ChannelDetails| Some(us.next_outbound_htlc_limit_msat))), + (16, _balance_msat, (legacy, u64, |_| Ok(()), |us: &ChannelDetails| Some(us.next_outbound_htlc_limit_msat))), (18, outbound_capacity_msat, required), (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat)), (20, inbound_capacity_msat, required), @@ -629,7 +620,7 @@ impl_writeable_tlv_based!(ChannelDetails, { (33, inbound_htlc_minimum_msat, option), (35, inbound_htlc_maximum_msat, option), // Note that _user_channel_id_high is used below, but rustc warns anyway 
- (37, _user_channel_id_high, (legacy, u64, + (37, _user_channel_id_high, (legacy, u64, |_| Ok(()), |us: &ChannelDetails| Some((us.user_channel_id >> 64) as u64))), (39, feerate_sat_per_1000_weight, option), (41, channel_shutdown_state, option), diff --git a/lightning/src/ln/channel_type_tests.rs b/lightning/src/ln/channel_type_tests.rs index 13470d50614..2b069a6d314 100644 --- a/lightning/src/ln/channel_type_tests.rs +++ b/lightning/src/ln/channel_type_tests.rs @@ -34,8 +34,10 @@ fn test_option_anchors_zero_fee_initial() { let mut expected_type = ChannelTypeFeatures::only_static_remote_key(); expected_type.set_anchors_zero_fee_htlc_tx_required(); + let mut start_cfg = UserConfig::default(); + start_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; do_test_get_initial_channel_type( - UserConfig::default(), + start_cfg, InitFeatures::empty(), ChannelTypeFeatures::only_static_remote_key(), |cfg: &mut UserConfig| { @@ -225,13 +227,15 @@ fn do_test_supports_channel_type(config: UserConfig, expected_channel_type: Chan let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap()); + let mut non_anchors_config = UserConfig::default(); + non_anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; // Assert that we get `static_remotekey` when no custom config is negotiated. 
let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, - &channelmanager::provided_init_features(&UserConfig::default()), + &channelmanager::provided_init_features(&non_anchors_config), 10000000, 100000, 42, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 72585d69f80..7d97f424181 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -40,6 +40,7 @@ use crate::blinded_path::NodeIdLookUp; use crate::chain; use crate::chain::chaininterface::{ BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, + TransactionType, }; use crate::chain::channelmonitor::{ Balance, ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, MonitorEvent, @@ -58,9 +59,9 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; use crate::ln::channel::QuiescentAction; use crate::ln::channel::{ self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, - FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel, - ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch, - WithChannelContext, + FundedChannel, FundingTxSigned, InboundV1Channel, OutboundHop, OutboundV1Channel, + PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, + UpdateFulfillCommitFetch, WithChannelContext, }; use crate::ln::channel_state::ChannelDetails; use crate::ln::funding::SpliceContribution; @@ -84,9 +85,12 @@ use crate::ln::onion_utils::{process_fulfill_attribution_data, AttributionData}; use crate::ln::our_peer_storage::{EncryptedOurPeerStorage, PeerStorageMonitorHolder}; #[cfg(test)] use crate::ln::outbound_payment; +#[cfg(any(test, feature = "_externalize_tests"))] +use crate::ln::outbound_payment::PaymentSendFailure; use crate::ln::outbound_payment::{ - OutboundPayments, PendingOutboundPayment, 
RetryableInvoiceRequest, SendAlongPathArgs, - StaleExpiration, + Bolt11PaymentError, Bolt12PaymentError, OutboundPayments, PendingOutboundPayment, + ProbeSendFailure, RecipientCustomTlvs, RecipientOnionFields, Retry, RetryableInvoiceRequest, + RetryableSendFailure, SendAlongPathArgs, StaleExpiration, }; use crate::ln::types::ChannelId; use crate::offers::async_receive_offer_cache::AsyncReceiveOfferCache; @@ -122,7 +126,9 @@ use crate::types::features::{ }; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::types::string::UntrustedString; -use crate::util::config::{ChannelConfig, ChannelConfigOverrides, ChannelConfigUpdate, UserConfig}; +use crate::util::config::{ + ChannelConfig, ChannelConfigOverrides, ChannelConfigUpdate, HTLCInterceptionFlags, UserConfig, +}; use crate::util::errors::APIError; use crate::util::logger::{Level, Logger, WithContext}; use crate::util::scid_utils::fake_scid; @@ -173,6 +179,7 @@ use crate::prelude::*; use crate::sync::{Arc, FairRwLock, LockHeldState, LockTestExt, Mutex, RwLock, RwLockReadGuard}; use bitcoin::hex::impl_fmt_traits; +use crate::ln::script::ShutdownScript; use core::borrow::Borrow; use core::cell::RefCell; use core::convert::Infallible; @@ -180,14 +187,6 @@ use core::ops::Deref; use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use core::time::Duration; use core::{cmp, mem}; -// Re-export this for use in the public API. -#[cfg(any(test, feature = "_externalize_tests"))] -pub(crate) use crate::ln::outbound_payment::PaymentSendFailure; -pub use crate::ln::outbound_payment::{ - Bolt11PaymentError, Bolt12PaymentError, ProbeSendFailure, RecipientOnionFields, Retry, - RetryableSendFailure, -}; -use crate::ln::script::ShutdownScript; // We hold various information about HTLC relay in the HTLC objects in Channel itself: // @@ -427,6 +426,9 @@ pub struct PendingHTLCInfo { /// This is used to allow LSPs to take fees as a part of payments, without the sender having to /// shoulder them. 
pub skimmed_fee_msat: Option, + /// An experimental field indicating whether our node's reputation would be held accountable + /// for the timely resolution of the received HTLC. + pub incoming_accountable: bool, } #[derive(Clone, Debug)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug @@ -634,14 +636,6 @@ impl Readable for PaymentId { pub struct InterceptId(pub [u8; 32]); impl InterceptId { - /// This intercept id corresponds to an HTLC that will be forwarded on - /// [`ChannelManager::forward_intercepted_htlc`]. - fn from_incoming_shared_secret(ss: &[u8; 32]) -> Self { - Self(Sha256::hash(ss).to_byte_array()) - } - - /// This intercept id corresponds to an HTLC that will be forwarded on receipt of a - /// [`ReleaseHeldHtlc`] onion message. fn from_htlc_id_and_chan_id( htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey, ) -> Self { @@ -677,6 +671,36 @@ impl Readable for InterceptId { } } +/// Optional arguments to [`ChannelManager::pay_for_bolt11_invoice`] +/// +/// These fields will often not need to be set, and the provided [`Self::default`] can be used. +pub struct OptionalBolt11PaymentParams { + /// A set of custom tlvs, user can send along the payment. + pub custom_tlvs: RecipientCustomTlvs, + /// Pathfinding options which tweak how the path is constructed to the recipient. + pub route_params_config: RouteParametersConfig, + /// The number of tries or time during which we'll retry this payment if some paths to the + /// recipient fail. + /// + /// Once the retry limit is reached, further path failures will not be retried and the payment + /// will ultimately fail once all pending paths have failed (generating an + /// [`Event::PaymentFailed`]). 
+ pub retry_strategy: Retry, +} + +impl Default for OptionalBolt11PaymentParams { + fn default() -> Self { + Self { + custom_tlvs: RecipientCustomTlvs::new(vec![]).unwrap(), + route_params_config: Default::default(), + #[cfg(feature = "std")] + retry_strategy: Retry::Timeout(core::time::Duration::from_secs(2)), + #[cfg(not(feature = "std"))] + retry_strategy: Retry::Attempts(3), + } + } +} + /// Optional arguments to [`ChannelManager::pay_for_offer`] #[cfg_attr( feature = "dnssec", @@ -915,6 +939,7 @@ struct MsgHandleErrInternal { shutdown_finish: Option<(ShutdownResult, Option<(msgs::ChannelUpdate, NodeId, NodeId)>)>, tx_abort: Option, } + impl MsgHandleErrInternal { fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self { Self { @@ -930,6 +955,20 @@ impl MsgHandleErrInternal { } } + fn no_such_peer(counterparty_node_id: &PublicKey, channel_id: ChannelId) -> Self { + let err = + format!("No such peer for the passed counterparty_node_id {counterparty_node_id}"); + Self::send_err_msg_no_close(err, channel_id) + } + + fn no_such_channel_for_peer(counterparty_node_id: &PublicKey, channel_id: ChannelId) -> Self { + let err = format!( + "Got a message for a channel from the wrong node! No such channel_id {} for the passed counterparty_node_id {}", + channel_id, counterparty_node_id + ); + Self::send_err_msg_no_close(err, channel_id) + } + fn from_no_close(err: msgs::LightningError) -> Self { Self { err, closes_channel: false, shutdown_finish: None, tx_abort: None } } @@ -1166,12 +1205,10 @@ impl ClaimablePayments { /// /// If no payment is found, `Err(Vec::new())` is returned. 
#[rustfmt::skip] - fn begin_claiming_payment( + fn begin_claiming_payment( &mut self, payment_hash: PaymentHash, node_signer: &S, logger: &L, inbound_payment_id_secret: &[u8; 32], custom_tlvs_known: bool, - ) -> Result<(Vec, ClaimingPayment), Vec> - where L::Target: Logger, S::Target: NodeSigner, - { + ) -> Result<(Vec, ClaimingPayment), Vec> { match self.claimable_payments.remove(&payment_hash) { Some(payment) => { let mut receiver_node_id = node_signer.get_node_id(Recipient::Node) @@ -1256,7 +1293,11 @@ enum BackgroundEvent { /// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have /// them marked pending, thus we need to run any [`MonitorUpdateCompletionAction`] (s) pending /// on a channel. - MonitorUpdatesComplete { counterparty_node_id: PublicKey, channel_id: ChannelId }, + MonitorUpdatesComplete { + counterparty_node_id: PublicKey, + channel_id: ChannelId, + highest_update_id_completed: u64, + }, } /// A pointer to a channel that is unblocked when an event is surfaced @@ -1366,6 +1407,28 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, }, ); +/// Result of attempting to resume a channel after a monitor update completes while locks are held. +/// Contains remaining work to be processed after locks are released. +#[must_use] +enum PostMonitorUpdateChanResume { + /// Channel still has blocked monitor updates pending. Contains only update actions to process. + Blocked { update_actions: Vec }, + /// Channel was fully unblocked and has been resumed. Contains remaining data to process. 
+ Unblocked { + channel_id: ChannelId, + counterparty_node_id: PublicKey, + funding_txo: OutPoint, + user_channel_id: u128, + unbroadcasted_batch_funding_txid: Option, + update_actions: Vec, + htlc_forwards: Option, + decode_update_add_htlcs: Option<(u64, Vec)>, + finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, + failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, + }, +} + #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct PaymentCompleteUpdate { counterparty_node_id: PublicKey, @@ -1535,20 +1598,17 @@ impl Readable for Option { } /// State we hold per-peer. -pub(super) struct PeerState -where - SP::Target: SignerProvider, -{ +pub(super) struct PeerState { /// `channel_id` -> `Channel` /// /// Holds all channels where the peer is the counterparty. pub(super) channel_by_id: HashMap>, /// `temporary_channel_id` -> `InboundChannelRequest`. /// - /// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where - /// the peer is the counterparty. If the channel is accepted, then the entry in this table is - /// removed, and an InboundV1Channel is created and placed in the `inbound_v1_channel_by_id` table. If - /// the channel is rejected, then the entry is simply removed. + /// Holds all unaccepted inbound channels where the peer is the counterparty. + /// If the channel is accepted, then the entry in this table is removed and a Channel is + /// created and placed in the `channel_by_id` table. If the channel is rejected, then + /// the entry is simply removed. pub(super) inbound_channel_request_by_id: HashMap, /// The latest `InitFeatures` we heard from the peer. latest_features: InitFeatures, @@ -1613,10 +1673,7 @@ where peer_storage: Vec, } -impl PeerState -where - SP::Target: SignerProvider, -{ +impl PeerState { /// Indicates that a peer meets the criteria where we're ok to remove it from our storage. 
/// If true is passed for `require_disconnected`, the function will return false if we haven't /// disconnected from the node already, ie. `PeerState::is_connected` is set to `true`. @@ -1769,100 +1826,63 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, M, T, F, L> /// languages. pub trait AChannelManager { /// A type implementing [`chain::Watch`]. - type Watch: chain::Watch + ?Sized; - /// A type that may be dereferenced to [`Self::Watch`]. - type M: Deref; + type Watch: chain::Watch; /// A type implementing [`BroadcasterInterface`]. - type Broadcaster: BroadcasterInterface + ?Sized; - /// A type that may be dereferenced to [`Self::Broadcaster`]. - type T: Deref; + type Broadcaster: BroadcasterInterface; /// A type implementing [`EntropySource`]. - type EntropySource: EntropySource + ?Sized; - /// A type that may be dereferenced to [`Self::EntropySource`]. - type ES: Deref; + type EntropySource: EntropySource; /// A type implementing [`NodeSigner`]. - type NodeSigner: NodeSigner + ?Sized; - /// A type that may be dereferenced to [`Self::NodeSigner`]. - type NS: Deref; + type NodeSigner: NodeSigner; /// A type implementing [`EcdsaChannelSigner`]. type Signer: EcdsaChannelSigner + Sized; /// A type implementing [`SignerProvider`] for [`Self::Signer`]. - type SignerProvider: SignerProvider + ?Sized; - /// A type that may be dereferenced to [`Self::SignerProvider`]. - type SP: Deref; + type SP: SignerProvider; /// A type implementing [`FeeEstimator`]. - type FeeEstimator: FeeEstimator + ?Sized; - /// A type that may be dereferenced to [`Self::FeeEstimator`]. - type F: Deref; + type FeeEstimator: FeeEstimator; /// A type implementing [`Router`]. - type Router: Router + ?Sized; - /// A type that may be dereferenced to [`Self::Router`]. - type R: Deref; + type Router: Router; /// A type implementing [`MessageRouter`]. - type MessageRouter: MessageRouter + ?Sized; - /// A type that may be dereferenced to [`Self::MessageRouter`]. 
- type MR: Deref; + type MessageRouter: MessageRouter; /// A type implementing [`Logger`]. - type Logger: Logger + ?Sized; - /// A type that may be dereferenced to [`Self::Logger`]. - type L: Deref; + type Logger: Logger; /// Returns a reference to the actual [`ChannelManager`] object. fn get_cm( &self, ) -> &ChannelManager< - Self::M, - Self::T, - Self::ES, - Self::NS, + Self::Watch, + Self::Broadcaster, + Self::EntropySource, + Self::NodeSigner, Self::SP, - Self::F, - Self::R, - Self::MR, - Self::L, + Self::FeeEstimator, + Self::Router, + Self::MessageRouter, + Self::Logger, >; } impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > AChannelManager for ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { - type Watch = M::Target; - type M = M; - type Broadcaster = T::Target; - type T = T; - type EntropySource = ES::Target; - type ES = ES; - type NodeSigner = NS::Target; - type NS = NS; - type Signer = ::EcdsaSigner; - type SignerProvider = SP::Target; + type Watch = M; + type Broadcaster = T; + type EntropySource = ES; + type NodeSigner = NS; + type Signer = SP::EcdsaSigner; type SP = SP; - type FeeEstimator = F::Target; - type F = F; - type Router = R::Target; - type R = R; - type MessageRouter = MR::Target; - type MR = MR; - type Logger = L::Target; - type L = L; + type FeeEstimator = F; + type Router = R; + type MessageRouter = MR; + type Logger = L; fn get_cm(&self) -> &ChannelManager { self } @@ -2031,7 +2051,7 @@ where /// /// ## Opening Channels /// -/// To an open a channel with a peer, call 
[`create_channel`]. This will initiate the process of +/// To open a channel with a peer, call [`create_channel`]. This will initiate the process of /// opening an outbound channel, which requires self-funding when handling /// [`Event::FundingGenerationReady`]. /// @@ -2095,9 +2115,8 @@ where /// /// ## Accepting Channels /// -/// Inbound channels are initiated by peers and are automatically accepted unless [`ChannelManager`] -/// has [`UserConfig::manually_accept_inbound_channels`] set. In that case, the channel may be -/// either accepted or rejected when handling [`Event::OpenChannelRequest`]. +/// Inbound channels are initiated by peers and must be manually accepted or rejected when +/// handling [`Event::OpenChannelRequest`]. /// /// ``` /// # use bitcoin::secp256k1::PublicKey; @@ -2230,7 +2249,7 @@ where /// match event { /// Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose { /// PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(payment_preimage), .. } => { -/// assert_eq!(payment_hash.0, invoice.payment_hash().as_ref()); +/// assert_eq!(payment_hash, invoice.payment_hash()); /// println!("Claiming payment {}", payment_hash); /// channel_manager.claim_funds(payment_preimage); /// }, @@ -2238,7 +2257,7 @@ where /// println!("Unknown payment hash: {}", payment_hash); /// }, /// PaymentPurpose::SpontaneousPayment(payment_preimage) => { -/// assert_ne!(payment_hash.0, invoice.payment_hash().as_ref()); +/// assert_ne!(payment_hash, invoice.payment_hash()); /// println!("Claiming spontaneous payment {}", payment_hash); /// channel_manager.claim_funds(payment_preimage); /// }, @@ -2246,7 +2265,7 @@ where /// # _ => {}, /// }, /// Event::PaymentClaimed { payment_hash, amount_msat, .. } => { -/// assert_eq!(payment_hash.0, invoice.payment_hash().as_ref()); +/// assert_eq!(payment_hash, invoice.payment_hash()); /// println!("Claimed {} msats", amount_msat); /// }, /// // ... 
@@ -2261,19 +2280,20 @@ where /// # use bitcoin::hashes::Hash; /// # use lightning::events::{Event, EventsProvider}; /// # use lightning::types::payment::PaymentHash; -/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry}; -/// # use lightning::routing::router::RouteParametersConfig; +/// # use lightning::ln::channelmanager::{AChannelManager, OptionalBolt11PaymentParams, PaymentId, RecentPaymentDetails}; +/// # use lightning::ln::outbound_payment::Retry; /// # use lightning_invoice::Bolt11Invoice; /// # /// # fn example( -/// # channel_manager: T, invoice: &Bolt11Invoice, route_params_config: RouteParametersConfig, +/// # channel_manager: T, invoice: &Bolt11Invoice, optional_params: OptionalBolt11PaymentParams, /// # retry: Retry /// # ) { /// # let channel_manager = channel_manager.get_cm(); /// # let payment_id = PaymentId([42; 32]); -/// # let payment_hash = PaymentHash((*invoice.payment_hash()).to_byte_array()); +/// # let payment_hash = invoice.payment_hash(); +/// /// match channel_manager.pay_for_bolt11_invoice( -/// invoice, payment_id, None, route_params_config, retry +/// invoice, payment_id, None, optional_params /// ) { /// Ok(()) => println!("Sending payment with hash {}", payment_hash), /// Err(e) => println!("Failed sending payment with hash {}: {:?}", payment_hash, e), @@ -2419,7 +2439,8 @@ where /// ``` /// # use core::time::Duration; /// # use lightning::events::{Event, EventsProvider}; -/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry}; +/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails}; +/// # use lightning::ln::outbound_payment::Retry; /// # use lightning::offers::parse::Bolt12SemanticError; /// # use lightning::routing::router::RouteParametersConfig; /// # @@ -2607,67 +2628,17 @@ where /// [`update_channel`]: chain::Watch::update_channel /// [`ChannelUpdate`]: msgs::ChannelUpdate /// [`read`]: 
ReadableArgs::read -// -// Lock order: -// The tree structure below illustrates the lock order requirements for the different locks of the -// `ChannelManager`. Locks can be held at the same time if they are on the same branch in the tree, -// and should then be taken in the order of the lowest to the highest level in the tree. -// Note that locks on different branches shall not be taken at the same time, as doing so will -// create a new lock order for those specific locks in the order they were taken. -// -// Lock order tree: -// -// `pending_offers_messages` -// -// `pending_async_payments_messages` -// -// `total_consistency_lock` -// | -// |__`forward_htlcs` -// | -// |__`pending_intercepted_htlcs` -// | -// |__`decode_update_add_htlcs` -// | -// |__`per_peer_state` -// | -// |__`claimable_payments` -// | -// |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds -// | -// |__`peer_state` -// | -// |__`short_to_chan_info` -// | -// |__`outbound_scid_aliases` -// | -// |__`best_block` -// | -// |__`pending_events` -// | -// |__`pending_background_events` -// pub struct ChannelManager< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, -> where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, -{ + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, +> { config: RwLock, chain_hash: ChainHash, fee_estimator: LowerBoundedFeeEstimator, @@ -2680,11 +2651,9 @@ pub struct ChannelManager< #[cfg(not(test))] flow: OffersMessageFlow, - /// See `ChannelManager` struct-level documentation for lock order requirements. 
#[cfg(any(test, feature = "_test_utils"))] pub(super) best_block: RwLock, #[cfg(not(any(test, feature = "_test_utils")))] - /// See `ChannelManager` struct-level documentation for lock order requirements. best_block: RwLock, pub(super) secp_ctx: Secp256k1, @@ -2698,9 +2667,7 @@ pub struct ChannelManager< /// after reloading from disk while replaying blocks against ChannelMonitors. /// /// See `PendingOutboundPayment` documentation for more info. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. - pending_outbound_payments: OutboundPayments, + pending_outbound_payments: OutboundPayments, /// SCID/SCID Alias -> forward infos. Key of 0 means payments received. /// @@ -2710,8 +2677,6 @@ pub struct ChannelManager< /// /// Note that no consistency guarantees are made about the existence of a channel with the /// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`! - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(test)] pub(super) forward_htlcs: Mutex>>, #[cfg(not(test))] @@ -2724,8 +2689,6 @@ pub struct ChannelManager< /// (or timeout) /// 2. HTLCs that are being held on behalf of an often-offline sender until receipt of a /// [`ReleaseHeldHtlc`] onion message from an often-offline recipient - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. pending_intercepted_htlcs: Mutex>, /// Outbound SCID Alias -> pending `update_add_htlc`s to decode. @@ -2733,22 +2696,16 @@ pub struct ChannelManager< /// /// Note that no consistency guarantees are made about the existence of a channel with the /// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`! - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. decode_update_add_htlcs: Mutex>>, /// The sets of payments which are claimable or currently being claimed. See /// [`ClaimablePayments`]' individual field docs for more info. 
- /// - /// See `ChannelManager` struct-level documentation for lock order requirements. claimable_payments: Mutex, /// The set of outbound SCID aliases across all our channels, including unconfirmed channels /// and some closed channels which reached a usable state prior to being closed. This is used /// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the /// active channel list on load. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. outbound_scid_aliases: Mutex>, /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s. @@ -2760,8 +2717,6 @@ pub struct ChannelManager< /// Note that while this holds `counterparty_node_id`s and `channel_id`s, no consistency /// guarantees are made about the existence of a peer with the `counterparty_node_id` nor a /// channel with the `channel_id` in our other maps. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(test)] pub(super) short_to_chan_info: FairRwLock>, #[cfg(not(test))] @@ -2802,8 +2757,6 @@ pub struct ChannelManager< /// channels. /// /// Note that the same thread must never acquire two inner `PeerState` locks at the same time. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(not(any(test, feature = "_test_utils")))] per_peer_state: FairRwLock>>>, #[cfg(any(test, feature = "_test_utils"))] @@ -2824,8 +2777,6 @@ pub struct ChannelManager< /// /// Note that events MUST NOT be removed from pending_events after deserialization, as they /// could be in the middle of being processed without the direct mutex held. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. 
#[cfg(not(any(test, feature = "_test_utils")))] pending_events: Mutex)>>, #[cfg(any(test, feature = "_test_utils"))] @@ -2846,8 +2797,6 @@ pub struct ChannelManager< /// /// Thus, we place them here to be handled as soon as possible once we are running normally. /// - /// See `ChannelManager` struct-level documentation for lock order requirements. - /// /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor pending_background_events: Mutex>, /// Used when we have to take a BIG lock to make sure everything is self-consistent. @@ -3201,86 +3150,13 @@ pub struct PhantomRouteHints { pub real_node_pubkey: PublicKey, } -#[rustfmt::skip] -macro_rules! handle_error { - ($self: ident, $internal: expr, $counterparty_node_id: expr) => { { - // In testing, ensure there are no deadlocks where the lock is already held upon - // entering the macro. - debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread); - debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); - - match $internal { - Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, shutdown_finish, tx_abort, .. 
}) => { - let mut msg_event = None; - - if let Some((shutdown_res, update_option)) = shutdown_finish { - let counterparty_node_id = shutdown_res.counterparty_node_id; - let channel_id = shutdown_res.channel_id; - let logger = WithContext::from( - &$self.logger, Some(counterparty_node_id), Some(channel_id), None - ); - log_error!(logger, "Closing channel: {}", err.err); - - $self.finish_close_channel(shutdown_res); - if let Some((update, node_id_1, node_id_2)) = update_option { - let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update, node_id_1, node_id_2 - }); - } - } else { - log_error!($self.logger, "Got non-closing error: {}", err.err); - } - - if let msgs::ErrorAction::IgnoreError = err.action { - if let Some(tx_abort) = tx_abort { - msg_event = Some(MessageSendEvent::SendTxAbort { - node_id: $counterparty_node_id, - msg: tx_abort, - }); - } - } else { - msg_event = Some(MessageSendEvent::HandleError { - node_id: $counterparty_node_id, - action: err.action.clone() - }); - } - - if let Some(msg_event) = msg_event { - let per_peer_state = $self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) { - let mut peer_state = peer_state_mutex.lock().unwrap(); - if peer_state.is_connected { - peer_state.pending_msg_events.push(msg_event); - } - } - } - - // Return error in case higher-API need one - Err(err) - }, - } - } }; -} - -macro_rules! 
send_channel_ready { - ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ - if $channel.context.is_connected() { - $pending_msg_events.push(MessageSendEvent::SendChannelReady { - node_id: $channel.context.get_counterparty_node_id(), - msg: $channel_ready_msg, - }); - } - // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so - // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. - let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id())); - assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()), - "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); - insert_short_channel_id!(short_to_chan_info, $channel); - }} -} +/// The return type of [`ChannelManager::check_free_peer_holding_cells`] +type FreeHoldingCellsResult = Vec<( + ChannelId, + PublicKey, + Option, + Vec<(HTLCSource, PaymentHash)>, +)>; macro_rules! insert_short_channel_id { ($short_to_chan_info: ident, $channel: expr) => {{ @@ -3353,284 +3229,6 @@ macro_rules! emit_initial_channel_ready_event { }; } -/// Handles the completion steps for when a [`ChannelMonitorUpdate`] is applied to a live channel. -/// -/// You should not add new direct calls to this, generally, rather rely on -/// `handle_new_monitor_update` or [`ChannelManager::channel_monitor_updated`] to call it for you. -/// -/// Requires that the in-flight monitor update set for this channel is empty! -macro_rules! 
handle_monitor_update_completion { - ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => {{ - let chan_id = $chan.context.channel_id(); - let outbound_alias = $chan.context().outbound_scid_alias(); - let cp_node_id = $chan.context.get_counterparty_node_id(); - - #[cfg(debug_assertions)] - { - let in_flight_updates = $peer_state.in_flight_monitor_updates.get(&chan_id); - assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true)); - assert!($chan.is_awaiting_monitor_update()); - } - - let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - - let update_actions = - $peer_state.monitor_update_blocked_actions.remove(&chan_id).unwrap_or(Vec::new()); - - if $chan.blocked_monitor_updates_pending() != 0 { - mem::drop($peer_state_lock); - mem::drop($per_peer_state_lock); - - log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked"); - $self.handle_monitor_update_completion_actions(update_actions); - } else { - log_debug!(logger, "Channel is open and awaiting update, resuming it"); - let updates = $chan.monitor_updating_restored( - &&logger, - &$self.node_signer, - $self.chain_hash, - &*$self.config.read().unwrap(), - $self.best_block.read().unwrap().height, - |htlc_id| { - $self.path_for_release_held_htlc(htlc_id, outbound_alias, &chan_id, &cp_node_id) - }, - ); - let channel_update = if updates.channel_ready.is_some() - && $chan.context.is_usable() - && $peer_state.is_connected - { - // We only send a channel_update in the case where we are just now sending a - // channel_ready and the channel is in a usable state. We may re-send a - // channel_update later through the announcement_signatures process for public - // channels, but there's no reason not to just inform our counterparty of our fees - // now. 
- if let Ok((msg, _, _)) = $self.get_channel_update_for_unicast($chan) { - Some(MessageSendEvent::SendChannelUpdate { node_id: cp_node_id, msg }) - } else { - None - } - } else { - None - }; - - let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption( - &mut $peer_state.pending_msg_events, - $chan, - updates.raa, - updates.commitment_update, - updates.commitment_order, - updates.accepted_htlcs, - updates.pending_update_adds, - updates.funding_broadcastable, - updates.channel_ready, - updates.announcement_sigs, - updates.tx_signatures, - None, - updates.channel_ready_order, - ); - if let Some(upd) = channel_update { - $peer_state.pending_msg_events.push(upd); - } - - let unbroadcasted_batch_funding_txid = - $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding); - core::mem::drop($peer_state_lock); - core::mem::drop($per_peer_state_lock); - - $self.post_monitor_update_unlock( - chan_id, - cp_node_id, - unbroadcasted_batch_funding_txid, - update_actions, - htlc_forwards, - decode_update_add_htlcs, - updates.finalized_claimed_htlcs, - updates.failed_htlcs, - ); - } - }}; -} - -/// Returns whether the monitor update is completed, `false` if the update is in-progress. -fn handle_monitor_update_res( - cm: &CM, update_res: ChannelMonitorUpdateStatus, logger: LG, -) -> bool { - debug_assert!(cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire)); - match update_res { - ChannelMonitorUpdateStatus::UnrecoverableError => { - let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down."; - log_error!(logger, "{}", err_str); - panic!("{}", err_str); - }, - ChannelMonitorUpdateStatus::InProgress => { - #[cfg(not(any(test, feature = "_externalize_tests")))] - if cm.get_cm().monitor_update_type.swap(1, Ordering::Relaxed) == 2 { - panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); - } - log_debug!( - logger, - "ChannelMonitor update in flight, holding messages until the update completes.", - ); - false - }, - ChannelMonitorUpdateStatus::Completed => { - #[cfg(not(any(test, feature = "_externalize_tests")))] - if cm.get_cm().monitor_update_type.swap(2, Ordering::Relaxed) == 1 { - panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); - } - true - }, - } -} - -macro_rules! handle_initial_monitor { - ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { - let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - let update_completed = handle_monitor_update_res($self, $update_res, logger); - if update_completed { - handle_monitor_update_completion!( - $self, - $peer_state_lock, - $peer_state, - $per_peer_state_lock, - $chan - ); - } - }; -} - -fn handle_new_monitor_update_internal( - cm: &CM, - in_flight_monitor_updates: &mut BTreeMap)>, - channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey, - new_update: ChannelMonitorUpdate, logger: LG, -) -> (bool, bool) { - let in_flight_updates = &mut in_flight_monitor_updates - .entry(channel_id) - .or_insert_with(|| (funding_txo, Vec::new())) - .1; - // During startup, we push monitor updates as background events through to here in - // order to replay updates that were in-flight when we shut down. Thus, we have to - // filter for uniqueness here. 
- let update_idx = - in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| { - in_flight_updates.push(new_update); - in_flight_updates.len() - 1 - }); - - if cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire) { - let update_res = - cm.get_cm().chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]); - let update_completed = handle_monitor_update_res(cm, update_res, logger); - if update_completed { - let _ = in_flight_updates.remove(update_idx); - } - (update_completed, update_completed && in_flight_updates.is_empty()) - } else { - // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we - // fail to persist it. This is a fairly safe assumption, however, since anything we do - // during the startup sequence should be replayed exactly if we immediately crash. - let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, - funding_txo, - channel_id, - update: in_flight_updates[update_idx].clone(), - }; - // We want to track the in-flight update both in `in_flight_monitor_updates` and in - // `pending_background_events` to avoid a race condition during - // `pending_background_events` processing where we complete one - // `ChannelMonitorUpdate` (but there are more pending as background events) but we - // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to - // run post-completion actions. - // We could work around that with some effort, but its simpler to just track updates - // twice. - cm.get_cm().pending_background_events.lock().unwrap().push(event); - (false, false) - } -} - -macro_rules! 
handle_post_close_monitor_update { - ( - $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, - $per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr - ) => {{ - let (update_completed, all_updates_complete) = handle_new_monitor_update_internal( - $self, - &mut $peer_state.in_flight_monitor_updates, - $channel_id, - $funding_txo, - $counterparty_node_id, - $update, - WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None), - ); - if all_updates_complete { - let update_actions = $peer_state - .monitor_update_blocked_actions - .remove(&$channel_id) - .unwrap_or(Vec::new()); - - mem::drop($peer_state_lock); - mem::drop($per_peer_state_lock); - - $self.handle_monitor_update_completion_actions(update_actions); - } - update_completed - }}; -} - -/// Handles a new monitor update without dropping peer_state locks and calling -/// [`ChannelManager::handle_monitor_update_completion_actions`] if the monitor update completed -/// synchronously. -/// -/// Useful because monitor updates need to be handled in the same mutex where the channel generated -/// them (otherwise they can end up getting applied out-of-order) but it's not always possible to -/// drop the aforementioned peer state locks at a given callsite. In this situation, use this macro -/// to apply the monitor update immediately and handle the monitor update completion actions at a -/// later time. -macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller { - ( - $self: ident, $funding_txo: expr, $update: expr, $in_flight_monitor_updates: expr, $chan_context: expr - ) => {{ - let (update_completed, _all_updates_complete) = handle_new_monitor_update_internal( - $self, - $in_flight_monitor_updates, - $chan_context.channel_id(), - $funding_txo, - $chan_context.get_counterparty_node_id(), - $update, - WithChannelContext::from(&$self.logger, &$chan_context, None), - ); - update_completed - }}; -} - -macro_rules! 
handle_new_monitor_update { - ( - $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, - $per_peer_state_lock: expr, $chan: expr - ) => {{ - let (update_completed, all_updates_complete) = handle_new_monitor_update_internal( - $self, - &mut $peer_state.in_flight_monitor_updates, - $chan.context.channel_id(), - $funding_txo, - $chan.context.get_counterparty_node_id(), - $update, - WithChannelContext::from(&$self.logger, &$chan.context, None), - ); - if all_updates_complete { - handle_monitor_update_completion!( - $self, - $peer_state_lock, - $peer_state, - $per_peer_state_lock, - $chan - ); - } - update_completed - }}; -} - fn convert_channel_err_internal< Close: FnOnce(ClosureReason, &str) -> (ShutdownResult, Option<(msgs::ChannelUpdate, NodeId, NodeId)>), >( @@ -3660,148 +3258,17 @@ fn convert_channel_err_internal< } } -fn convert_funded_channel_err_internal>( - cm: &CM, closed_channel_monitor_update_ids: &mut BTreeMap, - in_flight_monitor_updates: &mut BTreeMap)>, - coop_close_shutdown_res: Option, err: ChannelError, - chan: &mut FundedChannel, -) -> (bool, MsgHandleErrInternal) -where - SP::Target: SignerProvider, - CM::Watch: Watch<::EcdsaSigner>, -{ - let chan_id = chan.context.channel_id(); - convert_channel_err_internal(err, chan_id, |reason, msg| { - let cm = cm.get_cm(); - let logger = WithChannelContext::from(&cm.logger, &chan.context, None); - - let mut shutdown_res = - if let Some(res) = coop_close_shutdown_res { res } else { chan.force_shutdown(reason) }; - let chan_update = cm.get_channel_update_for_broadcast(chan).ok(); - - log_error!(logger, "Closed channel due to close-required error: {}", msg); - - if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { - handle_new_monitor_update_locked_actions_handled_by_caller!( - cm, - funding_txo, - update, - in_flight_monitor_updates, - chan.context - ); - } - // If there's a possibility that we need to generate further monitor updates for this - 
// channel, we need to store the last update_id of it. However, we don't want to insert - // into the map (which prevents the `PeerState` from being cleaned up) for channels that - // never even got confirmations (which would open us up to DoS attacks). - let update_id = chan.context.get_latest_monitor_update_id(); - let funding_confirmed = chan.funding.get_funding_tx_confirmation_height().is_some(); - let chan_zero_conf = chan.context.minimum_depth(&chan.funding) == Some(0); - if funding_confirmed || chan_zero_conf || update_id > 1 { - closed_channel_monitor_update_ids.insert(chan_id, update_id); - } - let mut short_to_chan_info = cm.short_to_chan_info.write().unwrap(); - if let Some(short_id) = chan.funding.get_short_channel_id() { - short_to_chan_info.remove(&short_id); - } else { - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. 
- let outbound_alias = chan.context.outbound_scid_alias(); - let alias_removed = cm.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); - debug_assert!(alias_removed); - } - short_to_chan_info.remove(&chan.context.outbound_scid_alias()); - for scid in chan.context.historical_scids() { - short_to_chan_info.remove(scid); - } - - (shutdown_res, chan_update) - }) -} - -fn convert_unfunded_channel_err_internal( - cm: &CM, err: ChannelError, chan: &mut Channel, -) -> (bool, MsgHandleErrInternal) -where - SP::Target: SignerProvider, -{ - let chan_id = chan.context().channel_id(); - convert_channel_err_internal(err, chan_id, |reason, msg| { - let cm = cm.get_cm(); - let logger = WithChannelContext::from(&cm.logger, chan.context(), None); - - let shutdown_res = chan.force_shutdown(reason); - log_error!(logger, "Closed channel due to close-required error: {}", msg); - cm.short_to_chan_info.write().unwrap().remove(&chan.context().outbound_scid_alias()); - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. - let outbound_alias = chan.context().outbound_scid_alias(); - let alias_removed = cm.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); - debug_assert!(alias_removed); - (shutdown_res, None) - }) -} - -/// When a channel is removed, two things need to happen: -/// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, -/// (b) [`handle_error`] needs to be called without holding any locks (except -/// [`ChannelManager::total_consistency_lock`]), which then calls -/// [`ChannelManager::finish_close_channel`]. 
-/// -/// Note that this step can be skipped if the channel was never opened (through the creation of a -/// [`ChannelMonitor`]/channel funding transaction) to begin with. -/// -/// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped -/// error)`, except in the `COOP_CLOSE` case, where the bool is elided (it is always implicitly -/// true). -#[rustfmt::skip] -macro_rules! convert_channel_err { - ($self: ident, $peer_state: expr, $shutdown_result: expr, $funded_channel: expr, COOP_CLOSED) => { { - let reason = ChannelError::Close(("Coop Closed".to_owned(), $shutdown_result.closure_reason.clone())); - let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; - let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - let (close, mut err) = - convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, Some($shutdown_result), reason, $funded_channel); - err.dont_send_error_message(); - debug_assert!(close); - err - } }; - ($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, FUNDED_CHANNEL) => { { - let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; - let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, None, $err, $funded_channel) - } }; - ($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { { - convert_unfunded_channel_err_internal($self, $err, $channel) - } }; - ($self: ident, $peer_state: expr, $err: expr, $channel: expr) => { - match $channel.as_funded_mut() { - Some(funded_channel) => { - let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; - let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; - convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, None, $err, funded_channel) - }, - None => { - convert_unfunded_channel_err_internal($self, $err, 
$channel) - }, - } - }; -} - macro_rules! break_channel_entry { ($self: ident, $peer_state: expr, $res: expr, $entry: expr) => { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut()); + let (drop, res) = $self.locked_handle_force_close( + &mut $peer_state.closed_channel_monitor_update_ids, + &mut $peer_state.in_flight_monitor_updates, + e, + $entry.get_mut(), + ); if drop { $entry.remove_entry(); } @@ -3816,7 +3283,12 @@ macro_rules! try_channel_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut()); + let (drop, res) = $self.locked_handle_force_close( + &mut $peer_state.closed_channel_monitor_update_ids, + &mut $peer_state.in_flight_monitor_updates, + e, + $entry.get_mut(), + ); if drop { $entry.remove_entry(); } @@ -3893,6 +3365,11 @@ macro_rules! process_events_body { } if !post_event_actions.is_empty() { + // `handle_post_event_actions` may update channel state, so take the total + // consistency lock now similarly to other callers of `handle_post_event_actions`. + // Note that if it needs to wake the background processor for event handling or + // persistence it will do so directly. + let _read_guard = $self.total_consistency_lock.read().unwrap(); $self.handle_post_event_actions(post_event_actions); // If we had some actions, go around again as we may have more events now processed_all_events = false; @@ -3911,27 +3388,37 @@ macro_rules! process_events_body { } } +/// Creates an [`Event::HTLCIntercepted`] from a [`PendingAddHTLCInfo`]. We generate this event in a +/// few places so this DRYs the code. +fn create_htlc_intercepted_event( + intercept_id: InterceptId, pending_add: &PendingAddHTLCInfo, +) -> Result { + let inbound_amount_msat = pending_add.forward_info.incoming_amt_msat.ok_or(())?; + let requested_next_hop_scid = match pending_add.forward_info.routing { + PendingHTLCRouting::Forward { short_channel_id, .. 
} => short_channel_id, + _ => return Err(()), + }; + Ok(Event::HTLCIntercepted { + requested_next_hop_scid, + payment_hash: pending_add.forward_info.payment_hash, + inbound_amount_msat, + expected_outbound_amount_msat: pending_add.forward_info.outgoing_amt_msat, + intercept_id, + outgoing_htlc_expiry_block_height: Some(pending_add.forward_info.outgoing_cltv_value), + }) +} + impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { /// Constructs a new `ChannelManager` to hold several channels and route between them. 
/// @@ -3983,7 +3470,7 @@ where best_block: RwLock::new(params.best_block), outbound_scid_aliases: Mutex::new(new_hash_set()), - pending_outbound_payments: OutboundPayments::new(new_hash_map(), logger.clone()), + pending_outbound_payments: OutboundPayments::new(new_hash_map()), forward_htlcs: Mutex::new(new_hash_map()), decode_update_add_htlcs: Mutex::new(new_hash_map()), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }), @@ -4031,6 +3518,29 @@ where } } + fn send_channel_ready( + &self, pending_msg_events: &mut Vec, channel: &FundedChannel, + channel_ready_msg: msgs::ChannelReady, + ) { + let counterparty_node_id = channel.context.get_counterparty_node_id(); + if channel.context.is_connected() { + pending_msg_events.push(MessageSendEvent::SendChannelReady { + node_id: counterparty_node_id, + msg: channel_ready_msg, + }); + } + // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so + // we allow collisions, but we shouldn't ever be updating the channel ID pointed to. + let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); + let outbound_alias_insert = short_to_chan_info.insert( + channel.context.outbound_scid_alias(), + (counterparty_node_id, channel.context.channel_id()), + ); + assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == (counterparty_node_id, channel.context.channel_id()), + "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels"); + insert_short_channel_id!(short_to_chan_info, channel); + } + /// Gets the current [`UserConfig`] which controls some global behavior and includes the /// default configuration applied to all new channels. 
pub fn get_current_config(&self) -> UserConfig { @@ -4146,7 +3656,7 @@ where }; match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config, - self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &*self.logger) + self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &self.logger) { Ok(res) => res, Err(e) => { @@ -4323,8 +3833,11 @@ where .collect() } - #[rustfmt::skip] - fn close_channel_internal(&self, chan_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, override_shutdown_script: Option) -> Result<(), APIError> { + fn close_channel_internal( + &self, chan_id: &ChannelId, counterparty_node_id: &PublicKey, + target_feerate_sats_per_1000_weight: Option, + override_shutdown_script: Option, + ) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new(); @@ -4333,8 +3846,9 @@ where { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") })?; + let peer_state_mutex = per_peer_state + .get(counterparty_node_id) + .ok_or_else(|| APIError::no_such_peer(counterparty_node_id))?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -4350,8 +3864,13 @@ where if let Some(chan) = chan_entry.get_mut().as_funded_mut() { let funding_txo_opt = chan.funding.get_funding_txo(); let their_features = &peer_state.latest_features; - let (shutdown_msg, mut monitor_update_opt, htlcs) = - chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, 
override_shutdown_script)?; + let (shutdown_msg, mut monitor_update_opt, htlcs) = chan.get_shutdown( + &self.signer_provider, + their_features, + target_feerate_sats_per_1000_weight, + override_shutdown_script, + &self.logger, + )?; failed_htlcs = htlcs; // We can send the `shutdown` message before updating the `ChannelMonitor` @@ -4362,30 +3881,38 @@ where msg: shutdown_msg, }); - debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(), - "We can't both complete shutdown and generate a monitor update"); + debug_assert!( + monitor_update_opt.is_none() || !chan.is_shutdown(), + "We can't both complete shutdown and generate a monitor update" + ); // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update_opt.take() { - handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, peer_state, per_peer_state, chan); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo_opt.unwrap(), + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } } else { let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, mut e) = self.locked_handle_unfunded_close(err, &mut chan); e.dont_send_error_message(); shutdown_result = Err(e); } }, hash_map::Entry::Vacant(_) => { - return Err(APIError::ChannelUnavailable { - err: format!( - "Channel with id {} not found for the passed counterparty node_id {}", - chan_id, counterparty_node_id, - ) - }); + return Err(APIError::no_such_channel_for_peer(chan_id, counterparty_node_id)); }, } } @@ -4393,12 +3920,15 @@ 
where for htlc_source in failed_htlcs.drain(..) { let failure_reason = LocalHTLCFailureReason::ChannelClosed; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(*counterparty_node_id), channel_id: *chan_id }; + let receiver = HTLCHandlingFailureType::Forward { + node_id: Some(*counterparty_node_id), + channel_id: *chan_id, + }; let (source, hash) = htlc_source; self.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None); } - let _ = handle_error!(self, shutdown_result, *counterparty_node_id); + let _ = self.handle_error(shutdown_result, *counterparty_node_id); Ok(()) } @@ -4490,8 +4020,19 @@ where match peer_state.channel_by_id.entry(channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { - handle_new_monitor_update!(self, funding_txo, - monitor_update, peer_state_lock, peer_state, per_peer_state, chan); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo, + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } return; } else { debug_assert!(false, "We shouldn't have an update for a non-funded channel"); @@ -4500,18 +4041,28 @@ where hash_map::Entry::Vacant(_) => {}, } - handle_post_close_monitor_update!( - self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, - counterparty_node_id, channel_id - ); + if let Some(actions) = self.handle_post_close_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + funding_txo, + monitor_update, + counterparty_node_id, + channel_id, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + 
self.handle_monitor_update_completion_actions(actions); + } } /// When a channel is removed, two things need to happen: - /// (a) [`convert_channel_err`] must be called in the same `per_peer_state` lock as the - /// channel-closing action, - /// (b) [`handle_error`] needs to be called without holding any locks (except + /// (a) Handle the initial within-lock closure for the channel via one of the following methods: + /// [`ChannelManager::locked_handle_unfunded_close`], + /// [`ChannelManager::locked_handle_funded_coop_close`], + /// [`ChannelManager::locked_handle_funded_force_close`] or + /// [`ChannelManager::locked_handle_force_close`]. + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except /// [`ChannelManager::total_consistency_lock`]), which then calls this. - #[rustfmt::skip] fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) { debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); #[cfg(debug_assertions)] @@ -4520,21 +4071,36 @@ where } let logger = WithContext::from( - &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None + &self.logger, + Some(shutdown_res.counterparty_node_id), + Some(shutdown_res.channel_id), + None, ); - log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail", - shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len()); + log_debug!( + logger, + "Finishing closure of channel due to {} with {} HTLCs to fail", + shutdown_res.closure_reason, + shutdown_res.dropped_outbound_htlcs.len() + ); for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) 
{ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::ChannelClosed; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { + node_id: Some(counterparty_node_id), + channel_id, + }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver, None); } if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update { debug_assert!(false, "This should have been handled in `convert_channel_err`"); - self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update); + self.apply_post_close_monitor_update( + shutdown_res.counterparty_node_id, + shutdown_res.channel_id, + funding_txo, + monitor_update, + ); } if self.background_events_processed_since_startup.load(Ordering::Acquire) { // If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are @@ -4543,7 +4109,11 @@ where // TODO: If we do the `in_flight_monitor_updates.is_empty()` check in // `convert_channel_err` we can skip the locks here. 
if shutdown_res.channel_funding_txo.is_some() { - self.channel_monitor_updated(&shutdown_res.channel_id, None, &shutdown_res.counterparty_node_id); + self.channel_monitor_updated( + &shutdown_res.channel_id, + None, + &shutdown_res.counterparty_node_id, + ); } } let mut shutdown_results: Vec<(Result, _)> = Vec::new(); @@ -4559,11 +4129,17 @@ where if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) { let reason = ClosureReason::FundingBatchClosure; let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, e) = self.locked_handle_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); shutdown_results.push((Err(e), counterparty_node_id)); } } - has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state)); + has_uncompleted_channel = + Some(has_uncompleted_channel.map_or(!state, |v| v || !state)); } debug_assert!( has_uncompleted_channel.unwrap_or(true), @@ -4573,26 +4149,32 @@ where { let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push_back((events::Event::ChannelClosed { - channel_id: shutdown_res.channel_id, - user_channel_id: shutdown_res.user_channel_id, - reason: shutdown_res.closure_reason, - counterparty_node_id: Some(shutdown_res.counterparty_node_id), - channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis), - channel_funding_txo: shutdown_res.channel_funding_txo, - last_local_balance_msat: Some(shutdown_res.last_local_balance_msat), - }, None)); - - if let Some(splice_funding_failed) = shutdown_res.splice_funding_failed.take() { - pending_events.push_back((events::Event::SpliceFailed { + pending_events.push_back(( + events::Event::ChannelClosed { channel_id: shutdown_res.channel_id, - counterparty_node_id: shutdown_res.counterparty_node_id, user_channel_id: shutdown_res.user_channel_id, - 
abandoned_funding_txo: splice_funding_failed.funding_txo, - channel_type: splice_funding_failed.channel_type, - contributed_inputs: splice_funding_failed.contributed_inputs, - contributed_outputs: splice_funding_failed.contributed_outputs, - }, None)); + reason: shutdown_res.closure_reason, + counterparty_node_id: Some(shutdown_res.counterparty_node_id), + channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis), + channel_funding_txo: shutdown_res.channel_funding_txo, + last_local_balance_msat: Some(shutdown_res.last_local_balance_msat), + }, + None, + )); + + if let Some(splice_funding_failed) = shutdown_res.splice_funding_failed.take() { + pending_events.push_back(( + events::Event::SpliceFailed { + channel_id: shutdown_res.channel_id, + counterparty_node_id: shutdown_res.counterparty_node_id, + user_channel_id: shutdown_res.user_channel_id, + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type, + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, + None, + )); } if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx { @@ -4602,26 +4184,30 @@ where .expect("We had an unbroadcasted funding tx, so should also have had a funding outpoint"), } } else { - FundingInfo::Tx{ transaction } + FundingInfo::Tx { transaction } }; - pending_events.push_back((events::Event::DiscardFunding { - channel_id: shutdown_res.channel_id, funding_info - }, None)); + pending_events.push_back(( + events::Event::DiscardFunding { + channel_id: shutdown_res.channel_id, + funding_info, + }, + None, + )); } } for (err, counterparty_node_id) in shutdown_results.drain(..) 
{ - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } } /// `peer_msg` should be set when we receive a message from a peer, but not set when the /// user closes, which will be re-exposed as the `ChannelClosed` reason. - #[rustfmt::skip] - fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, reason: ClosureReason) - -> Result<(), APIError> { + fn force_close_channel_with_peer( + &self, channel_id: &ChannelId, peer_node_id: &PublicKey, reason: ClosureReason, + ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(peer_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?; + let peer_state_mutex = + per_peer_state.get(peer_node_id).ok_or_else(|| APIError::no_such_peer(peer_node_id))?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None); @@ -4635,7 +4221,12 @@ where if let Some(mut chan) = peer_state.channel_by_id.remove(channel_id) { log_error!(logger, "Force-closing channel"); let err = ChannelError::Close((message, reason)); - let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, mut e) = self.locked_handle_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); mem::drop(peer_state_lock); mem::drop(per_peer_state); if is_from_counterparty { @@ -4643,26 +4234,24 @@ where // error message. 
e.dont_send_error_message(); } - let _ = handle_error!(self, Err::<(), _>(e), *peer_node_id); + let _ = self.handle_error(Err::<(), _>(e), *peer_node_id); Ok(()) } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() { log_error!(logger, "Force-closing inbound channel request"); if !is_from_counterparty && peer_state.is_connected { - peer_state.pending_msg_events.push( - MessageSendEvent::HandleError { - node_id: *peer_node_id, - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: *channel_id, data: message } - }, - } - ); + peer_state.pending_msg_events.push(MessageSendEvent::HandleError { + node_id: *peer_node_id, + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { channel_id: *channel_id, data: message }, + }, + }); } // N.B. that we don't send any channel close event here: we // don't have a user_channel_id, and we never sent any opening // events anyway. Ok(()) } else { - Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) }) + Err(APIError::no_such_channel_for_peer(channel_id, peer_node_id)) } } @@ -4710,10 +4299,251 @@ where } } + /// Handles an error by closing the channel if required and generating peer messages. + fn handle_error( + &self, internal: Result, counterparty_node_id: PublicKey, + ) -> Result { + // In testing, ensure there are no deadlocks where the lock is already held upon + // entering the macro. 
+ debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); + debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); + + internal.map_err(|err_internal| { + let mut msg_event = None; + + if let Some((shutdown_res, update_option)) = err_internal.shutdown_finish { + let counterparty_node_id = shutdown_res.counterparty_node_id; + let channel_id = shutdown_res.channel_id; + let logger = WithContext::from( + &self.logger, + Some(counterparty_node_id), + Some(channel_id), + None, + ); + log_error!(logger, "Closing channel: {}", err_internal.err.err); + + self.finish_close_channel(shutdown_res); + if let Some((update, node_id_1, node_id_2)) = update_option { + let mut pending_broadcast_messages = + self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { + msg: update, + node_id_1, + node_id_2, + }); + } + } else { + log_error!(self.logger, "Got non-closing error: {}", err_internal.err.err); + } + + if let msgs::ErrorAction::IgnoreError = err_internal.err.action { + if let Some(tx_abort) = err_internal.tx_abort { + msg_event = Some(MessageSendEvent::SendTxAbort { + node_id: counterparty_node_id, + msg: tx_abort, + }); + } + } else { + msg_event = Some(MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: err_internal.err.action.clone(), + }); + } + + if let Some(msg_event) = msg_event { + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + if peer_state.is_connected { + peer_state.pending_msg_events.push(msg_event); + } + } + } + + // Return error in case higher-API need one + err_internal.err + }) + } + + /// Handle the initial within-lock closure for a funded channel that is either force-closed or cooperatively + /// closed (as indicated by `coop_close_shutdown_res`). 
+ /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + fn locked_handle_funded_close_internal( + &self, closed_channel_monitor_update_ids: &mut BTreeMap, + in_flight_monitor_updates: &mut BTreeMap)>, + coop_close_shutdown_res: Option, err: ChannelError, + chan: &mut FundedChannel, + ) -> (bool, MsgHandleErrInternal) { + let chan_id = chan.context.channel_id(); + convert_channel_err_internal(err, chan_id, |reason, msg| { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); + + let mut shutdown_res = if let Some(res) = coop_close_shutdown_res { + res + } else { + chan.force_shutdown(reason) + }; + let chan_update = self.get_channel_update_for_broadcast(chan).ok(); + + log_error!(logger, "Closed channel due to close-required error: {}", msg); + + if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { + self.handle_new_monitor_update_locked_actions_handled_by_caller( + in_flight_monitor_updates, + chan.context.channel_id(), + funding_txo, + chan.context.get_counterparty_node_id(), + update, + ); + } + // If there's a possibility that we need to generate further monitor updates for this + // channel, we need to store the last update_id of it. However, we don't want to insert + // into the map (which prevents the `PeerState` from being cleaned up) for channels that + // never even got confirmations (which would open us up to DoS attacks). 
+ let update_id = chan.context.get_latest_monitor_update_id(); + let funding_confirmed = chan.funding.get_funding_tx_confirmation_height().is_some(); + let chan_zero_conf = chan.context.minimum_depth(&chan.funding) == Some(0); + if funding_confirmed || chan_zero_conf || update_id > 1 { + closed_channel_monitor_update_ids.insert(chan_id, update_id); + } + let mut short_to_chan_info = self.short_to_chan_info.write().unwrap(); + if let Some(short_id) = chan.funding.get_short_channel_id() { + short_to_chan_info.remove(&short_id); + } else { + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let outbound_alias = chan.context.outbound_scid_alias(); + let alias_removed = + self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); + debug_assert!(alias_removed); + } + short_to_chan_info.remove(&chan.context.outbound_scid_alias()); + for scid in chan.context.historical_scids() { + short_to_chan_info.remove(scid); + } + + (shutdown_res, chan_update) + }) + } + + /// Handle the initial within-lock closure for an unfunded channel. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + /// + /// The same closure semantics as described in [`ChannelManager::locked_handle_force_close`] apply. 
+ fn locked_handle_unfunded_close( + &self, err: ChannelError, chan: &mut Channel, + ) -> (bool, MsgHandleErrInternal) { + let chan_id = chan.context().channel_id(); + convert_channel_err_internal(err, chan_id, |reason, msg| { + let logger = WithChannelContext::from(&self.logger, chan.context(), None); + + let shutdown_res = chan.force_shutdown(reason); + log_error!(logger, "Closed channel due to close-required error: {}", msg); + self.short_to_chan_info.write().unwrap().remove(&chan.context().outbound_scid_alias()); + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let outbound_alias = chan.context().outbound_scid_alias(); + let alias_removed = self.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); + debug_assert!(alias_removed); + (shutdown_res, None) + }) + } + + /// Handle the initial within-lock closure for a channel that is cooperatively closed. + /// + /// Returns a mapped error. + /// + /// The same closure semantics as described in [`ChannelManager::locked_handle_force_close`] apply. 
+ fn locked_handle_funded_coop_close( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + shutdown_result: ShutdownResult, funded_channel: &mut FundedChannel, + ) -> MsgHandleErrInternal { + let reason = + ChannelError::Close(("Coop Closed".to_owned(), shutdown_result.closure_reason.clone())); + let (close, mut err) = self.locked_handle_funded_close_internal( + closed_update_ids, + in_flight_updates, + Some(shutdown_result), + reason, + funded_channel, + ); + err.dont_send_error_message(); + debug_assert!(close); + err + } + + /// Handle the initial within-lock closure for a funded channel that is force-closed. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + /// + /// The same closure semantics as described in [`ChannelManager::locked_handle_force_close`] apply. + fn locked_handle_funded_force_close( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + err: ChannelError, funded_channel: &mut FundedChannel, + ) -> (bool, MsgHandleErrInternal) { + self.locked_handle_funded_close_internal( + closed_update_ids, + in_flight_updates, + None, + err, + funded_channel, + ) + } + + /// Handle the initial within-lock closure for a channel that is force-closed. + /// + /// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped + /// error)`. + /// + /// # Closure semantics + /// + /// Two things need to happen: + /// (a) This method must be called in the same `per_peer_state` lock as the channel-closing action, + /// (b) [`ChannelManager::handle_error`] needs to be called without holding any locks (except + /// [`ChannelManager::total_consistency_lock`]), which then calls + /// [`ChannelManager::finish_close_channel`]. 
+ fn locked_handle_force_close( + &self, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + err: ChannelError, channel: &mut Channel, + ) -> (bool, MsgHandleErrInternal) { + match channel.as_funded_mut() { + Some(funded_channel) => self.locked_handle_funded_close_internal( + closed_update_ids, + in_flight_updates, + None, + err, + funded_channel, + ), + None => self.locked_handle_unfunded_close(err, channel), + } + } + /// Initiate a splice in order to add value to (splice-in) or remove value from (splice-out) /// the channel. This will spend the channel's funding transaction output, effectively replacing /// it with a new one. /// + /// # Required Feature Flags + /// + /// Initiating a splice requires that the channel counterparty supports splicing. Any + /// channel (no matter the type) can be spliced, as long as the counterparty is currently + /// connected. + /// /// # Arguments /// /// Provide a `contribution` to determine if value is spliced in or out. The splice initiator is @@ -4765,17 +4595,25 @@ where ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = match per_peer_state.get(counterparty_node_id).ok_or_else(|| { - APIError::ChannelUnavailable { - err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - } - }) { + let peer_state_mutex = match per_peer_state + .get(counterparty_node_id) + .ok_or_else(|| APIError::no_such_peer(counterparty_node_id)) + { Ok(p) => p, Err(e) => return Err(e), }; - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; + let mut peer_state = peer_state_mutex.lock().unwrap(); + if !peer_state.latest_features.supports_splicing() { + return Err(APIError::ChannelUnavailable { + err: "Peer does not support splicing".to_owned(), + }); + } + if !peer_state.latest_features.supports_quiescence() { + return Err(APIError::ChannelUnavailable { + err: "Peer does not 
support quiescence, a splicing prerequisite".to_owned(), + }); + } // Look for the channel match peer_state.channel_by_id.entry(*channel_id) { @@ -4805,12 +4643,9 @@ where }) } }, - hash_map::Entry::Vacant(_) => Err(APIError::ChannelUnavailable { - err: format!( - "Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id, - ), - }), + hash_map::Entry::Vacant(_) => { + Err(APIError::no_such_channel_for_peer(channel_id, counterparty_node_id)) + }, } } @@ -4836,11 +4671,10 @@ where ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = match per_peer_state.get(counterparty_node_id).ok_or_else(|| { - APIError::ChannelUnavailable { - err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - } - }) { + let peer_state_mutex = match per_peer_state + .get(counterparty_node_id) + .ok_or_else(|| APIError::no_such_peer(counterparty_node_id)) + { Ok(p) => p, Err(e) => return Err(e), }; @@ -4893,18 +4727,58 @@ where }) } }, - hash_map::Entry::Vacant(_) => Err(APIError::ChannelUnavailable { - err: format!( - "Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id, - ), - }), + hash_map::Entry::Vacant(_) => { + Err(APIError::no_such_channel_for_peer(channel_id, counterparty_node_id)) + }, + } + } + + fn forward_needs_intercept_to_known_chan(&self, outbound_chan: &FundedChannel) -> bool { + let intercept_flags = self.config.read().unwrap().htlc_interception_flags; + if !outbound_chan.context.should_announce() { + if outbound_chan.context.is_connected() { + if intercept_flags & (HTLCInterceptionFlags::ToOnlinePrivateChannels as u8) != 0 { + return true; + } + } else { + if intercept_flags & (HTLCInterceptionFlags::ToOfflinePrivateChannels as u8) != 0 { + return true; + } + } + } else { + if intercept_flags & (HTLCInterceptionFlags::ToPublicChannels as u8) != 0 { + return true; + } + } + 
false + } + + fn forward_needs_intercept_to_unknown_chan(&self, outgoing_scid: u64) -> bool { + let intercept_flags = self.config.read().unwrap().htlc_interception_flags; + if fake_scid::is_valid_intercept( + &self.fake_scid_rand_bytes, + outgoing_scid, + &self.chain_hash, + ) { + if intercept_flags & (HTLCInterceptionFlags::ToInterceptSCIDs as u8) != 0 { + return true; + } + } else if fake_scid::is_valid_phantom( + &self.fake_scid_rand_bytes, + outgoing_scid, + &self.chain_hash, + ) { + // Handled as a normal forward + } else if intercept_flags & (HTLCInterceptionFlags::ToUnknownSCIDs as u8) != 0 { + return true; } + false } #[rustfmt::skip] fn can_forward_htlc_to_outgoing_channel( - &self, chan: &mut FundedChannel, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails + &self, chan: &mut FundedChannel, msg: &msgs::UpdateAddHTLC, + next_packet: &NextPacketDetails, will_intercept: bool, ) -> Result<(), LocalHTLCFailureReason> { if !chan.context.should_announce() && !self.config.read().unwrap().accept_forwards_to_priv_channels @@ -4930,9 +4804,11 @@ where // around to doing the actual forward, but better to fail early if we can and // hopefully an attacker trying to path-trace payments cannot make this occur // on a small/per-node/per-channel scale. 
- if !chan.context.is_live() { + if !will_intercept && !chan.context.is_live() { if !chan.context.is_enabled() { return Err(LocalHTLCFailureReason::ChannelDisabled); + } else if !chan.context.is_connected() { + return Err(LocalHTLCFailureReason::PeerOffline); } else { return Err(LocalHTLCFailureReason::ChannelNotReady); } @@ -4940,9 +4816,7 @@ where if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { return Err(LocalHTLCFailureReason::AmountBelowMinimum); } - chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value)?; - - Ok(()) + chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) } /// Executes a callback `C` that returns some value `X` on the channel found with the given @@ -4968,37 +4842,60 @@ where } } - #[rustfmt::skip] - fn can_forward_htlc( - &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails - ) -> Result<(), LocalHTLCFailureReason> { - let outgoing_scid = match next_packet_details.outgoing_connector { + fn can_forward_htlc_should_intercept( + &self, msg: &msgs::UpdateAddHTLC, next_hop: &NextPacketDetails, + ) -> Result { + let outgoing_scid = match next_hop.outgoing_connector { HopConnector::ShortChannelId(scid) => scid, + HopConnector::Dummy => { + // Dummy hops are only used for path padding and must not reach HTLC processing. + debug_assert!(false, "Dummy hop reached HTLC handling."); + return Err(LocalHTLCFailureReason::InvalidOnionPayload); + }, HopConnector::Trampoline(_) => { return Err(LocalHTLCFailureReason::InvalidTrampolineForward); - } + }, }; - match self.do_funded_channel_callback(outgoing_scid, |chan: &mut FundedChannel| { - self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details) - }) { - Some(Ok(())) => {}, - Some(Err(e)) => return Err(e), - None => { - // If we couldn't find the channel info for the scid, it may be a phantom or - // intercept forward. 
- if (self.config.read().unwrap().accept_intercept_htlcs && - fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) || - fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash) - {} else { - return Err(LocalHTLCFailureReason::UnknownNextPeer); - } - } - } + // TODO: We do the fake SCID namespace check a bunch of times here (and indirectly via + // `forward_needs_intercept_*`, including as called in + // `can_forward_htlc_to_outgoing_channel`), we should find a way to reduce the number of + // times we do it. + let intercept = + match self.do_funded_channel_callback(outgoing_scid, |chan: &mut FundedChannel| { + let intercept = self.forward_needs_intercept_to_known_chan(chan); + self.can_forward_htlc_to_outgoing_channel(chan, msg, next_hop, intercept)?; + Ok(intercept) + }) { + Some(Ok(intercept)) => intercept, + Some(Err(e)) => return Err(e), + None => { + // Perform basic sanity checks on the amounts and CLTV being forwarded + if next_hop.outgoing_amt_msat > msg.amount_msat { + return Err(LocalHTLCFailureReason::FeeInsufficient); + } + let cltv_delta = msg.cltv_expiry.saturating_sub(next_hop.outgoing_cltv_value); + if cltv_delta < MIN_CLTV_EXPIRY_DELTA.into() { + return Err(LocalHTLCFailureReason::IncorrectCLTVExpiry); + } + + if fake_scid::is_valid_phantom( + &self.fake_scid_rand_bytes, + outgoing_scid, + &self.chain_hash, + ) { + false + } else if self.forward_needs_intercept_to_unknown_chan(outgoing_scid) { + true + } else { + return Err(LocalHTLCFailureReason::UnknownNextPeer); + } + }, + }; let cur_height = self.best_block.read().unwrap().height + 1; - check_incoming_htlc_cltv(cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry)?; + check_incoming_htlc_cltv(cur_height, next_hop.outgoing_cltv_value, msg.cltv_expiry)?; - Ok(()) + Ok(intercept) } #[rustfmt::skip] @@ -5100,11 +4997,25 @@ where let current_height: u32 = self.best_block.read().unwrap().height; 
create_recv_pending_htlc_info(decoded_hop, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat, - current_height) + msg.accountable.unwrap_or(false), current_height) }, onion_utils::Hop::Forward { .. } | onion_utils::Hop::BlindedForward { .. } => { create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) }, + onion_utils::Hop::Dummy { .. } => { + debug_assert!( + false, + "Reached unreachable dummy-hop HTLC. Dummy hops are peeled in \ + `process_pending_update_add_htlcs`, and the resulting HTLC is \ + re-enqueued for processing. Hitting this means the peel-and-requeue \ + step was missed." + ); + return Err(InboundHTLCErr { + msg: "Failed to decode update add htlc onion", + reason: LocalHTLCFailureReason::InvalidOnionPayload, + err_data: Vec::new(), + }) + }, onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. } => { create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) }, @@ -5177,7 +5088,7 @@ where chain_hash: self.chain_hash, short_channel_id, timestamp: chan.context.get_update_time_counter(), - message_flags: 1, // Only must_be_one + message_flags: 1 | if !chan.context.should_announce() { 1 << 1 } else { 0 }, // must_be_one + dont_forward channel_flags: (!were_node_one) as u8 | ((!enabled as u8) << 1), cltv_expiry_delta: chan.context.get_cltv_expiry_delta(), htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(), @@ -5243,6 +5154,13 @@ where let prng_seed = self.entropy_source.get_secure_random_bytes(); let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted"); + let logger = WithContext::for_payment( + &self.logger, + path.hops.first().map(|hop| hop.pubkey), + None, + Some(*payment_hash), + payment_id, + ); let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion( &self.secp_ctx, &path, @@ -5256,8 +5174,6 @@ where prng_seed, ) 
.map_err(|e| { - let first_hop_key = Some(path.hops.first().unwrap().pubkey); - let logger = WithContext::from(&self.logger, first_hop_key, None, Some(*payment_hash)); log_error!(logger, "Failed to build an onion for path"); e })?; @@ -5268,9 +5184,6 @@ where let (counterparty_node_id, id) = match first_chan { None => { - let first_hop_key = Some(path.hops.first().unwrap().pubkey); - let logger = - WithContext::from(&self.logger, first_hop_key, None, Some(*payment_hash)); log_error!(logger, "Failed to find first-hop for payment hash {payment_hash}"); return Err(APIError::ChannelUnavailable { err: "No channel available with first hop!".to_owned(), @@ -5279,12 +5192,9 @@ where Some((cp_id, chan_id)) => (cp_id, chan_id), }; - let logger = WithContext::from( - &self.logger, - Some(counterparty_node_id), - Some(id), - Some(*payment_hash), - ); + // Add the channel id to the logger that already has the rest filled in. + let logger_ref = &logger; + let logger = WithContext::from(&logger_ref, None, Some(id), None); log_trace!( logger, "Attempting to send payment along path with next hop {first_chan_scid}" @@ -5307,11 +5217,6 @@ where }); } let funding_txo = chan.funding.get_funding_txo().unwrap(); - let logger = WithChannelContext::from( - &self.logger, - &chan.context, - Some(*payment_hash), - ); let htlc_source = HTLCSource::OutboundRoute { path: path.clone(), session_priv: session_priv.clone(), @@ -5327,21 +5232,28 @@ where onion_packet, None, hold_htlc_at_next_hop, + false, // Not accountable by default for sender. 
&self.fee_estimator, &&logger, ); match break_channel_entry!(self, peer_state, send_res, chan_entry) { Some(monitor_update) => { - let ok = handle_new_monitor_update!( - self, - funding_txo, - monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); - if !ok { + let (update_completed, completion_data) = self + .handle_new_monitor_update_with_status( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo, + monitor_update, + ); + if let Some(data) = completion_data { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } + if !update_completed { // Note that MonitorUpdateInProgress here indicates (per function // docs) that we will resend the commitment update once monitor // updating completes. Therefore, we must return an error @@ -5371,7 +5283,7 @@ where } return Ok(()); }; - match handle_error!(self, err, path.hops.first().unwrap().pubkey) { + match self.handle_error(err, path.hops.first().unwrap().pubkey) { Ok(_) => unreachable!(), Err(e) => Err(APIError::ChannelUnavailable { err: e.err }), } @@ -5398,11 +5310,13 @@ where }); if route.route_params.is_none() { route.route_params = Some(route_params.clone()); } let router = FixedRouter::new(route); + let logger = + WithContext::for_payment(&self.logger, None, None, Some(payment_hash), payment_id); self.pending_outbound_payments .send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0), route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, - &self.pending_events, |args| self.send_payment_along_path(args)) + &self.pending_events, |args| self.send_payment_along_path(args), &logger) } /// Sends a payment to the route found using the provided [`RouteParameters`], retrying failed @@ -5433,7 +5347,7 @@ 
where /// using [`ChannelMonitorUpdateStatus::InProgress`]), the payment may be lost on restart. See /// [`ChannelManager::list_recent_payments`] for more information. /// - /// Routes are automatically found using the [`Router] provided on startup. To fix a route for a + /// Routes are automatically found using the [`Router`] provided on startup. To fix a route for a /// particular payment, use [`Self::send_payment_with_route`] or match the [`PaymentId`] passed to /// [`Router::find_route_with_id`]. /// @@ -5462,6 +5376,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::for_payment(&self.logger, None, None, Some(payment_hash), payment_id), ) } @@ -5542,16 +5457,16 @@ where /// To use default settings, call the function with [`RouteParametersConfig::default`]. pub fn pay_for_bolt11_invoice( &self, invoice: &Bolt11Invoice, payment_id: PaymentId, amount_msats: Option, - route_params_config: RouteParametersConfig, retry_strategy: Retry, + optional_params: OptionalBolt11PaymentParams, ) -> Result<(), Bolt11PaymentError> { let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let payment_hash = invoice.payment_hash(); self.pending_outbound_payments.pay_for_bolt11_invoice( invoice, payment_id, amount_msats, - route_params_config, - retry_strategy, + optional_params, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(), @@ -5560,6 +5475,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::for_payment(&self.logger, None, None, Some(payment_hash), payment_id), ) } @@ -5586,29 +5502,12 @@ where pub fn send_payment_for_bolt12_invoice( &self, invoice: &Bolt12Invoice, context: Option<&OffersContext>, ) -> Result<(), Bolt12PaymentError> { - match self.verify_bolt12_invoice(invoice, context) { + match self.flow.verify_bolt12_invoice(invoice, context) { 
Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id), Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice), } } - fn verify_bolt12_invoice( - &self, invoice: &Bolt12Invoice, context: Option<&OffersContext>, - ) -> Result { - let secp_ctx = &self.secp_ctx; - let expanded_key = &self.inbound_payment_key; - - match context { - None if invoice.is_for_refund_without_paths() => { - invoice.verify_using_metadata(expanded_key, secp_ctx) - }, - Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) => { - invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx) - }, - _ => Err(()), - } - } - fn send_payment_for_verified_bolt12_invoice( &self, invoice: &Bolt12Invoice, payment_id: PaymentId, ) -> Result<(), Bolt12PaymentError> { @@ -5629,13 +5528,14 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::for_payment(&self.logger, None, None, None, payment_id), ) } fn check_refresh_async_receive_offer_cache(&self, timer_tick_occurred: bool) { let peers = self.get_peers_for_blinded_path(); let channels = self.list_usable_channels(); - let router = &*self.router; + let router = &self.router; let refresh_res = self.flow.check_refresh_async_receive_offer_cache( peers, channels, @@ -5685,6 +5585,7 @@ where ) -> Result<(), Bolt12PaymentError> { let mut res = Ok(()); PersistenceNotifierGuard::optionally_notify(self, || { + let logger = WithContext::for_payment(&self.logger, None, None, None, payment_id); let best_block_height = self.best_block.read().unwrap().height; let features = self.bolt12_invoice_features(); let outbound_pmts_res = self.pending_outbound_payments.static_invoice_received( @@ -5693,7 +5594,7 @@ where features, best_block_height, self.duration_since_epoch(), - &*self.entropy_source, + &self.entropy_source, &self.pending_events, ); match outbound_pmts_res { @@ -5717,7 +5618,7 @@ where self.send_payment_for_static_invoice_no_persist(payment_id, channels, 
true) { log_trace!( - self.logger, + logger, "Failed to send held HTLC with payment id {}: {:?}", payment_id, e @@ -5809,6 +5710,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::for_payment(&self.logger, None, None, None, payment_id), ) } @@ -5826,7 +5728,7 @@ where intercept_id, prev_outbound_scid_alias, htlc_id, - &*self.entropy_source, + &self.entropy_source, ) } @@ -5887,6 +5789,7 @@ where ) -> Result { let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let payment_hash = payment_preimage.map(|preimage| preimage.into()); self.pending_outbound_payments.send_spontaneous_payment( payment_preimage, recipient_onion, @@ -5901,6 +5804,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::for_payment(&self.logger, None, None, payment_hash, payment_id), ) } @@ -6043,12 +5947,12 @@ where /// which checks the correctness of the funding transaction given the associated channel. 
#[rustfmt::skip] fn funding_transaction_generated_intern) -> Result>( - &self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, is_batch_funding: bool, - mut find_funding_output: FundingOutput, is_manual_broadcast: bool, - ) -> Result<(), APIError> { + &self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, is_batch_funding: bool, + mut find_funding_output: FundingOutput, is_manual_broadcast: bool, + ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(&counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") })?; + .ok_or_else(|| APIError::no_such_peer(&counterparty_node_id))?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -6064,7 +5968,7 @@ where mem::drop(peer_state_lock); mem::drop(per_peer_state); - let _: Result<(), _> = handle_error!(self, Err(err), counterparty); + let _: Result<(), _> = self.handle_error(Err(err), counterparty); Err($api_err) } } } @@ -6288,17 +6192,20 @@ where self.batch_funding_transaction_generated_intern(temporary_channels, funding_type) } - #[rustfmt::skip] - fn batch_funding_transaction_generated_intern(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType) -> Result<(), APIError> { + fn batch_funding_transaction_generated_intern( + &self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType, + ) -> Result<(), APIError> { let mut result = Ok(()); - if let FundingType::Checked(funding_transaction) | - FundingType::CheckedManualBroadcast(funding_transaction) = &funding + if let FundingType::Checked(funding_transaction) + | FundingType::CheckedManualBroadcast(funding_transaction) = &funding { if !funding_transaction.is_coinbase() { for inp in 
funding_transaction.input.iter() { if inp.witness.is_empty() { result = result.and(Err(APIError::APIMisuseError { - err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned() + err: + "Funding transaction must be fully signed and spend Segwit outputs" + .to_owned(), })); } } @@ -6306,7 +6213,8 @@ where if funding_transaction.output.len() > u16::max_value() as usize { result = result.and(Err(APIError::APIMisuseError { - err: "Transaction had more than 2^16 outputs, which is not supported".to_owned() + err: "Transaction had more than 2^16 outputs, which is not supported" + .to_owned(), })); } let height = self.best_block.read().unwrap().height; @@ -6314,104 +6222,121 @@ where // lower than the next block height. However, the modules constituting our Lightning // node might not have perfect sync about their blockchain views. Thus, if the wallet // module is ahead of LDK, only allow one more block of headroom. - if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && - funding_transaction.lock_time.is_block_height() && - funding_transaction.lock_time.to_consensus_u32() > height + 1 + if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) + && funding_transaction.lock_time.is_block_height() + && funding_transaction.lock_time.to_consensus_u32() > height + 1 { result = result.and(Err(APIError::APIMisuseError { - err: "Funding transaction absolute timelock is non-final".to_owned() + err: "Funding transaction absolute timelock is non-final".to_owned(), })); } } let txid = funding.txid(); let is_batch_funding = temporary_channels.len() > 1; - let mut funding_batch_states = if is_batch_funding { - Some(self.funding_batch_states.lock().unwrap()) - } else { - None - }; - let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| { - match states.entry(txid) { - btree_map::Entry::Occupied(_) => { - result = result.clone().and(Err(APIError::APIMisuseError { - err: "Batch funding 
transaction with the same txid already exists".to_owned() - })); - None - }, - btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())), - } + let mut funding_batch_states = + if is_batch_funding { Some(self.funding_batch_states.lock().unwrap()) } else { None }; + let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| match states + .entry(txid) + { + btree_map::Entry::Occupied(_) => { + result = result.clone().and(Err(APIError::APIMisuseError { + err: "Batch funding transaction with the same txid already exists".to_owned(), + })); + None + }, + btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())), }); let is_manual_broadcast = funding.is_manual_broadcast(); for &(temporary_channel_id, counterparty_node_id) in temporary_channels { - result = result.and_then(|_| self.funding_transaction_generated_intern( - *temporary_channel_id, - *counterparty_node_id, - funding.transaction_or_dummy(), - is_batch_funding, - |chan| { - let mut output_index = None; - let expected_spk = chan.funding.get_funding_redeemscript().to_p2wsh(); - let outpoint = match &funding { - FundingType::Checked(tx) | FundingType::CheckedManualBroadcast(tx) => { - for (idx, outp) in tx.output.iter().enumerate() { - if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.funding.get_value_satoshis() { - if output_index.is_some() { - return Err("Multiple outputs matched the expected script and value"); + result = result.and_then(|_| { + self.funding_transaction_generated_intern( + *temporary_channel_id, + *counterparty_node_id, + funding.transaction_or_dummy(), + is_batch_funding, + |chan| { + let mut output_index = None; + let expected_spk = chan.funding.get_funding_redeemscript().to_p2wsh(); + let outpoint = match &funding { + FundingType::Checked(tx) | FundingType::CheckedManualBroadcast(tx) => { + for (idx, outp) in tx.output.iter().enumerate() { + if outp.script_pubkey == expected_spk + && outp.value.to_sat() == 
chan.funding.get_value_satoshis() + { + if output_index.is_some() { + return Err("Multiple outputs matched the expected script and value"); + } + output_index = Some(idx as u16); } - output_index = Some(idx as u16); } - } - if output_index.is_none() { - return Err("No output matched the script_pubkey and value in the FundingGenerationReady event"); - } - OutPoint { txid, index: output_index.unwrap() } - }, - FundingType::Unchecked(outpoint) => outpoint.clone(), - }; - if let Some(funding_batch_state) = funding_batch_state.as_mut() { - // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably - // need to fix this somehow to not rely on using the outpoint for the channel ID if we - // want to support V2 batching here as well. - funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false)); - } - Ok(outpoint) - }, - is_manual_broadcast) - ); + if output_index.is_none() { + return Err("No output matched the script_pubkey and value in the FundingGenerationReady event"); + } + OutPoint { txid, index: output_index.unwrap() } + }, + FundingType::Unchecked(outpoint) => outpoint.clone(), + }; + if let Some(funding_batch_state) = funding_batch_state.as_mut() { + // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably + // need to fix this somehow to not rely on using the outpoint for the channel ID if we + // want to support V2 batching here as well. + funding_batch_state.push(( + ChannelId::v1_from_funding_outpoint(outpoint), + *counterparty_node_id, + false, + )); + } + Ok(outpoint) + }, + is_manual_broadcast, + ) + }); } if let Err(ref e) = result { // Remaining channels need to be removed on any error. 
let e = format!("Error in transaction funding: {:?}", e); let mut channels_to_remove = Vec::new(); - channels_to_remove.extend(funding_batch_states.as_mut() - .and_then(|states| states.remove(&txid)) - .into_iter().flatten() - .map(|(chan_id, node_id, _state)| (chan_id, node_id)) - ); - channels_to_remove.extend(temporary_channels.iter() - .map(|(&chan_id, &node_id)| (chan_id, node_id)) + channels_to_remove.extend( + funding_batch_states + .as_mut() + .and_then(|states| states.remove(&txid)) + .into_iter() + .flatten() + .map(|(chan_id, node_id, _state)| (chan_id, node_id)), ); + channels_to_remove + .extend(temporary_channels.iter().map(|(&chan_id, &node_id)| (chan_id, node_id))); let mut shutdown_results: Vec<(Result, _)> = Vec::new(); { let per_peer_state = self.per_peer_state.read().unwrap(); for (channel_id, counterparty_node_id) in channels_to_remove { - per_peer_state.get(&counterparty_node_id) + per_peer_state + .get(&counterparty_node_id) .map(|peer_state_mutex| peer_state_mutex.lock().unwrap()) - .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state))) + .and_then(|mut peer_state| { + peer_state + .channel_by_id + .remove(&channel_id) + .map(|chan| (chan, peer_state)) + }) .map(|(mut chan, mut peer_state_lock)| { let reason = ClosureReason::ProcessingError { err: e.clone() }; let err = ChannelError::Close((e.clone(), reason)); let peer_state = &mut *peer_state_lock; - let (_, e) = - convert_channel_err!(self, peer_state, err, &mut chan); + let (_, e) = self.locked_handle_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); shutdown_results.push((Err(e), counterparty_node_id)); }); } } mem::drop(funding_batch_states); for (err, counterparty_node_id) in shutdown_results { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } } result @@ -6458,69 +6383,100 @@ where pub 
fn funding_transaction_signed( &self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, transaction: Transaction, ) -> Result<(), APIError> { - let mut result = Ok(()); + let mut funding_tx_signed_result = Ok(()); + let mut monitor_update_result: Option< + Result, + > = None; + PersistenceNotifierGuard::optionally_notify(self, || { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if peer_state_mutex_opt.is_none() { - result = Err(APIError::ChannelUnavailable { - err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") - }); + funding_tx_signed_result = Err(APIError::no_such_peer(counterparty_node_id)); return NotifyOption::SkipPersistNoEvents; } - let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); + let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.get_mut(channel_id) { - Some(channel) => match channel.as_funded_mut() { - Some(chan) => { - let txid = transaction.compute_txid(); - let witnesses: Vec<_> = transaction - .input - .into_iter() - .map(|input| input.witness) - .filter(|witness| !witness.is_empty()) - .collect(); - let best_block_height = self.best_block.read().unwrap().height; - match chan.funding_transaction_signed( - txid, - witnesses, - best_block_height, - &self.logger, - ) { - Ok(FundingTxSigned { - tx_signatures: Some(tx_signatures), - funding_tx, - splice_negotiated, - splice_locked, - }) => { - if let Some(funding_tx) = funding_tx { - self.broadcast_interactive_funding( - chan, - &funding_tx, - &self.logger, + match peer_state.channel_by_id.entry(*channel_id) { + hash_map::Entry::Occupied(mut chan_entry) => { + let txid = transaction.compute_txid(); + let witnesses: Vec<_> = transaction + .input + .into_iter() + .map(|input| input.witness) + .filter(|witness| !witness.is_empty()) + .collect(); + let 
best_block_height = self.best_block.read().unwrap().height; + + let chan = chan_entry.get_mut(); + match chan.funding_transaction_signed( + txid, + witnesses, + best_block_height, + &self.fee_estimator, + &self.logger, + ) { + Ok(FundingTxSigned { + commitment_signed, + counterparty_initial_commitment_signed_result, + tx_signatures, + funding_tx, + splice_negotiated, + splice_locked, + }) => { + if let Some((funding_tx, tx_type)) = funding_tx { + let funded_chan = chan.as_funded_mut().expect( + "Funding transactions ready for broadcast can only exist for funded channels", + ); + self.broadcast_interactive_funding( + funded_chan, + &funding_tx, + Some(tx_type), + &self.logger, + ); + } + if let Some(splice_negotiated) = splice_negotiated { + self.pending_events.lock().unwrap().push_back(( + events::Event::SplicePending { + channel_id: *channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: chan.context().get_user_id(), + new_funding_txo: splice_negotiated.funding_txo, + channel_type: splice_negotiated.channel_type, + new_funding_redeem_script: splice_negotiated + .funding_redeem_script, + }, + None, + )); + } + + if chan.context().is_connected() { + if let Some(commitment_signed) = commitment_signed { + peer_state.pending_msg_events.push( + MessageSendEvent::UpdateHTLCs { + node_id: *counterparty_node_id, + channel_id: *channel_id, + updates: CommitmentUpdate { + commitment_signed: vec![commitment_signed], + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, + }, + }, ); } - if let Some(splice_negotiated) = splice_negotiated { - self.pending_events.lock().unwrap().push_back(( - events::Event::SplicePending { - channel_id: *channel_id, - counterparty_node_id: *counterparty_node_id, - user_channel_id: chan.context.get_user_id(), - new_funding_txo: splice_negotiated.funding_txo, - channel_type: splice_negotiated.channel_type, - 
new_funding_redeem_script: splice_negotiated - .funding_redeem_script, + if let Some(tx_signatures) = tx_signatures { + peer_state.pending_msg_events.push( + MessageSendEvent::SendTxSignatures { + node_id: *counterparty_node_id, + msg: tx_signatures, }, - None, - )); + ); } - peer_state.pending_msg_events.push( - MessageSendEvent::SendTxSignatures { - node_id: *counterparty_node_id, - msg: tx_signatures, - }, - ); if let Some(splice_locked) = splice_locked { peer_state.pending_msg_events.push( MessageSendEvent::SendSpliceLocked { @@ -6529,52 +6485,80 @@ where }, ); } - return NotifyOption::DoPersist; - }, - Err(err) => { - result = Err(err); - return NotifyOption::SkipPersistNoEvents; - }, - Ok(FundingTxSigned { - tx_signatures: None, - funding_tx, - splice_negotiated, - splice_locked, - }) => { - debug_assert!(funding_tx.is_none()); - debug_assert!(splice_negotiated.is_none()); - debug_assert!(splice_locked.is_none()); - return NotifyOption::SkipPersistNoEvents; - }, - } - }, - None => { - result = Err(APIError::APIMisuseError { - err: format!( - "Channel with id {} not expecting funding signatures", - channel_id - ), - }); - return NotifyOption::SkipPersistNoEvents; - }, + } + + if let Some(funded_chan) = chan.as_funded_mut() { + match counterparty_initial_commitment_signed_result { + Some(Ok(Some(monitor_update))) => { + let funding_txo = funded_chan.funding.get_funding_txo(); + if let Some(post_update_data) = self + .handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + funded_chan, + funding_txo.unwrap(), + monitor_update, + ) { + monitor_update_result = Some(Ok(post_update_data)); + } + }, + Some(Err(err)) => { + let (drop, err) = self.locked_handle_funded_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + funded_chan, + ); + if drop { + 
chan_entry.remove_entry(); + } + + monitor_update_result = Some(Err(err)); + }, + Some(Ok(None)) | None => {}, + } + } + + funding_tx_signed_result = Ok(()); + }, + Err(err) => { + funding_tx_signed_result = Err(err); + return NotifyOption::SkipPersistNoEvents; + }, + } }, - None => { - result = Err(APIError::ChannelUnavailable { - err: format!( - "Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id - ), - }); + hash_map::Entry::Vacant(_) => { + funding_tx_signed_result = + Err(APIError::no_such_channel_for_peer(channel_id, counterparty_node_id)); return NotifyOption::SkipPersistNoEvents; }, } + + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + + if let Some(monitor_update_result) = monitor_update_result { + match monitor_update_result { + Ok(post_update_data) => { + self.handle_post_monitor_update_chan_resume(post_update_data); + }, + Err(_) => { + let _ = self.handle_error(monitor_update_result, *counterparty_node_id); + }, + } + } + + NotifyOption::DoPersist }); - result + funding_tx_signed_result } fn broadcast_interactive_funding( - &self, channel: &mut FundedChannel, funding_tx: &Transaction, logger: &L, + &self, channel: &mut FundedChannel, funding_tx: &Transaction, + transaction_type: Option, logger: &L, ) { let logger = WithChannelContext::from(logger, channel.context(), None); log_info!( @@ -6582,7 +6566,13 @@ where "Broadcasting signed interactive funding transaction {}", funding_tx.compute_txid() ); - self.tx_broadcaster.broadcast_transactions(&[funding_tx]); + let tx_type = transaction_type.unwrap_or_else(|| TransactionType::Funding { + channels: vec![( + channel.context().get_counterparty_node_id(), + channel.context().channel_id(), + )], + }); + self.tx_broadcaster.broadcast_transactions(&[(funding_tx, tx_type)]); { let mut pending_events = self.pending_events.lock().unwrap(); emit_channel_pending_event!(pending_events, channel); @@ -6624,15 +6614,16 @@ where let _persistence_guard = 
PersistenceNotifierGuard::notify_on_drop(self); let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") })?; + .ok_or_else(|| APIError::no_such_peer(counterparty_node_id))?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for channel_id in channel_ids { if !peer_state.has_channel(channel_id) { - return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id), - }); + return Err(APIError::no_such_channel_for_peer( + channel_id, + counterparty_node_id, + )); }; } for channel_id in channel_ids { @@ -6703,11 +6694,8 @@ where /// Intercepted HTLCs can be useful for Lightning Service Providers (LSPs) to open a just-in-time /// channel to a receiving node if the node lacks sufficient inbound liquidity. /// - /// To make use of intercepted HTLCs, set [`UserConfig::accept_intercept_htlcs`] and use - /// [`ChannelManager::get_intercept_scid`] to generate short channel id(s) to put in the - /// receiver's invoice route hints. These route hints will signal to LDK to generate an - /// [`HTLCIntercepted`] event when it receives the forwarded HTLC, and this method or - /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to the event. + /// To make use of intercepted HTLCs, set [`UserConfig::htlc_interception_flags`] must have a + /// non-0 value. /// /// Note that LDK does not enforce fee requirements in `amt_to_forward_msat`, and will not stop /// you from forwarding more than you received. See @@ -6717,7 +6705,7 @@ where /// Errors if the event was not handled in time, in which case the HTLC was automatically failed /// backwards. 
/// - /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs + /// [`UserConfig::htlc_interception_flags`]: crate::util::config::UserConfig::htlc_interception_flags /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted /// [`HTLCIntercepted::expected_outbound_amount_msat`]: events::Event::HTLCIntercepted::expected_outbound_amount_msat // TODO: when we move to deciding the best outbound channel at forward time, only take @@ -6730,12 +6718,9 @@ where let outbound_scid_alias = { let peer_state_lock = self.per_peer_state.read().unwrap(); - let peer_state_mutex = - peer_state_lock.get(&next_node_id).ok_or_else(|| APIError::ChannelUnavailable { - err: format!( - "Can't find a peer matching the passed counterparty node_id {next_node_id}" - ), - })?; + let peer_state_mutex = peer_state_lock + .get(&next_node_id) + .ok_or_else(|| APIError::no_such_peer(&next_node_id))?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.get(next_hop_channel_id) { @@ -6768,11 +6753,10 @@ where logger, "Channel not found when attempting to forward intercepted HTLC" ); - return Err(APIError::ChannelUnavailable { - err: format!( - "Channel with id {next_hop_channel_id} not found for the passed counterparty node_id {next_node_id}" - ), - }); + return Err(APIError::no_such_channel_for_peer( + next_hop_channel_id, + &next_node_id, + )); }, } }; @@ -6864,6 +6848,7 @@ where fn process_pending_update_add_htlcs(&self) -> bool { let mut should_persist = false; let mut decode_update_add_htlcs = new_hash_map(); + let mut dummy_update_add_htlcs = new_hash_map(); mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap()); let get_htlc_failure_type = |outgoing_scid_opt: Option, payment_hash: PaymentHash| { @@ -6923,11 +6908,40 @@ where let (next_hop, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion( &update_add_htlc, - 
&*self.node_signer, - &*self.logger, + &self.node_signer, + &self.logger, &self.secp_ctx, ) { - Ok(decoded_onion) => decoded_onion, + Ok(decoded_onion) => match decoded_onion { + ( + onion_utils::Hop::Dummy { + dummy_hop_data, + next_hop_hmac, + new_packet_bytes, + .. + }, + Some(next_packet_details), + ) => { + let new_update_add_htlc = + onion_utils::peel_dummy_hop_update_add_htlc( + update_add_htlc, + dummy_hop_data, + next_hop_hmac, + new_packet_bytes, + next_packet_details, + &self.node_signer, + &self.secp_ctx, + ); + + dummy_update_add_htlcs + .entry(incoming_scid_alias) + .or_insert_with(Vec::new) + .push(new_update_add_htlc); + + continue; + }, + _ => decoded_onion, + }, Err((htlc_fail, reason)) => { let failure_type = HTLCHandlingFailureType::InvalidOnion; @@ -6940,10 +6954,33 @@ where let outgoing_scid_opt = next_packet_details_opt.as_ref().and_then(|d| match d.outgoing_connector { HopConnector::ShortChannelId(scid) => Some(scid), + HopConnector::Dummy => { + debug_assert!( + false, + "Dummy hops must never be processed at this stage." + ); + None + }, HopConnector::Trampoline(_) => None, }); let shared_secret = next_hop.shared_secret().secret_bytes(); + macro_rules! fail_htlc_continue_to_next { + ($reason: expr) => {{ + let htlc_fail = self.htlc_failure_from_update_add_err( + &update_add_htlc, + &incoming_counterparty_node_id, + $reason, + is_intro_node_blinded_forward, + &shared_secret, + ); + let failure_type = + get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((htlc_fail, failure_type, $reason.into())); + continue; + }}; + } + // Nodes shouldn't expect us to hold HTLCs for them if we don't advertise htlc_hold feature // support. 
// @@ -6956,18 +6993,7 @@ where if update_add_htlc.hold_htlc.is_some() && !BaseMessageHandler::provided_node_features(self).supports_htlc_hold() { - let reason = LocalHTLCFailureReason::TemporaryNodeFailure; - let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, - &incoming_counterparty_node_id, - reason, - is_intro_node_blinded_forward, - &shared_secret, - ); - let failure_type = - get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, failure_type, reason.into())); - continue; + fail_htlc_continue_to_next!(LocalHTLCFailureReason::TemporaryNodeFailure); } // Process the HTLC on the incoming channel. @@ -6984,38 +7010,22 @@ where ) { Some(Ok(_)) => {}, Some(Err(reason)) => { - let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, - &incoming_counterparty_node_id, - reason, - is_intro_node_blinded_forward, - &shared_secret, - ); - let failure_type = - get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, failure_type, reason.into())); - continue; + fail_htlc_continue_to_next!(reason); }, // The incoming channel no longer exists, HTLCs should be resolved onchain instead. None => continue 'outer_loop, } // Now process the HTLC on the outgoing channel if it's a forward. 
+ let mut intercept_forward = false; if let Some(next_packet_details) = next_packet_details_opt.as_ref() { - if let Err(reason) = - self.can_forward_htlc(&update_add_htlc, next_packet_details) + match self + .can_forward_htlc_should_intercept(&update_add_htlc, next_packet_details) { - let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, - &incoming_counterparty_node_id, - reason, - is_intro_node_blinded_forward, - &shared_secret, - ); - let failure_type = - get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, failure_type, reason.into())); - continue; + Err(reason) => { + fail_htlc_continue_to_next!(reason); + }, + Ok(intercept) => intercept_forward = intercept, } } @@ -7026,7 +7036,98 @@ where incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey), ) { - Ok(info) => htlc_forwards.push((info, update_add_htlc.htlc_id)), + Ok(info) => { + let to_pending_add = |info| PendingAddHTLCInfo { + prev_outbound_scid_alias: incoming_scid_alias, + prev_counterparty_node_id: incoming_counterparty_node_id, + prev_funding_outpoint: incoming_funding_txo, + prev_channel_id: incoming_channel_id, + prev_htlc_id: update_add_htlc.htlc_id, + prev_user_channel_id: incoming_user_channel_id, + forward_info: info, + }; + let intercept_id = || { + InterceptId::from_htlc_id_and_chan_id( + update_add_htlc.htlc_id, + &incoming_channel_id, + &incoming_counterparty_node_id, + ) + }; + let logger = WithContext::from( + &self.logger, + None, + Some(incoming_channel_id), + Some(update_add_htlc.payment_hash), + ); + if info.routing.should_hold_htlc() { + let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + let intercept_id = intercept_id(); + match held_htlcs.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + log_debug!( + logger, + "Intercepted held HTLC with id {intercept_id}, holding until the recipient is online" + ); + let pending_add = to_pending_add(info); + 
entry.insert(pending_add); + }, + hash_map::Entry::Occupied(_) => { + debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); + log_error!(logger, "Duplicate intercept id for HTLC"); + fail_htlc_continue_to_next!( + LocalHTLCFailureReason::TemporaryNodeFailure + ); + }, + } + } else if intercept_forward { + let intercept_id = intercept_id(); + let mut pending_intercepts = + self.pending_intercepted_htlcs.lock().unwrap(); + match pending_intercepts.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + let pending_add = to_pending_add(info); + if let Ok(intercept_ev) = + create_htlc_intercepted_event(intercept_id, &pending_add) + { + log_debug!( + logger, + "Intercepted HTLC, generating intercept event with ID {intercept_id}" + ); + let ev_entry = (intercept_ev, None); + // It's possible we processed this intercept forward, + // generated an event, then re-processed it here after + // restart, in which case the intercept event should not be + // pushed redundantly. 
+ let mut events = self.pending_events.lock().unwrap(); + events.retain(|ev| *ev != ev_entry); + events.push_back(ev_entry); + entry.insert(pending_add); + } else { + debug_assert!(false); + log_error!( + logger, + "Failed to generate an intercept event for HTLC" + ); + fail_htlc_continue_to_next!( + LocalHTLCFailureReason::TemporaryNodeFailure + ); + } + }, + hash_map::Entry::Occupied(_) => { + log_error!( + logger, + "Failed to forward incoming HTLC: detected duplicate intercepted payment", + ); + debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); + fail_htlc_continue_to_next!( + LocalHTLCFailureReason::TemporaryNodeFailure + ); + }, + } + } else { + htlc_forwards.push((info, update_add_htlc.htlc_id)) + } + }, Err(inbound_err) => { let failure_type = get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); @@ -7050,7 +7151,7 @@ where incoming_funding_txo, incoming_channel_id, incoming_user_channel_id, - htlc_forwards.drain(..).collect(), + htlc_forwards, ); self.forward_htlcs(&mut [pending_forwards]); for (htlc_fail, failure_type, failure_reason) in htlc_fails.drain(..) { @@ -7083,6 +7184,19 @@ where )); } } + + // Merge peeled dummy HTLCs into the existing decode queue so they can be + // processed in the next iteration. We avoid replacing the whole queue + // (e.g. via mem::swap) because other threads may have enqueued new HTLCs + // meanwhile; merging preserves everything safely. 
+ if !dummy_update_add_htlcs.is_empty() { + let mut decode_update_add_htlc_source = self.decode_update_add_htlcs.lock().unwrap(); + + for (incoming_scid_alias, htlcs) in dummy_update_add_htlcs.into_iter() { + decode_update_add_htlc_source.entry(incoming_scid_alias).or_default().extend(htlcs); + } + } + should_persist } @@ -7165,6 +7279,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, None), ); if needs_persist { should_persist = NotifyOption::DoPersist; @@ -7181,10 +7296,6 @@ where } self.forward_htlcs(&mut phantom_receives); - // Freeing the holding cell here is relatively redundant - in practice we'll do it when we - // next get a `get_and_clear_pending_msg_events` call, but some tests rely on it, and it's - // nice to do the work now if we can rather than while we're trying to get messages in the - // network stack. if self.check_free_holding_cells() { should_persist = NotifyOption::DoPersist; } @@ -7222,6 +7333,7 @@ where payment_hash, outgoing_amt_msat, outgoing_cltv_value, + incoming_accountable, .. }, } = payment; @@ -7272,7 +7384,7 @@ where onion_packet.hmac, payment_hash, None, - &*self.node_signer, + &self.node_signer, ); let next_hop = match decode_res { Ok(res) => res, @@ -7320,6 +7432,7 @@ where Some(phantom_shared_secret), false, None, + incoming_accountable, current_height, ); match create_res { @@ -7429,6 +7542,7 @@ where outgoing_cltv_value, routing, skimmed_fee_msat, + incoming_accountable, .. }, .. @@ -7529,6 +7643,7 @@ where onion_packet.clone(), *skimmed_fee_msat, next_blinding_point, + *incoming_accountable, &self.fee_estimator, &&logger, ) { @@ -8022,9 +8137,11 @@ where /// Free the background events, generally called from [`PersistenceNotifierGuard`] constructors. /// /// Expects the caller to have a total_consistency_lock read lock. 
- #[rustfmt::skip] fn process_background_events(&self) -> NotifyOption { - debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread); + debug_assert_ne!( + self.total_consistency_lock.held_by_thread(), + LockHeldState::NotHeldByThread + ); self.background_events_processed_since_startup.store(true, Ordering::Release); @@ -8036,14 +8153,37 @@ where for event in background_events.drain(..) { match event { - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => { - self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update); - }, - BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => { - self.channel_monitor_updated(&channel_id, None, &counterparty_node_id); + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo, + channel_id, + update, + } => { + self.apply_post_close_monitor_update( + counterparty_node_id, + channel_id, + funding_txo, + update, + ); }, - } - } + BackgroundEvent::MonitorUpdatesComplete { + counterparty_node_id, + channel_id, + highest_update_id_completed, + } => { + // Now that we can finally handle the background event, remove all in-flight + // monitor updates for this channel that we've known to complete, as they have + // already been persisted to the monitor and can be applied to our internal + // state such that the channel resumes operation if no new updates have been + // made since. 
+ self.channel_monitor_updated( + &channel_id, + Some(highest_update_id_completed), + &counterparty_node_id, + ); + }, + } + } NotifyOption::DoPersist } @@ -8166,7 +8306,7 @@ where if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } if let Err(e) = funded_chan.timer_check_closing_negotiation_progress() { - let (needs_close, err) = convert_channel_err!(self, peer_state, e, funded_chan, FUNDED_CHANNEL); + let (needs_close, err) = self.locked_handle_funded_force_close(&mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, e, funded_chan); handle_errors.push((Err(err), counterparty_node_id)); if needs_close { return false; } } @@ -8243,7 +8383,10 @@ where let reason = ClosureReason::FundingTimedOut; let msg = "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(); let err = ChannelError::Close((msg, reason)); - let (_, e) = convert_channel_err!(self, peer_state, err, chan); + let (_, e) = self.locked_handle_unfunded_close( + err, + chan, + ); handle_errors.push((Err(e), counterparty_node_id)); false } else { @@ -8358,7 +8501,7 @@ where } for (err, counterparty_node_id) in handle_errors { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } #[cfg(feature = "std")] @@ -8375,10 +8518,21 @@ where self.check_refresh_async_receive_offer_cache(true); - // Technically we don't need to do this here, but if we have holding cell entries in a - // channel that need freeing, it's better to do that here and block a background task - // than block the message queueing pipeline. if self.check_free_holding_cells() { + // While we try to ensure we clear holding cells immediately, its possible we miss + // one somewhere. Thus, its useful to try regularly to ensure even if something + // gets stuck its only for a minute or so. Still, good to panic here in debug to + // ensure we discover the missing free. 
+ // Note that in cases where we had a fee update in the loop above, we expect to + // need to free holding cells now, thus we only report an error if `should_persist` + // has not been updated to `DoPersist`. + if should_persist != NotifyOption::DoPersist { + debug_assert!(false, "Holding cells are cleared immediately"); + log_error!( + self.logger, + "Holding cells were freed in last-ditch cleanup. Please report this (performance) bug." + ); + } should_persist = NotifyOption::DoPersist; } @@ -8543,6 +8697,13 @@ where // being fully configured. See the docs for `ChannelManagerReadArgs` for more. match source { HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => { + let logger = WithContext::for_payment( + &self.logger, + path.hops.first().map(|hop| hop.pubkey), + None, + Some(*payment_hash), + *payment_id, + ); self.pending_outbound_payments.fail_htlc( source, payment_hash, @@ -8554,6 +8715,7 @@ where &self.secp_ctx, &self.pending_events, &mut from_monitor_update_completion, + &logger, ); if let Some(update) = from_monitor_update_completion { // If `fail_htlc` didn't `take` the post-event action, we should go ahead and @@ -8868,7 +9030,7 @@ where // Now we can handle any errors which were generated. for (counterparty_node_id, err) in errs.drain(..) 
{ let res: Result<(), _> = Err(err); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } } @@ -8981,15 +9143,19 @@ where .or_insert_with(Vec::new) .push(raa_blocker); } - handle_new_monitor_update!( - self, + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, prev_hop.funding_txo, monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } }, UpdateFulfillCommitFetch::DuplicateClaim {} => { let (action_opt, raa_blocker_opt) = completion_action(None, true); @@ -9150,16 +9316,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ .push(action); } - handle_post_close_monitor_update!( - self, + if let Some(actions) = self.handle_post_close_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, prev_hop.funding_txo, preimage_update, - peer_state_lock, - peer_state, - per_peer_state, prev_hop.counterparty_node_id, - chan_id - ); + chan_id, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(actions); + } } fn finalize_claims(&self, sources: Vec<(HTLCSource, Option)>) { @@ -9229,6 +9397,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ counterparty_node_id: path.hops[0].pubkey, }) }; + let logger = WithContext::for_payment( + &self.logger, + path.hops.first().map(|hop| hop.pubkey), + None, + Some(payment_preimage.into()), + payment_id, + ); self.pending_outbound_payments.claim_htlc( payment_id, payment_preimage, @@ -9238,6 +9413,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ from_onchain, &mut ev_completion_action, &self.pending_events, + &logger, ); // If an event was generated, `claim_htlc` set `ev_completion_action` to None, if // not, we should go ahead and run it now (as the claim was duplicative), at least @@ -9394,13 +9570,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// Handles actions which need to complete after a [`ChannelMonitorUpdate`] has been applied /// which can happen after the per-peer state lock has been dropped. fn post_monitor_update_unlock( - &self, channel_id: ChannelId, counterparty_node_id: PublicKey, - unbroadcasted_batch_funding_txid: Option, + &self, channel_id: ChannelId, counterparty_node_id: PublicKey, funding_txo: OutPoint, + user_channel_id: u128, unbroadcasted_batch_funding_txid: Option, update_actions: Vec, htlc_forwards: Option, decode_update_add_htlcs: Option<(u64, Vec)>, finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, ) { // If the channel belongs to a batch funding transaction, the progress of the batch // should be updated as we have received funding_signed and persisted the monitor. @@ -9427,6 +9604,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten(); let per_peer_state = self.per_peer_state.read().unwrap(); let mut batch_funding_tx = None; + let mut batch_channels = Vec::new(); for (channel_id, counterparty_node_id, _) in removed_batch_state { if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state = peer_state_mutex.lock().unwrap(); @@ -9437,6 +9615,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ funded_chan.context.unbroadcasted_funding(&funded_chan.funding) }); funded_chan.set_batch_ready(); + batch_channels.push((counterparty_node_id, channel_id)); let mut pending_events = self.pending_events.lock().unwrap(); emit_channel_pending_event!(pending_events, funded_chan); @@ -9445,7 +9624,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } if let Some(tx) = batch_funding_tx { log_info!(self.logger, "Broadcasting batch funding tx {}", tx.compute_txid()); - self.tx_broadcaster.broadcast_transactions(&[&tx]); + self.tx_broadcaster.broadcast_transactions(&[( + &tx, + TransactionType::Funding { channels: batch_channels }, + )]); } } } @@ -9466,10 +9648,20 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); } + self.prune_persisted_inbound_htlc_onions( + channel_id, + counterparty_node_id, + funding_txo, + user_channel_id, + committed_outbound_htlc_sources, + ); } - #[rustfmt::skip] - fn handle_monitor_update_completion_actions>(&self, actions: I) { + fn handle_monitor_update_completion_actions< + I: IntoIterator, + >( + &self, actions: I, + ) { debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread); debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); @@ -9478,36 +9670,54 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ for action in actions.into_iter() { match action { - MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => { - if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim { + MonitorUpdateCompletionAction::PaymentClaimed { + payment_hash, + pending_mpp_claim, + } => { + let (peer_id, chan_id) = pending_mpp_claim + .as_ref() + .map(|c| (Some(c.0), Some(c.1))) + .unwrap_or_default(); + let logger = + WithContext::from(&self.logger, peer_id, chan_id, Some(payment_hash)); + log_trace!(logger, "Handling PaymentClaimed monitor update completion action"); + + if let Some((cp_node_id, chan_id, claim_ptr)) = pending_mpp_claim { let per_peer_state = self.per_peer_state.read().unwrap(); - per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| { + per_peer_state.get(&cp_node_id).map(|peer_state_mutex| { let mut peer_state = peer_state_mutex.lock().unwrap(); - let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id); + let blockers_entry = + peer_state.actions_blocking_raa_monitor_updates.entry(chan_id); if let btree_map::Entry::Occupied(mut blockers) = blockers_entry { - blockers.get_mut().retain(|blocker| - if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim } = &blocker { - if *pending_claim == claim_ptr { - let mut pending_claim_state_lock = pending_claim.0.lock().unwrap(); - let pending_claim_state = &mut *pending_claim_state_lock; - pending_claim_state.channels_without_preimage.retain(|(cp, cid)| { - let this_claim = - *cp == counterparty_node_id && *cid == chan_id; - if this_claim { - pending_claim_state.channels_with_preimage.push((*cp, *cid)); - false - } else { true } - }); - if pending_claim_state.channels_without_preimage.is_empty() { - for (cp, cid) in pending_claim_state.channels_with_preimage.iter() { - let freed_chan = (*cp, *cid, blocker.clone()); - freed_channels.push(freed_chan); - } - } - 
!pending_claim_state.channels_without_preimage.is_empty() - } else { true } - } else { true } - ); + blockers.get_mut().retain(|blocker| { + let pending_claim = match &blocker { + RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { + pending_claim, + } => pending_claim, + _ => return true, + }; + if *pending_claim != claim_ptr { + return true; + } + let mut claim_state_lock = pending_claim.0.lock().unwrap(); + let claim_state = &mut *claim_state_lock; + claim_state.channels_without_preimage.retain(|(cp, cid)| { + let this_claim = *cp == cp_node_id && *cid == chan_id; + if this_claim { + claim_state.channels_with_preimage.push((*cp, *cid)); + false + } else { + true + } + }); + if claim_state.channels_without_preimage.is_empty() { + for (cp, cid) in claim_state.channels_with_preimage.iter() { + let freed_chan = (*cp, *cid, blocker.clone()); + freed_channels.push(freed_chan); + } + } + !claim_state.channels_without_preimage.is_empty() + }); if blockers.get().is_empty() { blockers.remove(); } @@ -9515,7 +9725,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }); } - let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); + let payment = self + .claimable_payments + .lock() + .unwrap() + .pending_claiming_payments + .remove(&payment_hash); if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, @@ -9525,7 +9740,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ onion_fields, payment_id, durable_preimage_channel, - }) = payment { + }) = payment + { let event = events::Event::PaymentClaimed { payment_hash, purpose, @@ -9536,8 +9752,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ onion_fields, payment_id, }; - let action = if let Some((outpoint, counterparty_node_id, channel_id)) - = durable_preimage_channel + let action = if let Some((outpoint, counterparty_node_id, channel_id)) = + durable_preimage_channel { Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { channel_funding_outpoint: Some(outpoint), @@ -9554,12 +9770,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // `payment_id` should suffice to ensure we never spuriously drop a second // event for a duplicate payment. if !pending_events.contains(&event_action) { + log_trace!( + logger, + "Queuing PaymentClaimed event with event completion action {:?}", + event_action.1 + ); pending_events.push_back(event_action); } } }, MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { - event, downstream_counterparty_and_funding_outpoint + event, + downstream_counterparty_and_funding_outpoint, } => { self.pending_events.lock().unwrap().push_back((event, None)); if let Some(unblocked) = downstream_counterparty_and_funding_outpoint { @@ -9571,7 +9793,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, MonitorUpdateCompletionAction::FreeOtherChannelImmediately { - downstream_counterparty_node_id, downstream_channel_id, blocking_action, + downstream_counterparty_node_id, + downstream_channel_id, + blocking_action, } => { self.handle_monitor_update_release( downstream_counterparty_node_id, @@ -9587,6 +9811,447 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } + /// Applies a [`ChannelMonitorUpdate`] to the channel monitor. + /// + /// Monitor updates must be applied while holding the same lock under which they were generated + /// to ensure correct ordering. However, completion handling requires releasing those locks. 
+ /// This method applies the update immediately (while locks are held) and returns whether the + /// update completed, allowing the caller to handle completion separately after releasing locks. + /// + /// Returns a tuple of `(update_completed, all_updates_completed)`: + /// - `update_completed`: whether this specific monitor update finished persisting + /// - `all_updates_completed`: whether all in-flight updates for this channel are now complete + fn handle_new_monitor_update_locked_actions_handled_by_caller( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey, + new_update: ChannelMonitorUpdate, + ) -> (bool, bool) { + let in_flight_updates = &mut in_flight_monitor_updates + .entry(channel_id) + .or_insert_with(|| (funding_txo, Vec::new())) + .1; + // During startup, we push monitor updates as background events through to here in + // order to replay updates that were in-flight when we shut down. Thus, we have to + // filter for uniqueness here. + let update_idx = + in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| { + in_flight_updates.push(new_update); + in_flight_updates.len() - 1 + }); + + if self.background_events_processed_since_startup.load(Ordering::Acquire) { + let update_res = + self.chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]); + let logger = + WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None); + let update_completed = self.handle_monitor_update_res(update_res, logger); + if update_completed { + let _ = in_flight_updates.remove(update_idx); + } + (update_completed, update_completed && in_flight_updates.is_empty()) + } else { + // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we + // fail to persist it. 
This is a fairly safe assumption, however, since anything we do + // during the startup sequence should be replayed exactly if we immediately crash. + let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo, + channel_id, + update: in_flight_updates[update_idx].clone(), + }; + // We want to track the in-flight update both in `in_flight_monitor_updates` and in + // `pending_background_events` to avoid a race condition during + // `pending_background_events` processing where we complete one + // `ChannelMonitorUpdate` (but there are more pending as background events) but we + // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to + // run post-completion actions. + // We could work around that with some effort, but its simpler to just track updates + // twice. + self.pending_background_events.lock().unwrap().push(event); + (false, false) + } + } + + /// Handles a monitor update for a closed channel, returning optionally the completion actions + /// to process after locks are released. + /// + /// Returns `Some` if all in-flight updates are complete. + fn handle_post_close_monitor_update( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec, + >, + funding_txo: OutPoint, update: ChannelMonitorUpdate, counterparty_node_id: PublicKey, + channel_id: ChannelId, + ) -> Option> { + let (_update_completed, all_updates_complete) = self + .handle_new_monitor_update_locked_actions_handled_by_caller( + in_flight_monitor_updates, + channel_id, + funding_txo, + counterparty_node_id, + update, + ); + if all_updates_complete { + Some(monitor_update_blocked_actions.remove(&channel_id).unwrap_or(Vec::new())) + } else { + None + } + } + + /// Returns whether the monitor update is completed, `false` if the update is in-progress. 
+ fn handle_monitor_update_res( + &self, update_res: ChannelMonitorUpdateStatus, logger: LG, + ) -> bool { + debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire)); + match update_res { + ChannelMonitorUpdateStatus::UnrecoverableError => { + let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down."; + log_error!(logger, "{}", err_str); + panic!("{}", err_str); + }, + ChannelMonitorUpdateStatus::InProgress => { + #[cfg(not(any(test, feature = "_externalize_tests")))] + if self.monitor_update_type.swap(1, Ordering::Relaxed) == 2 { + panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); + } + log_debug!( + logger, + "ChannelMonitor update in flight, holding messages until the update completes.", + ); + false + }, + ChannelMonitorUpdateStatus::Completed => { + #[cfg(not(any(test, feature = "_externalize_tests")))] + if self.monitor_update_type.swap(2, Ordering::Relaxed) == 1 { + panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); + } + true + }, + } + } + + /// Handles the initial monitor persistence, returning optionally data to process after locks + /// are released. + /// + /// Note: This method takes individual fields from `PeerState` rather than the whole struct + /// to avoid borrow checker issues when the channel is borrowed from `peer_state.channel_by_id`. 
+ fn handle_initial_monitor( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec, + >, + pending_msg_events: &mut Vec, is_connected: bool, + chan: &mut FundedChannel, update_res: ChannelMonitorUpdateStatus, + ) -> Option { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); + let update_completed = self.handle_monitor_update_res(update_res, logger); + if update_completed { + Some(self.try_resume_channel_post_monitor_update( + in_flight_monitor_updates, + monitor_update_blocked_actions, + pending_msg_events, + is_connected, + chan, + )) + } else { + None + } + } + + /// Applies a new monitor update and attempts to resume the channel if all updates are complete. + /// + /// Returns [`PostMonitorUpdateChanResume`] if all in-flight updates are complete, which should + /// be passed to [`Self::handle_post_monitor_update_chan_resume`] after releasing locks. + /// + /// Note: This method takes individual fields from [`PeerState`] rather than the whole struct + /// to avoid borrow checker issues when the channel is borrowed from `peer_state.channel_by_id`. + fn handle_new_monitor_update( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec, + >, + pending_msg_events: &mut Vec, is_connected: bool, + chan: &mut FundedChannel, funding_txo: OutPoint, update: ChannelMonitorUpdate, + ) -> Option { + self.handle_new_monitor_update_with_status( + in_flight_monitor_updates, + monitor_update_blocked_actions, + pending_msg_events, + is_connected, + chan, + funding_txo, + update, + ) + .1 + } + + /// Like [`Self::handle_new_monitor_update`], but also returns whether this specific update + /// completed (as opposed to being in-progress). 
+ fn handle_new_monitor_update_with_status( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec, + >, + pending_msg_events: &mut Vec, is_connected: bool, + chan: &mut FundedChannel, funding_txo: OutPoint, update: ChannelMonitorUpdate, + ) -> (bool, Option) { + let chan_id = chan.context.channel_id(); + let counterparty_node_id = chan.context.get_counterparty_node_id(); + + let (update_completed, all_updates_complete) = self + .handle_new_monitor_update_locked_actions_handled_by_caller( + in_flight_monitor_updates, + chan_id, + funding_txo, + counterparty_node_id, + update, + ); + + let completion_data = if all_updates_complete { + Some(self.try_resume_channel_post_monitor_update( + in_flight_monitor_updates, + monitor_update_blocked_actions, + pending_msg_events, + is_connected, + chan, + )) + } else { + None + }; + + (update_completed, completion_data) + } + + /// Attempts to resume a channel after a monitor update completes, while locks are still held. + /// + /// If the channel has no more blocked monitor updates, this resumes normal operation by + /// calling [`Self::handle_channel_resumption`] and returns the remaining work to process + /// after locks are released. If blocked updates remain, only the update actions are returned. + /// + /// Note: This method takes individual fields from [`PeerState`] rather than the whole struct + /// to avoid borrow checker issues when the channel is borrowed from `peer_state.channel_by_id`. 
+ fn try_resume_channel_post_monitor_update( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec, + >, + pending_msg_events: &mut Vec, is_connected: bool, + chan: &mut FundedChannel, + ) -> PostMonitorUpdateChanResume { + let chan_id = chan.context.channel_id(); + let outbound_alias = chan.context.outbound_scid_alias(); + let counterparty_node_id = chan.context.get_counterparty_node_id(); + + #[cfg(debug_assertions)] + { + let in_flight_updates = in_flight_monitor_updates.get(&chan_id); + assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true)); + assert!(chan.is_awaiting_monitor_update()); + } + + let logger = WithChannelContext::from(&self.logger, &chan.context, None); + + let update_actions = monitor_update_blocked_actions.remove(&chan_id).unwrap_or(Vec::new()); + + if chan.blocked_monitor_updates_pending() != 0 { + log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked"); + PostMonitorUpdateChanResume::Blocked { update_actions } + } else { + log_debug!(logger, "Channel is open and awaiting update, resuming it"); + let updates = chan.monitor_updating_restored( + &&logger, + &self.node_signer, + self.chain_hash, + &*self.config.read().unwrap(), + self.best_block.read().unwrap().height, + |htlc_id| { + self.path_for_release_held_htlc( + htlc_id, + outbound_alias, + &chan_id, + &counterparty_node_id, + ) + }, + ); + let channel_update = if updates.channel_ready.is_some() + && chan.context.is_usable() + && is_connected + { + if let Ok((msg, _, _)) = self.get_channel_update_for_unicast(chan) { + Some(MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id, msg }) + } else { + None + } + } else { + None + }; + + let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( + pending_msg_events, + chan, + updates.raa, + updates.commitment_update, + updates.commitment_order, + 
updates.accepted_htlcs, + updates.pending_update_adds, + updates.funding_broadcastable, + updates.channel_ready, + updates.announcement_sigs, + updates.tx_signatures, + None, + updates.channel_ready_order, + ); + if let Some(upd) = channel_update { + pending_msg_events.push(upd); + } + + let unbroadcasted_batch_funding_txid = + chan.context.unbroadcasted_batch_funding_txid(&chan.funding); + + PostMonitorUpdateChanResume::Unblocked { + channel_id: chan_id, + counterparty_node_id, + funding_txo: chan.funding_outpoint(), + user_channel_id: chan.context.get_user_id(), + unbroadcasted_batch_funding_txid, + update_actions, + htlc_forwards, + decode_update_add_htlcs, + finalized_claimed_htlcs: updates.finalized_claimed_htlcs, + failed_htlcs: updates.failed_htlcs, + committed_outbound_htlc_sources: updates.committed_outbound_htlc_sources, + } + } + } + + /// We store inbound committed HTLCs' onions in `Channel`s for use in reconstructing the pending + /// HTLC set on `ChannelManager` read. If an HTLC has been irrevocably forwarded to the outbound + /// edge, we no longer need to persist the inbound edge's onion and can prune it here. 
+ fn prune_persisted_inbound_htlc_onions( + &self, outbound_channel_id: ChannelId, outbound_node_id: PublicKey, + outbound_funding_txo: OutPoint, outbound_user_channel_id: u128, + committed_outbound_htlc_sources: Vec<(HTLCPreviousHopData, u64)>, + ) { + let per_peer_state = self.per_peer_state.read().unwrap(); + for (source, outbound_amt_msat) in committed_outbound_htlc_sources { + let counterparty_node_id = match source.counterparty_node_id.as_ref() { + Some(id) => id, + None => continue, + }; + let mut peer_state = + match per_peer_state.get(counterparty_node_id).map(|state| state.lock().unwrap()) { + Some(peer_state) => peer_state, + None => continue, + }; + + if let Some(chan) = + peer_state.channel_by_id.get_mut(&source.channel_id).and_then(|c| c.as_funded_mut()) + { + chan.prune_inbound_htlc_onion( + source.htlc_id, + &source, + OutboundHop { + amt_msat: outbound_amt_msat, + channel_id: outbound_channel_id, + node_id: outbound_node_id, + funding_txo: outbound_funding_txo, + user_channel_id: outbound_user_channel_id, + }, + ); + } + } + } + + #[cfg(test)] + pub(crate) fn test_holding_cell_outbound_htlc_forwards_count( + &self, cp_id: PublicKey, chan_id: ChannelId, + ) -> usize { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); + chan.test_holding_cell_outbound_htlc_forwards_count() + } + + #[cfg(test)] + /// Useful to check that we prune inbound HTLC onions once they are irrevocably forwarded to the + /// outbound edge, see [`Self::prune_persisted_inbound_htlc_onions`]. 
+ pub(crate) fn test_get_inbound_committed_htlcs_with_onion( + &self, cp_id: PublicKey, chan_id: ChannelId, + ) -> usize { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap(); + chan.inbound_htlcs_pending_decode().count() + } + + #[cfg(test)] + /// Useful for testing crash scenarios where the holding cell of a channel is not persisted. + pub(crate) fn test_clear_channel_holding_cell(&self, cp_id: PublicKey, chan_id: ChannelId) { + let per_peer_state = self.per_peer_state.read().unwrap(); + let mut peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap(); + let chan = + peer_state.channel_by_id.get_mut(&chan_id).and_then(|c| c.as_funded_mut()).unwrap(); + chan.test_clear_holding_cell(); + } + + /// Completes channel resumption after locks have been released. + /// + /// Processes the [`PostMonitorUpdateChanResume`] returned by + /// [`Self::try_resume_channel_post_monitor_update`], handling update actions and any + /// remaining work that requires locks to be released (e.g., forwarding HTLCs, failing HTLCs). 
+ fn handle_post_monitor_update_chan_resume(&self, data: PostMonitorUpdateChanResume) { + debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); + #[cfg(debug_assertions)] + for (_, peer) in self.per_peer_state.read().unwrap().iter() { + debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread); + } + + match data { + PostMonitorUpdateChanResume::Blocked { update_actions } => { + self.handle_monitor_update_completion_actions(update_actions); + }, + PostMonitorUpdateChanResume::Unblocked { + channel_id, + counterparty_node_id, + funding_txo, + user_channel_id, + unbroadcasted_batch_funding_txid, + update_actions, + htlc_forwards, + decode_update_add_htlcs, + finalized_claimed_htlcs, + failed_htlcs, + committed_outbound_htlc_sources, + } => { + self.post_monitor_update_unlock( + channel_id, + counterparty_node_id, + funding_txo, + user_channel_id, + unbroadcasted_batch_funding_txid, + update_actions, + htlc_forwards, + decode_update_add_htlcs, + finalized_claimed_htlcs, + failed_htlcs, + committed_outbound_htlc_sources, + ); + }, + } + } + /// Handles a channel reentering a functional state, either due to reconnect or a monitor /// update completion. #[rustfmt::skip] @@ -9630,7 +10295,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if channel.context.is_connected() { if let ChannelReadyOrder::ChannelReadyFirst = channel_ready_order { if let Some(msg) = &channel_ready { - send_channel_ready!(self, pending_msg_events, channel, msg.clone()); + self.send_channel_ready(pending_msg_events, channel, msg.clone()); } if let Some(msg) = &announcement_sigs { @@ -9669,7 +10334,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, } - // TODO(dual_funding): For async signing support we need to hold back `tx_signatures` until the `commitment_signed` is ready. 
if let Some(msg) = tx_signatures { pending_msg_events.push(MessageSendEvent::SendTxSignatures { node_id: counterparty_node_id, @@ -9685,7 +10349,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let ChannelReadyOrder::SignaturesFirst = channel_ready_order { if let Some(msg) = channel_ready { - send_channel_ready!(self, pending_msg_events, channel, msg); + self.send_channel_ready(pending_msg_events, channel, msg); } if let Some(msg) = announcement_sigs { @@ -9696,7 +10360,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } } else if let Some(msg) = channel_ready { - send_channel_ready!(self, pending_msg_events, channel, msg); + self.send_channel_ready(pending_msg_events, channel, msg); } if let Some(tx) = funding_broadcastable { @@ -9714,80 +10378,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; } else { log_info!(logger, "Broadcasting funding transaction with txid {}", tx.compute_txid()); - self.tx_broadcaster.broadcast_transactions(&[&tx]); - } - } - - if let Some(signing_session) = (!channel.is_awaiting_monitor_update()) - .then(|| ()) - .and_then(|_| channel.context.interactive_tx_signing_session.as_mut()) - .filter(|signing_session| signing_session.has_received_commitment_signed()) - .filter(|signing_session| signing_session.holder_tx_signatures().is_none()) - { - if signing_session.has_local_contribution() { - let mut pending_events = self.pending_events.lock().unwrap(); - let unsigned_transaction = signing_session.unsigned_tx().tx().clone(); - let event_action = ( - Event::FundingTransactionReadyForSigning { - unsigned_transaction, - counterparty_node_id, - channel_id: channel.context.channel_id(), - user_channel_id: channel.context.get_user_id(), - }, - None, - ); - - if !pending_events.contains(&event_action) { - pending_events.push_back(event_action); - } - } else { - let txid = signing_session.unsigned_tx().compute_txid(); - let 
best_block_height = self.best_block.read().unwrap().height; - match channel.funding_transaction_signed(txid, vec![], best_block_height, &self.logger) { - Ok(FundingTxSigned { - tx_signatures: Some(tx_signatures), - funding_tx, - splice_negotiated, - splice_locked, - }) => { - if let Some(funding_tx) = funding_tx { - self.broadcast_interactive_funding(channel, &funding_tx, &self.logger); - } - - if let Some(splice_negotiated) = splice_negotiated { - self.pending_events.lock().unwrap().push_back(( - events::Event::SplicePending { - channel_id: channel.context.channel_id(), - counterparty_node_id, - user_channel_id: channel.context.get_user_id(), - new_funding_txo: splice_negotiated.funding_txo, - channel_type: splice_negotiated.channel_type, - new_funding_redeem_script: splice_negotiated.funding_redeem_script, - }, - None, - )); - } - - if channel.context.is_connected() { - pending_msg_events.push(MessageSendEvent::SendTxSignatures { - node_id: counterparty_node_id, - msg: tx_signatures, - }); - if let Some(splice_locked) = splice_locked { - pending_msg_events.push(MessageSendEvent::SendSpliceLocked { - node_id: counterparty_node_id, - msg: splice_locked, - }); - } - } - }, - Ok(FundingTxSigned { tx_signatures: None, .. }) => { - debug_assert!(false, "If our tx_signatures is empty, then we should send it first!"); - }, - Err(err) => { - log_warn!(logger, "Failed signing interactive funding transaction: {err:?}"); - }, - } + self.tx_broadcaster.broadcast_transactions(&[( + &tx, + TransactionType::Funding { channels: vec![(counterparty_node_id, channel.context.channel_id())] }, + )]); } } @@ -9847,7 +10441,21 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ .and_then(Channel::as_funded_mut) { if chan.is_awaiting_monitor_update() { - handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + let completion_data = self.try_resume_channel_post_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + ); + + let holding_cell_res = self.check_free_peer_holding_cells(peer_state); + + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + + self.handle_post_monitor_update_chan_resume(completion_data); + self.handle_holding_cell_free_result(holding_cell_res); } else { log_trace!(logger, "Channel is open but not awaiting update"); } @@ -9894,7 +10502,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ) } - /// Accepts a request to open a channel after a [`events::Event::OpenChannelRequest`], treating + /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`], treating /// it as confirmed immediately. /// /// The `user_channel_id` parameter will be provided back in @@ -9951,11 +10559,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { log_error!(logger, "Can't find peer matching the passed counterparty node_id"); - - let err_str = format!( - "Can't find a peer matching the passed counterparty node_id {counterparty_node_id}" - ); - APIError::ChannelUnavailable { err: err_str } + APIError::no_such_peer(counterparty_node_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -10044,10 +10648,9 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ mem::drop(peer_state_lock); mem::drop(per_peer_state); // TODO(dunxen): Find/make less icky way to do this. - match handle_error!( - self, + match self.handle_error( Result::<(), MsgHandleErrInternal>::Err(err), - *counterparty_node_id + *counterparty_node_id, ) { Ok(_) => { unreachable!("`handle_error` only returns Err as we've passed in an Err") @@ -10082,7 +10685,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // If this peer already has some channels, a new channel won't increase our number of peers // with unfunded channels, so as long as we aren't over the maximum number of unfunded // channels per-peer we can accept channels from a peer with existing ones. - if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS { + if is_only_peer_channel && peers_without_funded_channels > MAX_UNFUNDED_CHANNEL_PEERS { let send_msg_err_event = MessageSendEvent::HandleError { node_id: channel.context().get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { @@ -10185,8 +10788,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ num_unfunded_channels + peer.inbound_channel_request_by_id.len() } - #[rustfmt::skip] - fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: OpenChannelMessageRef<'_>) -> Result<(), MsgHandleErrInternal> { + fn internal_open_channel( + &self, counterparty_node_id: &PublicKey, msg: OpenChannelMessageRef<'_>, + ) -> Result<(), MsgHandleErrInternal> { let common_fields = match msg { OpenChannelMessageRef::V1(msg) => &msg.common_fields, OpenChannelMessageRef::V2(msg) => &msg.common_fields, @@ -10197,49 +10801,38 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are // likely to be lost on restart! 
if common_fields.chain_hash != self.chain_hash { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), - common_fields.temporary_channel_id)); + return Err(MsgHandleErrInternal::send_err_msg_no_close( + "Unknown genesis block hash".to_owned(), + common_fields.temporary_channel_id, + )); } if !self.config.read().unwrap().accept_inbound_channels { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), - common_fields.temporary_channel_id)); + return Err(MsgHandleErrInternal::send_err_msg_no_close( + "No inbound channels accepted".to_owned(), + common_fields.temporary_channel_id, + )); } - // Get the number of peers with channels, but without funded ones. We don't care too much - // about peers that never open a channel, so we filter by peers that have at least one - // channel, and then limit the number of those with unfunded channels. - let channeled_peers_without_funding = - self.peers_without_funded_channels(|node| node.total_channel_count() > 0); - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - common_fields.temporary_channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer( + counterparty_node_id, + common_fields.temporary_channel_id, + ) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - // If this peer already has some channels, a new channel won't increase our number of peers - // with unfunded channels, so as long as we aren't over the maximum number of unfunded - // channels per-peer we can accept channels from a peer with existing ones. 
- if peer_state.total_channel_count() == 0 && - channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS && - !self.config.read().unwrap().manually_accept_inbound_channels - { - return Err(MsgHandleErrInternal::send_err_msg_no_close( - "Have too many peers with unfunded channels, not accepting new ones".to_owned(), - common_fields.temporary_channel_id)); - } - let best_block_height = self.best_block.read().unwrap().height; - if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER { + if Self::unfunded_channel_count(peer_state, best_block_height) + >= MAX_UNFUNDED_CHANS_PER_PEER + { return Err(MsgHandleErrInternal::send_err_msg_no_close( format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER), - common_fields.temporary_channel_id)); + common_fields.temporary_channel_id, + )); } let channel_id = common_fields.temporary_channel_id; @@ -10247,20 +10840,20 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if channel_exists { return Err(MsgHandleErrInternal::send_err_msg_no_close( "temporary_channel_id collision for the same peer!".to_owned(), - common_fields.temporary_channel_id)); + common_fields.temporary_channel_id, + )); } - // We can get the channel type at this point already as we'll need it immediately in both the - // manual and the automatic acceptance cases. - let channel_type = channel::channel_type_from_open_channel( - common_fields, &self.channel_type_features() - ).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, common_fields.temporary_channel_id))?; + let channel_type = + channel::channel_type_from_open_channel(common_fields, &self.channel_type_features()) + .map_err(|e| { + MsgHandleErrInternal::from_chan_no_close(e, common_fields.temporary_channel_id) + })?; - // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept. 
- if self.config.read().unwrap().manually_accept_inbound_channels { - let mut pending_events = self.pending_events.lock().unwrap(); - let is_announced = (common_fields.channel_flags & 1) == 1; - pending_events.push_back((events::Event::OpenChannelRequest { + let mut pending_events = self.pending_events.lock().unwrap(); + let is_announced = (common_fields.channel_flags & 1) == 1; + pending_events.push_back(( + events::Event::OpenChannelRequest { temporary_channel_id: common_fields.temporary_channel_id, counterparty_node_id: *counterparty_node_id, funding_satoshis: common_fields.funding_satoshis, @@ -10271,67 +10864,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ channel_type, is_announced, params: common_fields.channel_parameters(), - }, None)); - peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest { + }, + None, + )); + peer_state.inbound_channel_request_by_id.insert( + channel_id, + InboundChannelRequest { open_channel_msg: match msg { OpenChannelMessageRef::V1(msg) => OpenChannelMessage::V1(msg.clone()), OpenChannelMessageRef::V2(msg) => OpenChannelMessage::V2(msg.clone()), }, ticks_remaining: UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS, - }); - return Ok(()); - } - - // Otherwise create the channel right now. 
- let mut random_bytes = [0u8; 16]; - random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]); - let user_channel_id = u128::from_be_bytes(random_bytes); - - if channel_type.requires_zero_conf() { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), common_fields.temporary_channel_id)); - } - if channel_type.requires_anchors_zero_fee_htlc_tx() || channel_type.requires_anchor_zero_fee_commitments() { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), common_fields.temporary_channel_id)); - } - - let (mut channel, message_send_event) = match msg { - OpenChannelMessageRef::V1(msg) => { - let mut channel = InboundV1Channel::new( - &self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id, - &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, - &self.config.read().unwrap(), best_block_height, &self.logger, /*is_0conf=*/false - ).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?; - let logger = WithChannelContext::from(&self.logger, &channel.context, None); - let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| { - MessageSendEvent::SendAcceptChannel { - node_id: *counterparty_node_id, - msg, - } - }); - (Channel::from(channel), message_send_event) - }, - OpenChannelMessageRef::V2(msg) => { - let channel = PendingV2Channel::new_inbound( - &self.fee_estimator, &self.entropy_source, &self.signer_provider, - self.get_our_node_id(), *counterparty_node_id, &self.channel_type_features(), - &peer_state.latest_features, msg, user_channel_id, - &self.config.read().unwrap(), best_block_height, &self.logger, - ).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?; - let message_send_event = MessageSendEvent::SendAcceptChannelV2 { - node_id: *counterparty_node_id, - 
msg: channel.accept_inbound_dual_funded_channel(), - }; - (Channel::from(channel), Some(message_send_event)) }, - }; - - let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); - channel.context_mut().set_outbound_scid_alias(outbound_scid_alias); - - if let Some(message_send_event) = message_send_event { - peer_state.pending_msg_events.push(message_send_event); - } - peer_state.channel_by_id.insert(channel.context().channel_id(), channel); + ); Ok(()) } @@ -10345,7 +10890,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.common_fields.temporary_channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.common_fields.temporary_channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -10366,7 +10911,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.common_fields.temporary_channel_id)) } }; let mut pending_events = self.pending_events.lock().unwrap(); @@ -10380,57 +10925,80 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ Ok(()) } - #[rustfmt::skip] - fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> { + fn internal_funding_created( + &self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated, + ) -> Result<(), MsgHandleErrInternal> { let best_block = *self.best_block.read().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.temporary_channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.temporary_channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - let (mut chan, funding_msg_opt, monitor) = - match peer_state.channel_by_id.remove(&msg.temporary_channel_id) - .map(Channel::into_unfunded_inbound_v1) - { - Some(Ok(inbound_chan)) => { - let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None); - match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) { - Ok(res) => res, - Err((inbound_chan, err)) => { - // We've already removed this inbound channel from the map in `PeerState` - // above so at this point we just need to clean up any lingering entries - // concerning this channel as it is safe to do so. 
- debug_assert!(matches!(err, ChannelError::Close(_))); - let mut chan = Channel::from(inbound_chan); - return Err(convert_channel_err!(self, peer_state, err, &mut chan).1); - }, - } - }, - Some(Err(mut chan)) => { - let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id); - let err = ChannelError::close(err_msg); - return Err(convert_channel_err!(self, peer_state, err, &mut chan).1); - }, - None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) - }; + let (mut chan, funding_msg_opt, monitor) = match peer_state + .channel_by_id + .remove(&msg.temporary_channel_id) + .map(Channel::into_unfunded_inbound_v1) + { + Some(Ok(inbound_chan)) => { + let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None); + match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) + { + Ok(res) => res, + Err((inbound_chan, err)) => { + // We've already removed this inbound channel from the map in `PeerState` + // above so at this point we just need to clean up any lingering entries + // concerning this channel as it is safe to do so. 
+ debug_assert!(matches!(err, ChannelError::Close(_))); + let mut chan = Channel::from(inbound_chan); + return Err(self + .locked_handle_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ) + .1); + }, + } + }, + Some(Err(mut chan)) => { + let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id); + let err = ChannelError::close(err_msg); + return Err(self + .locked_handle_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ) + .1); + }, + None => { + return Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.temporary_channel_id, + )) + }, + }; let funded_channel_id = chan.context.channel_id(); - macro_rules! fail_chan { ($err: expr) => { { - // Note that at this point we've filled in the funding outpoint on our channel, but its - // actually in conflict with another channel. Thus, if we call `convert_channel_err` - // immediately, we'll remove the existing channel from `outpoint_to_peer`. - // Thus, we must first unset the funding outpoint on the channel. - let err = ChannelError::close($err.to_owned()); - chan.unset_funding_info(); - let mut chan = Channel::from(chan); - return Err(convert_channel_err!(self, peer_state, err, &mut chan, UNFUNDED_CHANNEL).1); - } } } + macro_rules! fail_chan { + ($err: expr) => {{ + // Note that at this point we've filled in the funding outpoint on our channel, but its + // actually in conflict with another channel. Thus, if we call `convert_channel_err` + // immediately, we'll remove the existing channel from `outpoint_to_peer`. + // Thus, we must first unset the funding outpoint on the channel. 
+ let err = ChannelError::close($err.to_owned()); + chan.unset_funding_info(); + let mut chan = Channel::from(chan); + return Err(self.locked_handle_unfunded_close(err, &mut chan).1); + }}; + } match peer_state.channel_by_id.entry(funded_channel_id) { hash_map::Entry::Occupied(_) => { @@ -10451,8 +11019,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } if let Some(funded_chan) = e.insert(Channel::from(chan)).as_funded_mut() { - handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state, - per_peer_state, funded_chan); + if let Some(data) = self.handle_initial_monitor( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + funded_chan, + persist_state, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } else { unreachable!("This must be a funded channel as we just inserted it."); } @@ -10462,7 +11040,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated"); fail_chan!("Duplicate channel ID"); } - } + }, } } @@ -10552,7 +11130,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(&counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), ChannelId([0; 32])) + MsgHandleErrInternal::no_such_peer(&counterparty_node_id, ChannelId([0; 32])) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -10590,7 +11168,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -10615,7 +11193,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }) { Ok((funded_chan, persist_status)) => { - handle_initial_monitor!(self, persist_status, peer_state_lock, peer_state, per_peer_state, funded_chan); + if let Some(data) = self.handle_initial_monitor( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + funded_chan, + persist_status, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } Ok(()) }, Err(e) => try_channel_entry!(self, peer_state, Err(e), chan_entry), @@ -10636,10 +11225,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - channel_id, - ) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -10655,26 +11241,27 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ Err((error, splice_funding_failed)) => { if let Some(splice_funding_failed) = splice_funding_failed { let pending_events = &mut self.pending_events.lock().unwrap(); - pending_events.push_back((events::Event::SpliceFailed { - channel_id, - counterparty_node_id: *counterparty_node_id, - user_channel_id: channel.context().get_user_id(), - abandoned_funding_txo: splice_funding_failed.funding_txo, - channel_type: splice_funding_failed.channel_type.clone(), - contributed_inputs: splice_funding_failed.contributed_inputs, - contributed_outputs: splice_funding_failed.contributed_outputs, - }, None)); + pending_events.push_back(( + events::Event::SpliceFailed { + channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: channel.context().get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type.clone(), + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, + None, + )); } Err(MsgHandleErrInternal::from_chan_no_close(error, channel_id)) }, } }, - hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::send_err_msg_no_close(format!( - "Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", - counterparty_node_id), channel_id) - ) - } + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + channel_id, + )), } } @@ -10710,82 +11297,128 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ }) } - #[rustfmt::skip] - fn internal_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) -> Result { + fn internal_tx_complete( + &self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete, + ) -> Result { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(&counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(&counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(&counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { let chan = chan_entry.get_mut(); - match chan.tx_complete(msg, &self.logger) { - Ok((interactive_tx_msg_send, commitment_signed)) => { - let persist = if interactive_tx_msg_send.is_some() || commitment_signed.is_some() { - NotifyOption::SkipPersistHandleEvents - } else { - NotifyOption::SkipPersistNoEvents - }; - if let Some(interactive_tx_msg_send) = interactive_tx_msg_send { - let msg_send_event = interactive_tx_msg_send.into_msg_send_event(counterparty_node_id); + match chan.tx_complete(msg, &self.fee_estimator, &self.logger) { + Ok(tx_complete_result) => { + let mut persist = NotifyOption::SkipPersistNoEvents; + + if let Some(interactive_tx_msg_send) = + tx_complete_result.interactive_tx_msg_send + { + let msg_send_event = + interactive_tx_msg_send.into_msg_send_event(counterparty_node_id); peer_state.pending_msg_events.push(msg_send_event); + persist = NotifyOption::SkipPersistHandleEvents; }; - if let Some(commitment_signed) = commitment_signed { - 
peer_state.pending_msg_events.push(MessageSendEvent::UpdateHTLCs { - node_id: counterparty_node_id, - channel_id: msg.channel_id, - updates: CommitmentUpdate { - commitment_signed: vec![commitment_signed], - update_add_htlcs: vec![], - update_fulfill_htlcs: vec![], - update_fail_htlcs: vec![], - update_fail_malformed_htlcs: vec![], - update_fee: None, + + if let Some(unsigned_transaction) = tx_complete_result.event_unsigned_tx { + self.pending_events.lock().unwrap().push_back(( + events::Event::FundingTransactionReadyForSigning { + unsigned_transaction, + counterparty_node_id, + channel_id: msg.channel_id, + user_channel_id: chan.context().get_user_id(), }, - }); + None, + )); + // We have a successful signing session that we need to persist. + persist = NotifyOption::DoPersist; + } + + if let Some(FundingTxSigned { + commitment_signed, + counterparty_initial_commitment_signed_result, + tx_signatures, + funding_tx, + splice_negotiated, + splice_locked, + }) = tx_complete_result.funding_tx_signed + { + // We shouldn't expect to see the splice negotiated or locked yet as we + // haven't exchanged `tx_signatures` at this point. Similarly, we + // shouldn't have a result for the counterparty's initial commitment + // signed as they haven't sent it yet. 
+ debug_assert!(funding_tx.is_none()); + debug_assert!(splice_negotiated.is_none()); + debug_assert!(splice_locked.is_none()); + debug_assert!(counterparty_initial_commitment_signed_result.is_none()); + + if let Some(commitment_signed) = commitment_signed { + peer_state.pending_msg_events.push(MessageSendEvent::UpdateHTLCs { + node_id: counterparty_node_id, + channel_id: msg.channel_id, + updates: CommitmentUpdate { + commitment_signed: vec![commitment_signed], + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, + }, + }); + } + if let Some(tx_signatures) = tx_signatures { + peer_state.pending_msg_events.push( + MessageSendEvent::SendTxSignatures { + node_id: counterparty_node_id, + msg: tx_signatures, + }, + ); + } + + // We have a successful signing session that we need to persist. + persist = NotifyOption::DoPersist; } + Ok(persist) }, Err((error, splice_funding_failed)) => { if let Some(splice_funding_failed) = splice_funding_failed { let pending_events = &mut self.pending_events.lock().unwrap(); - pending_events.push_back((events::Event::SpliceFailed { - channel_id: msg.channel_id, - counterparty_node_id, - user_channel_id: chan.context().get_user_id(), - abandoned_funding_txo: splice_funding_failed.funding_txo, - channel_type: splice_funding_failed.channel_type.clone(), - contributed_inputs: splice_funding_failed.contributed_inputs, - contributed_outputs: splice_funding_failed.contributed_outputs, - }, None)); + pending_events.push_back(( + events::Event::SpliceFailed { + channel_id: msg.channel_id, + counterparty_node_id, + user_channel_id: chan.context().get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type.clone(), + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, + None, + )); } 
Err(MsgHandleErrInternal::from_chan_no_close(error, msg.channel_id)) }, } }, - hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) - } + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( + &counterparty_node_id, + msg.channel_id, + )), } } - #[rustfmt::skip] - fn internal_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures) - -> Result<(), MsgHandleErrInternal> { + fn internal_tx_signatures( + &self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures, + ) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -10794,6 +11427,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Some(chan) => { let best_block_height = self.best_block.read().unwrap().height; let FundingTxSigned { + commitment_signed, + counterparty_initial_commitment_signed_result, tx_signatures, funding_tx, splice_negotiated, @@ -10804,20 +11439,35 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ chan.tx_signatures(msg, best_block_height, &self.logger), chan_entry ); + + // We should never be sending a `commitment_signed` in response to their + // `tx_signatures`. + debug_assert!(commitment_signed.is_none()); + debug_assert!(counterparty_initial_commitment_signed_result.is_none()); + if let Some(tx_signatures) = tx_signatures { - peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures { - node_id: *counterparty_node_id, - msg: tx_signatures, - }); + peer_state.pending_msg_events.push( + MessageSendEvent::SendTxSignatures { + node_id: *counterparty_node_id, + msg: tx_signatures, + }, + ); } if let Some(splice_locked) = splice_locked { - peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceLocked { - node_id: *counterparty_node_id, - msg: splice_locked, - }); + peer_state.pending_msg_events.push( + MessageSendEvent::SendSpliceLocked { + node_id: *counterparty_node_id, + msg: splice_locked, + }, + ); } - if let Some(ref funding_tx) = funding_tx { - self.broadcast_interactive_funding(chan, funding_tx, &self.logger); + if let Some((ref funding_tx, ref tx_type)) = funding_tx { + self.broadcast_interactive_funding( + chan, + funding_tx, + Some(tx_type.clone()), + &self.logger, + ); } if let Some(splice_negotiated) = splice_negotiated { self.pending_events.lock().unwrap().push_back(( @@ -10827,7 +11477,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ user_channel_id: chan.context.get_user_id(), new_funding_txo: splice_negotiated.funding_txo, channel_type: splice_negotiated.channel_type, - new_funding_redeem_script: splice_negotiated.funding_redeem_script, + new_funding_redeem_script: splice_negotiated + .funding_redeem_script, }, None, )); @@ -10842,29 +11493,28 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } Ok(()) }, - hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) - } + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )), } } - #[rustfmt::skip] - fn internal_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort) - -> Result { + fn internal_tx_abort( + &self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort, + ) -> Result { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { let res = chan_entry.get_mut().tx_abort(msg, &self.logger); - let (tx_abort, splice_failed) = try_channel_entry!(self, peer_state, res, chan_entry); + let (tx_abort, splice_failed) = + try_channel_entry!(self, peer_state, res, chan_entry); let persist = if tx_abort.is_some() || splice_failed.is_some() { NotifyOption::DoPersist @@ -10881,22 +11531,26 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(splice_funding_failed) = splice_failed { let pending_events = &mut self.pending_events.lock().unwrap(); - pending_events.push_back((events::Event::SpliceFailed { - channel_id: msg.channel_id, - counterparty_node_id: *counterparty_node_id, - user_channel_id: chan_entry.get().context().get_user_id(), - abandoned_funding_txo: splice_funding_failed.funding_txo, - channel_type: splice_funding_failed.channel_type, - contributed_inputs: splice_funding_failed.contributed_inputs, - contributed_outputs: splice_funding_failed.contributed_outputs, - }, None)); + pending_events.push_back(( + events::Event::SpliceFailed { + channel_id: msg.channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: chan_entry.get().context().get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type, + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, + None, + )); } Ok(persist) }, - hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) - } + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )), } } @@ -10908,7 +11562,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -10959,7 +11613,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, hash_map::Entry::Vacant(_) => { - Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } } } @@ -10972,13 +11626,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!( - "Can't find a peer matching the passed counterparty node_id {}", - counterparty_node_id - ), - msg.channel_id, - ) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11005,7 +11653,12 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let (shutdown, monitor_update_opt, htlcs) = try_channel_entry!( self, peer_state, - chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), + chan.shutdown( + &self.logger, + &self.signer_provider, + &peer_state.latest_features, + &msg + ), chan_entry ); dropped_htlcs = htlcs; @@ -11021,15 +11674,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!( - self, + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } }, None => { @@ -11042,13 +11699,16 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, mut e) = self.locked_handle_unfunded_close(err, &mut chan); e.dont_send_error_message(); return Err(e); }, } } else { - return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); + return Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )); } } for htlc_source in dropped_htlcs.drain(..) { @@ -11070,13 +11730,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!( - "Can't find a peer matching the passed counterparty node_id {}", - counterparty_node_id - ), - msg.channel_id, - ) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let logger; let tx_err: Option<(_, Result)> = { @@ -11091,10 +11745,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ try_channel_entry!(self, peer_state, res, chan_entry); debug_assert_eq!(tx_shutdown_result.is_some(), chan.is_shutdown()); if let Some(msg) = closing_signed { - peer_state.pending_msg_events.push(MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); + peer_state.pending_msg_events.push( + MessageSendEvent::SendClosingSigned { + node_id: counterparty_node_id.clone(), + msg, + }, + ); } if let Some((tx, close_res)) = tx_shutdown_result { // We're done with this channel, we've got a signed closing transaction and @@ -11102,25 +11758,47 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // also implies there are no pending HTLCs left on the channel, so we can // fully delete it from tracking (the channel monitor is still around to // watch for old state broadcasts)! 
- let err = convert_channel_err!(self, peer_state, close_res, chan, COOP_CLOSED); + let err = self.locked_handle_funded_coop_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + close_res, + chan, + ); chan_entry.remove(); Some((tx, Err(err))) } else { None } } else { - return try_channel_entry!(self, peer_state, Err(ChannelError::close( - "Got a closing_signed message for an unfunded channel!".into())), chan_entry); + return try_channel_entry!( + self, + peer_state, + Err(ChannelError::close( + "Got a closing_signed message for an unfunded channel!".into() + )), + chan_entry + ); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => { + return Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )) + }, } }; mem::drop(per_peer_state); if let Some((broadcast_tx, err)) = tx_err { log_info!(logger, "Broadcasting {}", log_tx!(broadcast_tx)); - self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]); - let _ = handle_error!(self, err, *counterparty_node_id); + self.tx_broadcaster.broadcast_transactions(&[( + &broadcast_tx, + TransactionType::CooperativeClose { + counterparty_node_id: *counterparty_node_id, + channel_id: msg.channel_id, + }, + )]); + let _ = self.handle_error(err, *counterparty_node_id); } Ok(()) } @@ -11157,7 +11835,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11170,7 +11848,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "Got an update_add_htlc message for an unfunded channel!".into())), chan_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } Ok(()) } @@ -11184,28 +11862,32 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!( - "Can't find a peer matching the passed counterparty node_id {}", - counterparty_node_id - ), - msg.channel_id, - ) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { - let res = try_channel_entry!(self, peer_state, chan.update_fulfill_htlc(&msg), chan_entry); + let res = try_channel_entry!( + self, + peer_state, + chan.update_fulfill_htlc(&msg), + chan_entry + ); if let HTLCSource::PreviousHopData(prev_hop) = &res.0 { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); + let logger = + WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Holding the next revoke_and_ack until the preimage is durably persisted in the inbound edge's ChannelMonitor", ); - peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id) + peer_state + .actions_blocking_raa_monitor_updates + .entry(msg.channel_id) .or_insert_with(Vec::new) - .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop)); + .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data( + &prev_hop, + )); } // Note that we do not need to push an `actions_blocking_raa_monitor_updates` // entry here, even though we *do* need to block the next RAA monitor update. @@ -11213,15 +11895,30 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // `ReleaseRAAChannelMonitorUpdate` action to the event generated when the // outbound HTLC is claimed. 
This is guaranteed to all complete before we // process the RAA as messages are processed from single peers serially. - funding_txo = chan.funding.get_funding_txo().expect("We won't accept a fulfill until funded"); + funding_txo = chan + .funding + .get_funding_txo() + .expect("We won't accept a fulfill until funded"); next_user_channel_id = chan.context.get_user_id(); res } else { - return try_channel_entry!(self, peer_state, Err(ChannelError::close( - "Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_entry); + return try_channel_entry!( + self, + peer_state, + Err(ChannelError::close( + "Got an update_fulfill_htlc message for an unfunded channel!" + .into() + )), + chan_entry + ); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => { + return Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )) + }, } }; self.claim_funds_internal( @@ -11249,7 +11946,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11262,7 +11959,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ "Got an update_fail_htlc message for an unfunded channel!".into())), chan_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } Ok(()) } @@ -11275,7 +11972,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11293,19 +11990,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } Ok(()) }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } } - #[rustfmt::skip] - fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> { + fn internal_commitment_signed( + &self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned, + ) -> Result<(), MsgHandleErrInternal> { let best_block = *self.best_block.read().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -11314,17 +12011,38 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let logger = WithChannelContext::from(&self.logger, &chan.context(), None); let funding_txo = chan.funding().get_funding_txo(); let (monitor_opt, monitor_update_opt) = try_channel_entry!( - self, peer_state, chan.commitment_signed(msg, best_block, &self.signer_provider, &self.fee_estimator, &&logger), - chan_entry); + self, + peer_state, + chan.commitment_signed( + msg, + best_block, + &self.signer_provider, + &self.fee_estimator, + &&logger + ), + chan_entry + ); if let Some(chan) = chan.as_funded_mut() { if let Some(monitor) = monitor_opt { - let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); + let monitor_res = + self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { - handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state, - per_peer_state, chan); + if let Some(data) = self.handle_initial_monitor( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + persist_state, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } else { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); + let logger = + WithChannelContext::from(&self.logger, &chan.context, None); log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated"); let msg = "Channel ID was a duplicate"; let reason = ClosureReason::ProcessingError { err: msg.to_owned() }; @@ -11332,13 +12050,27 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ try_channel_entry!(self, peer_state, Err(err), chan_entry) } } else if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock, - peer_state, per_peer_state, chan); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo.unwrap(), + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } } Ok(()) }, - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )), } } @@ -11348,7 +12080,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11363,15 +12095,24 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ ); if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!( - self, funding_txo.unwrap(), monitor_update, peer_state_lock, peer_state, - per_peer_state, chan - ); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo.unwrap(), + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } } Ok(()) }, - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), channel_id)) + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, channel_id)) } } @@ -11399,26 +12140,15 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ref mut pending_forwards, ) in per_source_pending_forwards { - let mut new_intercept_events = VecDeque::new(); - let mut failed_intercept_forwards = Vec::new(); if !pending_forwards.is_empty() { for (forward_info, prev_htlc_id) in pending_forwards.drain(..) { let scid = match forward_info.routing { PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, - PendingHTLCRouting::TrampolineForward { .. } => 0, - PendingHTLCRouting::Receive { .. } => 0, - PendingHTLCRouting::ReceiveKeysend { .. } => 0, + PendingHTLCRouting::TrampolineForward { .. } + | PendingHTLCRouting::Receive { .. } + | PendingHTLCRouting::ReceiveKeysend { .. } => 0, }; - // Pull this now to avoid introducing a lock order with `forward_htlcs`. 
- let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid); - let payment_hash = forward_info.payment_hash; - let logger = WithContext::from( - &self.logger, - None, - Some(prev_channel_id), - Some(payment_hash), - ); let pending_add = PendingAddHTLCInfo { prev_outbound_scid_alias, prev_counterparty_node_id, @@ -11428,119 +12158,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ prev_user_channel_id, forward_info, }; - let mut fail_intercepted_htlc = |pending_add: PendingAddHTLCInfo| { - let htlc_source = - HTLCSource::PreviousHopData(pending_add.htlc_previous_hop_data()); - let reason = HTLCFailReason::from_failure_code( - LocalHTLCFailureReason::UnknownNextPeer, - ); - let failure_type = HTLCHandlingFailureType::InvalidForward { - requested_forward_scid: scid, - }; - failed_intercept_forwards.push(( - htlc_source, - payment_hash, - reason, - failure_type, - )); - }; - // In the case that we have an HTLC that we're supposed to hold onto until the - // recipient comes online *and* the outbound scid is encoded as - // `fake_scid::is_valid_intercept`, we should first wait for the recipient to come - // online before generating an `HTLCIntercepted` event, since the event cannot be - // acted on until the recipient is online to cooperatively open the JIT channel. Once - // we receive the `ReleaseHeldHtlc` message from the recipient, we will circle back - // here and resume generating the event below. 
- if pending_add.forward_info.routing.should_hold_htlc() { - let intercept_id = InterceptId::from_htlc_id_and_chan_id( - prev_htlc_id, - &prev_channel_id, - &prev_counterparty_node_id, - ); - let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); - match held_htlcs.entry(intercept_id) { - hash_map::Entry::Vacant(entry) => { - log_trace!( - logger, - "Intercepted held HTLC with id {}, holding until the recipient is online", - intercept_id - ); - entry.insert(pending_add); - }, - hash_map::Entry::Occupied(_) => { - debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); - fail_intercepted_htlc(pending_add); - }, - } - } else if !is_our_scid - && pending_add.forward_info.incoming_amt_msat.is_some() - && fake_scid::is_valid_intercept( - &self.fake_scid_rand_bytes, - scid, - &self.chain_hash, - ) { - let intercept_id = InterceptId::from_incoming_shared_secret( - &pending_add.forward_info.incoming_shared_secret, - ); - let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); - match pending_intercepts.entry(intercept_id) { - hash_map::Entry::Vacant(entry) => { - new_intercept_events.push_back(( - events::Event::HTLCIntercepted { - requested_next_hop_scid: scid, - payment_hash, - inbound_amount_msat: pending_add - .forward_info - .incoming_amt_msat - .unwrap(), - expected_outbound_amount_msat: pending_add - .forward_info - .outgoing_amt_msat, - intercept_id, - }, - None, - )); - entry.insert(pending_add); - }, - hash_map::Entry::Occupied(_) => { - log_info!( - logger, - "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", - scid - ); - fail_intercepted_htlc(pending_add); - }, - } - } else { - match self.forward_htlcs.lock().unwrap().entry(scid) { - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); - }, - hash_map::Entry::Vacant(entry) => { - entry.insert(vec![HTLCForwardInfo::AddHTLC(pending_add)]); - }, 
- } + match self.forward_htlcs.lock().unwrap().entry(scid) { + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); + }, + hash_map::Entry::Vacant(entry) => { + entry.insert(vec![HTLCForwardInfo::AddHTLC(pending_add)]); + }, } } } - - for (htlc_source, payment_hash, failure_reason, destination) in - failed_intercept_forwards.drain(..) - { - self.fail_htlc_backwards_internal( - &htlc_source, - &payment_hash, - &failure_reason, - destination, - None, - ); - } - - if !new_intercept_events.is_empty() { - let mut events = self.pending_events.lock().unwrap(); - events.append(&mut new_intercept_events); - } } } @@ -11594,7 +12222,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let mut peer_state_lock = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) }).map(|mtx| mtx.lock().unwrap())?; let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -11610,8 +12238,19 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(monitor_update) = monitor_update_opt { let funding_txo = funding_txo_opt .expect("Funding outpoint must have been set for RAA handling to succeed"); - handle_new_monitor_update!(self, funding_txo, monitor_update, - peer_state_lock, peer_state, per_peer_state, chan); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo, + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } (htlcs_to_fail, static_invoices) } else { @@ -11619,7 +12258,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "Got a revoke_and_ack message for an unfunded channel!".into())), chan_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } }; self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id); @@ -11636,7 +12275,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11650,7 +12289,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ "Got an update_fee message for an unfunded channel!".into())), chan_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } Ok(()) } @@ -11660,9 +12299,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - msg.channel_id + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id ) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -11708,9 +12345,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ return try_channel_entry!(self, peer_state, err, chan_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close( - format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), - msg.channel_id + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id )) } } @@ -11721,7 +12356,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11751,7 +12386,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "Got an announcement_signatures message for an unfunded channel!".into())), chan_entry); } }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id)) } Ok(()) } @@ -11763,6 +12398,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), None => { // It's not a local channel + if msg.contents.message_flags & (1 << 1) != 0 { + log_debug!(self.logger, "Received channel_update for unknown channel {} with dont_forward set. You may wish to check if an incorrect tx_index was passed to chain::Confirm::transactions_confirmed.", msg.contents.short_channel_id); + } return Ok(NotifyOption::SkipPersistNoEvents) } }; @@ -11811,15 +12449,13 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ #[rustfmt::skip] fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { - let (inferred_splice_locked, need_lnd_workaround) = { + let (inferred_splice_locked, need_lnd_workaround, holding_cell_res) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), - msg.channel_id + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id ) })?; let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None); @@ -11872,7 +12508,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ peer_state.pending_msg_events.push(upd); } - (responses.inferred_splice_locked, need_lnd_workaround) + let holding_cell_res = self.check_free_peer_holding_cells(peer_state); + (responses.inferred_splice_locked, need_lnd_workaround, holding_cell_res) } else { return try_channel_entry!(self, peer_state, Err(ChannelError::close( "Got a channel_reestablish message for an unfunded channel!".into())), chan_entry); @@ -11907,14 +12544,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ my_current_funding_locked: None, }, }); - return Err(MsgHandleErrInternal::send_err_msg_no_close( - format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", - counterparty_node_id), msg.channel_id) + return Err(MsgHandleErrInternal::no_such_channel_for_peer(counterparty_node_id, msg.channel_id) ) } } }; + self.handle_holding_cell_free_result(holding_cell_res); + if let Some(channel_ready_msg) = need_lnd_workaround { self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?; } @@ -11927,14 +12564,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } /// Handle incoming splice request, transition channel to splice-pending (unless some check fails). - #[rustfmt::skip] - fn internal_splice_init(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceInit) -> Result<(), MsgHandleErrInternal> { + fn internal_splice_init( + &self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceInit, + ) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11943,22 +12580,28 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Look for the channel match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!( - "Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}, channel_id {}", - counterparty_node_id, msg.channel_id, - ), msg.channel_id)), + hash_map::Entry::Vacant(_) => { + return Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )) + }, hash_map::Entry::Occupied(mut chan_entry) => { if self.config.read().unwrap().reject_inbound_splices { let err = ChannelError::WarnAndDisconnect( - "Inbound channel splices are currently not allowed".to_owned() + "Inbound channel splices are currently not allowed".to_owned(), ); return Err(MsgHandleErrInternal::from_chan_no_close(err, msg.channel_id)); } if let Some(ref mut funded_channel) = chan_entry.get_mut().as_funded_mut() { let init_res = funded_channel.splice_init( - msg, our_funding_contribution, &self.signer_provider, &self.entropy_source, - &self.get_our_node_id(), &self.logger + msg, + our_funding_contribution, + &self.signer_provider, + &self.entropy_source, + &self.get_our_node_id(), + &self.logger, ); let splice_ack_msg = try_channel_entry!(self, peer_state, init_res, chan_entry); peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceAck { @@ -11967,43 +12610,59 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }); Ok(()) } else { - try_channel_entry!(self, peer_state, Err(ChannelError::close("Channel is not funded, cannot be spliced".into())), chan_entry) + try_channel_entry!( + self, + peer_state, + Err(ChannelError::close("Channel is not funded, cannot be spliced".into())), + chan_entry + ) } }, } } /// Handle incoming splice request ack, transition channel to splice-pending (unless some check fails). 
- #[rustfmt::skip] - fn internal_splice_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck) -> Result<(), MsgHandleErrInternal> { + fn internal_splice_ack( + &self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck, + ) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; // Look for the channel match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!( - "Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", - counterparty_node_id - ), msg.channel_id)), + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::no_such_channel_for_peer( + counterparty_node_id, + msg.channel_id, + )), hash_map::Entry::Occupied(mut chan_entry) => { if let Some(ref mut funded_channel) = chan_entry.get_mut().as_funded_mut() { let splice_ack_res = funded_channel.splice_ack( - msg, &self.signer_provider, &self.entropy_source, - &self.get_our_node_id(), &self.logger + msg, + &self.signer_provider, + &self.entropy_source, + &self.get_our_node_id(), + &self.logger, ); - let tx_msg_opt = try_channel_entry!(self, peer_state, splice_ack_res, chan_entry); + let tx_msg_opt = + try_channel_entry!(self, peer_state, splice_ack_res, chan_entry); if let Some(tx_msg) = tx_msg_opt { - peer_state.pending_msg_events.push(tx_msg.into_msg_send_event(counterparty_node_id.clone())); + peer_state + .pending_msg_events + .push(tx_msg.into_msg_send_event(counterparty_node_id.clone())); } Ok(()) } else { - try_channel_entry!(self, peer_state, Err(ChannelError::close("Channel is not funded, cannot be spliced".into())), chan_entry) + try_channel_entry!( + self, + peer_state, + Err(ChannelError::close("Channel is not funded, cannot be spliced".into())), + chan_entry + ) } }, } @@ -12015,13 +12674,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close( - format!( - "Can't find a peer matching the passed counterparty node_id {}", - counterparty_node_id - ), - msg.channel_id, - ) + MsgHandleErrInternal::no_such_peer(counterparty_node_id, msg.channel_id) })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -12029,11 +12682,10 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ // Look for the channel match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Vacant(_) => { - let err = format!( - "Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", + return Err(MsgHandleErrInternal::no_such_channel_for_peer( counterparty_node_id, - ); - return Err(MsgHandleErrInternal::send_err_msg_no_close(err, msg.channel_id)); + msg.channel_id, + )); }, hash_map::Entry::Occupied(mut chan_entry) => { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { @@ -12089,15 +12741,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } if let Some(monitor_update) = splice_promotion.monitor_update { - handle_new_monitor_update!( - self, + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, splice_promotion.funding_txo, monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } } } else { @@ -12198,7 +12854,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, e) = self.locked_handle_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); failed_channels.push((Err(e), counterparty_node_id)); } } @@ -12214,7 +12875,12 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let reason = ClosureReason::CommitmentTxConfirmed; let err = ChannelError::Close((reason.to_string(), reason)); let mut chan = chan_entry.remove(); - let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan); + let (_, e) = self.locked_handle_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + &mut chan, + ); failed_channels.push((Err(e), counterparty_node_id)); } } @@ -12231,72 +12897,89 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } for (err, counterparty_node_id) in failed_channels { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } has_pending_monitor_events } + fn handle_holding_cell_free_result(&self, result: FreeHoldingCellsResult) { + debug_assert_ne!( + self.total_consistency_lock.held_by_thread(), + LockHeldState::NotHeldByThread + ); + for (chan_id, cp_node_id, post_update_data, failed_htlcs) in result { + if let Some(data) = post_update_data { + self.handle_post_monitor_update_chan_resume(data); + } + + self.fail_holding_cell_htlcs(failed_htlcs, chan_id, &cp_node_id); + self.needs_persist_flag.store(true, Ordering::Release); + self.event_persist_notifier.notify(); + } + } + + /// Frees all holding cells in all the channels for a peer. + /// + /// Includes elements in the returned Vec only for channels which changed (implying persistence + /// is required). 
+ #[must_use] + fn check_free_peer_holding_cells( + &self, peer_state: &mut PeerState, + ) -> FreeHoldingCellsResult { + debug_assert_ne!( + self.total_consistency_lock.held_by_thread(), + LockHeldState::NotHeldByThread + ); + + let mut updates = Vec::new(); + let funded_chan_iter = peer_state + .channel_by_id + .iter_mut() + .filter_map(|(chan_id, chan)| chan.as_funded_mut().map(|chan| (chan_id, chan))); + for (chan_id, chan) in funded_chan_iter { + let (monitor_opt, holding_cell_failed_htlcs) = chan.maybe_free_holding_cell_htlcs( + &self.fee_estimator, + &&WithChannelContext::from(&self.logger, &chan.context, None), + ); + if monitor_opt.is_some() || !holding_cell_failed_htlcs.is_empty() { + let update_res = monitor_opt + .map(|monitor_update| { + self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + chan.funding.get_funding_txo().unwrap(), + monitor_update, + ) + }) + .flatten(); + let cp_node_id = chan.context.get_counterparty_node_id(); + updates.push((*chan_id, cp_node_id, update_res, holding_cell_failed_htlcs)); + } + } + updates + } + /// Check the holding cell in each channel and free any pending HTLCs in them if possible. /// Returns whether there were any updates such as if pending HTLCs were freed or a monitor /// update was applied. fn check_free_holding_cells(&self) -> bool { - let mut has_monitor_update = false; - let mut failed_htlcs = Vec::new(); + let mut unlocked_results = Vec::new(); - // Walk our list of channels and find any that need to update. Note that when we do find an - // update, if it includes actions that must be taken afterwards, we have to drop the - // per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we - // manage to go through all our peers without finding a single channel to update. 
- 'peer_loop: loop { + { let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { - 'chan_loop: loop { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state: &mut PeerState<_> = &mut *peer_state_lock; - for (channel_id, chan) in - peer_state.channel_by_id.iter_mut().filter_map(|(chan_id, chan)| { - chan.as_funded_mut().map(|chan| (chan_id, chan)) - }) { - let counterparty_node_id = chan.context.get_counterparty_node_id(); - let funding_txo = chan.funding.get_funding_txo(); - let (monitor_opt, holding_cell_failed_htlcs) = chan - .maybe_free_holding_cell_htlcs( - &self.fee_estimator, - &&WithChannelContext::from(&self.logger, &chan.context, None), - ); - if !holding_cell_failed_htlcs.is_empty() { - failed_htlcs.push(( - holding_cell_failed_htlcs, - *channel_id, - counterparty_node_id, - )); - } - if let Some(monitor_update) = monitor_opt { - has_monitor_update = true; - - handle_new_monitor_update!( - self, - funding_txo.unwrap(), - monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); - continue 'peer_loop; - } - } - break 'chan_loop; - } + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state: &mut PeerState<_> = &mut *peer_state_lock; + unlocked_results.append(&mut self.check_free_peer_holding_cells(peer_state)); } - break 'peer_loop; } - let has_update = has_monitor_update || !failed_htlcs.is_empty(); - for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) { - self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id); - } + let has_update = !unlocked_results.is_empty(); + self.handle_holding_cell_free_result(unlocked_results); has_update } @@ -12308,53 +12991,50 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ /// attempted in every channel, or in the specifically provided channel. 
/// /// [`ChannelSigner`]: crate::sign::ChannelSigner - #[rustfmt::skip] pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); // Returns whether we should remove this channel as it's just been closed. - let unblock_chan = |chan: &mut Channel, pending_msg_events: &mut Vec| -> Result, ChannelError> { + let unblock_chan = |chan: &mut Channel, + pending_msg_events: &mut Vec| + -> Result, ChannelError> { let channel_id = chan.context().channel_id(); let outbound_scid_alias = chan.context().outbound_scid_alias(); let logger = WithChannelContext::from(&self.logger, &chan.context(), None); let node_id = chan.context().get_counterparty_node_id(); - let cbp = |htlc_id| self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &node_id); + let cbp = |htlc_id| { + self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &node_id) + }; let msgs = chan.signer_maybe_unblocked(self.chain_hash, &&logger, cbp)?; if let Some(msgs) = msgs { if chan.context().is_connected() { if let Some(msg) = msgs.open_channel { - pending_msg_events.push(MessageSendEvent::SendOpenChannel { - node_id, - msg, - }); + pending_msg_events.push(MessageSendEvent::SendOpenChannel { node_id, msg }); } if let Some(msg) = msgs.funding_created { - pending_msg_events.push(MessageSendEvent::SendFundingCreated { - node_id, - msg, - }); + pending_msg_events + .push(MessageSendEvent::SendFundingCreated { node_id, msg }); } if let Some(msg) = msgs.accept_channel { - pending_msg_events.push(MessageSendEvent::SendAcceptChannel { - node_id, - msg, - }); + pending_msg_events + .push(MessageSendEvent::SendAcceptChannel { node_id, msg }); } - let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs { - node_id, - channel_id, - updates, - }); - let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK { - node_id, - msg, + let cu_msg = 
msgs.commitment_update.map(|updates| { + MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates } }); + let raa_msg = msgs + .revoke_and_ack + .map(|msg| MessageSendEvent::SendRevokeAndACK { node_id, msg }); match (cu_msg, raa_msg) { - (Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => { + (Some(cu), Some(raa)) + if msgs.order == RAACommitmentOrder::CommitmentFirst => + { pending_msg_events.push(cu); pending_msg_events.push(raa); }, - (Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => { + (Some(cu), Some(raa)) + if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => + { pending_msg_events.push(raa); pending_msg_events.push(cu); }, @@ -12363,25 +13043,45 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ (_, _) => {}, } if let Some(msg) = msgs.funding_signed { - pending_msg_events.push(MessageSendEvent::SendFundingSigned { + pending_msg_events + .push(MessageSendEvent::SendFundingSigned { node_id, msg }); + } + if let Some(msg) = msgs.funding_commit_sig { + pending_msg_events.push(MessageSendEvent::UpdateHTLCs { node_id, - msg, + channel_id, + updates: CommitmentUpdate { + update_add_htlcs: vec![], + update_fulfill_htlcs: vec![], + update_fail_htlcs: vec![], + update_fail_malformed_htlcs: vec![], + update_fee: None, + commitment_signed: vec![msg], + }, }); } + if let Some(msg) = msgs.tx_signatures { + pending_msg_events + .push(MessageSendEvent::SendTxSignatures { node_id, msg }); + } if let Some(msg) = msgs.closing_signed { - pending_msg_events.push(MessageSendEvent::SendClosingSigned { - node_id, - msg, - }); + pending_msg_events + .push(MessageSendEvent::SendClosingSigned { node_id, msg }); } } if let Some(funded_chan) = chan.as_funded() { if let Some(msg) = msgs.channel_ready { - send_channel_ready!(self, pending_msg_events, funded_chan, msg); + self.send_channel_ready(pending_msg_events, funded_chan, msg); } if let Some(broadcast_tx) = msgs.signed_closing_tx { 
log_info!(logger, "Broadcasting closing tx {}", log_tx!(broadcast_tx)); - self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]); + self.tx_broadcaster.broadcast_transactions(&[( + &broadcast_tx, + TransactionType::CooperativeClose { + counterparty_node_id: node_id, + channel_id, + }, + )]); } } else { // We don't know how to handle a channel_ready or signed_closing_tx for a @@ -12400,7 +13100,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state_iter = per_peer_state.iter().filter(|(cp_id, _)| { if let Some((counterparty_node_id, _)) = channel_opt { **cp_id == counterparty_node_id - } else { true } + } else { + true + } }); for (cp_id, peer_state_mutex) in per_peer_state_iter { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -12411,7 +13113,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ _ => match unblock_chan(chan, &mut peer_state.pending_msg_events) { Ok(shutdown_result) => shutdown_result, Err(err) => { - let (_, err) = convert_channel_err!(self, peer_state, err, chan); + let (_, err) = self.locked_handle_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + chan, + ); shutdown_results.push((Err(err), *cp_id)); return false; }, @@ -12422,14 +13129,18 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let logger = WithChannelContext::from(&self.logger, context, None); log_trace!(logger, "Removing channel now that the signer is unblocked"); let (remove, err) = if let Some(funded) = chan.as_funded_mut() { - let err = - convert_channel_err!(self, peer_state, shutdown, funded, COOP_CLOSED); + let err = self.locked_handle_funded_coop_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + shutdown, + funded, + ); (true, err) } else { debug_assert!(false); let reason = shutdown.closure_reason.clone(); let err = ChannelError::Close((reason.to_string(), reason)); - convert_channel_err!(self, peer_state, err, chan, UNFUNDED_CHANNEL) + self.locked_handle_unfunded_close(err, chan) }; debug_assert!(remove); shutdown_results.push((Err(err), *cp_id)); @@ -12441,14 +13152,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } drop(per_peer_state); for (err, counterparty_node_id) in shutdown_results { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } } /// Check whether any channels have finished removing all pending updates after a shutdown /// exchange and can now send a closing_signed. /// Returns whether any closing_signed messages were generated. - #[rustfmt::skip] fn maybe_generate_initial_closing_signed(&self) -> bool { let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new(); let mut has_update = false; @@ -12465,33 +13175,67 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } match chan.as_funded_mut() { Some(funded_chan) => { - let logger = WithChannelContext::from(&self.logger, &funded_chan.context, None); - match funded_chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) { + let logger = + WithChannelContext::from(&self.logger, &funded_chan.context, None); + match funded_chan + .maybe_propose_closing_signed(&self.fee_estimator, &&logger) + { Ok((msg_opt, tx_shutdown_result_opt)) => { if let Some(msg) = msg_opt { has_update = true; - pending_msg_events.push(MessageSendEvent::SendClosingSigned { - node_id: funded_chan.context.get_counterparty_node_id(), msg, - }); + pending_msg_events.push( + MessageSendEvent::SendClosingSigned { + node_id: funded_chan + .context + .get_counterparty_node_id(), + msg, + }, + ); } - debug_assert_eq!(tx_shutdown_result_opt.is_some(), funded_chan.is_shutdown()); + debug_assert_eq!( + tx_shutdown_result_opt.is_some(), + funded_chan.is_shutdown() + ); if let Some((tx, shutdown_res)) = tx_shutdown_result_opt { // We're done with this channel. We got a closing_signed and sent back // a closing_signed with a closing transaction to broadcast. 
- let err = convert_channel_err!(self, peer_state, shutdown_res, funded_chan, COOP_CLOSED); + let channel_id = funded_chan.context.channel_id(); + let err = self.locked_handle_funded_coop_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + shutdown_res, + funded_chan, + ); handle_errors.push((*cp_id, Err(err))); log_info!(logger, "Broadcasting {}", log_tx!(tx)); - self.tx_broadcaster.broadcast_transactions(&[&tx]); + self.tx_broadcaster.broadcast_transactions(&[( + &tx, + TransactionType::CooperativeClose { + counterparty_node_id: *cp_id, + channel_id, + }, + )]); false - } else { true } + } else { + true + } }, Err(e) => { has_update = true; - let (close_channel, res) = convert_channel_err!(self, peer_state, e, funded_chan, FUNDED_CHANNEL); - handle_errors.push((funded_chan.context.get_counterparty_node_id(), Err(res))); + let (close_channel, res) = self + .locked_handle_funded_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + e, + funded_chan, + ); + handle_errors.push(( + funded_chan.context.get_counterparty_node_id(), + Err(res), + )); !close_channel - } + }, } }, None => true, // Retain unfunded channels if present. @@ -12501,7 +13245,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } for (counterparty_node_id, err) in handle_errors { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } has_update @@ -12546,9 +13290,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); if peer_state_mutex_opt.is_none() { - result = Err(APIError::ChannelUnavailable { - err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") - }); + result = Err(APIError::no_such_peer(counterparty_node_id)); return notify; } @@ -12582,10 +13324,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, hash_map::Entry::Vacant(_) => { - result = Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id), - }); + result = Err(APIError::no_such_channel_for_peer( + channel_id, + counterparty_node_id, + )); }, } @@ -12598,27 +13340,32 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ #[cfg(any(test, fuzzing))] #[rustfmt::skip] pub fn exit_quiescence(&self, counterparty_node_id: &PublicKey, channel_id: &ChannelId) -> Result { - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { - err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") - })?; - let mut peer_state = peer_state_mutex.lock().unwrap(); - let initiator = match peer_state.channel_by_id.entry(*channel_id) { - hash_map::Entry::Occupied(mut chan_entry) => { - if let Some(chan) = chan_entry.get_mut().as_funded_mut() { - chan.exit_quiescence() - } else { - return Err(APIError::APIMisuseError { - err: format!("Unfunded channel {} cannot be quiescent", channel_id), - }) - } - }, - hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id), - }), + let 
_read_guard = self.total_consistency_lock.read().unwrap(); + + let initiator = { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex = per_peer_state.get(counterparty_node_id) + .ok_or_else(|| APIError::no_such_peer(counterparty_node_id))?; + let mut peer_state = peer_state_mutex.lock().unwrap(); + match peer_state.channel_by_id.entry(*channel_id) { + hash_map::Entry::Occupied(mut chan_entry) => { + if let Some(chan) = chan_entry.get_mut().as_funded_mut() { + chan.exit_quiescence() + } else { + return Err(APIError::APIMisuseError { + err: format!("Unfunded channel {} cannot be quiescent", channel_id), + }) + } + }, + hash_map::Entry::Vacant(_) => { + return Err(APIError::no_such_channel_for_peer( + channel_id, + counterparty_node_id, + )) + }, + } }; + self.check_free_holding_cells(); Ok(initiator) } @@ -12690,7 +13437,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let mut invoice = invoice .duration_since_epoch(duration_since_epoch) .payee_pub_key(self.get_our_node_id()) - .payment_hash(Hash::from_slice(&payment_hash.0).unwrap()) + .payment_hash(payment_hash) .payment_secret(payment_secret) .basic_mpp() .min_final_cltv_expiry_delta( @@ -12794,7 +13541,7 @@ macro_rules! create_offer_builder { ($self: ident, $builder: ty) => { /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest pub fn create_offer_builder(&$self) -> Result<$builder, Bolt12SemanticError> { let builder = $self.flow.create_offer_builder( - &*$self.entropy_source, $self.get_peers_for_blinded_path() + &$self.entropy_source, $self.get_peers_for_blinded_path() )?; Ok(builder.into()) @@ -12811,15 +13558,12 @@ macro_rules! 
create_offer_builder { ($self: ident, $builder: ty) => { /// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath /// [`Offer`]: crate::offers::offer::Offer /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest - pub fn create_offer_builder_using_router( + pub fn create_offer_builder_using_router( &$self, router: ME, - ) -> Result<$builder, Bolt12SemanticError> - where - ME::Target: MessageRouter, - { + ) -> Result<$builder, Bolt12SemanticError> { let builder = $self.flow.create_offer_builder_using_router( - router, &*$self.entropy_source, $self.get_peers_for_blinded_path() + router, &$self.entropy_source, $self.get_peers_for_blinded_path() )?; Ok(builder.into()) @@ -12871,7 +13615,7 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { &$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, retry_strategy: Retry, route_params_config: RouteParametersConfig ) -> Result<$builder, Bolt12SemanticError> { - let entropy = &*$self.entropy_source; + let entropy = &$self.entropy_source; let builder = $self.flow.create_refund_builder( entropy, amount_msats, absolute_expiry, @@ -12908,14 +13652,11 @@ macro_rules! 
create_refund_builder { ($self: ident, $builder: ty) => { /// [`Refund`]: crate::offers::refund::Refund /// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice - pub fn create_refund_builder_using_router( + pub fn create_refund_builder_using_router( &$self, router: ME, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, retry_strategy: Retry, route_params_config: RouteParametersConfig - ) -> Result<$builder, Bolt12SemanticError> - where - ME::Target: MessageRouter, - { - let entropy = &*$self.entropy_source; + ) -> Result<$builder, Bolt12SemanticError> { + let entropy = &$self.entropy_source; let builder = $self.flow.create_refund_builder_using_router( router, entropy, amount_msats, absolute_expiry, @@ -12936,26 +13677,16 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { } } impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { #[cfg(not(c_bindings))] create_offer_builder!(self, OfferBuilder<'_, DerivedMetadata, secp256k1::All>); @@ -13145,7 +13876,7 @@ where payer_note: Option, payment_id: PaymentId, human_readable_name: Option, create_pending_payment: CPP, ) -> Result<(), Bolt12SemanticError> { - let entropy = &*self.entropy_source; + let entropy = &self.entropy_source; let nonce = Nonce::from_entropy_source(entropy); let builder = self.flow.create_invoice_request_builder( @@ -13213,7 +13944,7 @@ where &self, refund: &Refund, ) -> 
Result { let secp_ctx = &self.secp_ctx; - let entropy = &*self.entropy_source; + let entropy = &self.entropy_source; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -13278,7 +14009,7 @@ where optional_params: OptionalOfferPaymentParams, dns_resolvers: Vec, ) -> Result<(), ()> { let (onion_message, context) = - self.flow.hrn_resolver.resolve_name(payment_id, name, &*self.entropy_source)?; + self.flow.hrn_resolver.resolve_name(payment_id, name, &self.entropy_source)?; let expiration = StaleExpiration::TimerTicks(1); self.pending_outbound_payments.add_new_awaiting_offer( @@ -13598,6 +14329,7 @@ where pub fn push_pending_event(&self, event: events::Event) { let mut events = self.pending_events.lock().unwrap(); events.push_back((event, None)); + self.event_persist_notifier.notify(); } #[cfg(test)] @@ -13682,8 +14414,26 @@ where if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() { log_debug!(logger, "Unlocking monitor updating and updating monitor", ); - handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update, - peer_state_lck, peer_state, per_peer_state, chan); + let post_update_data = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + channel_funding_outpoint, + monitor_update, + ); + let holding_cell_res = self.check_free_peer_holding_cells(peer_state); + + mem::drop(peer_state_lck); + mem::drop(per_peer_state); + + if let Some(data) = post_update_data { + self.handle_post_monitor_update_chan_resume(data); + } + + self.handle_holding_cell_free_result(holding_cell_res); + if further_update_exists { // If there are more `ChannelMonitorUpdate`s to process, restart at the // top of the loop. 
@@ -13705,6 +14455,10 @@ where } fn handle_post_event_actions>(&self, actions: I) { + debug_assert_ne!( + self.total_consistency_lock.held_by_thread(), + LockHeldState::NotHeldByThread + ); for action in actions.into_iter() { match action { EventCompletionAction::ReleaseRAAChannelMonitorUpdate { @@ -13726,10 +14480,11 @@ where }, ) => { let per_peer_state = self.per_peer_state.read().unwrap(); - let mut peer_state = per_peer_state + let mut peer_state_lock = per_peer_state .get(&counterparty_node_id) .map(|state| state.lock().unwrap()) .expect("Channels originating a payment resolution must have peer state"); + let peer_state = &mut *peer_state_lock; let update_id = peer_state .closed_channel_monitor_update_ids .get_mut(&channel_id) @@ -13756,16 +14511,18 @@ where }; self.pending_background_events.lock().unwrap().push(event); } else { - handle_post_close_monitor_update!( - self, + if let Some(actions) = self.handle_post_close_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, channel_funding_outpoint, update, - peer_state, - peer_state, - per_peer_state, counterparty_node_id, - channel_id - ); + channel_id, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(actions); + } } }, } @@ -13788,26 +14545,16 @@ where } impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > BaseMessageHandler for ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { fn provided_node_features(&self) -> 
NodeFeatures { provided_node_features(&self.config.read().unwrap()) @@ -13855,7 +14602,12 @@ where // Clean up for removal. let reason = ClosureReason::DisconnectedPeer; let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = convert_channel_err!(self, peer_state, err, chan); + let (_, e) = self.locked_handle_force_close( + &mut peer_state.closed_channel_monitor_update_ids, + &mut peer_state.in_flight_monitor_updates, + err, + chan, + ); failed_channels.push((Err(e), counterparty_node_id)); false }); @@ -13895,7 +14647,9 @@ where &MessageSendEvent::UpdateHTLCs { .. } => false, &MessageSendEvent::SendRevokeAndACK { .. } => false, &MessageSendEvent::SendClosingSigned { .. } => false, + #[cfg(simple_close)] &MessageSendEvent::SendClosingComplete { .. } => false, + #[cfg(simple_close)] &MessageSendEvent::SendClosingSig { .. } => false, &MessageSendEvent::SendShutdown { .. } => false, &MessageSendEvent::SendChannelReestablish { .. } => false, @@ -13945,7 +14699,7 @@ where }; for (err, counterparty_node_id) in failed_channels.drain(..) { - let _ = handle_error!(self, err, counterparty_node_id); + let _ = self.handle_error(err, counterparty_node_id); } persist @@ -14092,19 +14846,32 @@ where PersistenceNotifierGuard::optionally_notify(self, || { let mut result = NotifyOption::SkipPersistNoEvents; + // This method is quite performance-sensitive. Not only is it called very often, but it + // *is* the critical path between generating a message for a peer and giving it to the + // `PeerManager` to send. Thus, we should avoid adding any more logic here than we + // need, especially anything that might end up causing I/O (like a + // `ChannelMonitorUpdate`)! + // TODO: This behavior should be documented. It's unintuitive that we query // ChannelMonitors when clearing other events. 
if self.process_pending_monitor_events() { result = NotifyOption::DoPersist; } - if self.check_free_holding_cells() { - result = NotifyOption::DoPersist; - } if self.maybe_generate_initial_closing_signed() { result = NotifyOption::DoPersist; } + #[cfg(test)] + if self.check_free_holding_cells() { + // In tests, we want to ensure that we never forget to free holding cells + // immediately, so we check it here. + // Note that we can't turn this on for `debug_assertions` because there's a race in + // (at least) the fee-update logic in `timer_tick_occurred` which can lead to us + // freeing holding cells here while its running. + debug_assert!(false, "Holding cells should always be auto-free'd"); + } + // Quiescence is an in-memory protocol, so we don't have to persist because of it. self.maybe_send_stfu(); @@ -14139,26 +14906,16 @@ where } impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > EventsProvider for ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { /// Processes events that must be periodically handled. 
/// @@ -14174,26 +14931,16 @@ where } impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > chain::Listen for ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) { { @@ -14204,6 +14951,9 @@ where "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height"); } + log_info!(self.logger, "Block {} at height {} connected with {} relevant transactions", + header.block_hash(), height, txdata.len()); + self.transactions_confirmed(header, txdata, height); self.best_block_updated(header, height); } @@ -14235,26 +14985,16 @@ where } impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > chain::Confirm for ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { #[rustfmt::skip] fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) { @@ -14263,7 +15003,16 @@ where // See the docs for `ChannelManagerReadArgs` for more. 
let block_hash = header.block_hash(); - log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height); + log_info!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height); + + // Log individual txids when the list is small enough to be useful for debugging + // block sync issues. Cap at 10 to avoid flooding the logs. + if !txdata.is_empty() && txdata.len() <= 10 { + for (_, tx) in txdata.iter() { + log_debug!(self.logger, " Confirmed txid {} in block {} at height {}", + tx.compute_txid(), block_hash, height); + } + } let _persistence_guard = PersistenceNotifierGuard::optionally_notify_skipping_background_events( @@ -14295,7 +15044,7 @@ where // See the docs for `ChannelManagerReadArgs` for more. let block_hash = header.block_hash(); - log_trace!(self.logger, "New best block: {} at height {}", block_hash, height); + log_info!(self.logger, "New best block: {} at height {}", block_hash, height); let _persistence_guard = PersistenceNotifierGuard::optionally_notify_skipping_background_events( @@ -14408,33 +15157,34 @@ pub(super) enum FundingConfirmedMessage { } impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { /// Calls a function which handles an on-chain event (blocks dis/connected, transactions /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by /// the function. 
- #[rustfmt::skip] - fn do_chain_event) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason>> - (&self, height_opt: Option, f: FN) { + fn do_chain_event< + FN: Fn( + &mut FundedChannel, + ) -> Result< + ( + Option, + Vec<(HTLCSource, PaymentHash)>, + Option, + ), + ClosureReason, + >, + >( + &self, height_opt: Option, f: FN, + ) { // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called // during initialization prior to the chain_monitor being fully configured in some cases. // See the docs for `ChannelManagerReadArgs` for more. @@ -14465,7 +15215,7 @@ where let logger = WithChannelContext::from(&self.logger, &funded_channel.context, None); match funding_confirmed_opt { Some(FundingConfirmedMessage::Establishment(channel_ready)) => { - send_channel_ready!(self, pending_msg_events, funded_channel, channel_ready); + self.send_channel_ready(pending_msg_events, funded_channel, channel_ready); if funded_channel.context.is_usable() && peer_state.is_connected { log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty"); if let Ok((msg, _, _)) = self.get_channel_update_for_unicast(funded_channel) { @@ -14487,12 +15237,12 @@ where insert_short_channel_id!(short_to_chan_info, funded_channel); if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update_locked_actions_handled_by_caller!( - self, + self.handle_new_monitor_update_locked_actions_handled_by_caller( + &mut peer_state.in_flight_monitor_updates, + funded_channel.context.channel_id(), funding_txo, + funded_channel.context.get_counterparty_node_id(), monitor_update, - &mut peer_state.in_flight_monitor_updates, - funded_channel.context ); to_process_monitor_update_actions.push(( counterparty_node_id, channel_id @@ -14594,12 +15344,10 @@ where // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. 
let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = convert_channel_err!( - self, - peer_state, + let (_, e) = self.locked_handle_funded_force_close( + &mut peer_state.closed_channel_monitor_update_ids, &mut peer_state.in_flight_monitor_updates, err, - funded_channel, - FUNDED_CHANNEL + funded_channel ); failed_channels.push((Err(e), *counterparty_node_id)); return false; @@ -14616,22 +15364,34 @@ where } if let Some(height) = height_opt { - self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| { - payment.htlcs.retain(|htlc| { - // If height is approaching the number of blocks we think it takes us to get - // our commitment transaction confirmed before the HTLC expires, plus the - // number of blocks we generally consider it to take to do a commitment update, - // just give up on it and fail the HTLC. - if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { - let reason = LocalHTLCFailureReason::PaymentClaimBuffer; - timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), - HTLCFailReason::reason(reason, invalid_payment_err_data(htlc.value, height)), - HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() })); - false - } else { true } - }); - !payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. - }); + self.claimable_payments.lock().unwrap().claimable_payments.retain( + |payment_hash, payment| { + payment.htlcs.retain(|htlc| { + // If height is approaching the number of blocks we think it takes us to get + // our commitment transaction confirmed before the HTLC expires, plus the + // number of blocks we generally consider it to take to do a commitment update, + // just give up on it and fail the HTLC. 
+ if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { + let reason = LocalHTLCFailureReason::PaymentClaimBuffer; + timed_out_htlcs.push(( + HTLCSource::PreviousHopData(htlc.prev_hop.clone()), + payment_hash.clone(), + HTLCFailReason::reason( + reason, + invalid_payment_err_data(htlc.value, height), + ), + HTLCHandlingFailureType::Receive { + payment_hash: payment_hash.clone(), + }, + )); + false + } else { + true + } + }); + !payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry. + }, + ); let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); intercepted_htlcs.retain(|_, htlc| { @@ -14641,20 +15401,34 @@ where PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, _ => unreachable!(), }; - timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash, - HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ForwardExpiryBuffer), - HTLCHandlingFailureType::InvalidForward { requested_forward_scid })); + timed_out_htlcs.push(( + prev_hop_data, + htlc.forward_info.payment_hash, + HTLCFailReason::from_failure_code( + LocalHTLCFailureReason::ForwardExpiryBuffer, + ), + HTLCHandlingFailureType::InvalidForward { requested_forward_scid }, + )); let logger = WithContext::from( - &self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash) + &self.logger, + None, + Some(htlc.prev_channel_id), + Some(htlc.forward_info.payment_hash), + ); + log_trace!( + logger, + "Timing out intercepted HTLC with requested forward scid {}", + requested_forward_scid ); - log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid); false - } else { true } + } else { + true + } }); } for (failure, counterparty_node_id) in failed_channels { - let _ = handle_error!(self, failure, counterparty_node_id); + let _ = self.handle_error(failure, counterparty_node_id); } for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) 
{ @@ -14735,26 +15509,16 @@ where } impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > ChannelMessageHandler for ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { fn handle_open_channel(&self, counterparty_node_id: PublicKey, message: &msgs::OpenChannel) { // Note that we never need to persist the updated ChannelManager for an inbound @@ -14770,7 +15534,7 @@ where }, _ => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14778,7 +15542,7 @@ where #[rustfmt::skip] fn handle_open_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannelV2) { if !self.init_features().supports_dual_fund() { - let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( + let _: Result<(), _> = self.handle_error(Err(MsgHandleErrInternal::send_err_msg_no_close( "Dual-funded channels not supported".to_owned(), msg.common_fields.temporary_channel_id.clone())), counterparty_node_id); return; @@ -14795,7 +15559,7 @@ where }, _ => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14806,7 +15570,7 @@ where // change to the contents. 
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_accept_channel(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); NotifyOption::SkipPersistHandleEvents }); } @@ -14818,26 +15582,26 @@ where "Dual-funded channels not supported".to_owned(), msg.common_fields.temporary_channel_id.clone(), )); - let _: Result<(), _> = handle_error!(self, err, counterparty_node_id); + let _: Result<(), _> = self.handle_error(err, counterparty_node_id); } fn handle_funding_created(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingCreated) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_funding_created(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_funding_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingSigned) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_funding_signed(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) { let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents); let res = self.internal_peer_storage(counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_peer_storage_retrieval( @@ -14846,7 +15610,7 @@ where let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents); let res = self.internal_peer_storage_retrieval(counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = 
self.handle_error(res, counterparty_node_id); } fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) { @@ -14860,7 +15624,7 @@ where Err(e) if e.closes_channel() => NotifyOption::DoPersist, _ => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14879,7 +15643,7 @@ where } }, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14892,7 +15656,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14905,7 +15669,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistHandleEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14919,7 +15683,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::DoPersist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14927,27 +15691,27 @@ where fn handle_shutdown(&self, counterparty_node_id: PublicKey, msg: &msgs::Shutdown) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_shutdown(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_closing_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::ClosingSigned) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_closing_signed(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = 
self.handle_error(res, counterparty_node_id); } #[cfg(simple_close)] fn handle_closing_complete(&self, counterparty_node_id: PublicKey, msg: msgs::ClosingComplete) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_closing_complete(counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } #[cfg(simple_close)] fn handle_closing_sig(&self, counterparty_node_id: PublicKey, msg: msgs::ClosingSig) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_closing_sig(counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_update_add_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) { @@ -14961,7 +15725,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistNoEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -14971,7 +15735,7 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_update_fulfill_htlc(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_update_fail_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) { @@ -14985,7 +15749,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistNoEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15003,7 +15767,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistNoEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = 
self.handle_error(res, counterparty_node_id); persist }); } @@ -15013,7 +15777,7 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_commitment_signed(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_commitment_signed_batch( @@ -15022,13 +15786,13 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_commitment_signed_batch(&counterparty_node_id, channel_id, batch); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_revoke_and_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::RevokeAndACK) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_revoke_and_ack(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_update_fee(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFee) { @@ -15042,7 +15806,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(()) => NotifyOption::SkipPersistNoEvents, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15052,13 +15816,13 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_announcement_signatures(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_channel_update(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelUpdate) { PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_channel_update(&counterparty_node_id, msg); - if let Ok(persist) = handle_error!(self, res, counterparty_node_id) { + if let 
Ok(persist) = self.handle_error(res, counterparty_node_id) { persist } else { NotifyOption::DoPersist @@ -15071,7 +15835,7 @@ where ) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_channel_reestablish(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } #[rustfmt::skip] @@ -15198,7 +15962,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15210,7 +15974,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15222,7 +15986,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15234,7 +15998,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15246,7 +16010,7 @@ where Err(_) => NotifyOption::DoPersist, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15254,7 +16018,7 @@ where fn handle_tx_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::TxSignatures) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let res = self.internal_tx_signatures(&counterparty_node_id, msg); - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); } fn handle_tx_init_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxInitRbf) { @@ -15262,7 
+16026,7 @@ where "Dual-funded channels not supported".to_owned(), msg.channel_id.clone(), )); - let _: Result<(), _> = handle_error!(self, err, counterparty_node_id); + let _: Result<(), _> = self.handle_error(err, counterparty_node_id); } fn handle_tx_ack_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAckRbf) { @@ -15270,7 +16034,7 @@ where "Dual-funded channels not supported".to_owned(), msg.channel_id.clone(), )); - let _: Result<(), _> = handle_error!(self, err, counterparty_node_id); + let _: Result<(), _> = self.handle_error(err, counterparty_node_id); } fn handle_tx_abort(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAbort) { @@ -15284,7 +16048,7 @@ where Err(_) => NotifyOption::SkipPersistHandleEvents, Ok(persist) => *persist, }; - let _ = handle_error!(self, res, counterparty_node_id); + let _ = self.handle_error(res, counterparty_node_id); persist }); } @@ -15310,26 +16074,16 @@ where } impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > OffersMessageHandler for ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { #[rustfmt::skip] fn handle_message( @@ -15470,8 +16224,8 @@ where Err(()) => return None, }; - let logger = WithContext::from( - &self.logger, None, None, Some(invoice.payment_hash()), + let logger = WithContext::for_payment( + &self.logger, None, None, Some(invoice.payment_hash()), payment_id, ); if self.config.read().unwrap().manually_handle_bolt12_invoices { @@ -15492,7 +16246,7 @@ where }, OffersMessage::StaticInvoice(invoice) => { let payment_id = match 
context { - Some(OffersContext::OutboundPayment { payment_id, .. }) => payment_id, + Some(OffersContext::OutboundPaymentForOffer { payment_id, .. }) => payment_id, _ => return None }; let res = self.initiate_async_payment(&invoice, payment_id); @@ -15508,7 +16262,8 @@ where log_trace!(logger, "Received invoice_error: {}", invoice_error); match context { - Some(OffersContext::OutboundPayment { payment_id, .. }) => { + Some(OffersContext::OutboundPaymentForOffer { payment_id, .. }) + |Some(OffersContext::OutboundPaymentForRefund { payment_id, .. }) => { self.abandon_payment_with_reason( payment_id, PaymentFailureReason::InvoiceRequestRejected, ); @@ -15527,26 +16282,16 @@ where } impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > AsyncPaymentsMessageHandler for ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { fn handle_offer_paths_request( &self, message: OfferPathsRequest, context: AsyncPaymentsContext, @@ -15574,8 +16319,8 @@ where responder.clone(), self.get_peers_for_blinded_path(), self.list_usable_channels(), - &*self.entropy_source, - &*self.router, + &self.entropy_source, + &self.router, ) { Some((msg, ctx)) => (msg, ctx), None => return None, @@ -15650,6 +16395,7 @@ where prev_outbound_scid_alias, htlc_id, } => { + let _serialize_guard = PersistenceNotifierGuard::notify_on_drop(self); // It's possible the release_held_htlc message raced ahead of us transitioning the pending // update_add to `Self::pending_intercept_htlcs`. 
If that's the case, update the pending // update_add to indicate that the HTLC should be released immediately. @@ -15688,16 +16434,18 @@ where }, } }; - match htlc.forward_info.routing { - PendingHTLCRouting::Forward { ref mut hold_htlc, .. } => { + let next_hop_scid = match htlc.forward_info.routing { + PendingHTLCRouting::Forward { ref mut hold_htlc, short_channel_id, .. } => { debug_assert!(hold_htlc.is_some()); *hold_htlc = None; + short_channel_id }, _ => { debug_assert!(false, "HTLC intercepts can only be forwards"); + // Let the HTLC be auto-failed before it expires. return; }, - } + }; let logger = WithContext::from( &self.logger, @@ -15707,16 +16455,56 @@ where ); log_trace!(logger, "Releasing held htlc with intercept_id {}", intercept_id); - let mut per_source_pending_forward = [( - htlc.prev_outbound_scid_alias, - htlc.prev_counterparty_node_id, - htlc.prev_funding_outpoint, - htlc.prev_channel_id, - htlc.prev_user_channel_id, - vec![(htlc.forward_info, htlc.prev_htlc_id)], - )]; - self.forward_htlcs(&mut per_source_pending_forward); - PersistenceNotifierGuard::notify_on_drop(self); + let should_intercept = self + .do_funded_channel_callback(next_hop_scid, |chan| { + self.forward_needs_intercept_to_known_chan(chan) + }) + .unwrap_or_else(|| self.forward_needs_intercept_to_unknown_chan(next_hop_scid)); + + if should_intercept { + let intercept_id = InterceptId::from_htlc_id_and_chan_id( + htlc.prev_htlc_id, + &htlc.prev_channel_id, + &htlc.prev_counterparty_node_id, + ); + let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); + match pending_intercepts.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + if let Ok(intercept_ev) = + create_htlc_intercepted_event(intercept_id, &htlc) + { + self.pending_events.lock().unwrap().push_back((intercept_ev, None)); + entry.insert(htlc); + } else { + debug_assert!(false); + // Let the HTLC be auto-failed before it expires. 
+ return; + } + }, + hash_map::Entry::Occupied(_) => { + log_error!( + logger, + "Failed to forward incoming HTLC: detected duplicate intercepted payment", + ); + debug_assert!( + false, + "Should never have two HTLCs with the same channel id and htlc id", + ); + // Let the HTLC be auto-failed before it expires. + return; + }, + } + } else { + let mut per_source_pending_forward = [( + htlc.prev_outbound_scid_alias, + htlc.prev_counterparty_node_id, + htlc.prev_funding_outpoint, + htlc.prev_channel_id, + htlc.prev_user_channel_id, + vec![(htlc.forward_info, htlc.prev_htlc_id)], + )]; + self.forward_htlcs(&mut per_source_pending_forward); + } }, _ => return, } @@ -15729,26 +16517,16 @@ where #[cfg(feature = "dnssec")] impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > DNSResolverMessageHandler for ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { fn handle_dnssec_query( &self, _message: DNSSECQuery, _responder: Option, @@ -15797,26 +16575,16 @@ where } impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > NodeIdLookUp for ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: 
MessageRouter, - L::Target: Logger, { fn next_node_id(&self, short_channel_id: u64) -> Option { self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey) @@ -15905,6 +16673,17 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { const SERIALIZATION_VERSION: u8 = 1; const MIN_SERIALIZATION_VERSION: u8 = 1; +// We plan to start writing this version in 0.5. +// +// LDK 0.5+ will reconstruct the set of pending HTLCs from `Channel{Monitor}` data that started +// being written in 0.3, ignoring legacy `ChannelManager` HTLC maps on read and not writing them. +// LDK 0.5+ will automatically fail to read if the pending HTLC set cannot be reconstructed, i.e. +// if we were last written with pending HTLCs on 0.2- or if the new 0.3+ fields are missing. +// +// If 0.3 or 0.4 reads this manager version, it knows that the legacy maps were not written and +// acts accordingly. +const RECONSTRUCT_HTLCS_FROM_CHANS_VERSION: u8 = 2; + impl_writeable_tlv_based!(PhantomRouteHints, { (2, channels, required_vec), (4, phantom_scid, required), @@ -15963,6 +16742,7 @@ impl_writeable_tlv_based!(PendingHTLCInfo, { (8, outgoing_cltv_value, required), (9, incoming_amt_msat, option), (10, skimmed_fee_msat, option), + (11, incoming_accountable, (default_value, false)), }); impl Writeable for HTLCFailureMsg { @@ -16312,26 +17092,16 @@ impl_writeable_tlv_based!(PendingInboundPayment, { }); impl< - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger, > Writeable for ChannelManager -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - 
L::Target: Logger, { #[rustfmt::skip] fn write(&self, writer: &mut W) -> Result<(), io::Error> { @@ -16444,6 +17214,7 @@ where } } + let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); // Since some FundingNegotiation variants are not persisted, any splice in such state must // be failed upon reload. However, as the necessary information for the SpliceFailed event @@ -16541,7 +17312,6 @@ where } let mut pending_intercepted_htlcs = None; - let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); if our_pending_intercepts.len() != 0 { pending_intercepted_htlcs = Some(our_pending_intercepts); } @@ -16637,99 +17407,468 @@ impl Readable for VecDeque<(Event, Option)> { } } -/// Arguments for the creation of a ChannelManager that are not deserialized. -/// -/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation -/// is: -/// 1) Deserialize all stored [`ChannelMonitor`]s. -/// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling: -/// `<(BlockHash, ChannelManager)>::read(reader, args)` -/// This may result in closing some channels if the [`ChannelMonitor`] is newer than the stored -/// [`ChannelManager`] state to ensure no loss of funds. Thus, transactions may be broadcasted. -/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the -/// same way you would handle a [`chain::Filter`] call using -/// [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`]. -/// 4) Disconnect/connect blocks on your [`ChannelMonitor`]s to get them in sync with the chain. -/// 5) Disconnect/connect blocks on the [`ChannelManager`] to get it in sync with the chain. -/// 6) Optionally re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk. 
-/// This is important if you have replayed a nontrivial number of blocks in step (4), allowing -/// you to avoid having to replay the same blocks if you shut down quickly after startup. It is -/// otherwise not required. -/// -/// Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you -/// will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in -/// the next step. -/// -/// If you wish to avoid this for performance reasons, use -/// [`ChainMonitor::load_existing_monitor`]. -/// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a -/// [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`]. -/// -/// Note that the ordering of #4-7 is not of importance, however all four must occur before you -/// call any other methods on the newly-deserialized [`ChannelManager`]. -/// -/// Note that because some channels may be closed during deserialization, it is critical that you -/// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to -/// you. If you deserialize an old ChannelManager (during which force-closure transactions may be -/// broadcast), and then later deserialize a newer version of the same ChannelManager (which will -/// not force-close the same channels but consider them live), you may end up revoking a state for -/// which you've already broadcasted the transaction. -/// -/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor -/// [`ChainMonitor::load_existing_monitor`]: crate::chain::chainmonitor::ChainMonitor::load_existing_monitor -pub struct ChannelManagerReadArgs< +// Raw deserialized data from a ChannelManager, before validation or reconstruction. +// This is an internal DTO used in the two-stage deserialization process. 
+pub(super) struct ChannelManagerData { + chain_hash: ChainHash, + best_block_height: u32, + best_block_hash: BlockHash, + channels: Vec>, + claimable_payments: HashMap, + peer_init_features: Vec<(PublicKey, InitFeatures)>, + pending_events_read: VecDeque<(events::Event, Option)>, + highest_seen_timestamp: u32, + pending_outbound_payments: HashMap, + pending_claiming_payments: HashMap, + received_network_pubkey: Option, + monitor_update_blocked_actions_per_peer: + Vec<(PublicKey, BTreeMap>)>, + fake_scid_rand_bytes: Option<[u8; 32]>, + probing_cookie_secret: Option<[u8; 32]>, + inbound_payment_id_secret: Option<[u8; 32]>, + in_flight_monitor_updates: HashMap<(PublicKey, ChannelId), Vec>, + peer_storage_dir: Vec<(PublicKey, Vec)>, + async_receive_offer_cache: AsyncReceiveOfferCache, + // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of + // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from + // `Channel{Monitor}` data. + forward_htlcs_legacy: HashMap>, + pending_intercepted_htlcs_legacy: HashMap, + decode_update_add_htlcs_legacy: HashMap>, + // The `ChannelManager` version that was written. + version: u8, +} + +/// Arguments for deserializing [`ChannelManagerData`]. 
+struct ChannelManagerDataReadArgs< 'a, - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref + Clone, -> where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + L: Logger, +> { + entropy_source: &'a ES, + node_signer: &'a NS, + signer_provider: &'a SP, + config: UserConfig, + logger: &'a L, +} + +impl<'a, ES: EntropySource, NS: NodeSigner, SP: SignerProvider, L: Logger> + ReadableArgs> for ChannelManagerData { - /// A cryptographically secure source of entropy. - pub entropy_source: ES, + fn read( + reader: &mut R, args: ChannelManagerDataReadArgs<'a, ES, NS, SP, L>, + ) -> Result { + let version = read_ver_prefix!(reader, SERIALIZATION_VERSION); - /// A signer that is able to perform node-scoped cryptographic operations. - pub node_signer: NS, + let chain_hash: ChainHash = Readable::read(reader)?; + let best_block_height: u32 = Readable::read(reader)?; + let best_block_hash: BlockHash = Readable::read(reader)?; - /// The keys provider which will give us relevant keys. Some keys will be loaded during - /// deserialization and [`SignerProvider::derive_channel_signer`] will be used to derive - /// per-Channel signing data. - pub signer_provider: SP, + const MAX_ALLOC_SIZE: usize = 1024 * 64; - /// The fee_estimator for use in the ChannelManager in the future. - /// - /// No calls to the FeeEstimator will be made during deserialization. - pub fee_estimator: F, - /// The chain::Watch for use in the ChannelManager in the future. - /// - /// No calls to the chain::Watch will be made during deserialization. 
It is assumed that - /// you have deserialized ChannelMonitors separately and will add them to your - /// chain::Watch after deserializing this ChannelManager. - pub chain_monitor: M, + let channel_count: u64 = Readable::read(reader)?; + let mut channels = Vec::with_capacity(cmp::min(channel_count as usize, 128)); + for _ in 0..channel_count { + let channel: FundedChannel = FundedChannel::read( + reader, + ( + args.entropy_source, + args.signer_provider, + &provided_channel_type_features(&args.config), + ), + )?; + channels.push(channel); + } + + let forward_htlcs_legacy: HashMap> = + if version < RECONSTRUCT_HTLCS_FROM_CHANS_VERSION { + let forward_htlcs_count: u64 = Readable::read(reader)?; + let mut fwds = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); + for _ in 0..forward_htlcs_count { + let short_channel_id = Readable::read(reader)?; + let pending_forwards_count: u64 = Readable::read(reader)?; + let mut pending_forwards = Vec::with_capacity(cmp::min( + pending_forwards_count as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); + for _ in 0..pending_forwards_count { + pending_forwards.push(Readable::read(reader)?); + } + fwds.insert(short_channel_id, pending_forwards); + } + fwds + } else { + new_hash_map() + }; - /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be - /// used to broadcast the latest local commitment transactions of channels which must be - /// force-closed during deserialization. - pub tx_broadcaster: T, - /// The router which will be used in the ChannelManager in the future for finding routes - /// on-the-fly for trampoline payments. Absent in private nodes that don't support forwarding. - /// - /// No calls to the router will be made during deserialization. 
- pub router: R, + let claimable_htlcs_count: u64 = Readable::read(reader)?; + let mut claimable_htlcs_list = + Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); + for _ in 0..claimable_htlcs_count { + let payment_hash = Readable::read(reader)?; + let previous_hops_len: u64 = Readable::read(reader)?; + let mut previous_hops = Vec::with_capacity(cmp::min( + previous_hops_len as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); + for _ in 0..previous_hops_len { + previous_hops.push(::read(reader)?); + } + claimable_htlcs_list.push((payment_hash, previous_hops)); + } + + let peer_count: u64 = Readable::read(reader)?; + let mut peer_init_features = Vec::with_capacity(cmp::min(peer_count as usize, 128)); + for _ in 0..peer_count { + let peer_pubkey: PublicKey = Readable::read(reader)?; + let latest_features = Readable::read(reader)?; + peer_init_features.push((peer_pubkey, latest_features)); + } + + let event_count: u64 = Readable::read(reader)?; + let mut pending_events_read: VecDeque<(events::Event, Option)> = + VecDeque::with_capacity(cmp::min( + event_count as usize, + MAX_ALLOC_SIZE / mem::size_of::<(events::Event, Option)>(), + )); + for _ in 0..event_count { + match MaybeReadable::read(reader)? { + Some(event) => pending_events_read.push_back((event, None)), + None => continue, + } + } + + let background_event_count: u64 = Readable::read(reader)?; + for _ in 0..background_event_count { + match ::read(reader)? { + 0 => { + // LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here, + // however we really don't (and never did) need them - we regenerate all + // on-startup monitor updates. 
+ let _: OutPoint = Readable::read(reader)?; + let _: ChannelMonitorUpdate = Readable::read(reader)?; + }, + _ => return Err(DecodeError::InvalidValue), + } + } + + let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111 + let highest_seen_timestamp: u32 = Readable::read(reader)?; + + // The last version where a pending inbound payment may have been added was 0.0.116. + let pending_inbound_payment_count: u64 = Readable::read(reader)?; + for _ in 0..pending_inbound_payment_count { + let payment_hash: PaymentHash = Readable::read(reader)?; + let logger = WithContext::from(args.logger, None, None, Some(payment_hash)); + let inbound: PendingInboundPayment = Readable::read(reader)?; + log_warn!( + logger, + "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", + payment_hash, + inbound + ); + } + + let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; + let mut pending_outbound_payments_compat: HashMap = + hash_map_with_capacity(cmp::min( + pending_outbound_payments_count_compat as usize, + MAX_ALLOC_SIZE / 32, + )); + for _ in 0..pending_outbound_payments_count_compat { + let session_priv = Readable::read(reader)?; + let payment = PendingOutboundPayment::Legacy { + session_privs: hash_set_from_iter([session_priv]), + }; + if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() { + return Err(DecodeError::InvalidValue); + }; + } + + let mut pending_intercepted_htlcs_legacy: Option> = + None; + let mut decode_update_add_htlcs_legacy: Option>> = + None; + // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. 
+ let mut pending_outbound_payments_no_retry: Option>> = + None; + let mut pending_outbound_payments = None; + let mut received_network_pubkey: Option = None; + let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; + let mut probing_cookie_secret: Option<[u8; 32]> = None; + let mut claimable_htlc_purposes = None; + let mut claimable_htlc_onion_fields = None; + let mut pending_claiming_payments = None; + let mut monitor_update_blocked_actions_per_peer: Option>)>> = + None; + let mut events_override = None; + let mut legacy_in_flight_monitor_updates: Option< + HashMap<(PublicKey, OutPoint), Vec>, + > = None; + // We use this one over the legacy since they represent the same data, just with a different + // key. We still need to read the legacy one as it's an even TLV. + let mut in_flight_monitor_updates: Option< + HashMap<(PublicKey, ChannelId), Vec>, + > = None; + let mut inbound_payment_id_secret = None; + let mut peer_storage_dir: Option)>> = None; + let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); + read_tlv_fields!(reader, { + (1, pending_outbound_payments_no_retry, option), + (2, pending_intercepted_htlcs_legacy, option), + (3, pending_outbound_payments, option), + (4, pending_claiming_payments, option), + (5, received_network_pubkey, option), + (6, monitor_update_blocked_actions_per_peer, option), + (7, fake_scid_rand_bytes, option), + (8, events_override, option), + (9, claimable_htlc_purposes, optional_vec), + (10, legacy_in_flight_monitor_updates, option), + (11, probing_cookie_secret, option), + (13, claimable_htlc_onion_fields, optional_vec), + (14, decode_update_add_htlcs_legacy, option), + (15, inbound_payment_id_secret, option), + (17, in_flight_monitor_updates, option), + (19, peer_storage_dir, optional_vec), + (21, async_receive_offer_cache, (default_value, async_receive_offer_cache)), + }); + + // Merge legacy pending_outbound_payments fields into a single HashMap. 
+ // Priority: pending_outbound_payments (TLV 3) > pending_outbound_payments_no_retry (TLV 1) + // > pending_outbound_payments_compat (non-TLV legacy) + let pending_outbound_payments = pending_outbound_payments + .or_else(|| { + pending_outbound_payments_no_retry.map(|no_retry| { + no_retry + .into_iter() + .map(|(id, session_privs)| { + (id, PendingOutboundPayment::Legacy { session_privs }) + }) + .collect() + }) + }) + .unwrap_or(pending_outbound_payments_compat); + + // Merge legacy in-flight monitor updates (keyed by OutPoint) into the new format (keyed by + // ChannelId). + if let Some(legacy_in_flight_upds) = legacy_in_flight_monitor_updates { + // We should never serialize an empty map. + if legacy_in_flight_upds.is_empty() { + return Err(DecodeError::InvalidValue); + } + match &in_flight_monitor_updates { + None => { + // Convert legacy format (OutPoint) to new format (ChannelId). + // All channels with legacy in flight monitor updates are v1 channels. + in_flight_monitor_updates = Some( + legacy_in_flight_upds + .into_iter() + .map(|((counterparty_node_id, funding_txo), updates)| { + let channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); + ((counterparty_node_id, channel_id), updates) + }) + .collect(), + ); + }, + Some(upds) if upds.is_empty() => { + // Both TLVs present but new one is empty - invalid. + return Err(DecodeError::InvalidValue); + }, + Some(_) => {}, // New format takes precedence, nothing to do. + } + } + + // Resolve events_override: if present, it replaces pending_events. + let pending_events_read = events_override.unwrap_or(pending_events_read); + + // Combine claimable_htlcs_list with their purposes and onion fields. For very old data + // (pre-0.0.107) that lacks purposes, reconstruct them from legacy hop data. 
+ let expanded_inbound_key = args.node_signer.get_expanded_key(); + + let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len()); + if let Some(purposes) = claimable_htlc_purposes { + if purposes.len() != claimable_htlcs_list.len() { + return Err(DecodeError::InvalidValue); + } + if let Some(onion_fields) = claimable_htlc_onion_fields { + if onion_fields.len() != claimable_htlcs_list.len() { + return Err(DecodeError::InvalidValue); + } + for (purpose, (onion, (payment_hash, htlcs))) in purposes + .into_iter() + .zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter())) + { + let claimable = ClaimablePayment { purpose, htlcs, onion_fields: onion }; + let existing_payment = claimable_payments.insert(payment_hash, claimable); + if existing_payment.is_some() { + return Err(DecodeError::InvalidValue); + } + } + } else { + for (purpose, (payment_hash, htlcs)) in + purposes.into_iter().zip(claimable_htlcs_list.into_iter()) + { + let claimable = ClaimablePayment { purpose, htlcs, onion_fields: None }; + let existing_payment = claimable_payments.insert(payment_hash, claimable); + if existing_payment.is_some() { + return Err(DecodeError::InvalidValue); + } + } + } + } else { + // LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do + // include a `_legacy_hop_data` in the `OnionPayload`. 
+ for (payment_hash, htlcs) in claimable_htlcs_list.into_iter() { + if htlcs.is_empty() { + return Err(DecodeError::InvalidValue); + } + let purpose = match &htlcs[0].onion_payload { + OnionPayload::Invoice { _legacy_hop_data } => { + if let Some(hop_data) = _legacy_hop_data { + events::PaymentPurpose::Bolt11InvoicePayment { + payment_preimage: match inbound_payment::verify( + payment_hash, + &hop_data, + 0, + &expanded_inbound_key, + &args.logger, + ) { + Ok((payment_preimage, _)) => payment_preimage, + Err(()) => { + log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash); + return Err(DecodeError::InvalidValue); + }, + }, + payment_secret: hop_data.payment_secret, + } + } else { + return Err(DecodeError::InvalidValue); + } + }, + OnionPayload::Spontaneous(payment_preimage) => { + events::PaymentPurpose::SpontaneousPayment(*payment_preimage) + }, + }; + claimable_payments + .insert(payment_hash, ClaimablePayment { purpose, htlcs, onion_fields: None }); + } + } + + Ok(ChannelManagerData { + chain_hash, + best_block_height, + best_block_hash, + channels, + forward_htlcs_legacy, + claimable_payments, + peer_init_features, + pending_events_read, + highest_seen_timestamp, + pending_intercepted_htlcs_legacy: pending_intercepted_htlcs_legacy + .unwrap_or_else(new_hash_map), + pending_outbound_payments, + pending_claiming_payments: pending_claiming_payments.unwrap_or_else(new_hash_map), + received_network_pubkey, + monitor_update_blocked_actions_per_peer: monitor_update_blocked_actions_per_peer + .unwrap_or_else(Vec::new), + fake_scid_rand_bytes, + probing_cookie_secret, + decode_update_add_htlcs_legacy: decode_update_add_htlcs_legacy + .unwrap_or_else(new_hash_map), + inbound_payment_id_secret, + in_flight_monitor_updates: in_flight_monitor_updates.unwrap_or_default(), + peer_storage_dir: peer_storage_dir.unwrap_or_default(), + 
async_receive_offer_cache, + version, + }) + } +} + +/// Arguments for the creation of a ChannelManager that are not deserialized. +/// +/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation +/// is: +/// 1) Deserialize all stored [`ChannelMonitor`]s. +/// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling: +/// `<(BlockHash, ChannelManager)>::read(reader, args)` +/// This may result in closing some channels if the [`ChannelMonitor`] is newer than the stored +/// [`ChannelManager`] state to ensure no loss of funds. Thus, transactions may be broadcasted. +/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the +/// same way you would handle a [`chain::Filter`] call using +/// [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`]. +/// 4) Disconnect/connect blocks on your [`ChannelMonitor`]s to get them in sync with the chain. +/// 5) Disconnect/connect blocks on the [`ChannelManager`] to get it in sync with the chain. +/// 6) Optionally re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk. +/// This is important if you have replayed a nontrivial number of blocks in step (4), allowing +/// you to avoid having to replay the same blocks if you shut down quickly after startup. It is +/// otherwise not required. +/// +/// Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you +/// will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in +/// the next step. +/// +/// If you wish to avoid this for performance reasons, use +/// [`ChainMonitor::load_existing_monitor`]. +/// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a +/// [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`]. 
+/// +/// Note that the ordering of #4-7 is not of importance, however all four must occur before you +/// call any other methods on the newly-deserialized [`ChannelManager`]. +/// +/// Note that because some channels may be closed during deserialization, it is critical that you +/// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to +/// you. If you deserialize an old ChannelManager (during which force-closure transactions may be +/// broadcast), and then later deserialize a newer version of the same ChannelManager (which will +/// not force-close the same channels but consider them live), you may end up revoking a state for +/// which you've already broadcasted the transaction. +/// +/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor +/// [`ChainMonitor::load_existing_monitor`]: crate::chain::chainmonitor::ChainMonitor::load_existing_monitor +pub struct ChannelManagerReadArgs< + 'a, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger + Clone, +> { + /// A cryptographically secure source of entropy. + pub entropy_source: ES, + + /// A signer that is able to perform node-scoped cryptographic operations. + pub node_signer: NS, + + /// The keys provider which will give us relevant keys. Some keys will be loaded during + /// deserialization and [`SignerProvider::derive_channel_signer`] will be used to derive + /// per-Channel signing data. + pub signer_provider: SP, + + /// The fee_estimator for use in the ChannelManager in the future. + /// + /// No calls to the FeeEstimator will be made during deserialization. + pub fee_estimator: F, + /// The chain::Watch for use in the ChannelManager in the future. + /// + /// No calls to the chain::Watch will be made during deserialization. 
It is assumed that + /// you have deserialized ChannelMonitors separately and will add them to your + /// chain::Watch after deserializing this ChannelManager. + pub chain_monitor: M, + + /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be + /// used to broadcast the latest local commitment transactions of channels which must be + /// force-closed during deserialization. + pub tx_broadcaster: T, + /// The router which will be used in the ChannelManager in the future for finding routes + /// on-the-fly for trampoline payments. Absent in private nodes that don't support forwarding. + /// + /// No calls to the router will be made during deserialization. + pub router: R, /// The [`MessageRouter`] used for constructing [`BlindedMessagePath`]s for [`Offer`]s, /// [`Refund`]s, and any reply paths. /// @@ -16753,32 +17892,30 @@ pub struct ChannelManagerReadArgs< /// this struct. /// /// This is not exported to bindings users because we have no HashMap bindings - pub channel_monitors: - HashMap::EcdsaSigner>>, + pub channel_monitors: HashMap>, + + /// Whether the `ChannelManager` should attempt to reconstruct its set of pending HTLCs from + /// `Channel{Monitor}` data rather than its own persisted maps, which is planned to become + /// the default behavior in upcoming versions. + /// + /// If `None`, whether we reconstruct or use the legacy maps will be decided randomly during + /// `ChannelManager::from_channel_manager_data`. 
+ #[cfg(test)] + pub reconstruct_manager_from_monitors: Option, } impl< 'a, - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref + Clone, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger + Clone, > ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L> -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { /// Simple utility function to create a ChannelManagerReadArgs which creates the monitor /// HashMap for you. This is primarily useful for C bindings where it is not practical to @@ -16786,8 +17923,7 @@ where pub fn new( entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L, - config: UserConfig, - mut channel_monitors: Vec<&'a ChannelMonitor<::EcdsaSigner>>, + config: UserConfig, mut channel_monitors: Vec<&'a ChannelMonitor>, ) -> Self { Self { entropy_source, @@ -16803,35 +17939,61 @@ where channel_monitors: hash_map_from_iter( channel_monitors.drain(..).map(|monitor| (monitor.channel_id(), monitor)), ), + #[cfg(test)] + reconstruct_manager_from_monitors: None, } } } +// If the HTLC corresponding to `prev_hop_data` is present in `decode_update_add_htlcs`, remove it +// from the map as it is already being stored and processed elsewhere. 
+fn dedup_decode_update_add_htlcs( + decode_update_add_htlcs: &mut HashMap>, + prev_hop_data: &HTLCPreviousHopData, removal_reason: &'static str, logger: &L, +) { + match decode_update_add_htlcs.entry(prev_hop_data.prev_outbound_scid_alias) { + hash_map::Entry::Occupied(mut update_add_htlcs) => { + update_add_htlcs.get_mut().retain(|update_add| { + let matches = update_add.htlc_id == prev_hop_data.htlc_id; + if matches { + let logger = WithContext::from( + logger, + prev_hop_data.counterparty_node_id, + Some(update_add.channel_id), + Some(update_add.payment_hash), + ); + log_info!( + logger, + "Removing pending to-decode HTLC with id {}: {}", + update_add.htlc_id, + removal_reason + ); + } + !matches + }); + if update_add_htlcs.get().is_empty() { + update_add_htlcs.remove(); + } + }, + _ => {}, + } +} + // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the // SipmleArcChannelManager type: impl< 'a, - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref + Clone, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger + Clone, > ReadableArgs> for (BlockHash, Arc>) -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { fn read( reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, @@ -16844,36 +18006,86 @@ where impl< 'a, - M: Deref, - T: Deref, - ES: Deref, - NS: Deref, - SP: Deref, - F: Deref, - R: Deref, - MR: Deref, - L: Deref + Clone, + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger + Clone, 
> ReadableArgs> for (BlockHash, ChannelManager) -where - M::Target: chain::Watch<::EcdsaSigner>, - T::Target: BroadcasterInterface, - ES::Target: EntropySource, - NS::Target: NodeSigner, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - R::Target: Router, - MR::Target: MessageRouter, - L::Target: Logger, { fn read( - reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, + reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, ) -> Result { - let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); + // Stage 1: Pure deserialization into DTO + let data: ChannelManagerData = ChannelManagerData::read( + reader, + ChannelManagerDataReadArgs { + entropy_source: &args.entropy_source, + node_signer: &args.node_signer, + signer_provider: &args.signer_provider, + config: args.config.clone(), + logger: &args.logger, + }, + )?; - let chain_hash: ChainHash = Readable::read(reader)?; - let best_block_height: u32 = Readable::read(reader)?; - let best_block_hash: BlockHash = Readable::read(reader)?; + // Stage 2: Validation and reconstruction + ChannelManager::from_channel_manager_data(data, args) + } +} + +impl< + M: chain::Watch, + T: BroadcasterInterface, + ES: EntropySource, + NS: NodeSigner, + SP: SignerProvider, + F: FeeEstimator, + R: Router, + MR: MessageRouter, + L: Logger + Clone, + > ChannelManager +{ + /// Constructs a `ChannelManager` from deserialized data and runtime dependencies. + /// + /// This is the second stage of deserialization, taking the raw [`ChannelManagerData`] and combining it with the + /// provided [`ChannelManagerReadArgs`] to produce a fully functional `ChannelManager`. + /// + /// This method performs validation, reconciliation with [`ChannelMonitor`]s, and reconstruction of internal state. + /// It may close channels if monitors are ahead of the serialized state, and will replay any pending + /// [`ChannelMonitorUpdate`]s. 
+ pub(super) fn from_channel_manager_data( + data: ChannelManagerData, + mut args: ChannelManagerReadArgs<'_, M, T, ES, NS, SP, F, R, MR, L>, + ) -> Result<(BlockHash, Self), DecodeError> { + let ChannelManagerData { + chain_hash, + best_block_height, + best_block_hash, + channels, + mut forward_htlcs_legacy, + claimable_payments, + peer_init_features, + mut pending_events_read, + highest_seen_timestamp, + mut pending_intercepted_htlcs_legacy, + pending_outbound_payments, + pending_claiming_payments, + received_network_pubkey, + monitor_update_blocked_actions_per_peer, + mut fake_scid_rand_bytes, + mut probing_cookie_secret, + mut decode_update_add_htlcs_legacy, + mut inbound_payment_id_secret, + mut in_flight_monitor_updates, + peer_storage_dir, + async_receive_offer_cache, + version: _version, + } = data; let empty_peer_state = || PeerState { channel_by_id: new_hash_map(), @@ -16888,25 +18100,18 @@ where is_connected: false, }; + const MAX_ALLOC_SIZE: usize = 1024 * 64; let mut failed_htlcs = Vec::new(); - let channel_count: u64 = Readable::read(reader)?; - let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); + let channel_count = channels.len(); + let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count, 128)); let mut per_peer_state = hash_map_with_capacity(cmp::min( - channel_count as usize, + channel_count, MAX_ALLOC_SIZE / mem::size_of::<(PublicKey, Mutex>)>(), )); - let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); + let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); - for _ in 0..channel_count { - let mut channel: FundedChannel = FundedChannel::read( - reader, - ( - &args.entropy_source, - &args.signer_provider, - &provided_channel_type_features(&args.config), - ), - )?; + for mut channel in channels { let logger = 
WithChannelContext::from(&args.logger, &channel.context, None); let channel_id = channel.context.channel_id(); channel_id_set.insert(channel_id); @@ -17132,10 +18337,6 @@ where let logger = WithChannelMonitor::from(&args.logger, monitor, None); let channel_id = monitor.channel_id(); - log_info!( - logger, - "Queueing monitor update to ensure missing channel is force closed", - ); let monitor_update = ChannelMonitorUpdate { update_id: monitor.get_latest_update_id().saturating_add(1), updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { @@ -17143,6 +18344,11 @@ where }], channel_id: Some(monitor.channel_id()), }; + log_info!( + logger, + "Queueing monitor update {} to ensure missing channel is force closed", + monitor_update.update_id + ); let funding_txo = monitor.get_funding_txo(); let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, @@ -17154,155 +18360,15 @@ where } } - const MAX_ALLOC_SIZE: usize = 1024 * 64; - let forward_htlcs_count: u64 = Readable::read(reader)?; - let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); - for _ in 0..forward_htlcs_count { - let short_channel_id = Readable::read(reader)?; - let pending_forwards_count: u64 = Readable::read(reader)?; - let mut pending_forwards = Vec::with_capacity(cmp::min( - pending_forwards_count as usize, - MAX_ALLOC_SIZE / mem::size_of::(), - )); - for _ in 0..pending_forwards_count { - pending_forwards.push(Readable::read(reader)?); - } - forward_htlcs.insert(short_channel_id, pending_forwards); - } - - let claimable_htlcs_count: u64 = Readable::read(reader)?; - let mut claimable_htlcs_list = - Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); - for _ in 0..claimable_htlcs_count { - let payment_hash = Readable::read(reader)?; - let previous_hops_len: u64 = Readable::read(reader)?; - let mut previous_hops = Vec::with_capacity(cmp::min( - previous_hops_len as usize, - MAX_ALLOC_SIZE / mem::size_of::(), - )); - for 
_ in 0..previous_hops_len { - previous_hops.push(::read(reader)?); - } - claimable_htlcs_list.push((payment_hash, previous_hops)); - } - - let peer_count: u64 = Readable::read(reader)?; - for _ in 0..peer_count { - let peer_pubkey: PublicKey = Readable::read(reader)?; - let latest_features = Readable::read(reader)?; + // Apply peer features from deserialized data + for (peer_pubkey, latest_features) in peer_init_features { if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) { peer_state.get_mut().unwrap().latest_features = latest_features; } } - let event_count: u64 = Readable::read(reader)?; - let mut pending_events_read: VecDeque<(events::Event, Option)> = - VecDeque::with_capacity(cmp::min( - event_count as usize, - MAX_ALLOC_SIZE / mem::size_of::<(events::Event, Option)>(), - )); - for _ in 0..event_count { - match MaybeReadable::read(reader)? { - Some(event) => pending_events_read.push_back((event, None)), - None => continue, - } - } - - let background_event_count: u64 = Readable::read(reader)?; - for _ in 0..background_event_count { - match ::read(reader)? { - 0 => { - // LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here, - // however we really don't (and never did) need them - we regenerate all - // on-startup monitor updates. - let _: OutPoint = Readable::read(reader)?; - let _: ChannelMonitorUpdate = Readable::read(reader)?; - }, - _ => return Err(DecodeError::InvalidValue), - } - } - - let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111 - let highest_seen_timestamp: u32 = Readable::read(reader)?; - - // The last version where a pending inbound payment may have been added was 0.0.116. 
- let pending_inbound_payment_count: u64 = Readable::read(reader)?; - for _ in 0..pending_inbound_payment_count { - let payment_hash: PaymentHash = Readable::read(reader)?; - let logger = WithContext::from(&args.logger, None, None, Some(payment_hash)); - let inbound: PendingInboundPayment = Readable::read(reader)?; - log_warn!( - logger, - "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", - payment_hash, - inbound - ); - } - - let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; - let mut pending_outbound_payments_compat: HashMap = - hash_map_with_capacity(cmp::min( - pending_outbound_payments_count_compat as usize, - MAX_ALLOC_SIZE / 32, - )); - for _ in 0..pending_outbound_payments_count_compat { - let session_priv = Readable::read(reader)?; - let payment = PendingOutboundPayment::Legacy { - session_privs: hash_set_from_iter([session_priv]), - }; - if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() { - return Err(DecodeError::InvalidValue); - }; - } - - // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. - let mut pending_outbound_payments_no_retry: Option>> = - None; - let mut pending_outbound_payments = None; - let mut pending_intercepted_htlcs: Option> = - Some(new_hash_map()); - let mut received_network_pubkey: Option = None; - let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; - let mut probing_cookie_secret: Option<[u8; 32]> = None; - let mut claimable_htlc_purposes = None; - let mut claimable_htlc_onion_fields = None; - let mut pending_claiming_payments = Some(new_hash_map()); - let mut monitor_update_blocked_actions_per_peer: Option>)>> = - Some(Vec::new()); - let mut events_override = None; - let mut legacy_in_flight_monitor_updates: Option< - HashMap<(PublicKey, OutPoint), Vec>, - > = None; - // We use this one over the legacy since they represent the same data, just with a different - // key. 
We still need to read the legacy one as it's an even TLV. - let mut in_flight_monitor_updates: Option< - HashMap<(PublicKey, ChannelId), Vec>, - > = None; - let mut decode_update_add_htlcs: Option>> = None; - let mut inbound_payment_id_secret = None; - let mut peer_storage_dir: Option)>> = None; - let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new(); - read_tlv_fields!(reader, { - (1, pending_outbound_payments_no_retry, option), - (2, pending_intercepted_htlcs, option), - (3, pending_outbound_payments, option), - (4, pending_claiming_payments, option), - (5, received_network_pubkey, option), - (6, monitor_update_blocked_actions_per_peer, option), - (7, fake_scid_rand_bytes, option), - (8, events_override, option), - (9, claimable_htlc_purposes, optional_vec), - (10, legacy_in_flight_monitor_updates, option), - (11, probing_cookie_secret, option), - (13, claimable_htlc_onion_fields, optional_vec), - (14, decode_update_add_htlcs, option), - (15, inbound_payment_id_secret, option), - (17, in_flight_monitor_updates, option), - (19, peer_storage_dir, optional_vec), - (21, async_receive_offer_cache, (default_value, async_receive_offer_cache)), - }); - let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map()); - let peer_storage_dir: Vec<(PublicKey, Vec)> = peer_storage_dir.unwrap_or_else(Vec::new); + // Post-deserialization processing + let mut decode_update_add_htlcs: HashMap> = new_hash_map(); if fake_scid_rand_bytes.is_none() { fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes()); } @@ -17315,25 +18381,11 @@ where inbound_payment_id_secret = Some(args.entropy_source.get_secure_random_bytes()); } - if let Some(events) = events_override { - pending_events_read = events; - } - if !channel_closures.is_empty() { pending_events_read.append(&mut channel_closures); } - if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() { - pending_outbound_payments 
= Some(pending_outbound_payments_compat); - } else if pending_outbound_payments.is_none() { - let mut outbounds = new_hash_map(); - for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() { - outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs }); - } - pending_outbound_payments = Some(outbounds); - } - let pending_outbounds = - OutboundPayments::new(pending_outbound_payments.unwrap(), args.logger.clone()); + let pending_outbounds = OutboundPayments::new(pending_outbound_payments); for (peer_pubkey, peer_storage) in peer_storage_dir { if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) { @@ -17341,28 +18393,6 @@ where } } - // Handle transitioning from the legacy TLV to the new one on upgrades. - if let Some(legacy_in_flight_upds) = legacy_in_flight_monitor_updates { - // We should never serialize an empty map. - if legacy_in_flight_upds.is_empty() { - return Err(DecodeError::InvalidValue); - } - if in_flight_monitor_updates.is_none() { - let in_flight_upds = - in_flight_monitor_updates.get_or_insert_with(|| new_hash_map()); - for ((counterparty_node_id, funding_txo), updates) in legacy_in_flight_upds { - // All channels with legacy in flight monitor updates are v1 channels. - let channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); - in_flight_upds.insert((counterparty_node_id, channel_id), updates); - } - } else { - // We should never serialize an empty map. - if in_flight_monitor_updates.as_ref().unwrap().is_empty() { - return Err(DecodeError::InvalidValue); - } - } - } - // We have to replay (or skip, if they were completed after we wrote the `ChannelManager`) // each `ChannelMonitorUpdate` in `in_flight_monitor_updates`. 
After doing so, we have to // check that each channel we have isn't newer than the latest `ChannelMonitorUpdate`(s) we @@ -17379,39 +18409,58 @@ where ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr ) => { { + // When all in-flight updates have completed after we were last serialized, we + // need to remove them. However, we can't guarantee that the next serialization + // will have happened after processing the + // `BackgroundEvent::MonitorUpdatesComplete`, so removing them now could lead to the + // channel never being resumed as the event would not be regenerated after another + // reload. At the same time, we don't want to resume the channel now because there + // may be post-update actions to handle. Therefore, we're forced to keep tracking + // the completed in-flight updates (but only when they have all completed) until we + // are processing the `BackgroundEvent::MonitorUpdatesComplete`. let mut max_in_flight_update_id = 0; - let starting_len = $chan_in_flight_upds.len(); - $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id()); - if $chan_in_flight_upds.len() < starting_len { + let num_updates_completed = $chan_in_flight_upds + .iter() + .filter(|update| { + max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id); + update.update_id <= $monitor.get_latest_update_id() + }) + .count(); + if num_updates_completed > 0 { log_debug!( $logger, "{} ChannelMonitorUpdates completed after ChannelManager was last serialized", - starting_len - $chan_in_flight_upds.len() + num_updates_completed, ); } + let all_updates_completed = num_updates_completed == $chan_in_flight_upds.len(); + let funding_txo = $monitor.get_funding_txo(); - for update in $chan_in_flight_upds.iter() { - log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", - update.update_id, $channel_info_log, &$monitor.channel_id()); - max_in_flight_update_id 
= cmp::max(max_in_flight_update_id, update.update_id); - pending_background_events.push( - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: $counterparty_node_id, - funding_txo: funding_txo, - channel_id: $monitor.channel_id(), - update: update.clone(), - }); - } - if $chan_in_flight_upds.is_empty() { - // We had some updates to apply, but it turns out they had completed before we - // were serialized, we just weren't notified of that. Thus, we may have to run - // the completion actions for any monitor updates, but otherwise are done. + if all_updates_completed { + log_debug!($logger, "All monitor updates completed since the ChannelManager was last serialized"); pending_background_events.push( BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id: $counterparty_node_id, channel_id: $monitor.channel_id(), + highest_update_id_completed: max_in_flight_update_id, }); } else { + $chan_in_flight_upds.retain(|update| { + let replay = update.update_id > $monitor.get_latest_update_id(); + if replay { + log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", + update.update_id, $channel_info_log, &$monitor.channel_id()); + pending_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: $counterparty_node_id, + funding_txo: funding_txo, + channel_id: $monitor.channel_id(), + update: update.clone(), + } + ); + } + replay + }); $peer_state.closed_channel_monitor_update_ids.entry($monitor.channel_id()) .and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v)) .or_insert(max_in_flight_update_id); @@ -17438,22 +18487,20 @@ where .get(chan_id) .expect("We already checked for monitor presence when loading channels"); let mut max_in_flight_update_id = monitor.get_latest_update_id(); - if let Some(in_flight_upds) = &mut in_flight_monitor_updates { - if let Some(mut chan_in_flight_upds) = - in_flight_upds.remove(&(*counterparty_id, *chan_id)) - { - max_in_flight_update_id = 
cmp::max( - max_in_flight_update_id, - handle_in_flight_updates!( - *counterparty_id, - chan_in_flight_upds, - monitor, - peer_state, - logger, - "" - ), - ); - } + if let Some(mut chan_in_flight_upds) = + in_flight_monitor_updates.remove(&(*counterparty_id, *chan_id)) + { + max_in_flight_update_id = cmp::max( + max_in_flight_update_id, + handle_in_flight_updates!( + *counterparty_id, + chan_in_flight_upds, + monitor, + peer_state, + logger, + "" + ), + ); } if funded_chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id @@ -17474,55 +18521,49 @@ where return Err(DecodeError::DangerousValue); } } else { - // We shouldn't have persisted (or read) any unfunded channel types so none should have been - // created in this `channel_by_id` map. - debug_assert!(false); - return Err(DecodeError::InvalidValue); - } - } - } - - if let Some(in_flight_upds) = in_flight_monitor_updates { - for ((counterparty_id, channel_id), mut chan_in_flight_updates) in in_flight_upds { - let logger = - WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None); - if let Some(monitor) = args.channel_monitors.get(&channel_id) { - // Now that we've removed all the in-flight monitor updates for channels that are - // still open, we need to replay any monitor updates that are for closed channels, - // creating the neccessary peer_state entries as we go. - let peer_state_mutex = per_peer_state - .entry(counterparty_id) - .or_insert_with(|| Mutex::new(empty_peer_state())); - let mut peer_state = peer_state_mutex.lock().unwrap(); - handle_in_flight_updates!( - counterparty_id, - chan_in_flight_updates, - monitor, - peer_state, - logger, - "closed " - ); - } else { - log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! 
This indicates a potentially-critical violation of the chain::Watch API!"); - log_error!( - logger, - " The ChannelMonitor for channel {} is missing.", - channel_id - ); - log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); - log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); - log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); - log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); - log_error!( - logger, - " Pending in-flight updates are: {:?}", - chan_in_flight_updates - ); + // We shouldn't have persisted (or read) any unfunded channel types so none should have been + // created in this `channel_by_id` map. + debug_assert!(false); return Err(DecodeError::InvalidValue); } } } + for ((counterparty_id, channel_id), mut chan_in_flight_updates) in in_flight_monitor_updates + { + let logger = + WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None); + if let Some(monitor) = args.channel_monitors.get(&channel_id) { + // Now that we've removed all the in-flight monitor updates for channels that are + // still open, we need to replay any monitor updates that are for closed channels, + // creating the neccessary peer_state entries as we go. + let peer_state_mutex = per_peer_state + .entry(counterparty_id) + .or_insert_with(|| Mutex::new(empty_peer_state())); + let mut peer_state = peer_state_mutex.lock().unwrap(); + handle_in_flight_updates!( + counterparty_id, + chan_in_flight_updates, + monitor, + peer_state, + logger, + "closed " + ); + } else { + log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! 
This indicates a potentially-critical violation of the chain::Watch API!"); + log_error!(logger, " The ChannelMonitor for channel {} is missing.", channel_id); + log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); + log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); + log_error!( + logger, + " Without the latest ChannelMonitor we cannot continue without risking funds." + ); + log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); + log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates); + return Err(DecodeError::InvalidValue); + } + } + // The newly generated `close_background_events` have to be added after any updates that // were already in-flight on shutdown, so we append them here. pending_background_events.reserve(close_background_events.len()); @@ -17589,11 +18630,69 @@ where pending_background_events.push(new_event); } + // In LDK 0.2 and below, the `ChannelManager` would track all payments and HTLCs internally and + // persist that state, relying on it being up-to-date on restart. Newer versions are moving + // towards reducing this reliance on regular persistence of the `ChannelManager`, and instead + // reconstruct HTLC/payment state based on `Channel{Monitor}` data if + // `reconstruct_manager_from_monitors` is set below. Currently we set in tests randomly to + // ensure the legacy codepaths also have test coverage. 
+ #[cfg(not(test))] + let reconstruct_manager_from_monitors = _version >= RECONSTRUCT_HTLCS_FROM_CHANS_VERSION; + #[cfg(test)] + let reconstruct_manager_from_monitors = + args.reconstruct_manager_from_monitors.unwrap_or_else(|| { + use core::hash::{BuildHasher, Hasher}; + + match std::env::var("LDK_TEST_REBUILD_MGR_FROM_MONITORS") { + Ok(val) => match val.as_str() { + "1" => true, + "0" => false, + _ => panic!( + "LDK_TEST_REBUILD_MGR_FROM_MONITORS must be 0 or 1, got: {}", + val + ), + }, + Err(_) => { + let rand_val = + std::collections::hash_map::RandomState::new().build_hasher().finish(); + if rand_val % 2 == 0 { + true + } else { + false + } + }, + } + }); + // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we // should ensure we try them again on the inbound edge. We put them here and do so after we // have a fully-constructed `ChannelManager` at the end. let mut pending_claims_to_replay = Vec::new(); + // If we find an inbound HTLC that claims to already be forwarded to the outbound edge, we + // store an identifier for it here and verify that it is either (a) present in the outbound + // edge or (b) removed from the outbound edge via claim. If it's in neither of these states, we + // infer that it was removed from the outbound edge via fail, and fail it backwards to ensure + // that it is handled. 
+ let mut already_forwarded_htlcs: HashMap< + (ChannelId, PaymentHash), + Vec<(HTLCPreviousHopData, OutboundHop)>, + > = new_hash_map(); + let prune_forwarded_htlc = |already_forwarded_htlcs: &mut HashMap< + (ChannelId, PaymentHash), + Vec<(HTLCPreviousHopData, OutboundHop)>, + >, + prev_hop: &HTLCPreviousHopData, + payment_hash: &PaymentHash| { + if let hash_map::Entry::Occupied(mut entry) = + already_forwarded_htlcs.entry((prev_hop.channel_id, *payment_hash)) + { + entry.get_mut().retain(|(htlc, _)| prev_hop.htlc_id != htlc.htlc_id); + if entry.get().is_empty() { + entry.remove(); + } + } + }; { // If we're tracking pending payments, ensure we haven't lost any by looking at the // ChannelMonitor data for any channels for which we do not have authorative state @@ -17613,6 +18712,34 @@ where let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); + if reconstruct_manager_from_monitors { + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + // Legacy HTLCs are from pre-LDK 0.3 and cannot be reconstructed. + if funded_chan.has_legacy_inbound_htlcs() { + return Err(DecodeError::InvalidValue); + } + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel` as part of removing the requirement to regularly persist the + // `ChannelManager`. 
+ let scid_alias = funded_chan.context.outbound_scid_alias(); + for update_add_htlc in funded_chan.inbound_htlcs_pending_decode() { + decode_update_add_htlcs + .entry(scid_alias) + .or_insert_with(Vec::new) + .push(update_add_htlc); + } + for (payment_hash, prev_hop, next_hop) in + funded_chan.inbound_forwarded_htlcs() + { + already_forwarded_htlcs + .entry((prev_hop.channel_id, payment_hash)) + .or_insert_with(Vec::new) + .push((prev_hop, next_hop)); + } + } + } + } } if is_channel_closed { @@ -17639,18 +18766,41 @@ where session_priv_bytes, &path, best_block_height, + &logger, ); } } } } for (channel_id, monitor) in args.channel_monitors.iter() { - let mut is_channel_closed = true; + let (mut is_channel_closed, mut user_channel_id_opt) = (true, None); let counterparty_node_id = monitor.get_counterparty_node_id(); if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; - is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + is_channel_closed = false; + user_channel_id_opt = Some(chan.context().get_user_id()); + + if reconstruct_manager_from_monitors { + if let Some(funded_chan) = chan.as_funded() { + for (payment_hash, prev_hop) in funded_chan.outbound_htlc_forwards() + { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop, + "HTLC already forwarded to the outbound edge", + &args.logger, + ); + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop, + &payment_hash, + ); + } + } + } + } } if is_channel_closed { @@ -17669,49 +18819,61 @@ where info.prev_funding_outpoint == prev_hop_data.outpoint && info.prev_htlc_id == prev_hop_data.htlc_id }; + + // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed + // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from + // those added 
HTLCs if they were already forwarded to the outbound edge. Otherwise, + // we'll double-forward. + if reconstruct_manager_from_monitors { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + &prev_hop_data, + "HTLC already forwarded to the outbound edge", + &&logger, + ); + prune_forwarded_htlc( + &mut already_forwarded_htlcs, + &prev_hop_data, + &htlc.payment_hash, + ); + } + // The ChannelMonitor is now responsible for this HTLC's // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs` or - // `pending_intercepted_htlcs`, we were apparently not persisted after - // the monitor was when forwarding the payment. - decode_update_add_htlcs.retain( - |src_outb_alias, update_add_htlcs| { - update_add_htlcs.retain(|update_add_htlc| { - let matches = *src_outb_alias - == prev_hop_data.prev_outbound_scid_alias - && update_add_htlc.htlc_id == prev_hop_data.htlc_id; - if matches { - log_info!(logger, "Removing pending to-decode HTLC as it was forwarded to the closed channel"); - } - !matches - }); - !update_add_htlcs.is_empty() - }, + // still have an entry for this HTLC in `forward_htlcs_legacy`, + // `pending_intercepted_htlcs_legacy`, or + // `decode_update_add_htlcs_legacy`, we were apparently not persisted + // after the monitor was when forwarding the payment. 
+ dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs_legacy, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &&logger, ); - forward_htlcs.retain(|_, forwards| { - forwards.retain(|forward| { - if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - false - } else { true } + forward_htlcs_legacy.retain(|_, forwards| { + forwards.retain(|forward| { + if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", + &htlc.payment_hash, &monitor.channel_id()); + false } else { true } - }); - !forwards.is_empty() - }); - pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - pending_events_read.retain(|(event, _)| { - if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { - intercepted_id != ev_id - } else { true } - }); - false } else { true } }); + !forwards.is_empty() + }); + pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", + &htlc.payment_hash, &monitor.channel_id()); + pending_events_read.retain(|(event, _)| { + if let Event::HTLCIntercepted { intercept_id: ev_id, .. 
} = event { + intercepted_id != ev_id + } else { true } + }); + false + } else { true } + }); }, HTLCSource::OutboundRoute { payment_id, @@ -17729,8 +18891,8 @@ where htlc_id, }; let mut compl_action = Some( - EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) - ); + EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) + ); pending_outbounds.claim_htlc( payment_id, preimage, @@ -17740,6 +18902,7 @@ where true, &mut compl_action, &pending_events, + &logger, ); // If the completion action was not consumed, then there was no // payment to claim, and we need to tell the `ChannelMonitor` @@ -17754,29 +18917,38 @@ where }; if !have_action && compl_action.is_some() { let mut peer_state = per_peer_state - .get(&counterparty_node_id) - .map(|state| state.lock().unwrap()) - .expect("Channels originating a preimage must have peer state"); + .get(&counterparty_node_id) + .map(|state| state.lock().unwrap()) + .expect( + "Channels originating a preimage must have peer state", + ); let update_id = peer_state - .closed_channel_monitor_update_ids - .get_mut(channel_id) - .expect("Channels originating a preimage must have a monitor"); + .closed_channel_monitor_update_ids + .get_mut(channel_id) + .expect( + "Channels originating a preimage must have a monitor", + ); // Note that for channels closed pre-0.1, the latest // update_id is `u64::MAX`. 
*update_id = update_id.saturating_add(1); - pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: monitor.get_counterparty_node_id(), - funding_txo: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - update: ChannelMonitorUpdate { - update_id: *update_id, - channel_id: Some(monitor.channel_id()), - updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { - htlc: htlc_id, - }], + pending_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: monitor + .get_counterparty_node_id(), + funding_txo: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + update: ChannelMonitorUpdate { + update_id: *update_id, + channel_id: Some(monitor.channel_id()), + updates: vec![ + ChannelMonitorUpdateStep::ReleasePaymentComplete { + htlc: htlc_id, + }, + ], + }, }, - }); + ); } pending_events_read = pending_events.into_inner().unwrap(); } @@ -17784,8 +18956,10 @@ where } } for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { + let logger = + WithChannelMonitor::from(&args.logger, monitor, Some(payment_hash)); log_info!( - args.logger, + logger, "Failing HTLC with payment hash {} as it was resolved on-chain.", payment_hash ); @@ -17854,6 +19028,11 @@ where // inbound edge of the payment's monitor has already claimed // the HTLC) we skip trying to replay the claim. let htlc_payment_hash: PaymentHash = payment_preimage.into(); + let logger = WithChannelMonitor::from( + &args.logger, + monitor, + Some(htlc_payment_hash), + ); let balance_could_incl_htlc = |bal| match bal { &Balance::ClaimableOnChannelClose { .. } => { // The channel is still open, assume we can still @@ -17876,7 +19055,7 @@ where // edge monitor but the channel is closed (and thus we'll // immediately panic if we call claim_funds_from_hop). 
if short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).is_none() { - log_error!(args.logger, + log_error!(logger, "We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124.\ All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1", htlc_payment_hash, @@ -17891,7 +19070,7 @@ where // of panicking at runtime. The user ideally should have read // the release notes and we wouldn't be here, but we go ahead // and let things run in the hope that it'll all just work out. - log_error!(args.logger, + log_error!(logger, "We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably.\ As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\ All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\ @@ -17903,7 +19082,7 @@ where Some((htlc_source, payment_preimage, htlc.amount_msat, is_channel_closed, monitor.get_counterparty_node_id(), - monitor.get_funding_txo(), monitor.channel_id())) + monitor.get_funding_txo(), monitor.channel_id(), user_channel_id_opt)) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage @@ -17922,77 +19101,6 @@ where } } - let expanded_inbound_key = args.node_signer.get_expanded_key(); - - let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len()); - if let Some(purposes) = claimable_htlc_purposes { - if purposes.len() != claimable_htlcs_list.len() { - return Err(DecodeError::InvalidValue); - } - if let Some(onion_fields) = claimable_htlc_onion_fields { - if onion_fields.len() != claimable_htlcs_list.len() { - return Err(DecodeError::InvalidValue); - } - for (purpose, (onion, (payment_hash, htlcs))) in purposes - .into_iter() - 
.zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter())) - { - let claimable = ClaimablePayment { purpose, htlcs, onion_fields: onion }; - let existing_payment = claimable_payments.insert(payment_hash, claimable); - if existing_payment.is_some() { - return Err(DecodeError::InvalidValue); - } - } - } else { - for (purpose, (payment_hash, htlcs)) in - purposes.into_iter().zip(claimable_htlcs_list.into_iter()) - { - let claimable = ClaimablePayment { purpose, htlcs, onion_fields: None }; - let existing_payment = claimable_payments.insert(payment_hash, claimable); - if existing_payment.is_some() { - return Err(DecodeError::InvalidValue); - } - } - } - } else { - // LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do - // include a `_legacy_hop_data` in the `OnionPayload`. - for (payment_hash, htlcs) in claimable_htlcs_list.drain(..) { - if htlcs.is_empty() { - return Err(DecodeError::InvalidValue); - } - let purpose = match &htlcs[0].onion_payload { - OnionPayload::Invoice { _legacy_hop_data } => { - if let Some(hop_data) = _legacy_hop_data { - events::PaymentPurpose::Bolt11InvoicePayment { - payment_preimage: match inbound_payment::verify( - payment_hash, - &hop_data, - 0, - &expanded_inbound_key, - &args.logger, - ) { - Ok((payment_preimage, _)) => payment_preimage, - Err(()) => { - log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash); - return Err(DecodeError::InvalidValue); - }, - }, - payment_secret: hop_data.payment_secret, - } - } else { - return Err(DecodeError::InvalidValue); - } - }, - OnionPayload::Spontaneous(payment_preimage) => { - events::PaymentPurpose::SpontaneousPayment(*payment_preimage) - }, - }; - claimable_payments - .insert(payment_hash, ClaimablePayment { purpose, htlcs, onion_fields: None }); - } - } - // Similar to the above cases for forwarded payments, if we have any pending 
inbound HTLCs // which haven't yet been claimed, we may be missing counterparty_node_id info and would // panic if we attempted to claim them at this point. @@ -18023,6 +19131,8 @@ where let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes()); + let expanded_inbound_key = args.node_signer.get_expanded_key(); + let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) { Ok(key) => key, Err(()) => return Err(DecodeError::InvalidValue), @@ -18093,9 +19203,7 @@ where let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator); - for (node_id, monitor_update_blocked_actions) in - monitor_update_blocked_actions_per_peer.unwrap() - { + for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer { if let Some(peer_state) = per_peer_state.get(&node_id) { for (channel_id, actions) in monitor_update_blocked_actions.iter() { let logger = @@ -18172,6 +19280,84 @@ where } } + if reconstruct_manager_from_monitors { + // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. + // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. + for (src, payment_hash, _, _, _, _) in failed_htlcs.iter() { + if let HTLCSource::PreviousHopData(prev_hop_data) = src { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was failed backwards during manager read", + &args.logger, + ); + prune_forwarded_htlc(&mut already_forwarded_htlcs, prev_hop_data, payment_hash); + } + } + + // See above comment on `failed_htlcs`. 
+ for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, + ); + } + } + } + + let (decode_update_add_htlcs, forward_htlcs, pending_intercepted_htlcs) = + if reconstruct_manager_from_monitors { + (decode_update_add_htlcs, new_hash_map(), new_hash_map()) + } else { + ( + decode_update_add_htlcs_legacy, + forward_htlcs_legacy, + pending_intercepted_htlcs_legacy, + ) + }; + + // If we have a pending intercept HTLC present but no corresponding event, add that now rather + // than relying on the user having persisted the event prior to shutdown. + for (id, fwd) in pending_intercepted_htlcs.iter() { + if !pending_events_read.iter().any( + |(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), + ) { + match create_htlc_intercepted_event(*id, fwd) { + Ok(ev) => pending_events_read.push_back((ev, None)), + Err(()) => debug_assert!(false), + } + } + } + + // We may need to regenerate [`Event::FundingTransactionReadyForSigning`] for channels that + // still need their holder `tx_signatures`. 
+ for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() { + let peer_state = peer_state_mutex.lock().unwrap(); + for (channel_id, chan) in peer_state.channel_by_id.iter() { + if let Some(signing_session) = + chan.context().interactive_tx_signing_session.as_ref() + { + if signing_session.holder_tx_signatures().is_none() + && signing_session.has_local_contribution() + { + let unsigned_transaction = signing_session.unsigned_tx().tx().clone(); + pending_events_read.push_back(( + Event::FundingTransactionReadyForSigning { + unsigned_transaction, + counterparty_node_id: *counterparty_node_id, + channel_id: *channel_id, + user_channel_id: chan.context().get_user_id(), + }, + None, + )); + } + } + } + } + let best_block = BestBlock::new(best_block_hash, best_block_height); let flow = OffersMessageFlow::new( chain_hash, @@ -18198,13 +19384,13 @@ where inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()), + pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs), forward_htlcs: Mutex::new(forward_htlcs), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, - pending_claiming_payments: pending_claiming_payments.unwrap(), + pending_claiming_payments, }), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), short_to_chan_info: FairRwLock::new(short_to_chan_info), @@ -18251,9 +19437,46 @@ where }; let mut processed_claims: HashSet> = new_hash_set(); - for (_, monitor) in args.channel_monitors.iter() { + for (channel_id, monitor) in args.channel_monitors.iter() { for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() { + // If we have unresolved inbound committed HTLCs that were already forwarded to the + // outbound edge and removed via claim, we need to make sure to claim them backwards via + // adding them to 
`pending_claims_to_replay`. + if let Some(forwarded_htlcs) = + already_forwarded_htlcs.remove(&(*channel_id, payment_hash)) + { + for (prev_hop, next_hop) in forwarded_htlcs { + let new_pending_claim = + !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _, _)| { + matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == prev_hop.htlc_id && hop.channel_id == prev_hop.channel_id) + }); + if new_pending_claim { + let is_downstream_closed = channel_manager + .per_peer_state + .read() + .unwrap() + .get(&next_hop.node_id) + .map_or(true, |peer_state_mtx| { + !peer_state_mtx + .lock() + .unwrap() + .channel_by_id + .contains_key(&next_hop.channel_id) + }); + pending_claims_to_replay.push(( + HTLCSource::PreviousHopData(prev_hop), + payment_preimage, + next_hop.amt_msat, + is_downstream_closed, + next_hop.node_id, + next_hop.funding_txo, + next_hop.channel_id, + Some(next_hop.user_channel_id), + )); + } + } + } if !payment_claims.is_empty() { for payment_claim in payment_claims { if processed_claims.contains(&payment_claim.mpp_parts) { @@ -18495,6 +19718,21 @@ where channel_manager .fail_htlc_backwards_internal(&source, &hash, &reason, receiver, ev_action); } + for ((_, hash), htlcs) in already_forwarded_htlcs.into_iter() { + for (htlc, _) in htlcs { + let channel_id = htlc.channel_id; + let node_id = htlc.counterparty_node_id; + let source = HTLCSource::PreviousHopData(htlc); + let failure_reason = LocalHTLCFailureReason::TemporaryChannelFailure; + let failure_data = channel_manager.get_htlc_inbound_temp_fail_data(failure_reason); + let reason = HTLCFailReason::reason(failure_reason, failure_data); + let receiver = HTLCHandlingFailureType::Forward { node_id, channel_id }; + // The event completion action is only relevant for HTLCs that originate from our node, not + // forwarded HTLCs. 
+ channel_manager + .fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None); + } + } for ( source, @@ -18504,6 +19742,7 @@ where downstream_node_id, downstream_funding, downstream_channel_id, + downstream_user_channel_id, ) in pending_claims_to_replay { // We use `downstream_closed` in place of `from_onchain` here just as a guess - we @@ -18519,7 +19758,7 @@ where downstream_node_id, downstream_funding, downstream_channel_id, - None, + downstream_user_channel_id, None, None, ); @@ -18528,7 +19767,7 @@ where //TODO: Broadcast channel update for closed channels, but only after we've made a //connection or two. - Ok((best_block_hash.clone(), channel_manager)) + Ok((best_block_hash, channel_manager)) } } @@ -18536,12 +19775,11 @@ where mod tests { use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; use crate::ln::channelmanager::{ - create_recv_pending_htlc_info, inbound_payment, HTLCForwardInfo, InterceptId, PaymentId, + create_recv_pending_htlc_info, inbound_payment, InterceptId, PaymentId, RecipientOnionFields, }; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; - use crate::ln::onion_utils::AttributionData; use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::Retry; use crate::ln::types::ChannelId; @@ -18551,7 +19789,6 @@ mod tests { use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::util::config::{ChannelConfig, ChannelConfigUpdate}; use crate::util::errors::APIError; - use crate::util::ser::Writeable; use crate::util::test_utils; use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; @@ -18662,7 +19899,7 @@ mod tests { RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap(); nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash, 
RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None); @@ -18672,19 +19909,19 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18698,7 +19935,7 @@ mod tests { // Send the second half of the original MPP payment. 
nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash, RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None); @@ -18709,34 +19946,34 @@ mod tests { // lightning messages manually. nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], our_payment_hash, 200_000); - check_added_monitors!(nodes[1], 2); + check_added_monitors(&nodes[1], 2); let mut bs_1st_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_1st_updates.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_1st_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_first_raa, as_first_cs) = get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut bs_2nd_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_first_cs); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), 
bs_2nd_updates.update_fulfill_htlcs.remove(0)); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_2nd_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); let as_second_updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Note that successful MPP payments will generate a single PaymentSent event upon the first // path's success and a PaymentPathSuccessful event for each path's success. 
@@ -18790,13 +20027,13 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0), route_params.clone(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward @@ -18804,7 +20041,7 @@ mod tests { let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18828,7 +20065,7 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -18839,19 +20076,19 @@ mod tests { let payment_secret = PaymentSecret([43; 32]); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); 
- check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18871,7 +20108,7 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1, route.route_params.clone().unwrap(), Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -18888,19 +20125,19 @@ mod tests { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_2, route_params, Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = events.drain(..).next().unwrap(); let payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); 
do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Receive { payment_hash }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -18946,7 +20183,7 @@ mod tests { RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap(); nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); assert_eq!(updates.update_add_htlcs.len(), 1); @@ -19014,7 +20251,7 @@ mod tests { let message = "Channel force-closed".to_owned(); nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 100000); @@ -19078,7 +20315,7 @@ mod tests { .node .force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1_000_000); @@ -19164,13 
+20401,13 @@ mod tests { #[rustfmt::skip] fn check_unkown_peer_error(res_err: Result, expected_public_key: PublicKey) { - let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key); + let expected_message = format!("No such peer for the passed counterparty_node_id {}", expected_public_key); check_api_error_message(expected_message, res_err) } #[rustfmt::skip] fn check_channel_unavailable_error(res_err: Result, expected_channel_id: ChannelId, peer_node_id: PublicKey) { - let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id); + let expected_message = format!("No such channel_id {} for the passed counterparty_node_id {}", expected_channel_id, peer_node_id); check_api_error_message(expected_message, res_err) } @@ -19269,7 +20506,7 @@ mod tests { let mut funding_tx = None; for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER { - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg); + handle_and_accept_open_channel(&nodes[1], nodes[0].node.get_our_node_id(), &open_channel_msg); let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); if idx == 0 { @@ -19280,13 +20517,13 @@ mod tests { let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); 
expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); } open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); @@ -19343,11 +20580,22 @@ mod tests { // open channels. assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1); for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 { - nodes[1].node.handle_open_channel(peer_pks[i], &open_channel_msg); + handle_and_accept_open_channel(&nodes[1], peer_pks[i], &open_channel_msg); get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]); open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager); } nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg); + let events = nodes[1].node.get_and_clear_pending_events(); + match events[0] { + Event::OpenChannelRequest { temporary_channel_id, .. } => { + assert!(nodes[1] + .node + .accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23, None,) + .is_err()) + }, + _ => panic!("Unexpected event"), + } + assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id, open_channel_msg.common_fields.temporary_channel_id); @@ -19365,7 +20613,7 @@ mod tests { // Further, because the first channel was funded, we can open another channel with // last_random_pk. - nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg); + handle_and_accept_open_channel(&nodes[1], last_random_pk, &open_channel_msg); get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk); } @@ -19398,7 +20646,7 @@ mod tests { if let Err(crate::ln::channelmanager::InboundHTLCErr { reason, .. 
}) = create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]), sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat), - current_height) + false, current_height) { assert_eq!(reason, LocalHTLCFailureReason::FinalIncorrectHTLCAmount); } else { panic!(); } @@ -19421,7 +20669,7 @@ mod tests { let current_height: u32 = node[0].node.best_block.read().unwrap().height; assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]), sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat), - current_height).is_ok()); + false, current_height).is_ok()); } #[test] @@ -19446,7 +20694,7 @@ mod tests { custom_tlvs: Vec::new(), }, shared_secret: SharedSecret::from_bytes([0; 32]), - }, [0; 32], PaymentHash([0; 32]), 100, TEST_FINAL_CLTV + 1, None, true, None, current_height); + }, [0; 32], PaymentHash([0; 32]), 100, TEST_FINAL_CLTV + 1, None, true, None, false, current_height); // Should not return an error as this condition: // https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334 @@ -19548,7 +20796,7 @@ mod tests { fn test_trigger_lnd_force_close() { let chanmon_cfg = create_chanmon_cfgs(2); let node_cfg = create_node_cfgs(2, &chanmon_cfg); - let user_config = test_default_channel_config(); + let user_config = test_legacy_channel_config(); let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfg, &node_chanmgr); let message = "Channel force-closed".to_owned(); @@ -19609,66 +20857,6 @@ mod tests { check_spends!(txn[0], funding_tx); } } - - #[test] - #[rustfmt::skip] - fn test_malformed_forward_htlcs_ser() { - // Ensure that `HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly. 
- let chanmon_cfg = create_chanmon_cfgs(1); - let node_cfg = create_node_cfgs(1, &chanmon_cfg); - let persister; - let chain_monitor; - let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]); - let deserialized_chanmgr; - let mut nodes = create_network(1, &node_cfg, &chanmgrs); - - let dummy_failed_htlc = |htlc_id| { - HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42], attribution_data: Some(AttributionData::new()) } } - }; - let dummy_malformed_htlc = |htlc_id| { - HTLCForwardInfo::FailMalformedHTLC { - htlc_id, - failure_code: LocalHTLCFailureReason::InvalidOnionPayload.failure_code(), - sha256_of_onion: [0; 32], - } - }; - - let dummy_htlcs_1: Vec = (1..10).map(|htlc_id| { - if htlc_id % 2 == 0 { - dummy_failed_htlc(htlc_id) - } else { - dummy_malformed_htlc(htlc_id) - } - }).collect(); - - let dummy_htlcs_2: Vec = (1..10).map(|htlc_id| { - if htlc_id % 2 == 1 { - dummy_failed_htlc(htlc_id) - } else { - dummy_malformed_htlc(htlc_id) - } - }).collect(); - - - let (scid_1, scid_2) = (42, 43); - let mut forward_htlcs = new_hash_map(); - forward_htlcs.insert(scid_1, dummy_htlcs_1.clone()); - forward_htlcs.insert(scid_2, dummy_htlcs_2.clone()); - - let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap(); - *chanmgr_fwd_htlcs = forward_htlcs.clone(); - core::mem::drop(chanmgr_fwd_htlcs); - - reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr); - - let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap(); - for scid in [scid_1, scid_2].iter() { - let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap(); - assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs); - } - assert!(deserialized_fwd_htlcs.is_empty()); - core::mem::drop(deserialized_fwd_htlcs); - } } #[cfg(ldk_bench)] @@ -19786,6 +20974,16 @@ pub mod bench { }, false).unwrap(); node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None, 
None).unwrap(); node_b.handle_open_channel(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id())); + let events = node_b.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => { + node_b + .accept_inbound_channel(temporary_channel_id, counterparty_node_id, 42, None) + .unwrap(); + }, + _ => panic!("Unexpected event"), + }; node_a.handle_accept_channel(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id())); let tx; diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index ff33d7508b5..d3902b26201 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -10,6 +10,7 @@ //! A bunch of useful utilities for building networks of nodes and exchanging messages between //! nodes for functional tests. 
+use crate::blinded_path::payment::DummyTlvs; use crate::chain::channelmonitor::ChannelMonitor; use crate::chain::transaction::OutPoint; use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; @@ -26,14 +27,15 @@ use crate::ln::chan_utils::{ }; use crate::ln::channelmanager::{ AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, - RAACommitmentOrder, RecipientOnionFields, MIN_CLTV_EXPIRY_DELTA, + RAACommitmentOrder, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::funding::FundingTxInput; -use crate::ln::msgs; +use crate::ln::msgs::{self, OpenChannel}; use crate::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, RoutingMessageHandler, }; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::outbound_payment::Retry; use crate::ln::peer_handler::IgnoringMessageHandler; use crate::ln::types::ChannelId; @@ -731,15 +733,15 @@ pub trait NodeHolder { fn node( &self, ) -> &ChannelManager< - ::M, - ::T, - ::ES, - ::NS, + ::Watch, + ::Broadcaster, + ::EntropySource, + ::NodeSigner, ::SP, - ::F, - ::R, - ::MR, - ::L, + ::FeeEstimator, + ::Router, + ::MessageRouter, + ::Logger, >; fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor<'_>>; } @@ -748,15 +750,15 @@ impl NodeHolder for &H { fn node( &self, ) -> &ChannelManager< - ::M, - ::T, - ::ES, - ::NS, + ::Watch, + ::Broadcaster, + ::EntropySource, + ::NodeSigner, ::SP, - ::F, - ::R, - ::MR, - ::L, + ::FeeEstimator, + ::Router, + ::MessageRouter, + ::Logger, > { (*self).node() } @@ -860,6 +862,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { txn_broadcasted: Mutex::new( self.tx_broadcaster.txn_broadcasted.lock().unwrap().clone(), ), + txn_types: Mutex::new(self.tx_broadcaster.txn_types.lock().unwrap().clone()), blocks: Arc::new(Mutex::new(self.tx_broadcaster.blocks.lock().unwrap().clone())), }; @@ -908,6 +911,8 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { tx_broadcaster: 
&broadcaster, logger: &self.logger, channel_monitors, + #[cfg(test)] + reconstruct_manager_from_monitors: None, }, ) .unwrap(); @@ -971,7 +976,7 @@ pub fn get_revoke_commit_msgs>( assert_eq!(node_id, recipient); (*msg).clone() }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event: {events:?}"), }, match events[1] { MessageSendEvent::UpdateHTLCs { ref node_id, ref channel_id, ref updates } => { @@ -984,7 +989,7 @@ pub fn get_revoke_commit_msgs>( assert!(updates.commitment_signed.iter().all(|cs| cs.channel_id == *channel_id)); updates.commitment_signed.clone() }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event: {events:?}"), }, ) } @@ -1085,7 +1090,7 @@ macro_rules! get_event { let ev = events.pop().unwrap(); match ev { $event_type { .. } => ev, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event {ev:?}"), } }}; } @@ -1124,7 +1129,9 @@ pub fn remove_first_msg_event_to_node( MessageSendEvent::UpdateHTLCs { node_id, .. } => node_id == msg_node_id, MessageSendEvent::SendRevokeAndACK { node_id, .. } => node_id == msg_node_id, MessageSendEvent::SendClosingSigned { node_id, .. } => node_id == msg_node_id, + #[cfg(simple_close)] MessageSendEvent::SendClosingComplete { node_id, .. } => node_id == msg_node_id, + #[cfg(simple_close)] MessageSendEvent::SendClosingSig { node_id, .. } => node_id == msg_node_id, MessageSendEvent::SendShutdown { node_id, .. } => node_id == msg_node_id, MessageSendEvent::SendChannelReestablish { node_id, .. } => node_id == msg_node_id, @@ -1265,14 +1272,11 @@ pub fn check_added_monitors>(node: & } } -/// Check whether N channel monitor(s) have been added. -/// -/// Don't use this, use the identically-named function instead. -#[macro_export] -macro_rules! 
check_added_monitors { - ($node: expr, $count: expr) => { - $crate::ln::functional_test_utils::check_added_monitors(&$node, $count); - }; +pub fn get_latest_mon_update_id<'a, 'b, 'c>( + node: &Node<'a, 'b, 'c>, channel_id: ChannelId, +) -> (u64, u64) { + let monitor_id_state = node.chain_monitor.latest_monitor_update_id.lock().unwrap(); + monitor_id_state.get(&channel_id).unwrap().clone() } fn claimed_htlc_matches_path<'a, 'b, 'c>( @@ -1307,7 +1311,7 @@ fn check_claimed_htlcs_match_route<'a, 'b, 'c>( pub fn _reload_node<'a, 'b, 'c>( node: &'a Node<'a, 'b, 'c>, config: UserConfig, chanman_encoded: &[u8], - monitors_encoded: &[&[u8]], + monitors_encoded: &[&[u8]], _reconstruct_manager_from_monitors: Option, ) -> TestChannelManager<'b, 'c> { let mut monitors_read = Vec::with_capacity(monitors_encoded.len()); for encoded in monitors_encoded { @@ -1341,6 +1345,8 @@ pub fn _reload_node<'a, 'b, 'c>( tx_broadcaster: node.tx_broadcaster, logger: node.logger, channel_monitors, + #[cfg(test)] + reconstruct_manager_from_monitors: _reconstruct_manager_from_monitors, }, ) .unwrap() @@ -1353,15 +1359,17 @@ pub fn _reload_node<'a, 'b, 'c>( node.chain_monitor.load_existing_monitor(channel_id, monitor), Ok(ChannelMonitorUpdateStatus::Completed), ); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); } node_deserialized } #[macro_export] -macro_rules! reload_node { - ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { +macro_rules! _reload_node_inner { + ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: + ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr + ) => { let chanman_encoded = $chanman_encoded; $persister = $crate::util::test_utils::TestPersister::new(); @@ -1375,21 +1383,63 @@ macro_rules! 
reload_node { ); $node.chain_monitor = &$new_chain_monitor; - $new_channelmanager = - _reload_node(&$node, $new_config, &chanman_encoded, $monitors_encoded); + $new_channelmanager = $crate::ln::functional_test_utils::_reload_node( + &$node, + $new_config, + &chanman_encoded, + $monitors_encoded, + $reconstruct_pending_htlcs, + ); $node.node = &$new_channelmanager; $node.onion_messenger.set_offers_handler(&$new_channelmanager); $node.onion_messenger.set_async_payments_handler(&$new_channelmanager); }; +} + +#[macro_export] +macro_rules! reload_node { + // Reload the node using the node's current config ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { - reload_node!( + let config = $node.node.get_current_config(); + $crate::_reload_node_inner!( + $node, + config, + $chanman_encoded, + $monitors_encoded, + $persister, + $new_chain_monitor, + $new_channelmanager, + None + ); + }; + // Reload the node with the new provided config + ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { + $crate::_reload_node_inner!( $node, - test_default_channel_config(), + $new_config, $chanman_encoded, $monitors_encoded, $persister, $new_chain_monitor, - $new_channelmanager + $new_channelmanager, + None + ); + }; + // Reload the node and have the `ChannelManager` use new codepaths that reconstruct its set of + // pending HTLCs from `Channel{Monitor}` data. 
+ ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: + ident, $new_chain_monitor: ident, $new_channelmanager: ident, $reconstruct_pending_htlcs: expr + ) => { + let config = $node.node.get_current_config(); + $crate::_reload_node_inner!( + $node, + config, + $chanman_encoded, + $monitors_encoded, + $persister, + $new_chain_monitor, + $new_channelmanager, + $reconstruct_pending_htlcs ); }; } @@ -1509,7 +1559,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( .node .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) .is_ok()); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); let funding_created_msg = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b_id); @@ -1544,7 +1594,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( assert_eq!(node_a.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!(node_a.tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); - node_a.tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + node_a.tx_broadcaster.clear(); // Ensure that funding_transaction_generated is idempotent. assert!(node_a @@ -1552,12 +1602,11 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) .is_err()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); tx } -// Receiver must have been initialized with manually_accept_inbound_channels set to true. pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option, @@ -1605,7 +1654,6 @@ pub fn exchange_open_accept_zero_conf_chan<'a, 'b, 'c, 'd>( accept_channel.common_fields.temporary_channel_id } -// Receiver must have been initialized with manually_accept_inbound_channels set to true. 
pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option, channel_value_sat: u64, push_msat: u64, @@ -1634,7 +1682,7 @@ pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( get_event_msg!(initiator, MessageSendEvent::SendFundingCreated, receiver_node_id); receiver.node.handle_funding_created(initiator_node_id, &funding_created); - check_added_monitors!(receiver, 1); + check_added_monitors(&receiver, 1); let bs_signed_locked = receiver.node.get_and_clear_pending_msg_events(); assert_eq!(bs_signed_locked.len(), 2); let as_channel_ready; @@ -1644,13 +1692,11 @@ pub fn open_zero_conf_channel_with_value<'a, 'b, 'c, 'd>( initiator.node.handle_funding_signed(receiver_node_id, &msg); expect_channel_pending_event(&initiator, &receiver_node_id); expect_channel_pending_event(&receiver, &initiator_node_id); - check_added_monitors!(initiator, 1); + check_added_monitors(&initiator, 1); assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - assert_eq!( - initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0], - tx - ); + assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); + initiator.tx_broadcaster.clear(); as_channel_ready = get_event_msg!(initiator, MessageSendEvent::SendChannelReady, receiver_node_id); @@ -1703,18 +1749,8 @@ pub fn exchange_open_accept_chan<'a, 'b, 'c>( .user_channel_id, 42 ); - node_b.node.handle_open_channel(node_a_id, &open_channel_msg); - if node_b.node.get_current_config().manually_accept_inbound_channels { - let events = node_b.node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match &events[0] { - Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. 
} => node_b - .node - .accept_inbound_channel(temporary_channel_id, counterparty_node_id, 42, None) - .unwrap(), - _ => panic!("Unexpected event"), - }; - } + handle_and_accept_open_channel(&node_b, node_a_id, &open_channel_msg); + let accept_channel_msg = get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a_id); assert_eq!(accept_channel_msg.common_fields.temporary_channel_id, create_chan_id); node_a.node.handle_accept_channel(node_b_id, &accept_channel_msg); @@ -1838,11 +1874,11 @@ pub fn create_channel_manual_funding<'a, 'b, 'c: 'd, 'd>( funding_tx.clone(), ) .unwrap(); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); let funding_created = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b_id); node_b.node.handle_funding_created(node_a_id, &funding_created); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); let channel_id_b = expect_channel_pending_event(node_b, &node_a_id); if zero_conf { @@ -1995,7 +2031,8 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( .create_channel(node_b_id, channel_value, push_msat, 42, None, Some(no_announce_cfg)) .unwrap(); let open_channel = get_event_msg!(nodes[a], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[b].node.handle_open_channel(node_a_id, &open_channel); + handle_and_accept_open_channel(&nodes[b], node_a_id, &open_channel); + let accept_channel = get_event_msg!(nodes[b], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[a].node.handle_accept_channel(node_b_id, &accept_channel); @@ -2008,7 +2045,7 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( let as_funding_created = get_event_msg!(nodes[a], MessageSendEvent::SendFundingCreated, node_b_id); nodes[b].node.handle_funding_created(node_a_id, &as_funding_created); - check_added_monitors!(nodes[b], 1); + check_added_monitors(&nodes[b], 1); let cs_funding_signed = get_event_msg!(nodes[b], MessageSendEvent::SendFundingSigned, node_a_id); 
@@ -2016,11 +2053,11 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( nodes[a].node.handle_funding_signed(node_b_id, &cs_funding_signed); expect_channel_pending_event(&nodes[a], &node_b_id); - check_added_monitors!(nodes[a], 1); + check_added_monitors(&nodes[a], 1); assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); - nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[a].tx_broadcaster.clear(); let conf_height = core::cmp::max(nodes[a].best_block_info().1 + 1, nodes[b].best_block_info().1 + 1); @@ -2063,6 +2100,20 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( (as_channel_ready, tx) } +pub fn handle_and_accept_open_channel(node: &Node, counterparty_id: PublicKey, msg: &OpenChannel) { + node.node.handle_open_channel(counterparty_id, &msg); + let events = node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. 
} => { + node.node + .accept_inbound_channel(temporary_channel_id, counterparty_node_id, 42, None) + .unwrap(); + }, + _ => panic!("Unexpected event"), + }; +} + pub fn update_nodes_with_chan_announce<'a, 'b, 'c, 'd>( nodes: &'a Vec>, a: usize, b: usize, ann: &msgs::ChannelAnnouncement, upd_1: &msgs::ChannelUpdate, upd_2: &msgs::ChannelUpdate, @@ -2621,29 +2672,34 @@ pub fn commitment_signed_dance_through_cp_raa( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, includes_claim: bool, ) -> Option { - let (extra_msg_option, bs_revoke_and_ack) = + let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) = do_main_commitment_signed_dance(node_a, node_b, fail_backwards); + assert!(node_b_holding_cell_htlcs.is_empty()); node_a.node.handle_revoke_and_ack(node_b.node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors(node_a, if includes_claim { 0 } else { 1 }); extra_msg_option } /// Does the main logic in the commitment_signed dance. After the first `commitment_signed` has -/// been delivered, this method picks up and delivers the response `revoke_and_ack` and -/// `commitment_signed`, returning the recipient's `revoke_and_ack` and any extra message it may -/// have included. 
+/// been delivered, delivers the response `revoke_and_ack` and `commitment_signed`, and returns: +/// - The recipient's `revoke_and_ack` +/// - The recipient's extra message (if any) after handling the commitment_signed +/// - Any messages released from the initiator's holding cell after handling the `revoke_and_ack` +/// (e.g., a second HTLC on the same channel) pub fn do_main_commitment_signed_dance( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, -) -> (Option, msgs::RevokeAndACK) { +) -> (Option, msgs::RevokeAndACK, Vec) { let node_a_id = node_a.node.get_our_node_id(); let node_b_id = node_b.node.get_our_node_id(); let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs(node_a, &node_b_id); - check_added_monitors!(node_b, 0); + check_added_monitors(&node_b, 0); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); - assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_b, 1); + // Handling the RAA may release HTLCs from node_b's holding cell (e.g., if multiple HTLCs + // were sent over the same channel and the second was queued behind the first). 
+ let node_b_holding_cell_htlcs = node_b.node.get_and_clear_pending_msg_events(); + check_added_monitors(&node_b, 1); node_b.node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); let (bs_revoke_and_ack, extra_msg_option) = { let mut events = node_b.node.get_and_clear_pending_msg_events(); @@ -2660,14 +2716,18 @@ pub fn do_main_commitment_signed_dance( events.get(0).map(|e| e.clone()), ) }; - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); if fail_backwards { assert!(node_a.node.get_and_clear_pending_events().is_empty()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); } - (extra_msg_option, bs_revoke_and_ack) + (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) } +/// Runs the commitment_signed dance by delivering the commitment_signed and handling the +/// responding `revoke_and_ack` and `commitment_signed`. +/// +/// Returns the recipient's `revoke_and_ack`. pub fn commitment_signed_dance_return_raa( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, commitment_signed: &Vec, fail_backwards: bool, @@ -2678,9 +2738,10 @@ pub fn commitment_signed_dance_return_raa( .node .handle_commitment_signed_batch_test(node_b.node.get_our_node_id(), commitment_signed); check_added_monitors(&node_a, 1); - let (extra_msg_option, bs_revoke_and_ack) = + let (extra_msg_option, bs_revoke_and_ack, node_b_holding_cell_htlcs) = do_main_commitment_signed_dance(&node_a, &node_b, fail_backwards); assert!(extra_msg_option.is_none()); + assert!(node_b_holding_cell_htlcs.is_empty()); bs_revoke_and_ack } @@ -2695,10 +2756,10 @@ pub fn do_commitment_signed_dance( ) { let node_b_id = node_b.node.get_our_node_id(); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); node_a.node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); // If this 
commitment signed dance was due to a claim, don't check for an RAA monitor update. let channel_id = commitment_signed[0].channel_id; @@ -2722,7 +2783,7 @@ pub fn do_commitment_signed_dance( channel_id, }], ); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); let node_a_per_peer_state = node_a.node.per_peer_state.read().unwrap(); let mut number_of_msg_events = 0; @@ -2908,7 +2969,7 @@ pub fn check_payment_claimable( _ => {}, } }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event {event:?}"), } } @@ -2916,7 +2977,7 @@ pub fn check_payment_claimable( #[cfg(any(test, ldk_bench, feature = "_test_utils"))] macro_rules! expect_payment_claimable { ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => { - expect_payment_claimable!( + $crate::expect_payment_claimable!( $node, $expected_payment_hash, $expected_payment_secret, @@ -3224,12 +3285,13 @@ pub fn expect_probe_successful_events( } pub struct PaymentFailedConditions<'a> { - pub(crate) expected_htlc_error_data: Option<(LocalHTLCFailureReason, &'a [u8])>, - pub(crate) expected_blamed_scid: Option, - pub(crate) expected_blamed_chan_closed: Option, - pub(crate) expected_mpp_parts_remain: bool, - pub(crate) retry_expected: bool, - pub(crate) from_mon_update: bool, + pub expected_htlc_error_data: Option<(LocalHTLCFailureReason, &'a [u8])>, + pub expected_blamed_scid: Option, + pub expected_blamed_chan_closed: Option, + pub expected_mpp_parts_remain: bool, + pub retry_expected: bool, + pub from_mon_update: bool, + pub reason: Option, } impl<'a> PaymentFailedConditions<'a> { @@ -3241,6 +3303,7 @@ impl<'a> PaymentFailedConditions<'a> { expected_mpp_parts_remain: false, retry_expected: false, from_mon_update: false, + reason: None, } } pub fn mpp_parts_remain(mut self) -> Self { @@ -3321,14 +3384,21 @@ pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>( *payment_failed_permanently, expected_payment_failed_permanently, 
"unexpected payment_failed_permanently value" ); - { - assert!(error_code.is_some(), "expected error_code.is_some() = true"); - assert!(error_data.is_some(), "expected error_data.is_some() = true"); - let reason: LocalHTLCFailureReason = error_code.unwrap().into(); - if let Some((code, data)) = conditions.expected_htlc_error_data { - assert_eq!(reason, code, "unexpected error code"); - assert_eq!(&error_data.as_ref().unwrap()[..], data, "unexpected error data"); - } + match failure { + PathFailure::OnPath { .. } => { + assert!(error_code.is_some(), "expected error_code.is_some() = true"); + assert!(error_data.is_some(), "expected error_data.is_some() = true"); + let reason: LocalHTLCFailureReason = error_code.unwrap().into(); + if let Some((code, data)) = conditions.expected_htlc_error_data { + assert_eq!(reason, code, "unexpected error code"); + assert_eq!(&error_data.as_ref().unwrap()[..], data); + } + }, + PathFailure::InitialSend { .. } => { + assert!(error_code.is_none()); + assert!(error_data.is_none()); + assert!(conditions.expected_htlc_error_data.is_none()); + }, } if let Some(chan_closed) = conditions.expected_blamed_chan_closed { @@ -3362,7 +3432,9 @@ pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>( assert_eq!(*payment_id, expected_payment_id); assert_eq!( reason.unwrap(), - if expected_payment_failed_permanently { + if let Some(expected_reason) = conditions.reason { + expected_reason + } else if expected_payment_failed_permanently { PaymentFailureReason::RecipientRejected } else { PaymentFailureReason::RetriesExhausted @@ -3409,12 +3481,12 @@ pub fn send_along_route_with_secret<'a, 'b, 'c>( Retry::Attempts(0), ) .unwrap(); - check_added_monitors!(origin_node, expected_paths.len()); + check_added_monitors(&origin_node, expected_paths.len()); pass_along_route(origin_node, expected_paths, recv_value, our_payment_hash, our_payment_secret); payment_id } -fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { +pub fn 
fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { let origin_node_id = expected_path[0].node.get_our_node_id(); // iterate from the receiving node to the origin node and handle update fail htlc. @@ -3423,7 +3495,7 @@ fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { prev_node .node .handle_update_fail_htlc(node.node.get_our_node_id(), &updates.update_fail_htlcs[0]); - check_added_monitors!(prev_node, 0); + check_added_monitors(&prev_node, 0); let is_first_hop = origin_node_id == prev_node.node.get_our_node_id(); // We do not want to fail backwards on the first hop. All other hops should fail backwards. @@ -3435,6 +3507,7 @@ fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { pub struct PassAlongPathArgs<'a, 'b, 'c, 'd> { pub origin_node: &'a Node<'b, 'c, 'd>, pub expected_path: &'a [&'a Node<'b, 'c, 'd>], + pub dummy_tlvs: Vec, pub recv_value: u64, pub payment_hash: PaymentHash, pub payment_secret: Option, @@ -3456,6 +3529,7 @@ impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> { Self { origin_node, expected_path, + dummy_tlvs: vec![], recv_value, payment_hash, payment_secret: None, @@ -3503,12 +3577,17 @@ impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> { self.expected_failure = Some(failure); self } + pub fn with_dummy_tlvs(mut self, dummy_tlvs: &[DummyTlvs]) -> Self { + self.dummy_tlvs = dummy_tlvs.to_vec(); + self + } } pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option { let PassAlongPathArgs { origin_node, expected_path, + dummy_tlvs, recv_value, payment_hash: our_payment_hash, payment_secret: our_payment_secret, @@ -3531,7 +3610,7 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option assert_eq!(node.node.get_our_node_id(), payment_event.node_id); node.node.handle_update_add_htlc(prev_node.node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(node, 0); + check_added_monitors(&node, 0); if is_last_hop && 
is_probe { do_commitment_signed_dance(node, prev_node, &payment_event.commitment_msg, true, true); @@ -3543,6 +3622,16 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option node.node.process_pending_htlc_forwards(); } + if is_last_hop { + // At the final hop, the incoming packet contains N dummy-hop layers + // before the real HTLC. Each call to `process_pending_htlc_forwards` + // strips exactly one dummy layer, so we call it N times. + for _ in 0..dummy_tlvs.len() { + assert!(node.node.needs_pending_htlc_processing()); + node.node.process_pending_htlc_forwards(); + } + } + if is_last_hop && clear_recipient_events { let events_2 = node.node.get_and_clear_pending_events(); if payment_claimable_expected { @@ -3633,14 +3722,14 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option assert!(events_2.len() == 1); expect_htlc_handling_failed_destinations!(events_2, &[failure]); node.node.process_pending_htlc_forwards(); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); } else { assert!(events_2.is_empty()); } } else if !is_last_hop { let mut events_2 = node.node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); } @@ -3675,7 +3764,7 @@ pub fn send_probe_along_route<'a, 'b, 'c>( let mut events = origin_node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), expected_route.len()); - check_added_monitors!(origin_node, expected_route.len()); + check_added_monitors(&origin_node, expected_route.len()); for (path, payment_hash) in expected_route.iter() { let ev = remove_first_msg_event_to_node(&path[0].node.get_our_node_id(), &mut events); @@ -3755,6 +3844,29 @@ pub struct ClaimAlongRouteArgs<'a, 'b, 'c, 'd> { pub origin_node: &'a Node<'b, 'c, 'd>, pub expected_paths: &'a [&'a [&'a Node<'b, 'c, 'd>]], pub 
expected_extra_fees: Vec, + /// A one-off adjustment used only in tests to account for an existing + /// fee-handling trade-off in LDK. + /// + /// When the payer is the introduction node of a blinded path, LDK does not + /// subtract the forward fee for the `payer -> next_hop` channel + /// (see [`BlindedPaymentPath::advance_path_by_one`]). This keeps the fee + /// logic simpler at the cost of a small, intentional overpayment. + /// + /// In the simple two-hop case (payer as introduction node → payee), + /// this overpayment has historically been avoided by simply not charging + /// the payer the forward fee, since the payer knows there is only + /// a single hop after them. + /// + /// However, with the introduction of dummy hops in LDK v0.3, even a + /// two-node real path (payer as introduction node → payee) may appear as a + /// multi-hop blinded path. This makes the existing overpayment surface in + /// tests. + /// + /// Until the fee-handling trade-off is revisited, this field allows tests + /// to compensate for that expected difference. 
+ /// + /// [`BlindedPaymentPath::advance_path_by_one`]: crate::blinded_path::payment::BlindedPaymentPath::advance_path_by_one + pub expected_extra_total_fees_msat: u64, pub expected_min_htlc_overpay: Vec, pub skip_last: bool, pub payment_preimage: PaymentPreimage, @@ -3778,6 +3890,7 @@ impl<'a, 'b, 'c, 'd> ClaimAlongRouteArgs<'a, 'b, 'c, 'd> { origin_node, expected_paths, expected_extra_fees: vec![0; expected_paths.len()], + expected_extra_total_fees_msat: 0, expected_min_htlc_overpay: vec![0; expected_paths.len()], skip_last: false, payment_preimage, @@ -3793,6 +3906,10 @@ impl<'a, 'b, 'c, 'd> ClaimAlongRouteArgs<'a, 'b, 'c, 'd> { self.expected_extra_fees = extra_fees; self } + pub fn with_expected_extra_total_fees_msat(mut self, extra_total_fees: u64) -> Self { + self.expected_extra_total_fees_msat = extra_total_fees; + self + } pub fn with_expected_min_htlc_overpay(mut self, extra_fees: Vec) -> Self { self.expected_min_htlc_overpay = extra_fees; self @@ -3942,7 +4059,7 @@ pub fn pass_claimed_payment_along_route_from_ev( $prev_node.node.get_our_node_id(), next_msgs.as_ref().unwrap().0.clone(), ); - check_added_monitors!($node, 0); + check_added_monitors(&$node, 0); assert!($node.node.get_and_clear_pending_msg_events().is_empty()); let commitment = &next_msgs.as_ref().unwrap().1; do_commitment_signed_dance($node, $prev_node, commitment, false, false); @@ -4007,7 +4124,7 @@ pub fn pass_claimed_payment_along_route_from_ev( ); expected_total_fee_msat += actual_fee.unwrap(); fwd_amt_msat += actual_fee.unwrap(); - check_added_monitors!($node, 1); + check_added_monitors(&$node, 1); let new_next_msgs = if $new_msgs { let events = $node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -4056,17 +4173,25 @@ pub fn pass_claimed_payment_along_route_from_ev( // Ensure that claim_funds is idempotent. 
expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage); assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(expected_paths[0].last().unwrap(), 0); + check_added_monitors(&expected_paths[0].last().unwrap(), 0); expected_total_fee_msat } + pub fn claim_payment_along_route( args: ClaimAlongRouteArgs, ) -> (Option, Vec) { - let origin_node = args.origin_node; - let payment_preimage = args.payment_preimage; - let skip_last = args.skip_last; - let expected_total_fee_msat = do_claim_payment_along_route(args); + let ClaimAlongRouteArgs { + origin_node, + payment_preimage, + skip_last, + expected_extra_total_fees_msat, + .. + } = args; + + let expected_total_fee_msat = + do_claim_payment_along_route(args) + expected_extra_total_fees_msat; + if !skip_last { expect_payment_sent!(origin_node, payment_preimage, Some(expected_total_fee_msat)) } else { @@ -4151,7 +4276,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( our_payment_hash: PaymentHash, expected_fail_reason: PaymentFailureReason, ) { let mut expected_paths: Vec<_> = expected_paths_slice.iter().collect(); - check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len()); + check_added_monitors(&expected_paths[0].last().unwrap(), expected_paths.len()); let mut per_path_msgs: Vec<((msgs::UpdateFailHTLC, Vec), PublicKey)> = Vec::with_capacity(expected_paths.len()); @@ -4263,7 +4388,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0, ); - check_added_monitors!(origin_node, 0); + check_added_monitors(&origin_node, 0); assert!(origin_node.node.get_and_clear_pending_msg_events().is_empty()); let commitment = &next_msgs.as_ref().unwrap().1; do_commitment_signed_dance(origin_node, prev_node, commitment, false, false); @@ -4326,7 +4451,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>( pending_events ); 
assert!(expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(expected_paths[0].last().unwrap(), 0); + check_added_monitors(&expected_paths[0].last().unwrap(), 0); } pub fn fail_payment<'a, 'b, 'c>( @@ -4466,8 +4591,9 @@ pub fn create_node_cfgs_with_node_id_message_router<'a>( ) } -pub fn test_default_channel_config() -> UserConfig { +pub fn test_legacy_channel_config() -> UserConfig { let mut default_config = UserConfig::default(); + default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = false; // Set cltv_expiry_delta slightly lower to keep the final CLTV values inside one byte in our // tests so that our script-length checks don't fail (see ACCEPTED_HTLC_SCRIPT_WEIGHT). default_config.channel_config.cltv_expiry_delta = MIN_CLTV_EXPIRY_DELTA; @@ -4485,10 +4611,9 @@ pub fn test_default_channel_config() -> UserConfig { default_config } -pub fn test_default_anchors_channel_config() -> UserConfig { - let mut config = test_default_channel_config(); +pub fn test_default_channel_config() -> UserConfig { + let mut config = test_legacy_channel_config(); config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - config.manually_accept_inbound_channels = true; config } @@ -4555,7 +4680,29 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>( let mut nodes = Vec::new(); let chan_count = Rc::new(RefCell::new(0)); let payment_count = Rc::new(RefCell::new(0)); - let connect_style = Rc::new(RefCell::new(ConnectStyle::random_style())); + + let connect_style = Rc::new(RefCell::new(match std::env::var("LDK_TEST_CONNECT_STYLE") { + Ok(val) => match val.as_str() { + "BEST_BLOCK_FIRST" => ConnectStyle::BestBlockFirst, + "BEST_BLOCK_FIRST_SKIPPING_BLOCKS" => ConnectStyle::BestBlockFirstSkippingBlocks, + "BEST_BLOCK_FIRST_REORGS_ONLY_TIP" => ConnectStyle::BestBlockFirstReorgsOnlyTip, + "TRANSACTIONS_FIRST" => ConnectStyle::TransactionsFirst, + "TRANSACTIONS_FIRST_SKIPPING_BLOCKS" => 
ConnectStyle::TransactionsFirstSkippingBlocks, + "TRANSACTIONS_DUPLICATIVELY_FIRST_SKIPPING_BLOCKS" => { + ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks + }, + "HIGHLY_REDUNDANT_TRANSACTIONS_FIRST_SKIPPING_BLOCKS" => { + ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks + }, + "TRANSACTIONS_FIRST_REORGS_ONLY_TIP" => ConnectStyle::TransactionsFirstReorgsOnlyTip, + "FULL_BLOCK_VIA_LISTEN" => ConnectStyle::FullBlockViaListen, + "FULL_BLOCK_DISCONNECTIONS_SKIPPING_VIA_LISTEN" => { + ConnectStyle::FullBlockDisconnectionsSkippingViaListen + }, + _ => panic!("Unknown ConnectStyle '{}'", val), + }, + Err(_) => ConnectStyle::random_style(), + })); for i in 0..node_count { let dedicated_entropy = DedicatedEntropy(RandomBytes::new([i as u8; 32])); @@ -5088,6 +5235,9 @@ pub struct ReconnectArgs<'a, 'b, 'c, 'd> { pub pending_cell_htlc_claims: (usize, usize), pub pending_cell_htlc_fails: (usize, usize), pub pending_raa: (bool, bool), + /// If true, don't assert that pending messages are empty after the commitment dance completes. + /// Useful when holding cell HTLCs will be released and need to be handled by the caller. 
+ pub allow_post_commitment_dance_msgs: (bool, bool), } impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { @@ -5110,6 +5260,7 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> { pending_cell_htlc_claims: (0, 0), pending_cell_htlc_fails: (0, 0), pending_raa: (false, false), + allow_post_commitment_dance_msgs: (false, false), } } } @@ -5135,6 +5286,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { pending_raa, pending_responding_commitment_signed, pending_responding_commitment_signed_dup_monitor, + allow_post_commitment_dance_msgs, } = args; connect_nodes(node_a, node_b); let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b); @@ -5182,9 +5334,9 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { || pending_cell_htlc_fails.0 != 0 || expect_renegotiated_funding_locked_monitor_update.1 { - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); } else { - check_added_monitors!(node_b, 0); + check_added_monitors(&node_b, 0); } let mut resp_2 = Vec::new(); @@ -5196,9 +5348,9 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { || pending_cell_htlc_fails.1 != 0 || expect_renegotiated_funding_locked_monitor_update.0 { - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); } else { - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); } // We don't yet support both needing updates, as that would require a different commitment dance: @@ -5273,7 +5425,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); node_a.node.handle_revoke_and_ack(node_b_id, &chan_msgs.1.unwrap()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); } else { assert!(chan_msgs.1.is_none()); } @@ -5313,16 +5465,18 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: 
ReconnectArgs<'a, 'b, 'c, 'd>) { node_b_id, &commitment_update.commitment_signed, ); - check_added_monitors!(node_a, 1); + check_added_monitors(&node_a, 1); let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); - assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!( - node_b, - if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 } + check_added_monitors( + &node_b, + if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 }, ); + if !allow_post_commitment_dance_msgs.0 { + assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); + } } } else { assert!(chan_msgs.2.is_none()); @@ -5387,7 +5541,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); node_b.node.handle_revoke_and_ack(node_a_id, &chan_msgs.1.unwrap()); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); } else { assert!(chan_msgs.1.is_none()); } @@ -5427,16 +5581,18 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_a_id, &commitment_update.commitment_signed, ); - check_added_monitors!(node_b, 1); + check_added_monitors(&node_b, 1); let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes node_a.node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); - assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!( - node_a, - if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 } + check_added_monitors( + &node_a, + if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 }, ); + if 
!allow_post_commitment_dance_msgs.1 { + assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); + } } } else { assert!(chan_msgs.2.is_none()); @@ -5474,7 +5630,8 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>( .unwrap(); let open_channel_msg = get_event_msg!(funding_node, MessageSendEvent::SendOpenChannel, other_node_id); - other_node.node.handle_open_channel(funding_node_id, &open_channel_msg); + handle_and_accept_open_channel(other_node, funding_node_id, &open_channel_msg); + let accept_channel_msg = get_event_msg!(other_node, MessageSendEvent::SendAcceptChannel, funding_node_id); funding_node.node.handle_accept_channel(other_node_id, &accept_channel_msg); @@ -5518,7 +5675,7 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>( tx.clone(), ) .is_ok()); - check_added_monitors!(funding_node, 0); + check_added_monitors(&funding_node, 0); let events = funding_node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), params.len()); for (other_node, ..) in params { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e2963dbeb09..6fe0c83dfe8 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -33,14 +33,15 @@ use crate::ln::channel::{ MIN_CHAN_DUST_LIMIT_SATOSHIS, UNFUNDED_CHANNEL_AGE_LIMIT_TICKS, }; use crate::ln::channelmanager::{ - PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, DISABLE_GOSSIP_TICKS, - ENABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, + PaymentId, RAACommitmentOrder, BREAKDOWN_TIMEOUT, DISABLE_GOSSIP_TICKS, ENABLE_GOSSIP_TICKS, + MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::msgs; use crate::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent, RoutingMessageHandler, }; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::types::ChannelId; use crate::ln::{chan_utils, onion_utils}; use crate::routing::gossip::{NetworkGraph, 
NetworkUpdate}; @@ -271,7 +272,9 @@ pub fn test_duplicate_htlc_different_direction_onchain() { // in opposite directions, even with the same payment secret. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -398,7 +401,9 @@ pub fn test_duplicate_htlc_different_direction_onchain() { pub fn test_inbound_outbound_capacity_is_not_zero() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); @@ -436,7 +441,12 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac // just before the upstream timeout expires let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); for node in nodes.iter() { @@ -562,7 +572,18 @@ pub fn channel_monitor_network_test() { // tests that ChannelMonitor is able to recover from various states. 
let chanmon_cfgs = create_chanmon_cfgs(5); let node_cfgs = create_node_cfgs(5, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 5, + &node_cfgs, + &[ + Some(legacy_cfg.clone()), + Some(legacy_cfg.clone()), + Some(legacy_cfg.clone()), + Some(legacy_cfg.clone()), + Some(legacy_cfg), + ], + ); let nodes = create_network(5, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -795,11 +816,11 @@ pub fn channel_monitor_network_test() { #[xtest(feature = "_externalize_tests")] pub fn test_justice_tx_htlc_timeout() { // Test justice txn built on revoked HTLC-Timeout tx, against both sides - let mut alice_config = test_default_channel_config(); + let mut alice_config = test_legacy_channel_config(); alice_config.channel_handshake_config.announce_for_forwarding = true; alice_config.channel_handshake_limits.force_announced_channel_preference = false; alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5; - let mut bob_config = test_default_channel_config(); + let mut bob_config = test_legacy_channel_config(); bob_config.channel_handshake_config.announce_for_forwarding = true; bob_config.channel_handshake_limits.force_announced_channel_preference = false; bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3; @@ -883,11 +904,11 @@ pub fn test_justice_tx_htlc_timeout() { #[xtest(feature = "_externalize_tests")] pub fn test_justice_tx_htlc_success() { // Test justice txn built on revoked HTLC-Success tx, against both sides - let mut alice_config = test_default_channel_config(); + let mut alice_config = test_legacy_channel_config(); alice_config.channel_handshake_config.announce_for_forwarding = true; alice_config.channel_handshake_limits.force_announced_channel_preference = false; alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5; - let mut bob_config = 
test_default_channel_config(); + let mut bob_config = test_legacy_channel_config(); bob_config.channel_handshake_config.announce_for_forwarding = true; bob_config.channel_handshake_limits.force_announced_channel_preference = false; bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3; @@ -960,7 +981,9 @@ pub fn revoked_output_claim() { // transaction is broadcast by its counterparty let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1011,7 +1034,9 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: WatchtowerPersister::new(destination_script1), ]; let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect()); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1074,7 +1099,9 @@ pub fn claim_htlc_outputs() { let mut chanmon_cfgs = create_chanmon_cfgs(2); chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1163,7 
+1190,6 @@ pub fn do_test_multiple_package_conflicts(p2a_anchor: bool) { // transaction. user_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; user_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_cfg.manually_accept_inbound_channels = true; let configs = [Some(user_cfg.clone()), Some(user_cfg.clone()), Some(user_cfg)]; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); @@ -1388,7 +1414,12 @@ pub fn test_htlc_on_chain_success() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1635,7 +1666,12 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1886,7 +1922,12 @@ fn do_test_commitment_revoked_fail_backward_exhaustive( // commitment_signed) we will be free to fail/fulfill the HTLC backwards. 
let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -2270,6 +2311,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; nodes[0].node.handle_update_add_htlc(node_b_id, &update_add_htlc); } @@ -2304,7 +2346,9 @@ pub fn test_htlc_ignore_latest_remote_commitment() { // ignored if we cannot claim them. This originally tickled an invalid unwrap(). let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -2356,7 +2400,12 @@ pub fn test_force_close_fail_back() { // Check which HTLCs are failed-backwards on channel force-closure let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -2504,7 +2553,7 @@ pub fn test_peer_disconnected_before_funding_broadcasted() { let 
expected_temporary_channel_id = nodes[0].node.create_channel(node_b_id, 1_000_000, 500_000_000, 42, None, None).unwrap(); let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_channel); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel); let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); @@ -3428,7 +3477,7 @@ macro_rules! check_spendable_outputs { let secp_ctx = Secp256k1::new(); for event in events.drain(..) { match event { - Event::SpendableOutputs { mut outputs, channel_id: _ } => { + Event::SpendableOutputs { mut outputs, channel_id: _, counterparty_node_id: _ } => { for outp in outputs.drain(..) { let script = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(); @@ -3468,7 +3517,9 @@ pub fn test_claim_sizeable_push_msat() { // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3505,7 +3556,9 @@ pub fn test_claim_on_remote_sizeable_push_msat() { // to_remote output is encumbered by a P2WPKH let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, 
&node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3547,7 +3600,9 @@ pub fn test_claim_on_remote_revoked_sizeable_push_msat() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3580,7 +3635,9 @@ pub fn test_claim_on_remote_revoked_sizeable_push_msat() { pub fn test_static_spendable_outputs_preimage_tx() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3630,7 +3687,9 @@ pub fn test_static_spendable_outputs_preimage_tx() { pub fn test_static_spendable_outputs_timeout_tx() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3741,7 +3800,9 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { let mut chanmon_cfgs = create_chanmon_cfgs(2); chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = 
create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3820,7 +3881,9 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { let mut chanmon_cfgs = create_chanmon_cfgs(2); chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3907,7 +3970,12 @@ pub fn test_onchain_to_onchain_claim() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4053,7 +4121,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { let node_cfgs = create_node_cfgs(5, &chanmon_cfgs); // When this test was written, the default base fee floated based on the HTLC count. // It is now fixed, so we simply set the fee to the expected value here. 
- let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); config.channel_config.forwarding_fee_base_msat = 196; let configs = [ @@ -4234,7 +4302,9 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { pub fn test_dynamic_spendable_outputs_local_htlc_success_tx() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4303,7 +4373,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let node_cfgs = create_node_cfgs(6, &chanmon_cfgs); // When this test was written, the default base fee floated based on the HTLC count. // It is now fixed, so we simply set the fee to the expected value here. 
- let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); config.channel_config.forwarding_fee_base_msat = 196; let configs = [ @@ -4721,7 +4791,9 @@ pub fn test_fail_backwards_previous_remote_announce() { pub fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -4814,7 +4886,12 @@ pub fn test_key_derivation_params() { node_cfgs.remove(0); node_cfgs.insert(0, node); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -4931,7 +5008,9 @@ pub fn test_static_output_closing_tx() { fn do_htlc_claim_local_commitment_only(use_dust: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4975,7 +5054,9 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = 
create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -5014,7 +5095,12 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -5106,7 +5192,9 @@ pub fn htlc_claim_single_commitment_only_b() { pub fn test_fail_holding_cell_htlc_upon_free() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -5205,7 +5293,9 @@ pub fn test_fail_holding_cell_htlc_upon_free() { pub fn test_free_and_fail_holding_cell_htlcs() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, 
&[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -5348,7 +5438,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); // Avoid having to include routing fees in calculations - let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); config.channel_config.forwarding_fee_base_msat = 0; config.channel_config.forwarding_fee_proportional_millionths = 0; let node_chanmgrs = create_node_chanmgrs( @@ -5715,7 +5805,9 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { let mut chanmon_cfgs = create_chanmon_cfgs(2); chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -5823,7 +5915,12 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -6129,7 +6226,9 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, 
&chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -6236,7 +6335,9 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { let mut chanmon_cfgs = create_chanmon_cfgs(2); chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -6363,11 +6464,11 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { assert_eq!( node_txn[1].input[0].previous_output, - revoked_htlc_txn[1].input[0].previous_output + revoked_htlc_txn[0].input[0].previous_output ); assert_eq!( node_txn[1].input[1].previous_output, - revoked_htlc_txn[0].input[0].previous_output + revoked_htlc_txn[1].input[0].previous_output ); // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one @@ -6438,7 +6539,9 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let remote_txn = { @@ -6785,7 +6888,7 @@ pub fn test_override_0msat_htlc_minimum() { let res = get_event_msg!(nodes[0], 
MessageSendEvent::SendOpenChannel, node_b_id); assert_eq!(res.common_fields.htlc_minimum_msat, 1); - nodes[1].node.handle_open_channel(node_a_id, &res); + handle_and_accept_open_channel(&nodes[1], node_a_id, &res); let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); assert_eq!(res.common_fields.htlc_minimum_msat, 1); } @@ -7359,7 +7462,9 @@ pub fn test_concurrent_monitor_claim() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -7564,7 +7669,7 @@ pub fn test_pre_lockin_no_chan_closed_update() { // Create an initial channel nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_chan_msg); let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_chan_msg); @@ -7601,7 +7706,9 @@ pub fn test_htlc_no_detection() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -7668,7 +7775,12 @@ fn do_test_onchain_htlc_settlement_after_close( // 6) Bob 
claims the offered output on the broadcasted commitment. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -7883,9 +7995,8 @@ pub fn test_peer_funding_sidechannel() { let node_b_id = nodes[1].node.get_our_node_id(); let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0); - let temp_chan_id_ca = exchange_open_accept_chan(&nodes[1], &nodes[0], 1_000_000, 0); - let (_, tx, funding_output) = create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); + let temp_chan_id_ba = exchange_open_accept_chan(&nodes[1], &nodes[0], 1_000_000, 0); let cs_funding_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(cs_funding_events.len(), 1); @@ -7897,7 +8008,7 @@ pub fn test_peer_funding_sidechannel() { let output_idx = funding_output.index; nodes[1] .node - .funding_transaction_generated_unchecked(temp_chan_id_ca, node_a_id, tx.clone(), output_idx) + .funding_transaction_generated_unchecked(temp_chan_id_ba, node_a_id, tx.clone(), output_idx) .unwrap(); let funding_created_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, node_a_id); @@ -7977,7 +8088,12 @@ pub fn test_error_chans_closed() { // we can test various edge cases around it to ensure we don't regress. 
let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -8076,7 +8192,12 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // aren't broadcasting transactions too early (ie not broadcasting them at all). let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks; @@ -8551,7 +8672,7 @@ fn do_test_max_dust_htlc_exposure( // might be available again for HTLC processing once the dust bandwidth has cleared up. let chanmon_cfgs = create_chanmon_cfgs(2); - let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); // We hard-code the feerate values here but they're re-calculated furter down and asserted. // If the values ever change below these constants should simply be updated. 
@@ -8601,7 +8722,7 @@ fn do_test_max_dust_htlc_exposure( if on_holder_tx { open_channel.common_fields.dust_limit_satoshis = 546; } - nodes[1].node.handle_open_channel(node_a_id, &open_channel); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel); let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); @@ -8937,7 +9058,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { } let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); // Set the dust limit to the default value config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(10_000); // Make sure the HTLC limits don't get in the way @@ -9152,11 +9273,10 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) assert_eq!(expected_dust_exposure_msat, 528_492); } - let mut default_config = test_default_channel_config(); + let mut default_config = test_legacy_channel_config(); if features == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() { default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; // in addition to the one above, this setting is also needed to create an anchor channel - default_config.manually_accept_inbound_channels = true; } // Set node 1's max dust htlc exposure to 1msat below `expected_dust_exposure_msat` @@ -9564,7 +9684,7 @@ pub fn test_remove_expired_outbound_unfunded_channels() { nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_message); let accept_channel_message = get_event_msg!(nodes[1], 
MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); @@ -9628,7 +9748,7 @@ pub fn test_remove_expired_inbound_unfunded_channels() { nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_message); let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); @@ -9686,10 +9806,7 @@ fn do_test_manual_broadcast_skips_commitment_until_funding( // forced to broadcast using `ChannelMonitor::broadcast_latest_holder_commitment_txn`. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let mut chan_config = test_default_channel_config(); - if zero_conf_open { - chan_config.manually_accept_inbound_channels = true; - } + let mut chan_config = test_legacy_channel_config(); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -9899,3 +10016,154 @@ pub fn test_multi_post_event_actions() { do_test_multi_post_event_actions(true); do_test_multi_post_event_actions(false); } + +#[xtest(feature = "_externalize_tests")] +pub fn test_dust_exposure_holding_cell_assertion() { + // Test that we properly move forward if we pop an HTLC-add from the holding cell but fail to + // add it to the channel. In 0.2 this cause a (harmless in prod) debug assertion failure. We + // try to ensure that this won't happen by checking that an HTLC will be able to be added + // before we add it to the holding cell, so getting into this state takes a bit of work. + // + // Here we accomplish this by using the dust exposure limit. 
This has the unique feature that + // node C can increase node B's dust exposure on the B <-> C channel without B doing anything. + // To exploit this, we get node B one HTLC away from being over-exposed to dust, give it one + // more HTLC in the holding cell, then have node C add an HTLC. By the time the holding-cell + // HTLC is released we are at max-dust-exposure and will fail it. + + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + + // Configure nodes with specific dust limits + let mut config = test_legacy_channel_config(); + // Use a fixed dust exposure limit to make the test simpler + const DUST_HTLC_VALUE_MSAT: u64 = 500_000; + config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FixedLimitMsat(5_000_000); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + + let configs = [Some(config.clone()), Some(config.clone()), Some(config.clone())]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + // Create channels: A <-> B <-> C + create_announced_chan_between_nodes(&nodes, 0, 1); + let bc_chan_id = create_announced_chan_between_nodes(&nodes, 1, 2).2; + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 10_000_000); + + // Send multiple dust HTLCs from B to C to approach the dust limit (including transaction fees) + for _ in 0..4 { + route_payment(&nodes[1], &[&nodes[2]], DUST_HTLC_VALUE_MSAT); + } + + // At this point we shouldn't be over the dust limit, and should still be able to send HTLCs. 
+ let bs_chans = nodes[1].node.list_channels(); + let bc_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert_eq!( + bc_chan.next_outbound_htlc_minimum_msat, + config.channel_handshake_config.our_htlc_minimum_msat + ); + + // Add a further HTLC from B to C, but don't deliver the send messages. + // After this we'll only have the ability to add one more HTLC, but by not delivering the send + // messages (leaving B waiting on C's RAA) the next HTLC will go into B's holding cell. + let (route_bc, payment_hash_bc, _payment_preimage_bc, payment_secret_bc) = + get_route_and_payment_hash!(nodes[1], nodes[2], DUST_HTLC_VALUE_MSAT); + let onion_bc = RecipientOnionFields::secret_only(payment_secret_bc); + let id = PaymentId(payment_hash_bc.0); + nodes[1].node.send_payment_with_route(route_bc, payment_hash_bc, onion_bc, id).unwrap(); + check_added_monitors(&nodes[1], 1); + let send_bc = SendEvent::from_node(&nodes[1]); + + let bs_chans = nodes[1].node.list_channels(); + let bc_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert_eq!( + bc_chan.next_outbound_htlc_minimum_msat, + config.channel_handshake_config.our_htlc_minimum_msat + ); + + // Forward an additional HTLC from A through B to C. This will go in B's holding cell for node + // C as it is waiting on a response to the above messages. 
+ let payment_params_ac = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + let (route_ac, payment_hash_cell, _, payment_secret_ac) = + get_route_and_payment_hash!(nodes[0], nodes[2], payment_params_ac, DUST_HTLC_VALUE_MSAT); + let onion_ac = RecipientOnionFields::secret_only(payment_secret_ac); + let id = PaymentId(payment_hash_cell.0); + nodes[0].node.send_payment_with_route(route_ac, payment_hash_cell, onion_ac, id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let send_ab = SendEvent::from_node(&nodes[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_ab.msgs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &send_ab.commitment_msg, false, true); + + // At this point when we process pending forwards the HTLC will go into the holding cell and no + // further messages will be generated. Node B will also be at its maximum dust exposure and + // will refuse to send any dust HTLCs (when it includes the holding cell HTLC). + expect_and_process_pending_htlcs(&nodes[1], false); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let bs_chans = nodes[1].node.list_channels(); + let bc_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert!(bc_chan.next_outbound_htlc_minimum_msat > DUST_HTLC_VALUE_MSAT); + + // Send an additional HTLC from C to B. This will make B unable to forward the HTLC already in + // its holding cell as it would be over-exposed to dust. 
+ let (route_cb, payment_hash_cb, payment_preimage_cb, payment_secret_cb) = + get_route_and_payment_hash!(nodes[2], nodes[1], DUST_HTLC_VALUE_MSAT); + let onion_cb = RecipientOnionFields::secret_only(payment_secret_cb); + let id = PaymentId(payment_hash_cb.0); + nodes[2].node.send_payment_with_route(route_cb, payment_hash_cb, onion_cb, id).unwrap(); + check_added_monitors(&nodes[2], 1); + + // Now deliver all the messages and make sure that the HTLC is failed-back. + let send_event_cb = SendEvent::from_node(&nodes[2]); + nodes[1].node.handle_update_add_htlc(node_c_id, &send_event_cb.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &send_event_cb.commitment_msg); + check_added_monitors(&nodes[1], 1); + + nodes[2].node.handle_update_add_htlc(node_b_id, &send_bc.msgs[0]); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_bc.commitment_msg); + check_added_monitors(&nodes[2], 1); + + let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_raa); + check_added_monitors(&nodes[1], 1); + let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &node_c_id); + + // When we delivered the RAA above, we attempted (and failed) to add the HTLC to the channel, + // causing it to be ready to fail-back, which we do here: + let next_hop = + HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: bc_chan_id }; + expect_htlc_forwarding_fails(&nodes[1], &[next_hop]); + check_added_monitors(&nodes[1], 1); + fail_payment_along_path(&[&nodes[0], &nodes[1]]); + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[0], payment_hash_cell, false, conditions); + + nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_raa); + check_added_monitors(&nodes[2], 1); + let cs_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); + + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); + 
check_added_monitors(&nodes[2], 1); + let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); + + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &cs_cs.commitment_signed); + check_added_monitors(&nodes[1], 1); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); + + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_raa); + check_added_monitors(&nodes[1], 1); + expect_and_process_pending_htlcs(&nodes[1], false); + expect_payment_claimable!(nodes[1], payment_hash_cb, payment_secret_cb, DUST_HTLC_VALUE_MSAT); + + nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_raa); + check_added_monitors(&nodes[2], 1); + + // Now that everything has settled, make sure the channels still work with a simple claim. + claim_payment(&nodes[2], &[&nodes[1]], payment_preimage_cb); +} diff --git a/lightning/src/ln/funding.rs b/lightning/src/ln/funding.rs index f80b2b6daea..8092a0e4451 100644 --- a/lightning/src/ln/funding.rs +++ b/lightning/src/ln/funding.rs @@ -20,69 +20,82 @@ use crate::sign::{P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT}; /// The components of a splice's funding transaction that are contributed by one party. #[derive(Debug, Clone)] -pub enum SpliceContribution { - /// When funds are added to a channel. - SpliceIn { - /// The amount to contribute to the splice. - value: Amount, - - /// The inputs included in the splice's funding transaction to meet the contributed amount - /// plus fees. Any excess amount will be sent to a change output. - inputs: Vec, - - /// An optional change output script. This will be used if needed or, when not set, - /// generated using [`SignerProvider::get_destination_script`]. - /// - /// [`SignerProvider::get_destination_script`]: crate::sign::SignerProvider::get_destination_script - change_script: Option, - }, - /// When funds are removed from a channel. - SpliceOut { - /// The outputs to include in the splice's funding transaction. 
The total value of all - /// outputs plus fees will be the amount that is removed. - outputs: Vec, - }, +pub struct SpliceContribution { + /// The amount from [`inputs`] to contribute to the splice. + /// + /// [`inputs`]: Self::inputs + value_added: Amount, + + /// The inputs included in the splice's funding transaction to meet the contributed amount + /// plus fees. Any excess amount will be sent to a change output. + inputs: Vec, + + /// The outputs to include in the splice's funding transaction. The total value of all + /// outputs plus fees will be the amount that is removed. + outputs: Vec, + + /// An optional change output script. This will be used if needed or, when not set, + /// generated using [`SignerProvider::get_destination_script`]. + /// + /// [`SignerProvider::get_destination_script`]: crate::sign::SignerProvider::get_destination_script + change_script: Option, } impl SpliceContribution { - pub(super) fn value(&self) -> SignedAmount { - match self { - SpliceContribution::SpliceIn { value, .. } => { - value.to_signed().unwrap_or(SignedAmount::MAX) - }, - SpliceContribution::SpliceOut { outputs } => { - let value_removed = outputs - .iter() - .map(|txout| txout.value) - .sum::() - .to_signed() - .unwrap_or(SignedAmount::MAX); - -value_removed - }, - } + /// Creates a contribution for when funds are only added to a channel. + pub fn splice_in( + value_added: Amount, inputs: Vec, change_script: Option, + ) -> Self { + Self { value_added, inputs, outputs: vec![], change_script } + } + + /// Creates a contribution for when funds are only removed from a channel. + pub fn splice_out(outputs: Vec) -> Self { + Self { value_added: Amount::ZERO, inputs: vec![], outputs, change_script: None } + } + + /// Creates a contribution for when funds are both added to and removed from a channel. + /// + /// Note that `value_added` represents the value added by `inputs` but should not account for + /// value removed by `outputs`. 
The net value contributed can be obtained by calling + /// [`SpliceContribution::net_value`]. + pub fn splice_in_and_out( + value_added: Amount, inputs: Vec, outputs: Vec, + change_script: Option, + ) -> Self { + Self { value_added, inputs, outputs, change_script } + } + + /// The net value contributed to a channel by the splice. If negative, more value will be + /// spliced out than spliced in. + pub fn net_value(&self) -> SignedAmount { + let value_added = self.value_added.to_signed().unwrap_or(SignedAmount::MAX); + let value_removed = self + .outputs + .iter() + .map(|txout| txout.value) + .sum::() + .to_signed() + .unwrap_or(SignedAmount::MAX); + + value_added - value_removed + } + + pub(super) fn value_added(&self) -> Amount { + self.value_added } pub(super) fn inputs(&self) -> &[FundingTxInput] { - match self { - SpliceContribution::SpliceIn { inputs, .. } => &inputs[..], - SpliceContribution::SpliceOut { .. } => &[], - } + &self.inputs[..] } pub(super) fn outputs(&self) -> &[TxOut] { - match self { - SpliceContribution::SpliceIn { .. } => &[], - SpliceContribution::SpliceOut { outputs } => &outputs[..], - } + &self.outputs[..] } pub(super) fn into_tx_parts(self) -> (Vec, Vec, Option) { - match self { - SpliceContribution::SpliceIn { inputs, change_script, .. } => { - (inputs, vec![], change_script) - }, - SpliceContribution::SpliceOut { outputs } => (vec![], outputs, None), - } + let SpliceContribution { value_added: _, inputs, outputs, change_script } = self; + (inputs, outputs, change_script) } } diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 86c95721d47..5b2ffca5fd4 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -33,7 +33,9 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { // in normal testing, we test it explicitly here. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -59,7 +61,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { open_channel_message.channel_reserve_satoshis = 0; open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; } - nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel_message); // Extract the channel accept message from node1 to node0 let mut accept_channel_message = @@ -121,7 +123,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); // When this test was written, the default base fee floated based on the HTLC count. // It is now fixed, so we simply set the fee to the expected value here. 
- let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); config.channel_config.forwarding_fee_base_msat = 239; let configs = [Some(config.clone()), Some(config.clone()), Some(config.clone())]; @@ -749,7 +751,9 @@ pub fn holding_cell_htlc_counting() { pub fn test_basic_channel_reserve() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); @@ -778,14 +782,14 @@ pub fn test_basic_channel_reserve() { #[xtest(feature = "_externalize_tests")] fn test_fee_spike_violation_fails_htlc() { - do_test_fee_spike_buffer(None, true) + let cfg = test_legacy_channel_config(); + do_test_fee_spike_buffer(Some(cfg), true) } #[test] fn test_zero_fee_commitments_no_fee_spike_buffer() { let mut cfg = test_default_channel_config(); cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; - cfg.manually_accept_inbound_channels = true; do_test_fee_spike_buffer(Some(cfg), false) } @@ -839,6 +843,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option, htlc_fails: bool) { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -987,7 +992,9 @@ pub fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { // this situation. 
let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let default_config = UserConfig::default(); @@ -1025,7 +1032,9 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { let mut chanmon_cfgs = create_chanmon_cfgs(2); let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -1082,6 +1091,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; nodes[0].node.handle_update_add_htlc(node_b_id, &msg); @@ -1103,7 +1113,9 @@ pub fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let default_config = UserConfig::default(); @@ -1152,7 +1164,9 @@ pub fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() { // calculating our counterparty's 
commitment transaction fee (this was previously broken). let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000); @@ -1266,6 +1280,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -1563,7 +1578,9 @@ pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1650,6 +1667,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; for i in 0..50 { @@ -2138,13 +2156,8 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); - let mut default_config = test_default_channel_config(); - default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - default_config.manually_accept_inbound_channels = true; - 
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -2256,6 +2269,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); diff --git a/lightning/src/ln/inbound_payment.rs b/lightning/src/ln/inbound_payment.rs index 17c2526e78d..51f8b7bfce9 100644 --- a/lightning/src/ln/inbound_payment.rs +++ b/lightning/src/ln/inbound_payment.rs @@ -27,8 +27,6 @@ use crate::util::logger::Logger; #[allow(unused_imports)] use crate::prelude::*; -use core::ops::Deref; - pub(crate) const IV_LEN: usize = 16; const METADATA_LEN: usize = 16; const METADATA_KEY_LEN: usize = 32; @@ -143,13 +141,10 @@ fn min_final_cltv_expiry_delta_from_metadata(bytes: [u8; METADATA_LEN]) -> u16 { /// /// [phantom node payments]: crate::sign::PhantomKeysManager /// [`NodeSigner::get_expanded_key`]: crate::sign::NodeSigner::get_expanded_key -pub fn create( +pub fn create( keys: &ExpandedKey, min_value_msat: Option, invoice_expiry_delta_secs: u32, entropy_source: &ES, current_time: u64, min_final_cltv_expiry_delta: Option, -) -> Result<(PaymentHash, PaymentSecret), ()> -where - ES::Target: EntropySource, -{ +) -> Result<(PaymentHash, PaymentSecret), ()> { let metadata_bytes = construct_metadata_bytes( min_value_msat, if min_final_cltv_expiry_delta.is_some() { @@ -345,13 +340,10 @@ fn construct_payment_secret( /// [`NodeSigner::get_expanded_key`]: crate::sign::NodeSigner::get_expanded_key /// [`create_inbound_payment`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment /// [`create_inbound_payment_for_hash`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment_for_hash -pub(super) 
fn verify( +pub(super) fn verify( payment_hash: PaymentHash, payment_data: &msgs::FinalOnionHopData, highest_seen_timestamp: u64, keys: &ExpandedKey, logger: &L, -) -> Result<(Option, Option), ()> -where - L::Target: Logger, -{ +) -> Result<(Option, Option), ()> { let (iv_bytes, metadata_bytes) = decrypt_metadata(payment_data.payment_secret, keys); let payment_type_res = diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index 4340aad420a..a004f6e9f14 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -39,7 +39,6 @@ use crate::ln::types::ChannelId; use crate::sign::{EntropySource, P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT}; use core::fmt::Display; -use core::ops::Deref; /// The number of received `tx_add_input` messages during a negotiation at which point the /// negotiation MUST be failed. @@ -668,10 +667,9 @@ impl InteractiveTxSigningSession { self.holder_tx_signatures = Some(tx_signatures); let funding_tx_opt = self.maybe_finalize_funding_tx(); - let holder_tx_signatures = (self.holder_sends_tx_signatures_first - || self.has_received_tx_signatures()) + let holder_tx_signatures = (self.has_received_commitment_signed + && (self.holder_sends_tx_signatures_first || self.has_received_tx_signatures())) .then(|| { - debug_assert!(self.has_received_commitment_signed); self.holder_tx_signatures.clone().expect("Holder tx_signatures were just provided") }); @@ -1990,10 +1988,9 @@ macro_rules! 
do_state_transition { }}; } -fn generate_holder_serial_id(entropy_source: &ES, is_initiator: bool) -> SerialId -where - ES::Target: EntropySource, -{ +fn generate_holder_serial_id( + entropy_source: &ES, is_initiator: bool, +) -> SerialId { let rand_bytes = entropy_source.get_secure_random_bytes(); let mut serial_id_bytes = [0u8; 8]; serial_id_bytes.copy_from_slice(&rand_bytes[..8]); @@ -2009,10 +2006,7 @@ pub(super) enum HandleTxCompleteValue { NegotiationComplete(Option, OutPoint), } -pub(super) struct InteractiveTxConstructorArgs<'a, ES: Deref> -where - ES::Target: EntropySource, -{ +pub(super) struct InteractiveTxConstructorArgs<'a, ES: EntropySource> { pub entropy_source: &'a ES, pub holder_node_id: PublicKey, pub counterparty_node_id: PublicKey, @@ -2031,10 +2025,9 @@ impl InteractiveTxConstructor { /// /// If the holder is the initiator, they need to send the first message which is a `TxAddInput` /// message. - pub fn new(args: InteractiveTxConstructorArgs) -> Result - where - ES::Target: EntropySource, - { + pub fn new( + args: InteractiveTxConstructorArgs, + ) -> Result { let InteractiveTxConstructorArgs { entropy_source, holder_node_id, @@ -2337,22 +2330,21 @@ impl InteractiveTxConstructor { pub(super) fn calculate_change_output_value( context: &FundingNegotiationContext, is_splice: bool, shared_output_funding_script: &ScriptBuf, change_output_dust_limit: u64, -) -> Result, AbortReason> { - assert!(context.our_funding_contribution > SignedAmount::ZERO); - let our_funding_contribution_satoshis = context.our_funding_contribution.to_sat() as u64; - - let mut total_input_satoshis = 0u64; +) -> Result, AbortReason> { + let mut total_input_value = Amount::ZERO; let mut our_funding_inputs_weight = 0u64; for FundingTxInput { utxo, .. 
} in context.our_funding_inputs.iter() { - total_input_satoshis = total_input_satoshis.saturating_add(utxo.output.value.to_sat()); + total_input_value = total_input_value.checked_add(utxo.output.value).unwrap_or(Amount::MAX); let weight = BASE_INPUT_WEIGHT + utxo.satisfaction_weight; our_funding_inputs_weight = our_funding_inputs_weight.saturating_add(weight); } let funding_outputs = &context.our_funding_outputs; - let total_output_satoshis = - funding_outputs.iter().fold(0u64, |total, out| total.saturating_add(out.value.to_sat())); + let total_output_value = funding_outputs + .iter() + .fold(Amount::ZERO, |total, out| total.checked_add(out.value).unwrap_or(Amount::MAX)); + let our_funding_outputs_weight = funding_outputs.iter().fold(0u64, |weight, out| { weight.saturating_add(get_output_weight(&out.script_pubkey).to_wu()) }); @@ -2376,15 +2368,25 @@ pub(super) fn calculate_change_output_value( } } - let fees_sats = fee_for_weight(context.funding_feerate_sat_per_1000_weight, weight); - let net_total_less_fees = - total_input_satoshis.saturating_sub(total_output_satoshis).saturating_sub(fees_sats); - if net_total_less_fees < our_funding_contribution_satoshis { + let contributed_fees = + Amount::from_sat(fee_for_weight(context.funding_feerate_sat_per_1000_weight, weight)); + + let contributed_input_value = + context.our_funding_contribution + total_output_value.to_signed().unwrap(); + assert!(contributed_input_value > SignedAmount::ZERO); + let contributed_input_value = contributed_input_value.unsigned_abs(); + + let total_input_value_less_fees = + total_input_value.checked_sub(contributed_fees).unwrap_or(Amount::ZERO); + if total_input_value_less_fees < contributed_input_value { // Not enough to cover contribution plus fees return Err(AbortReason::InsufficientFees); } - let remaining_value = net_total_less_fees.saturating_sub(our_funding_contribution_satoshis); - if remaining_value < change_output_dust_limit { + + let remaining_value = total_input_value_less_fees + 
.checked_sub(contributed_input_value) + .expect("remaining_value should not be negative"); + if remaining_value.to_sat() < change_output_dust_limit { // Enough to cover contribution plus fees, but leftover is below dust limit; no change Ok(None) } else { @@ -2420,7 +2422,6 @@ mod tests { OutPoint, PubkeyHash, ScriptBuf, Sequence, SignedAmount, Transaction, TxIn, TxOut, WPubkeyHash, }; - use core::ops::Deref; use super::{ get_output_weight, ConstructedTransaction, InteractiveTxSigningSession, TxInMetadata, @@ -2490,19 +2491,15 @@ mod tests { do_test_interactive_tx_constructor_internal(session, &&entropy_source); } - fn do_test_interactive_tx_constructor_with_entropy_source( + fn do_test_interactive_tx_constructor_with_entropy_source( session: TestSession, entropy_source: ES, - ) where - ES::Target: EntropySource, - { + ) { do_test_interactive_tx_constructor_internal(session, &entropy_source); } - fn do_test_interactive_tx_constructor_internal( + fn do_test_interactive_tx_constructor_internal( session: TestSession, entropy_source: &ES, - ) where - ES::Target: EntropySource, - { + ) { let channel_id = ChannelId(entropy_source.get_secure_random_bytes()); let funding_tx_locktime = AbsoluteLockTime::from_height(1337).unwrap(); let holder_node_id = PublicKey::from_secret_key( @@ -3440,14 +3437,14 @@ mod tests { total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some((gross_change - fees - common_fees).to_sat())), + Ok(Some(gross_change - fees - common_fees)), ); // There is leftover for change, without common fees let context = FundingNegotiationContext { is_initiator: false, ..context }; assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some((gross_change - fees).to_sat())), + Ok(Some(gross_change - fees)), ); // Insufficient inputs, no leftover @@ -3482,7 +3479,7 @@ mod tests { total_inputs - total_outputs - 
context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 100), - Ok(Some((gross_change - fees).to_sat())), + Ok(Some(gross_change - fees)), ); // Larger fee, smaller change @@ -3496,7 +3493,7 @@ mod tests { total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some((gross_change - fees * 3 - common_fees * 3).to_sat())), + Ok(Some(gross_change - fees * 3 - common_fees * 3)), ); } diff --git a/lightning/src/ln/interception_tests.rs b/lightning/src/ln/interception_tests.rs new file mode 100644 index 00000000000..c83ef177628 --- /dev/null +++ b/lightning/src/ln/interception_tests.rs @@ -0,0 +1,291 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Tests that test standing up a network of ChannelManagers, creating channels, sending +//! payments/messages between them, and often checking the resulting ChannelMonitors are able to +//! claim outputs on-chain. 
+ +use crate::events::{Event, HTLCHandlingFailureReason, HTLCHandlingFailureType}; +use crate::ln::channelmanager::PaymentId; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler}; +use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::RecipientOnionFields; +use crate::routing::router::PaymentParameters; +use crate::util::config::HTLCInterceptionFlags; + +use crate::prelude::*; + +use crate::ln::functional_test_utils::*; + +#[derive(Clone, Copy, PartialEq, Eq)] +enum ForwardingMod { + FeeTooLow, + CLTVBelowConfig, + CLTVBelowMin, +} + +fn do_test_htlc_interception_flags( + flags_bitmask: u8, flag: HTLCInterceptionFlags, modification: Option, +) { + use HTLCInterceptionFlags as Flag; + + assert_eq!((flag as isize).count_ones(), 1, "We can only test one type of HTLC at once"); + + // Tests that the `htlc_interception_flags` bitmask given by `flags_bitmask` correctly + // intercepts (or doesn't intercept) an HTLC which is of type `flag` + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + + let mut intercept_config = test_default_channel_config(); + intercept_config.htlc_interception_flags = flags_bitmask; + intercept_config.channel_config.forwarding_fee_base_msat = 1000; + intercept_config.channel_config.cltv_expiry_delta = 6 * 24; + intercept_config.accept_forwards_to_priv_channels = true; + + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_config), None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + // First open the right type of channel (and get it in the right state) for the bit we're + // testing. 
+ let (target_scid, target_chan_id) = match flag { + Flag::ToOfflinePrivateChannels | Flag::ToOnlinePrivateChannels => { + create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 0); + let chan_id = nodes[2].node.list_channels()[0].channel_id; + let scid = nodes[2].node.list_channels()[0].short_channel_id.unwrap(); + if flag == Flag::ToOfflinePrivateChannels { + nodes[1].node.peer_disconnected(node_2_id); + nodes[2].node.peer_disconnected(node_1_id); + } else { + assert_eq!(flag, Flag::ToOnlinePrivateChannels); + } + (scid, chan_id) + }, + Flag::ToInterceptSCIDs | Flag::ToPublicChannels | Flag::ToUnknownSCIDs => { + let (chan_upd, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2); + if flag == Flag::ToInterceptSCIDs { + (nodes[1].node.get_intercept_scid(), chan_id) + } else if flag == Flag::ToPublicChannels { + (chan_upd.contents.short_channel_id, chan_id) + } else if flag == Flag::ToUnknownSCIDs { + (42424242, chan_id) + } else { + panic!(); + } + }, + _ => panic!("Combined flags aren't allowed"), + }; + + // Start every node on the same block height to ensure we don't hit spurious CLTV issues + connect_blocks(&nodes[0], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + + // Send the HTLC from nodes[0] to nodes[1] and process it to generate the interception (if + // we're set to intercept it). 
+ let amt_msat = 100_000; + let bolt11 = nodes[2].node.create_bolt11_invoice(Default::default()).unwrap(); + let pay_params = PaymentParameters::from_bolt11_invoice(&bolt11); + let (mut route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], pay_params, amt_msat); + route.paths[0].hops[1].short_channel_id = target_scid; + + let interception_bit_match = (flags_bitmask & (flag as u8)) != 0; + match modification { + Some(ForwardingMod::FeeTooLow) => { + assert!( + interception_bit_match, + "No reason to test failing if we aren't trying to intercept", + ); + route.paths[0].hops[0].fee_msat = 500; + }, + Some(ForwardingMod::CLTVBelowConfig) => { + route.paths[0].hops[0].cltv_expiry_delta = 6 * 12; + assert!( + interception_bit_match, + "No reason to test failing if we aren't trying to intercept", + ); + }, + Some(ForwardingMod::CLTVBelowMin) => { + route.paths[0].hops[0].cltv_expiry_delta = 6; + }, + None => {}, + } + + let onion = RecipientOnionFields::secret_only(payment_secret); + let payment_id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let payment_event = SendEvent::from_node(&nodes[0]); + nodes[1].node.handle_update_add_htlc(node_0_id, &payment_event.msgs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, true); + expect_and_process_pending_htlcs(&nodes[1], false); + + if interception_bit_match && modification.is_none() { + // If we were set to intercept, check that we got an interception event then + // forward the HTLC on to nodes[2] and claim the payment. + let intercept_id; + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1, "{events:?}"); + if let Event::HTLCIntercepted { intercept_id: id, requested_next_hop_scid, .. 
} = &events[0] + { + assert_eq!(*requested_next_hop_scid, target_scid, + "Bitmask {flags_bitmask:#x}: Expected interception for bit {flag:?} to target SCID {target_scid}"); + intercept_id = *id; + } else { + panic!("{events:?}"); + } + + if flag == Flag::ToOfflinePrivateChannels { + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_args.send_channel_ready = (true, true); + reconnect_nodes(reconnect_args); + } + + nodes[1] + .node + .forward_intercepted_htlc(intercept_id, &target_chan_id, node_2_id, amt_msat) + .unwrap(); + expect_and_process_pending_htlcs(&nodes[1], false); + check_added_monitors(&nodes[1], 1); + + let forward_ev = SendEvent::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_1_id, &forward_ev.msgs[0]); + do_commitment_signed_dance(&nodes[2], &nodes[1], &forward_ev.commitment_msg, false, true); + + nodes[2].node.process_pending_htlc_forwards(); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + } else { + // If we were not set to intercept, check that the HTLC either failed or was + // automatically forwarded as appropriate. 
+ match (modification, flag) { + (None, Flag::ToOnlinePrivateChannels | Flag::ToPublicChannels) => { + check_added_monitors(&nodes[1], 1); + + let forward_ev = SendEvent::from_node(&nodes[1]); + assert_eq!(forward_ev.node_id, node_2_id); + nodes[2].node.handle_update_add_htlc(node_1_id, &forward_ev.msgs[0]); + let commitment = &forward_ev.commitment_msg; + do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, true); + + nodes[2].node.process_pending_htlc_forwards(); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + }, + _ => { + let events = nodes[1].node.get_and_clear_pending_events(); + let reason_from_mod = match modification { + Some(ForwardingMod::FeeTooLow) => Some(LocalHTLCFailureReason::FeeInsufficient), + Some(ForwardingMod::CLTVBelowConfig) => { + Some(LocalHTLCFailureReason::IncorrectCLTVExpiry) + }, + Some(ForwardingMod::CLTVBelowMin) => { + Some(LocalHTLCFailureReason::IncorrectCLTVExpiry) + }, + None => None, + }; + let (expected_failure_type, reason); + if flag == Flag::ToOfflinePrivateChannels { + expected_failure_type = HTLCHandlingFailureType::Forward { + node_id: Some(node_2_id), + channel_id: target_chan_id, + }; + reason = reason_from_mod.unwrap_or(LocalHTLCFailureReason::PeerOffline); + } else if flag == Flag::ToInterceptSCIDs { + expected_failure_type = HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: target_scid, + }; + reason = reason_from_mod.unwrap_or(LocalHTLCFailureReason::UnknownNextPeer); + } else if flag == Flag::ToUnknownSCIDs { + expected_failure_type = HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: target_scid, + }; + reason = reason_from_mod.unwrap_or(LocalHTLCFailureReason::UnknownNextPeer); + } else { + expected_failure_type = HTLCHandlingFailureType::Forward { + node_id: Some(node_2_id), + channel_id: target_chan_id, + }; + reason = reason_from_mod + .expect("We should only 
fail because of a mod or unknown next-hop"); + } + if let Event::HTLCHandlingFailed { failure_reason, failure_type, .. } = &events[0] { + assert_eq!(*failure_reason, Some(HTLCHandlingFailureReason::Local { reason })); + assert_eq!(*failure_type, expected_failure_type); + } else { + panic!("{events:?}"); + } + + check_added_monitors(&nodes[1], 1); + let fail_msgs = get_htlc_update_msgs(&nodes[1], &node_0_id); + nodes[0].node.handle_update_fail_htlc(node_1_id, &fail_msgs.update_fail_htlcs[0]); + let commitment = fail_msgs.commitment_signed; + do_commitment_signed_dance(&nodes[0], &nodes[1], &commitment, true, true); + expect_payment_failed!(nodes[0], payment_hash, false); + }, + } + } +} + +const MAX_BITMASK: u8 = HTLCInterceptionFlags::AllValidHTLCs as u8; +const ALL_FLAGS: [HTLCInterceptionFlags; 5] = [ + HTLCInterceptionFlags::ToInterceptSCIDs, + HTLCInterceptionFlags::ToOfflinePrivateChannels, + HTLCInterceptionFlags::ToOnlinePrivateChannels, + HTLCInterceptionFlags::ToPublicChannels, + HTLCInterceptionFlags::ToUnknownSCIDs, +]; + +#[test] +fn test_htlc_interception_flags() { + let mut all_flag_bits = 0; + for flag in ALL_FLAGS { + all_flag_bits |= flag as isize; + } + assert_eq!(all_flag_bits, MAX_BITMASK as isize, "all flags must test all bits"); + + // Test all 2^5 = 32 combinations of the HTLCInterceptionFlags bitmask + // For each combination, test 5 different HTLC forwards and verify correct interception behavior + for flags_bitmask in 0..=MAX_BITMASK { + for flag in ALL_FLAGS { + do_test_htlc_interception_flags(flags_bitmask, flag, None); + } + } +} + +#[test] +fn test_htlc_bad_for_chan_config() { + // Test that interception won't be done if an HTLC fails to meet the target channel's channel + // config. 
+ let have_chan_flags = [ + HTLCInterceptionFlags::ToOfflinePrivateChannels, + HTLCInterceptionFlags::ToOnlinePrivateChannels, + HTLCInterceptionFlags::ToPublicChannels, + ]; + for flag in have_chan_flags { + do_test_htlc_interception_flags(flag as u8, flag, Some(ForwardingMod::FeeTooLow)); + do_test_htlc_interception_flags(flag as u8, flag, Some(ForwardingMod::CLTVBelowConfig)); + } +} + +#[test] +fn test_htlc_bad_no_chan() { + // Test that setting the CLTV below the hard-coded minimum fails whether we're intercepting for + // a channel or not. + for flag in ALL_FLAGS { + do_test_htlc_interception_flags(flag as u8, flag, Some(ForwardingMod::CLTVBelowMin)); + } +} diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index 425cc4d7eb6..1503a9a3a63 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -18,11 +18,9 @@ use crate::sign::{EntropySource, NodeSigner, Recipient}; use crate::types::payment::PaymentHash; use crate::util::logger::{Logger, Record}; use alloc::collections::{btree_map, BTreeMap}; -use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; #[cfg(not(feature = "std"))] use core::iter::Iterator; -use core::ops::Deref; use core::time::Duration; /// Utility to create an invoice that can be paid to one of multiple nodes, or a "phantom invoice." @@ -67,17 +65,12 @@ use core::time::Duration; feature = "std", doc = "This can be used in a `no_std` environment, where [`std::time::SystemTime`] is not available and the current time is supplied by the caller." 
)] -pub fn create_phantom_invoice( +pub fn create_phantom_invoice( amt_msat: Option, payment_hash: Option, description: String, invoice_expiry_delta_secs: u32, phantom_route_hints: Vec, entropy_source: ES, node_signer: NS, logger: L, network: Currency, min_final_cltv_expiry_delta: Option, duration_since_epoch: Duration, -) -> Result> -where - ES::Target: EntropySource, - NS::Target: NodeSigner, - L::Target: Logger, -{ +) -> Result> { let description = Description::new(description).map_err(SignOrCreationError::CreationError)?; let description = Bolt11InvoiceDescription::Direct(description); _create_phantom_invoice::( @@ -135,17 +128,16 @@ where feature = "std", doc = "This version can be used in a `no_std` environment, where [`std::time::SystemTime`] is not available and the current time is supplied by the caller." )] -pub fn create_phantom_invoice_with_description_hash( +pub fn create_phantom_invoice_with_description_hash< + ES: EntropySource, + NS: NodeSigner, + L: Logger, +>( amt_msat: Option, payment_hash: Option, invoice_expiry_delta_secs: u32, description_hash: Sha256, phantom_route_hints: Vec, entropy_source: ES, node_signer: NS, logger: L, network: Currency, min_final_cltv_expiry_delta: Option, duration_since_epoch: Duration, -) -> Result> -where - ES::Target: EntropySource, - NS::Target: NodeSigner, - L::Target: Logger, -{ +) -> Result> { _create_phantom_invoice::( amt_msat, payment_hash, @@ -163,17 +155,12 @@ where const MAX_CHANNEL_HINTS: usize = 3; -fn _create_phantom_invoice( +fn _create_phantom_invoice( amt_msat: Option, payment_hash: Option, description: Bolt11InvoiceDescription, invoice_expiry_delta_secs: u32, phantom_route_hints: Vec, entropy_source: ES, node_signer: NS, logger: L, network: Currency, min_final_cltv_expiry_delta: Option, duration_since_epoch: Duration, -) -> Result> -where - ES::Target: EntropySource, - NS::Target: NodeSigner, - L::Target: Logger, -{ +) -> Result> { if phantom_route_hints.is_empty() { return 
Err(SignOrCreationError::CreationError(CreationError::MissingRouteHints)); } @@ -228,7 +215,7 @@ where let mut invoice = invoice .duration_since_epoch(duration_since_epoch) - .payment_hash(Hash::from_slice(&payment_hash.0).unwrap()) + .payment_hash(payment_hash) .payment_secret(payment_secret) .min_final_cltv_expiry_delta( // Add a buffer of 3 to the delta if present, otherwise use LDK's minimum. @@ -268,12 +255,9 @@ where /// * Select one hint from each node, up to three hints or until we run out of hints. /// /// [`PhantomKeysManager`]: crate::sign::PhantomKeysManager -fn select_phantom_hints( +fn select_phantom_hints( amt_msat: Option, phantom_route_hints: Vec, logger: L, -) -> impl Iterator -where - L::Target: Logger, -{ +) -> impl Iterator { let mut phantom_hints: Vec<_> = Vec::new(); for PhantomRouteHints { channels, phantom_scid, real_node_pubkey } in phantom_route_hints { @@ -369,12 +353,9 @@ fn rotate_through_iterators>(mut vecs: Vec) -> impl /// * Limited to a total of 3 channels. /// * Sorted by lowest inbound capacity if an online channel with the minimum amount requested exists, /// otherwise sort by highest inbound capacity to give the payment the best chance of succeeding. -pub(super) fn sort_and_filter_channels( +pub(super) fn sort_and_filter_channels( channels: Vec, min_inbound_capacity_msat: Option, logger: &L, -) -> impl ExactSizeIterator -where - L::Target: Logger, -{ +) -> impl ExactSizeIterator { let mut filtered_channels: BTreeMap = BTreeMap::new(); let min_inbound_capacity = min_inbound_capacity_msat.unwrap_or(0); let mut min_capacity_channel_exists = false; @@ -580,20 +561,14 @@ fn prefer_current_channel( } /// Adds relevant context to a [`Record`] before passing it to the wrapped [`Logger`]. -struct WithChannelDetails<'a, 'b, L: Deref> -where - L::Target: Logger, -{ +struct WithChannelDetails<'a, 'b, L: Logger> { /// The logger to delegate to after adding context to the record. 
logger: &'a L, /// The [`ChannelDetails`] for adding relevant context to the logged record. details: &'b ChannelDetails, } -impl<'a, 'b, L: Deref> Logger for WithChannelDetails<'a, 'b, L> -where - L::Target: Logger, -{ +impl<'a, 'b, L: Logger> Logger for WithChannelDetails<'a, 'b, L> { fn log(&self, mut record: Record) { record.peer_id = Some(self.details.counterparty.node_id); record.channel_id = Some(self.details.channel_id); @@ -601,10 +576,7 @@ where } } -impl<'a, 'b, L: Deref> WithChannelDetails<'a, 'b, L> -where - L::Target: Logger, -{ +impl<'a, 'b, L: Logger> WithChannelDetails<'a, 'b, L> { fn from(logger: &'a L, details: &'b ChannelDetails) -> Self { Self { logger, details } } @@ -615,19 +587,21 @@ mod test { use super::*; use crate::chain::channelmonitor::HTLC_FAIL_BACK_BUFFER; use crate::ln::channelmanager::{ - Bolt11InvoiceParameters, PaymentId, PhantomRouteHints, RecipientOnionFields, Retry, + Bolt11InvoiceParameters, OptionalBolt11PaymentParams, PaymentId, PhantomRouteHints, MIN_FINAL_CLTV_EXPIRY_DELTA, }; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; - use crate::routing::router::{PaymentParameters, RouteParameters}; + use crate::ln::outbound_payment::RecipientCustomTlvs; + use crate::ln::outbound_payment::{RecipientOnionFields, Retry}; + use crate::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; use crate::sign::PhantomKeysManager; use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::config::UserConfig; use crate::util::dyn_signer::{DynKeysInterface, DynPhantomKeysInterface}; use crate::util::test_utils; use bitcoin::hashes::sha256::Hash as Sha256; - use bitcoin::hashes::{sha256, Hash}; + use bitcoin::hashes::Hash; use bitcoin::network::Network; use core::time::Duration; use lightning_invoice::{ @@ -663,26 +637,26 @@ mod test { } #[test] - fn create_and_pay_for_bolt11_invoice() { + fn 
create_and_pay_for_bolt11_invoice_with_custom_tlvs() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); - let node_a_id = nodes[0].node.get_our_node_id(); - + let amt_msat = 10_000; let description = Bolt11InvoiceDescription::Direct(Description::new("test".to_string()).unwrap()); let non_default_invoice_expiry_secs = 4200; + let invoice_params = Bolt11InvoiceParameters { - amount_msats: Some(10_000), + amount_msats: Some(amt_msat), description, invoice_expiry_delta_secs: Some(non_default_invoice_expiry_secs), ..Default::default() }; let invoice = nodes[1].node.create_bolt11_invoice(invoice_params).unwrap(); - assert_eq!(invoice.amount_milli_satoshis(), Some(10_000)); + assert_eq!(invoice.amount_milli_satoshis(), Some(amt_msat)); // If no `min_final_cltv_expiry_delta` is specified, then it should be `MIN_FINAL_CLTV_EXPIRY_DELTA`. assert_eq!(invoice.min_final_cltv_expiry_delta(), MIN_FINAL_CLTV_EXPIRY_DELTA as u64); assert_eq!( @@ -694,6 +668,10 @@ mod test { Duration::from_secs(non_default_invoice_expiry_secs.into()) ); + let (payment_hash, payment_secret) = (invoice.payment_hash(), *invoice.payment_secret()); + + let preimage = nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(); + // Invoice SCIDs should always use inbound SCID aliases over the real channel ID, if one is // available. 
let chan = &nodes[1].node.list_usable_channels()[0]; @@ -707,21 +685,34 @@ mod test { assert_eq!(invoice.route_hints()[0].0[0].htlc_minimum_msat, chan.inbound_htlc_minimum_msat); assert_eq!(invoice.route_hints()[0].0[0].htlc_maximum_msat, chan.inbound_htlc_maximum_msat); - let retry = Retry::Attempts(0); + let custom_tlvs = RecipientCustomTlvs::new(vec![(65537, vec![42; 42])]).unwrap(); + let optional_params = OptionalBolt11PaymentParams { + custom_tlvs: custom_tlvs.clone(), + route_params_config: RouteParametersConfig::default(), + retry_strategy: Retry::Attempts(0), + }; + nodes[0] .node - .pay_for_bolt11_invoice(&invoice, PaymentId([42; 32]), None, Default::default(), retry) + .pay_for_bolt11_invoice(&invoice, PaymentId([42; 32]), None, optional_params) .unwrap(); check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - let payment_event = SendEvent::from_event(events.remove(0)); - nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors(&nodes[1], 1); - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); + let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + + let path = &[&nodes[1]]; + let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, ev) + .with_payment_preimage(preimage) + .with_payment_secret(payment_secret) + .with_custom_tlvs(custom_tlvs.clone().into_inner()); + + do_pass_along_path(args); + claim_payment_along_route( + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage) + .with_custom_tlvs(custom_tlvs.into_inner()), + ); } fn do_create_invoice_min_final_cltv_delta(with_custom_delta: bool) { @@ -829,7 +820,7 @@ mod test { invoice.description(), Bolt11InvoiceDescriptionRef::Direct(&Description::new("test".to_string()).unwrap()) ); - 
assert_eq!(invoice.payment_hash(), &sha256::Hash::from_slice(&payment_hash.0[..]).unwrap()); + assert_eq!(invoice.payment_hash(), payment_hash); } #[cfg(not(feature = "std"))] @@ -1061,7 +1052,7 @@ mod test { .create_channel(node_a_id, 1_000_000, 500_000_000, 42, None, Some(private_chan_cfg)) .unwrap(); let open_channel = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, node_a_id); - nodes[0].node.handle_open_channel(node_c_id, &open_channel); + handle_and_accept_open_channel(&nodes[0], node_c_id, &open_channel); let accept_channel = get_event_msg!(nodes[0], MessageSendEvent::SendAcceptChannel, node_c_id); nodes[2].node.handle_accept_channel(node_a_id, &accept_channel); @@ -1134,7 +1125,8 @@ mod test { fn test_channels_with_lower_inbound_capacity_than_invoice_amt_hints_filtering() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(legacy_cfg), None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let chan_1_0 = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 0, 100_000, 0); let chan_2_0 = create_unannounced_chan_between_nodes_with_value(&nodes, 2, 0, 1_000_000, 0); @@ -1257,8 +1249,8 @@ mod test { Duration::from_secs(genesis_timestamp), ) .unwrap(); - let (payment_hash, payment_secret) = - (PaymentHash(invoice.payment_hash().to_byte_array()), *invoice.payment_secret()); + let payment_hash = invoice.payment_hash(); + let payment_secret = *invoice.payment_secret(); let payment_preimage = if user_generated_pmt_hash { user_payment_preimage } else { @@ -1290,7 +1282,7 @@ mod test { invoice.amount_milli_satoshis().unwrap(), ); - let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); + let payment_hash = invoice.payment_hash(); let id = PaymentId(payment_hash.0); let onion = 
RecipientOnionFields::secret_only(*invoice.payment_secret()); nodes[0].node.send_payment(payment_hash, onion, id, params, Retry::Attempts(0)).unwrap(); @@ -1592,7 +1584,7 @@ mod test { .create_channel(node_d_id, 1_000_000, 500_000_000, 42, None, Some(private_chan_cfg)) .unwrap(); let open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_d_id); - nodes[3].node.handle_open_channel(nodes[1].node.get_our_node_id(), &open_channel); + handle_and_accept_open_channel(&nodes[3], node_b_id, &open_channel); let accept_channel = get_event_msg!(nodes[3], MessageSendEvent::SendAcceptChannel, node_b_id); nodes[1].node.handle_accept_channel(nodes[3].node.get_our_node_id(), &accept_channel); @@ -1740,7 +1732,9 @@ mod test { chanmon_cfgs[1].keys_manager.backing = make_dyn_keys_interface(&seed_1); chanmon_cfgs[2].keys_manager.backing = make_dyn_keys_interface(&seed_2); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(4, &node_cfgs, &[Some(legacy_cfg), None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); let chan_0_2 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0); diff --git a/lightning/src/ln/max_payment_path_len_tests.rs b/lightning/src/ln/max_payment_path_len_tests.rs index f67ad442c29..b947273115e 100644 --- a/lightning/src/ln/max_payment_path_len_tests.rs +++ b/lightning/src/ln/max_payment_path_len_tests.rs @@ -23,7 +23,9 @@ use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, OnionMessageHandler}; use crate::ln::onion_utils; use crate::ln::onion_utils::MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY; -use crate::ln::outbound_payment::{RecipientOnionFields, Retry, RetryableSendFailure}; +use crate::ln::outbound_payment::{ + RecipientCustomTlvs, RecipientOnionFields, Retry, RetryableSendFailure, +}; use 
crate::prelude::*; use crate::routing::router::{ PaymentParameters, RouteParameters, DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, @@ -92,7 +94,7 @@ fn large_payment_metadata() { .node .send_payment(payment_hash, max_sized_onion.clone(), id, route_params, Retry::Attempts(0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1]]; @@ -174,7 +176,7 @@ fn large_payment_metadata() { .node .send_payment(payment_hash_2, onion_allowing_2_hops, id, route_params, Retry::Attempts(0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1], &nodes[2]]; @@ -259,9 +261,9 @@ fn one_hop_blinded_path_with_custom_tlv() { - final_payload_len_without_custom_tlv; // Check that we can send the maximum custom TLV with 1 blinded hop. - let max_sized_onion = RecipientOnionFields::spontaneous_empty() - .with_custom_tlvs(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])]) - .unwrap(); + let max_sized_onion = RecipientOnionFields::spontaneous_empty().with_custom_tlvs( + RecipientCustomTlvs::new(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])]).unwrap(), + ); let id = PaymentId(payment_hash.0); let no_retry = Retry::Attempts(0); nodes[1] @@ -385,9 +387,9 @@ fn blinded_path_with_custom_tlv() { - reserved_packet_bytes_without_custom_tlv; // Check that we can send the maximum custom TLV size with 0 intermediate unblinded hops. 
- let max_sized_onion = RecipientOnionFields::spontaneous_empty() - .with_custom_tlvs(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])]) - .unwrap(); + let max_sized_onion = RecipientOnionFields::spontaneous_empty().with_custom_tlvs( + RecipientCustomTlvs::new(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])]).unwrap(), + ); let no_retry = Retry::Attempts(0); let id = PaymentId(payment_hash.0); nodes[1] diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index 9473142cfed..d6e0b92f1d0 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -41,26 +41,19 @@ pub mod channel; #[cfg(not(fuzzing))] pub(crate) mod channel; -pub(crate) mod onion_utils; -mod outbound_payment; +pub mod onion_utils; +pub mod outbound_payment; pub mod wire; #[allow(dead_code)] // TODO(dual_funding): Remove once contribution to V2 channels is enabled. pub(crate) mod interactivetxs; -pub use onion_utils::{create_payment_onion, LocalHTLCFailureReason}; // Older rustc (which we support) refuses to let us call the get_payment_preimage_hash!() macro // without the node parameter being mut. This is incorrect, and thus newer rustcs will complain // about an unnecessary mut. Thus, we silence the unused_mut warning in two test modules below. 
-#[cfg(fuzzing)] -pub use onion_utils::decode_fulfill_attribution_data; -#[cfg(fuzzing)] -pub use onion_utils::process_onion_failure; - -#[cfg(fuzzing)] -pub use onion_utils::AttributionData; - +#[cfg(test)] +mod accountable_tests; #[cfg(test)] #[allow(unused_mut)] mod async_payments_tests; @@ -91,6 +84,9 @@ pub mod functional_tests; #[cfg(any(test, feature = "_externalize_tests"))] #[allow(unused_mut)] pub mod htlc_reserve_unit_tests; +#[cfg(any(test, feature = "_externalize_tests"))] +#[allow(unused_mut)] +pub mod interception_tests; #[cfg(test)] #[allow(unused_mut)] mod max_payment_path_len_tests; diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 34064ebb484..fd33ec217ca 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -21,7 +21,8 @@ use crate::events::{Event, ClosureReason, HTLCHandlingFailureType}; use crate::ln::channel; use crate::ln::types::ChannelId; use crate::ln::chan_utils; -use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId, RecipientOnionFields}; +use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId}; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::crypto::utils::sign; use crate::util::ser::Writeable; @@ -68,7 +69,7 @@ fn chanmon_fail_from_stale_commitment() { let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let bs_txn = get_local_commitment_txn!(nodes[1], chan_id_2); @@ -78,19 +79,19 @@ fn chanmon_fail_from_stale_commitment() { expect_and_process_pending_htlcs(&nodes[1], false); get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); - check_added_monitors!(nodes[1], 
1); + check_added_monitors(&nodes[1], 1); // Don't bother delivering the new HTLC add/commits, instead confirming the pre-HTLC commitment // transaction for nodes[1]. mine_transaction(&nodes[1], &bs_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[2].node.get_our_node_id()], 100000); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let fail_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]); @@ -140,7 +141,7 @@ fn revoked_output_htlc_resolution_timing() { // Confirm the revoked commitment transaction, closing the channel. mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); // Two justice transactions will be broadcast, one on the unpinnable, revoked to_self output, @@ -174,7 +175,7 @@ fn archive_fully_resolved_monitors() { // Test we archive fully resolved channel monitors at the right time. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let mut user_config = test_default_channel_config(); + let mut user_config = test_legacy_channel_config(); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -185,7 +186,7 @@ fn archive_fully_resolved_monitors() { let message = "Channel force-closed".to_owned(); nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1_000_000); @@ -318,10 +319,8 @@ fn do_chanmon_claim_value_coop_close(keyed_anchors: bool, p2a_anchor: bool) { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - user_config.manually_accept_inbound_channels = true; user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -472,7 +471,6 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - 
user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -565,18 +563,18 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 3_000_100); let mut b_htlc_msgs = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); // We claim the dust payment here as well, but it won't impact our claimable balances as its // dust and thus doesn't appear on chain at all. nodes[1].node.claim_funds(dust_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], dust_payment_hash, 3_000); nodes[1].node.claim_funds(timeout_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], timeout_payment_hash, 4_000_200); if prev_commitment_tx { @@ -585,14 +583,14 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_fulfill); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &b_htlc_msgs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_raa, as_cs) = get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); let _htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - 
check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs); let _bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } // Once B has received the payment preimage, it includes the value of the HTLC in its @@ -681,11 +679,11 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c assert_eq!(remote_txn[0].output[b_broadcast_txn[1].input[0].previous_output.vout as usize].value.to_sat(), 4_000); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); assert!(nodes[0].node.list_channels().is_empty()); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); @@ -871,7 +869,6 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -885,7 +882,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let htlc_cltv_timeout = 
nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); @@ -897,7 +894,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 20_000_000); nodes[0].node.send_payment_with_route(route_2, payment_hash_2, RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); @@ -907,7 +904,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 20_000_000); nodes[1].node.claim_funds(payment_preimage_2); get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_2, 20_000_000); let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64; @@ -918,7 +915,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let message = "Channel force-closed".to_owned(); let node_a_commitment_claimable = nodes[0].best_block_info().1 + BREAKDOWN_TIMEOUT as u32; nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, 
&nodes[1].node.get_our_node_id(), message.clone()).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1000000); @@ -980,7 +977,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b // Get nodes[1]'s HTLC claim tx for the second HTLC mine_transaction(&nodes[1], &commitment_tx); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_htlc_claim_txn.len(), 1); @@ -1124,7 +1121,8 @@ fn test_no_preimage_inbound_htlc_balances() { // have a preimage. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); @@ -1208,9 +1206,9 @@ fn test_no_preimage_inbound_htlc_balances() { }, a_received_htlc_balance.clone(), a_sent_htlc_balance.clone()]); mine_transaction(&nodes[0], &as_txn[0]); - nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[0].tx_broadcaster.clear(); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); assert_eq!(as_pre_spend_claims, @@ -1218,7 +1216,7 @@ fn test_no_preimage_inbound_htlc_balances() { mine_transaction(&nodes[1], &as_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); let node_b_commitment_claimable = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -1255,7 +1253,7 @@ fn test_no_preimage_inbound_htlc_balances() { bs_pre_spend_claims.retain(|e| if let Balance::ClaimableAwaitingConfirmations { .. 
} = e { false } else { true }); // The next few blocks for B look the same as for A, though for the opposite HTLC - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[1].tx_broadcaster.clear(); connect_blocks(&nodes[1], TEST_FINAL_CLTV - (ANTI_REORG_DELAY - 1)); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash: to_b_failed_payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); @@ -1382,7 +1380,6 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1427,12 +1424,12 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc nodes[1].node.claim_funds(claimed_payment_preimage); expect_payment_claimed!(nodes[1], claimed_payment_hash, 3_000_100); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let _b_htlc_msgs = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); connect_blocks(&nodes[0], htlc_cltv_timeout + 1 - 10); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 5); @@ -1461,7 +1458,7 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc connect_blocks(&nodes[1], htlc_cltv_timeout + 1 - 10); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); 
check_closed_events(&nodes[1], &[ExpectedCloseEvent { channel_capacity_sats: Some(1_000_000), channel_id: Some(chan_id), @@ -1686,7 +1683,6 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1723,7 +1719,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor // B will generate an HTLC-Success from its revoked commitment tx mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); if keyed_anchors || p2a_anchor { handle_bump_htlc_event(&nodes[1], 1); @@ -1767,7 +1763,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor // A will generate justice tx from B's revoked commitment/HTLC tx mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); let to_remote_conf_height = nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -1976,7 +1972,6 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = 
keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -2020,7 +2015,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho nodes[0].node.claim_funds(claimed_payment_preimage); expect_payment_claimed!(nodes[0], claimed_payment_hash, 3_000_100); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let _a_htlc_msgs = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose { @@ -2049,7 +2044,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho mine_transaction(&nodes[1], &as_revoked_txn[0]); check_closed_broadcast!(nodes[1], true); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut claim_txn = nodes[1].tx_broadcaster.txn_broadcast(); assert_eq!(claim_txn.len(), 2); @@ -2208,7 +2203,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho let spendable_output_events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(spendable_output_events.len(), 2); for event in spendable_output_events { - if let Event::SpendableOutputs { outputs, channel_id: _ } = event { + if let Event::SpendableOutputs { outputs, channel_id: _, counterparty_node_id: _ } = event { assert_eq!(outputs.len(), 1); let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs( &[&outputs[0]], Vec::new(), ScriptBuf::new_op_return(&[]), 253, None, &Secp256k1::new(), @@ -2253,7 +2248,6 @@ fn 
do_test_claimable_balance_correct_while_payment_pending(outbound_payment: boo let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(user_config.clone()), Some(user_config.clone()), Some(user_config)]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -2324,7 +2318,8 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let node_deserialized; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -2401,7 +2396,6 @@ fn do_test_monitor_rebroadcast_pending_claims(keyed_anchors: bool, p2a_anchor: b let mut config = test_default_channel_config(); config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -2527,7 +2521,6 @@ fn do_test_yield_anchors_events(have_htlcs: bool, p2a_anchor: bool) { anchors_config.channel_handshake_config.announce_for_forwarding = true; anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; anchors_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - anchors_config.manually_accept_inbound_channels = true; let node_chanmgrs = 
create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -2635,9 +2628,9 @@ fn do_test_yield_anchors_events(have_htlcs: bool, p2a_anchor: bool) { } mine_transactions(&nodes[0], &[&commitment_tx, &anchor_tx]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); mine_transactions(&nodes[1], &[&commitment_tx, &anchor_tx]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if !have_htlcs { // If we don't have any HTLCs, we're done, the rest of the test is about HTLC transactions @@ -2729,7 +2722,6 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { let mut anchors_config = test_default_channel_config(); anchors_config.channel_handshake_config.announce_for_forwarding = true; - anchors_config.manually_accept_inbound_channels = true; anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; anchors_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]); @@ -2828,7 +2820,7 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { } } check_closed_broadcast(&nodes[0], 2, true); - check_added_monitors!(&nodes[0], 2); + check_added_monitors(&nodes[0], 2); check_closed_event(&nodes[0], 2, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id(); 2], 1000000); // Alice should detect the confirmed revoked commitments, and attempt to claim all of the @@ -2991,7 +2983,7 @@ fn do_test_anchors_aggregated_revoked_htlc_tx(p2a_anchor: bool) { // - 1 static to_remote output. 
assert_eq!(spendable_output_events.len(), 4); for event in spendable_output_events { - if let Event::SpendableOutputs { outputs, channel_id } = event { + if let Event::SpendableOutputs { outputs, channel_id, counterparty_node_id: _ } = event { assert_eq!(outputs.len(), 1); assert!([chan_b.2, chan_a.2].contains(&channel_id.unwrap())); let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs( @@ -3036,7 +3028,6 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c let chain_monitor; let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - user_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config.clone())]); let node_deserialized; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -3127,7 +3118,6 @@ fn do_test_monitor_claims_with_random_signatures(keyed_anchors: bool, p2a_anchor let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; user_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - user_config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config.clone()), Some(user_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -3167,13 +3157,13 @@ fn do_test_monitor_claims_with_random_signatures(keyed_anchors: bool, p2a_anchor mine_transaction(closing_node, anchor_tx.as_ref().unwrap()); } check_closed_broadcast!(closing_node, true); - check_added_monitors!(closing_node, 1); + check_added_monitors(&closing_node, 1); let message = "ChannelMonitor-initiated commitment transaction broadcast".to_string(); check_closed_event(&closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }, 
&[other_node.node.get_our_node_id()], 1_000_000); mine_transaction(other_node, &commitment_tx); check_closed_broadcast!(other_node, true); - check_added_monitors!(other_node, 1); + check_added_monitors(&other_node, 1); check_closed_event(&other_node, 1, ClosureReason::CommitmentTxConfirmed, &[closing_node.node.get_our_node_id()], 1_000_000); // If we update the best block to the new height before providing the confirmed transactions, @@ -3418,7 +3408,6 @@ fn do_test_lost_preimage_monitor_events(on_counterparty_tx: bool, p2a_anchor: bo // Here we test that losing `MonitorEvent`s that contain HTLC resolution preimages does not // cause us to lose funds or miss a `PaymentSent` event. let mut cfg = test_default_channel_config(); - cfg.manually_accept_inbound_channels = true; cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; let cfgs = [Some(cfg.clone()), Some(cfg.clone()), Some(cfg.clone())]; @@ -3600,7 +3589,6 @@ fn do_test_lost_timeout_monitor_events(confirm_tx: CommitmentType, dust_htlcs: b // Here we test that losing `MonitorEvent`s that contain HTLC resolution via timeouts does not // cause us to lose a `PaymentFailed` event. let mut cfg = test_default_channel_config(); - cfg.manually_accept_inbound_channels = true; cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; let cfgs = [Some(cfg.clone()), Some(cfg.clone()), Some(cfg.clone())]; @@ -3844,7 +3832,8 @@ fn test_ladder_preimage_htlc_claims() { // already claimed) resulting in an invalid claim transaction. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_id_0 = nodes[0].node.get_our_node_id(); diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 8e230fab1d9..67f7807a487 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -32,7 +32,7 @@ use bitcoin::secp256k1::PublicKey; use bitcoin::{secp256k1, Transaction, Witness}; use crate::blinded_path::message::BlindedMessagePath; -use crate::blinded_path::payment::{BlindedPaymentTlvs, ForwardTlvs, ReceiveTlvs}; +use crate::blinded_path::payment::{BlindedPaymentTlvs, DummyTlvs, ForwardTlvs, ReceiveTlvs}; use crate::blinded_path::payment::{BlindedTrampolineTlvs, TrampolineForwardTlvs}; use crate::ln::onion_utils; use crate::ln::types::ChannelId; @@ -768,6 +768,45 @@ pub struct UpdateAddHTLC { /// /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc pub hold_htlc: Option<()>, + /// An experimental field indicating whether the receiving node's reputation would be held + /// accountable for the timely resolution of the HTLC. + /// + /// Note that this field is [`experimental`] so should not be used for forwarding decisions. 
+ /// + /// [`experimental`]: https://github.com/lightning/blips/blob/master/blip-0004.md + pub accountable: Option, +} + +struct AccountableBool(T); + +impl Writeable for AccountableBool { + #[inline] + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + let wire_value = if self.0 { 7u8 } else { 0u8 }; + writer.write_all(&[wire_value]) + } +} + +impl Readable for AccountableBool { + #[inline] + fn read(reader: &mut R) -> Result, DecodeError> { + let mut buf = [0u8; 1]; + reader.read_exact(&mut buf)?; + let bool_value = buf[0] == 7; + Ok(AccountableBool(bool_value)) + } +} + +impl From for AccountableBool { + fn from(val: bool) -> Self { + Self(val) + } +} + +impl From> for bool { + fn from(val: AccountableBool) -> Self { + val.0 + } } /// An [`onion message`] to be sent to or received from a peer. @@ -1857,6 +1896,7 @@ pub enum MessageSendEvent { msg: ClosingSigned, }, /// Used to indicate that a `closing_complete` message should be sent to the peer with the given `node_id`. + #[cfg(simple_close)] SendClosingComplete { /// The node_id of the node which should receive this message node_id: PublicKey, @@ -1864,6 +1904,7 @@ pub enum MessageSendEvent { msg: ClosingComplete, }, /// Used to indicate that a `closing_sig` message should be sent to the peer with the given `node_id`. 
+ #[cfg(simple_close)] SendClosingSig { /// The node_id of the node which should receive this message node_id: PublicKey, @@ -2034,6 +2075,26 @@ pub trait BaseMessageHandler { -> Result<(), ()>; } +impl> BaseMessageHandler for B { + fn get_and_clear_pending_msg_events(&self) -> Vec { + self.deref().get_and_clear_pending_msg_events() + } + fn peer_disconnected(&self, their_node_id: PublicKey) { + self.deref().peer_disconnected(their_node_id) + } + fn provided_node_features(&self) -> NodeFeatures { + self.deref().provided_node_features() + } + fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures { + self.deref().provided_init_features(their_node_id) + } + fn peer_connected( + &self, their_node_id: PublicKey, msg: &Init, inbound: bool, + ) -> Result<(), ()> { + self.deref().peer_connected(their_node_id, msg, inbound) + } +} + /// A trait to describe an object which can receive channel messages. /// /// Messages MAY be called in parallel when they originate from different `their_node_ids`, however @@ -2174,6 +2235,137 @@ pub trait ChannelMessageHandler: BaseMessageHandler { fn message_received(&self); } +impl> ChannelMessageHandler for C { + fn handle_open_channel(&self, their_node_id: PublicKey, msg: &OpenChannel) { + self.deref().handle_open_channel(their_node_id, msg) + } + fn handle_open_channel_v2(&self, their_node_id: PublicKey, msg: &OpenChannelV2) { + self.deref().handle_open_channel_v2(their_node_id, msg) + } + fn handle_accept_channel(&self, their_node_id: PublicKey, msg: &AcceptChannel) { + self.deref().handle_accept_channel(their_node_id, msg) + } + fn handle_accept_channel_v2(&self, their_node_id: PublicKey, msg: &AcceptChannelV2) { + self.deref().handle_accept_channel_v2(their_node_id, msg) + } + fn handle_funding_created(&self, their_node_id: PublicKey, msg: &FundingCreated) { + self.deref().handle_funding_created(their_node_id, msg) + } + fn handle_funding_signed(&self, their_node_id: PublicKey, msg: &FundingSigned) { + 
self.deref().handle_funding_signed(their_node_id, msg) + } + fn handle_channel_ready(&self, their_node_id: PublicKey, msg: &ChannelReady) { + self.deref().handle_channel_ready(their_node_id, msg) + } + fn handle_peer_storage(&self, their_node_id: PublicKey, msg: PeerStorage) { + self.deref().handle_peer_storage(their_node_id, msg) + } + fn handle_peer_storage_retrieval(&self, their_node_id: PublicKey, msg: PeerStorageRetrieval) { + self.deref().handle_peer_storage_retrieval(their_node_id, msg) + } + fn handle_shutdown(&self, their_node_id: PublicKey, msg: &Shutdown) { + self.deref().handle_shutdown(their_node_id, msg) + } + fn handle_closing_signed(&self, their_node_id: PublicKey, msg: &ClosingSigned) { + self.deref().handle_closing_signed(their_node_id, msg) + } + #[cfg(simple_close)] + fn handle_closing_complete(&self, their_node_id: PublicKey, msg: ClosingComplete) { + self.deref().handle_closing_complete(their_node_id, msg) + } + #[cfg(simple_close)] + fn handle_closing_sig(&self, their_node_id: PublicKey, msg: ClosingSig) { + self.deref().handle_closing_sig(their_node_id, msg) + } + fn handle_stfu(&self, their_node_id: PublicKey, msg: &Stfu) { + self.deref().handle_stfu(their_node_id, msg) + } + fn handle_splice_init(&self, their_node_id: PublicKey, msg: &SpliceInit) { + self.deref().handle_splice_init(their_node_id, msg) + } + fn handle_splice_ack(&self, their_node_id: PublicKey, msg: &SpliceAck) { + self.deref().handle_splice_ack(their_node_id, msg) + } + fn handle_splice_locked(&self, their_node_id: PublicKey, msg: &SpliceLocked) { + self.deref().handle_splice_locked(their_node_id, msg) + } + fn handle_tx_add_input(&self, their_node_id: PublicKey, msg: &TxAddInput) { + self.deref().handle_tx_add_input(their_node_id, msg) + } + fn handle_tx_add_output(&self, their_node_id: PublicKey, msg: &TxAddOutput) { + self.deref().handle_tx_add_output(their_node_id, msg) + } + fn handle_tx_remove_input(&self, their_node_id: PublicKey, msg: &TxRemoveInput) { + 
self.deref().handle_tx_remove_input(their_node_id, msg) + } + fn handle_tx_remove_output(&self, their_node_id: PublicKey, msg: &TxRemoveOutput) { + self.deref().handle_tx_remove_output(their_node_id, msg) + } + fn handle_tx_complete(&self, their_node_id: PublicKey, msg: &TxComplete) { + self.deref().handle_tx_complete(their_node_id, msg) + } + fn handle_tx_signatures(&self, their_node_id: PublicKey, msg: &TxSignatures) { + self.deref().handle_tx_signatures(their_node_id, msg) + } + fn handle_tx_init_rbf(&self, their_node_id: PublicKey, msg: &TxInitRbf) { + self.deref().handle_tx_init_rbf(their_node_id, msg) + } + fn handle_tx_ack_rbf(&self, their_node_id: PublicKey, msg: &TxAckRbf) { + self.deref().handle_tx_ack_rbf(their_node_id, msg) + } + fn handle_tx_abort(&self, their_node_id: PublicKey, msg: &TxAbort) { + self.deref().handle_tx_abort(their_node_id, msg) + } + fn handle_update_add_htlc(&self, their_node_id: PublicKey, msg: &UpdateAddHTLC) { + self.deref().handle_update_add_htlc(their_node_id, msg) + } + fn handle_update_fulfill_htlc(&self, their_node_id: PublicKey, msg: UpdateFulfillHTLC) { + self.deref().handle_update_fulfill_htlc(their_node_id, msg) + } + fn handle_update_fail_htlc(&self, their_node_id: PublicKey, msg: &UpdateFailHTLC) { + self.deref().handle_update_fail_htlc(their_node_id, msg) + } + fn handle_update_fail_malformed_htlc( + &self, their_node_id: PublicKey, msg: &UpdateFailMalformedHTLC, + ) { + self.deref().handle_update_fail_malformed_htlc(their_node_id, msg) + } + fn handle_commitment_signed(&self, their_node_id: PublicKey, msg: &CommitmentSigned) { + self.deref().handle_commitment_signed(their_node_id, msg) + } + fn handle_commitment_signed_batch( + &self, their_node_id: PublicKey, channel_id: ChannelId, batch: Vec, + ) { + self.deref().handle_commitment_signed_batch(their_node_id, channel_id, batch) + } + fn handle_revoke_and_ack(&self, their_node_id: PublicKey, msg: &RevokeAndACK) { + self.deref().handle_revoke_and_ack(their_node_id, 
msg) + } + fn handle_update_fee(&self, their_node_id: PublicKey, msg: &UpdateFee) { + self.deref().handle_update_fee(their_node_id, msg) + } + fn handle_announcement_signatures( + &self, their_node_id: PublicKey, msg: &AnnouncementSignatures, + ) { + self.deref().handle_announcement_signatures(their_node_id, msg) + } + fn handle_channel_reestablish(&self, their_node_id: PublicKey, msg: &ChannelReestablish) { + self.deref().handle_channel_reestablish(their_node_id, msg) + } + fn handle_channel_update(&self, their_node_id: PublicKey, msg: &ChannelUpdate) { + self.deref().handle_channel_update(their_node_id, msg) + } + fn handle_error(&self, their_node_id: PublicKey, msg: &ErrorMessage) { + self.deref().handle_error(their_node_id, msg) + } + fn get_chain_hashes(&self) -> Option> { + self.deref().get_chain_hashes() + } + fn message_received(&self) { + self.deref().message_received() + } +} + /// A trait to describe an object which can receive routing messages. /// /// # Implementor DoS Warnings @@ -2248,6 +2440,57 @@ pub trait RoutingMessageHandler: BaseMessageHandler { fn processing_queue_high(&self) -> bool; } +impl> RoutingMessageHandler for R { + fn handle_node_announcement( + &self, their_node_id: Option, msg: &NodeAnnouncement, + ) -> Result { + self.deref().handle_node_announcement(their_node_id, msg) + } + fn handle_channel_announcement( + &self, their_node_id: Option, msg: &ChannelAnnouncement, + ) -> Result { + self.deref().handle_channel_announcement(their_node_id, msg) + } + fn handle_channel_update( + &self, their_node_id: Option, msg: &ChannelUpdate, + ) -> Result, LightningError> { + self.deref().handle_channel_update(their_node_id, msg) + } + fn get_next_channel_announcement( + &self, starting_point: u64, + ) -> Option<(ChannelAnnouncement, Option, Option)> { + self.deref().get_next_channel_announcement(starting_point) + } + fn get_next_node_announcement( + &self, starting_point: Option<&NodeId>, + ) -> Option { + 
self.deref().get_next_node_announcement(starting_point) + } + fn handle_reply_channel_range( + &self, their_node_id: PublicKey, msg: ReplyChannelRange, + ) -> Result<(), LightningError> { + self.deref().handle_reply_channel_range(their_node_id, msg) + } + fn handle_reply_short_channel_ids_end( + &self, their_node_id: PublicKey, msg: ReplyShortChannelIdsEnd, + ) -> Result<(), LightningError> { + self.deref().handle_reply_short_channel_ids_end(their_node_id, msg) + } + fn handle_query_channel_range( + &self, their_node_id: PublicKey, msg: QueryChannelRange, + ) -> Result<(), LightningError> { + self.deref().handle_query_channel_range(their_node_id, msg) + } + fn handle_query_short_channel_ids( + &self, their_node_id: PublicKey, msg: QueryShortChannelIds, + ) -> Result<(), LightningError> { + self.deref().handle_query_short_channel_ids(their_node_id, msg) + } + fn processing_queue_high(&self) -> bool { + self.deref().processing_queue_high() + } +} + /// A handler for received [`OnionMessage`]s and for providing generated ones to send. pub trait OnionMessageHandler: BaseMessageHandler { /// Handle an incoming `onion_message` message from the given peer. @@ -2264,6 +2507,18 @@ pub trait OnionMessageHandler: BaseMessageHandler { fn timer_tick_occurred(&self); } +impl> OnionMessageHandler for O { + fn handle_onion_message(&self, peer_node_id: PublicKey, msg: &OnionMessage) { + self.deref().handle_onion_message(peer_node_id, msg) + } + fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option { + self.deref().next_onion_message_for_peer(peer_node_id) + } + fn timer_tick_occurred(&self) { + self.deref().timer_tick_occurred() + } +} + /// A handler which can only be used to send messages. /// /// This is implemented by [`ChainMonitor`]. 
@@ -2271,6 +2526,8 @@ pub trait OnionMessageHandler: BaseMessageHandler { /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor pub trait SendOnlyMessageHandler: BaseMessageHandler {} +impl> SendOnlyMessageHandler for S {} + #[derive(Clone, Debug, PartialEq, Eq)] /// Information communicated in the onion to the recipient for multi-part tracking and proof that /// the payment is associated with an invoice. @@ -2336,6 +2593,11 @@ mod fuzzy_internal_msgs { pub intro_node_blinding_point: Option, pub next_blinding_override: Option, } + pub struct InboundOnionDummyPayload { + pub payment_relay: PaymentRelay, + pub payment_constraints: PaymentConstraints, + pub intro_node_blinding_point: Option, + } pub struct InboundOnionBlindedReceivePayload { pub sender_intended_htlc_amt_msat: u64, pub total_msat: u64, @@ -2355,6 +2617,7 @@ mod fuzzy_internal_msgs { Receive(InboundOnionReceivePayload), BlindedForward(InboundOnionBlindedForwardPayload), BlindedReceive(InboundOnionBlindedReceivePayload), + Dummy(InboundOnionDummyPayload), } pub struct InboundTrampolineForwardPayload { @@ -3373,6 +3636,7 @@ impl_writeable_msg!(UpdateAddHTLC, { // TODO: currently we may fail to read the `ChannelManager` if we write a new even TLV in this message // and then downgrade. Once this is fixed, update the type here to match BOLTs PR 989. (75537, hold_htlc, option), + (106823, accountable, (option, encoding: (bool, AccountableBool))), }); impl LengthReadable for OnionMessage { @@ -3489,7 +3753,7 @@ impl<'a> Writeable for OutboundOnionPayload<'a> { ref invoice_request, ref custom_tlvs, } => { - // We need to update [`ln::outbound_payment::RecipientOnionFields::with_custom_tlvs`] + // We need to update [`ln::outbound_payments::RecipientCustomTlvs::new`] // to reject any reserved types in the experimental range if new ones are ever // standardized. 
let invoice_request_tlv = invoice_request.map(|invreq| (77_777, invreq.encode())); // TODO: update TLV type once the async payments spec is merged @@ -3589,10 +3853,7 @@ impl<'a> Writeable for OutboundTrampolinePayload<'a> { } } -impl ReadableArgs<(Option, NS)> for InboundOnionPayload -where - NS::Target: NodeSigner, -{ +impl ReadableArgs<(Option, NS)> for InboundOnionPayload { fn read(r: &mut R, args: (Option, NS)) -> Result { let (update_add_blinding_point, node_signer) = args; @@ -3694,6 +3955,25 @@ where next_blinding_override, })) }, + ChaChaDualPolyReadAdapter { + readable: + BlindedPaymentTlvs::Dummy(DummyTlvs { payment_relay, payment_constraints }), + used_aad, + } => { + if amt.is_some() + || cltv_value.is_some() || total_msat.is_some() + || keysend_preimage.is_some() + || invoice_request.is_some() + || !used_aad + { + return Err(DecodeError::InvalidValue); + } + Ok(Self::Dummy(InboundOnionDummyPayload { + payment_relay, + payment_constraints, + intro_node_blinding_point, + })) + }, ChaChaDualPolyReadAdapter { readable: BlindedPaymentTlvs::Receive(receive_tlvs), used_aad, @@ -3757,10 +4037,7 @@ where } } -impl ReadableArgs<(Option, NS)> for InboundTrampolinePayload -where - NS::Target: NodeSigner, -{ +impl ReadableArgs<(Option, NS)> for InboundTrampolinePayload { fn read(r: &mut R, args: (Option, NS)) -> Result { let (update_add_blinding_point, node_signer) = args; let receive_auth_key = node_signer.get_receive_auth_key(); @@ -4364,7 +4641,7 @@ mod tests { InboundOnionForwardPayload, InboundOnionReceivePayload, OutboundTrampolinePayload, TrampolineOnionPacket, }; - use crate::ln::onion_utils::{AttributionData, HMAC_COUNT, HMAC_LEN, HOLD_TIME_LEN, MAX_HOPS}; + use crate::ln::onion_utils::AttributionData; use crate::ln::types::ChannelId; use crate::routing::gossip::{NodeAlias, NodeId}; use crate::types::features::{ @@ -4372,7 +4649,7 @@ mod tests { }; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::util::ser::{BigSize, 
Hostname, LengthReadable, Readable, ReadableArgs, Writeable}; - use crate::util::test_utils; + use crate::util::test_utils::{self, pubkey}; use bitcoin::hex::DisplayHex; use bitcoin::{Amount, ScriptBuf, Sequence, Transaction, TxIn, TxOut, Witness}; @@ -5872,6 +6149,7 @@ mod tests { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; let encoded_value = update_add_htlc.encode(); let target_value = >::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d32144668701144760101010101010101010101010101010101010101010101010101010101010101000c89d4ff031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101
0101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap(); @@ -5897,13 +6175,10 @@ mod tests { channel_id: ChannelId::from_bytes([2; 32]), htlc_id: 2316138423780173, reason: [1; 32].to_vec(), - attribution_data: Some(AttributionData { - hold_times: [3; MAX_HOPS * HOLD_TIME_LEN], - hmacs: [3; HMAC_LEN * HMAC_COUNT], - }), + attribution_data: Some(AttributionData::new()), }; let encoded_value = update_fail_htlc.encode(); - let target_value = 
>::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d0020010101010101010101010101010101010101010101010101010101010101010101fd03980303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030
303030303").unwrap(); + let target_value = >::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d0020010101010101010101010101010101010101010101010101010101010101010101fd0398000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000").unwrap(); assert_eq!(encoded_value, target_value); } @@ -6762,4 +7037,71 @@ mod tests { .to_socket_addrs() .is_err()); } + + fn test_update_add_htlc() -> msgs::UpdateAddHTLC { + msgs::UpdateAddHTLC { + channel_id: ChannelId::from_bytes([2; 32]), + htlc_id: 42, + amount_msat: 1000, + payment_hash: PaymentHash([1; 32]), + cltv_expiry: 500000, + skimmed_fee_msat: None, + onion_routing_packet: msgs::OnionPacket { + version: 0, + public_key: Ok(pubkey(42)), + hop_data: [1; 20 * 65], + hmac: [2; 32], + }, + blinding_point: None, + hold_htlc: None, + accountable: None, + } + } + + #[test] + fn test_update_add_htlc_accountable_encoding() { + // Tests that accountable boolean values are written to the wire with correct u8 values. + for (bool_signal, wire_value) in [(Some(false), 0u8), (Some(true), 7u8)] { + let mut base_msg = test_update_add_htlc(); + base_msg.accountable = bool_signal; + let encoded = base_msg.encode(); + assert_eq!( + *encoded.last().unwrap(), + wire_value, + "wrong wire value for accountable={:?}", + bool_signal + ); + } + } + + fn do_test_htlc_accountable_from_u8(accountable_override: Option, expected: Option) { + // Tests custom encoding conversion of u8 wire values to appropriate boolean, manually + // writing to support values that we wouldn't encode ourselves but should be able to read. 
+ let base_msg = test_update_add_htlc(); + let mut encoded = base_msg.encode(); + if let Some(value) = accountable_override { + encoded.extend_from_slice(&[0xfe, 0x00, 0x01, 0xa1, 0x47]); + encoded.push(1); + encoded.push(value); + } + + let decoded: msgs::UpdateAddHTLC = + LengthReadable::read_from_fixed_length_buffer(&mut &encoded[..]).unwrap(); + + assert_eq!( + decoded.accountable, expected, + "accountable={:?} with override={:?} not eq to expected={:?}", + decoded.accountable, accountable_override, expected + ); + } + + #[test] + fn update_add_htlc_accountable_from_u8() { + // Tests that accountable signals encoded as a u8 are properly translated to a bool. + do_test_htlc_accountable_from_u8(None, None); + do_test_htlc_accountable_from_u8(Some(8), Some(false)); // 8 is an invalid value + do_test_htlc_accountable_from_u8(Some(7), Some(true)); + do_test_htlc_accountable_from_u8(Some(3), Some(false)); + do_test_htlc_accountable_from_u8(Some(0), Some(false)); + } } diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 4c53aefe58d..12e631b4042 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -47,10 +47,11 @@ use bitcoin::secp256k1::{PublicKey, Secp256k1}; use core::time::Duration; use crate::blinded_path::IntroductionNode; use crate::blinded_path::message::BlindedMessagePath; -use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentContext}; +use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, DummyTlvs, PaymentContext}; use crate::blinded_path::message::OffersContext; use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaidBolt12Invoice, PaymentFailureReason, PaymentPurpose}; -use crate::ln::channelmanager::{Bolt12PaymentError, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry, self}; +use crate::ln::channelmanager::{PaymentId, RecentPaymentDetails, self}; +use crate::ln::outbound_payment::{Bolt12PaymentError, 
RecipientOnionFields, Retry}; use crate::types::features::Bolt12InvoiceFeatures; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, NodeAnnouncement, OnionMessage, OnionMessageHandler, RoutingMessageHandler, SocketAddress, UnsignedGossipMessage, UnsignedNodeAnnouncement}; @@ -60,10 +61,10 @@ use crate::offers::invoice_error::InvoiceError; use crate::offers::invoice_request::{InvoiceRequest, InvoiceRequestFields, InvoiceRequestVerifiedFromOffer}; use crate::offers::nonce::Nonce; use crate::offers::parse::Bolt12SemanticError; -use crate::onion_message::messenger::{DefaultMessageRouter, Destination, MessageSendInstructions, NodeIdMessageRouter, NullMessageRouter, PeeledOnion, PADDED_PATH_LENGTH}; +use crate::onion_message::messenger::{DefaultMessageRouter, Destination, MessageSendInstructions, NodeIdMessageRouter, NullMessageRouter, PeeledOnion, DUMMY_HOPS_PATH_LENGTH, QR_CODED_DUMMY_HOPS_PATH_LENGTH}; use crate::onion_message::offers::OffersMessage; use crate::routing::gossip::{NodeAlias, NodeId}; -use crate::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; +use crate::routing::router::{DEFAULT_PAYMENT_DUMMY_HOPS, PaymentParameters, RouteParameters, RouteParametersConfig}; use crate::sign::{NodeSigner, Recipient}; use crate::util::ser::Writeable; @@ -163,6 +164,20 @@ fn check_compact_path_introduction_node<'a, 'b, 'c>( && matches!(path.introduction_node(), IntroductionNode::DirectedShortChannelId(..)) } +fn check_dummy_hopped_path_length<'a, 'b, 'c>( + path: &BlindedMessagePath, + lookup_node: &Node<'a, 'b, 'c>, + expected_introduction_node: PublicKey, + expected_path_length: usize, +) -> bool { + let introduction_node_id = resolve_introduction_node(lookup_node, path); + let first_hop_len = path.blinded_hops().first().unwrap().encrypted_payload.len(); + let hops = path.blinded_hops(); + introduction_node_id == expected_introduction_node + && hops.len() == expected_path_length + 
&& hops.iter().take(hops.len() - 1).all(|hop| hop.encrypted_payload.len() == first_hop_len) +} + fn route_bolt12_payment<'a, 'b, 'c>( node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], invoice: &Bolt12Invoice ) { @@ -178,14 +193,28 @@ fn route_bolt12_payment<'a, 'b, 'c>( let amount_msats = invoice.amount_msats(); let payment_hash = invoice.payment_hash(); let args = PassAlongPathArgs::new(node, path, amount_msats, payment_hash, ev) - .without_clearing_recipient_events(); + .without_clearing_recipient_events() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); } fn claim_bolt12_payment<'a, 'b, 'c>( node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], expected_payment_context: PaymentContext, invoice: &Bolt12Invoice ) { - let recipient = &path[path.len() - 1]; + claim_bolt12_payment_with_extra_fees( + node, + path, + expected_payment_context, + invoice, + None, + ) +} + +fn claim_bolt12_payment_with_extra_fees<'a, 'b, 'c>( + node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], expected_payment_context: PaymentContext, invoice: &Bolt12Invoice, + expected_extra_fees_msat: Option, +) { + let recipient = path.last().expect("Empty path?"); let payment_purpose = match get_event!(recipient, Event::PaymentClaimable) { Event::PaymentClaimable { purpose, .. } => purpose, _ => panic!("No Event::PaymentClaimable"), @@ -194,20 +223,29 @@ fn claim_bolt12_payment<'a, 'b, 'c>( Some(preimage) => preimage, None => panic!("No preimage in Event::PaymentClaimable"), }; - match payment_purpose { - PaymentPurpose::Bolt12OfferPayment { payment_context, .. } => { - assert_eq!(PaymentContext::Bolt12Offer(payment_context), expected_payment_context); - }, - PaymentPurpose::Bolt12RefundPayment { payment_context, .. } => { - assert_eq!(PaymentContext::Bolt12Refund(payment_context), expected_payment_context); - }, + let context = match payment_purpose { + PaymentPurpose::Bolt12OfferPayment { payment_context, .. 
} => + PaymentContext::Bolt12Offer(payment_context), + PaymentPurpose::Bolt12RefundPayment { payment_context, .. } => + PaymentContext::Bolt12Refund(payment_context), _ => panic!("Unexpected payment purpose: {:?}", payment_purpose), - } - if let Some(inv) = claim_payment(node, path, payment_preimage) { - assert_eq!(inv, PaidBolt12Invoice::Bolt12Invoice(invoice.to_owned())); - } else { - panic!("Expected PaidInvoice::Bolt12Invoice"); }; + + assert_eq!(context, expected_payment_context); + + let expected_paths = [path]; + let mut args = ClaimAlongRouteArgs::new( + node, + &expected_paths, + payment_preimage, + ); + + if let Some(extra) = expected_extra_fees_msat { + args = args.with_expected_extra_total_fees_msat(extra); + } + + let (inv, _) = claim_payment_along_route(args); + assert_eq!(inv, Some(PaidBolt12Invoice::Bolt12Invoice(invoice.clone()))); } fn extract_offer_nonce<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, message: &OnionMessage) -> Nonce { @@ -455,7 +493,7 @@ fn check_dummy_hop_pattern_in_offer() { let bob_id = bob.node.get_our_node_id(); // Case 1: DefaultMessageRouter → uses compact blinded paths (via SCIDs) - // Expected: No dummy hops; each path contains only the recipient. 
+ // Expected: Padded to QR_CODED_DUMMY_HOPS_PATH_LENGTH for QR code size optimization let default_router = DefaultMessageRouter::new(alice.network_graph, alice.keys_manager); let compact_offer = alice.node @@ -467,8 +505,8 @@ fn check_dummy_hop_pattern_in_offer() { for path in compact_offer.paths() { assert_eq!( - path.blinded_hops().len(), 1, - "Compact paths must include only the recipient" + path.blinded_hops().len(), QR_CODED_DUMMY_HOPS_PATH_LENGTH, + "Compact offer paths are padded to QR_CODED_DUMMY_HOPS_PATH_LENGTH" ); } @@ -480,10 +518,10 @@ fn check_dummy_hop_pattern_in_offer() { assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); // Case 2: NodeIdMessageRouter → uses node ID-based blinded paths - // Expected: 0 to MAX_DUMMY_HOPS_COUNT dummy hops, followed by recipient. 
+ // Expected: Also padded to QR_CODED_DUMMY_HOPS_PATH_LENGTH for QR code size optimization let node_id_router = NodeIdMessageRouter::new(alice.network_graph, alice.keys_manager); let padded_offer = alice.node @@ -492,7 +530,7 @@ fn check_dummy_hop_pattern_in_offer() { .build().unwrap(); assert!(!padded_offer.paths().is_empty()); - assert!(padded_offer.paths().iter().all(|path| path.blinded_hops().len() == PADDED_PATH_LENGTH)); + assert!(padded_offer.paths().iter().all(|path| path.blinded_hops().len() == QR_CODED_DUMMY_HOPS_PATH_LENGTH)); let payment_id = PaymentId([2; 32]); bob.node.pay_for_offer(&padded_offer, None, payment_id, Default::default()).unwrap(); @@ -502,7 +540,7 @@ fn check_dummy_hop_pattern_in_offer() { assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); } /// Checks that blinded paths are compact for short-lived offers. @@ -687,7 +725,7 @@ fn creates_and_pays_for_offer_using_two_hop_blinded_path() { }); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); - assert!(check_compact_path_introduction_node(&reply_path, bob, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, bob, charlie_id, DUMMY_HOPS_PATH_LENGTH)); let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap(); charlie.onion_messenger.handle_onion_message(alice_id, &onion_message); @@ -706,8 +744,8 @@ fn creates_and_pays_for_offer_using_two_hop_blinded_path() { // to Alice when she's handling the message. Therefore, either Bob or Charlie could // serve as the introduction node for the reply path back to Alice. 
assert!( - check_compact_path_introduction_node(&reply_path, david, bob_id) || - check_compact_path_introduction_node(&reply_path, david, charlie_id) + check_dummy_hopped_path_length(&reply_path, david, bob_id, DUMMY_HOPS_PATH_LENGTH) || + check_dummy_hopped_path_length(&reply_path, david, charlie_id, DUMMY_HOPS_PATH_LENGTH) ); route_bolt12_payment(david, &[charlie, bob, alice], &invoice); @@ -790,7 +828,7 @@ fn creates_and_pays_for_refund_using_two_hop_blinded_path() { for path in invoice.payment_paths() { assert_eq!(path.introduction_node(), &IntroductionNode::NodeId(bob_id)); } - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); route_bolt12_payment(david, &[charlie, bob, alice], &invoice); expect_recent_payment!(david, RecentPaymentDetails::Pending, payment_id); @@ -845,7 +883,7 @@ fn creates_and_pays_for_offer_using_one_hop_blinded_path() { }); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); bob.onion_messenger.handle_onion_message(alice_id, &onion_message); @@ -857,7 +895,7 @@ fn creates_and_pays_for_offer_using_one_hop_blinded_path() { for path in invoice.payment_paths() { assert_eq!(path.introduction_node(), &IntroductionNode::NodeId(alice_id)); } - assert!(check_compact_path_introduction_node(&reply_path, bob, alice_id)); + assert!(check_dummy_hopped_path_length(&reply_path, bob, alice_id, DUMMY_HOPS_PATH_LENGTH)); route_bolt12_payment(bob, &[alice], &invoice); expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id); @@ -913,7 +951,7 @@ fn creates_and_pays_for_refund_using_one_hop_blinded_path() 
{ for path in invoice.payment_paths() { assert_eq!(path.introduction_node(), &IntroductionNode::NodeId(alice_id)); } - assert!(check_compact_path_introduction_node(&reply_path, bob, alice_id)); + assert!(check_dummy_hopped_path_length(&reply_path, bob, alice_id, DUMMY_HOPS_PATH_LENGTH)); route_bolt12_payment(bob, &[alice], &invoice); expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id); @@ -1059,6 +1097,7 @@ fn send_invoice_requests_with_distinct_reply_path() { let bob_id = bob.node.get_our_node_id(); let charlie_id = charlie.node.get_our_node_id(); let david_id = david.node.get_our_node_id(); + let frank_id = nodes[6].node.get_our_node_id(); disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5], &nodes[6]]); disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]); @@ -1089,7 +1128,7 @@ fn send_invoice_requests_with_distinct_reply_path() { alice.onion_messenger.handle_onion_message(bob_id, &onion_message); let (_, reply_path) = extract_invoice_request(alice, &onion_message); - assert!(check_compact_path_introduction_node(&reply_path, alice, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, charlie_id, DUMMY_HOPS_PATH_LENGTH)); // Send, extract and verify the second Invoice Request message let onion_message = david.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); @@ -1099,7 +1138,7 @@ fn send_invoice_requests_with_distinct_reply_path() { alice.onion_messenger.handle_onion_message(bob_id, &onion_message); let (_, reply_path) = extract_invoice_request(alice, &onion_message); - assert!(check_compact_path_introduction_node(&reply_path, alice, nodes[6].node.get_our_node_id())); + assert!(check_dummy_hopped_path_length(&reply_path, alice, frank_id, DUMMY_HOPS_PATH_LENGTH)); } /// This test checks that when multiple potential introduction nodes are available for the payee, @@ -1170,7 +1209,7 @@ fn send_invoice_for_refund_with_distinct_reply_path() { let onion_message = 
bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap(); let (_, reply_path) = extract_invoice(alice, &onion_message); - assert!(check_compact_path_introduction_node(&reply_path, alice, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, charlie_id, DUMMY_HOPS_PATH_LENGTH)); // Send, extract and verify the second Invoice Request message let onion_message = david.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); @@ -1179,7 +1218,7 @@ fn send_invoice_for_refund_with_distinct_reply_path() { let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap(); let (_, reply_path) = extract_invoice(alice, &onion_message); - assert!(check_compact_path_introduction_node(&reply_path, alice, nodes[6].node.get_our_node_id())); + assert!(check_dummy_hopped_path_length(&reply_path, alice, nodes[6].node.get_our_node_id(), DUMMY_HOPS_PATH_LENGTH)); } /// Verifies that the invoice request message can be retried if it fails to reach the @@ -1233,7 +1272,7 @@ fn creates_and_pays_for_offer_with_retry() { }); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); bob.onion_messenger.handle_onion_message(alice_id, &onion_message); @@ -1410,7 +1449,20 @@ fn creates_offer_with_blinded_path_using_unannounced_introduction_node() { route_bolt12_payment(bob, &[alice], &invoice); expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id); - claim_bolt12_payment(bob, &[alice], payment_context, &invoice); + // When the payer is the introduction node of a blinded path, LDK doesn't + // subtract the forward fee for the `payer -> next_hop` channel (see + // `BlindedPaymentPath::advance_path_by_one`). 
This keeps fee logic simple, + // at the cost of a small, intentional overpayment. + // + // In the old two-hop case (payer as introduction node → payee), this never + // surfaced because the payer simply wasn’t charged the forward fee. + // + // With dummy hops in LDK v0.3, even a real two-node path can appear as a + // longer blinded route, so the overpayment shows up in tests. + // + // Until the fee-handling trade-off is revisited, we pass an expected extra + // fee here so tests can compensate for it. + claim_bolt12_payment_with_extra_fees(bob, &[alice], payment_context, &invoice, Some(1000)); expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id); } @@ -1534,7 +1586,7 @@ fn fails_authentication_when_handling_invoice_request() { let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); - assert!(check_compact_path_introduction_node(&reply_path, david, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, david, charlie_id, DUMMY_HOPS_PATH_LENGTH)); assert_eq!(alice.onion_messenger.next_onion_message_for_peer(charlie_id), None); @@ -1563,7 +1615,7 @@ fn fails_authentication_when_handling_invoice_request() { let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); - assert!(check_compact_path_introduction_node(&reply_path, david, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, david, charlie_id, DUMMY_HOPS_PATH_LENGTH)); assert_eq!(alice.onion_messenger.next_onion_message_for_peer(charlie_id), None); } @@ -1663,7 +1715,7 @@ fn fails_authentication_when_handling_invoice_for_offer() { let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); assert_eq!(invoice_request.amount_msats(), 
Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); - assert!(check_compact_path_introduction_node(&reply_path, david, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, david, charlie_id, DUMMY_HOPS_PATH_LENGTH)); let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap(); charlie.onion_messenger.handle_onion_message(alice_id, &onion_message); @@ -2414,7 +2466,7 @@ fn rejects_keysend_to_non_static_invoice_path() { Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), keysend_payment_id, route_params, Retry::Attempts(0) ).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); @@ -2422,12 +2474,13 @@ fn rejects_keysend_to_non_static_invoice_path() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(payment_preimage) - .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_malformed_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_malformed_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); - expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new()); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); } #[test] @@ -2482,16 +2535,18 @@ fn 
no_double_pay_with_stale_channelmanager() { let expected_route: &[&[&Node]] = &[&[&nodes[1]], &[&nodes[1]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); let ev = remove_first_msg_event_to_node(&bob_id, &mut events); let args = PassAlongPathArgs::new(&nodes[0], expected_route[0], amt_msat, payment_hash, ev) - .without_clearing_recipient_events(); + .without_clearing_recipient_events() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); let ev = remove_first_msg_event_to_node(&bob_id, &mut events); let args = PassAlongPathArgs::new(&nodes[0], expected_route[0], amt_msat, payment_hash, ev) - .without_clearing_recipient_events(); + .without_clearing_recipient_events() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); expect_recent_payment!(nodes[0], RecentPaymentDetails::Pending, payment_id); @@ -2507,7 +2562,7 @@ fn no_double_pay_with_stale_channelmanager() { reload_node!(nodes[0], &alice_chan_manager_serialized, &[&monitor_0, &monitor_1], persister, chain_monitor, alice_deserialized); // The stale manager results in closing the channels. check_closed_event(&nodes[0], 2, ClosureReason::OutdatedChannelManager, &[bob_id, bob_id], 10_000_000); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); // Alice receives a duplicate invoice, but the payment should be transitioned to Retryable by now. nodes[0].onion_messenger.handle_onion_message(bob_id, &invoice_om); diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index 1abe4330a25..555cc7a87af 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -26,8 +26,6 @@ use crate::util::logger::Logger; #[allow(unused_imports)] use crate::prelude::*; -use core::ops::Deref; - /// Invalid inbound onion payment. 
#[derive(Clone, Debug, Hash, PartialEq, Eq)] pub struct InboundHTLCErr { @@ -149,6 +147,14 @@ pub(super) fn create_fwd_pending_htlc_info( (RoutingInfo::Direct { short_channel_id, new_packet_bytes, next_hop_hmac }, amt_to_forward, outgoing_cltv_value, intro_node_blinding_point, next_blinding_override) }, + onion_utils::Hop::Dummy { .. } => { + debug_assert!(false, "Dummy hop should have been peeled earlier"); + return Err(InboundHTLCErr { + msg: "Dummy Hop OnionHopData provided for us as an intermediary node", + reason: LocalHTLCFailureReason::InvalidOnionPayload, + err_data: Vec::new(), + }) + }, onion_utils::Hop::Receive { .. } | onion_utils::Hop::BlindedReceive { .. } => return Err(InboundHTLCErr { msg: "Final Node OnionHopData provided for us as an intermediary node", @@ -267,6 +273,7 @@ pub(super) fn create_fwd_pending_htlc_info( outgoing_amt_msat: amt_to_forward, outgoing_cltv_value, skimmed_fee_msat: None, + incoming_accountable: msg.accountable.unwrap_or(false), }) } @@ -274,7 +281,7 @@ pub(super) fn create_fwd_pending_htlc_info( pub(super) fn create_recv_pending_htlc_info( hop_data: onion_utils::Hop, shared_secret: [u8; 32], payment_hash: PaymentHash, amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool, - counterparty_skimmed_fee_msat: Option, current_height: u32 + counterparty_skimmed_fee_msat: Option, incoming_accountable: bool, current_height: u32 ) -> Result { let ( payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, onion_cltv_expiry, @@ -364,6 +371,14 @@ pub(super) fn create_recv_pending_htlc_info( msg: "Got blinded non final data with an HMAC of 0", }) }, + onion_utils::Hop::Dummy { .. } => { + debug_assert!(false, "Dummy hop should have been peeled earlier"); + return Err(InboundHTLCErr { + reason: LocalHTLCFailureReason::InvalidOnionBlinding, + err_data: vec![0; 32], + msg: "Got blinded non final data with an HMAC of 0", + }) + } onion_utils::Hop::TrampolineForward { .. 
} | onion_utils::Hop::TrampolineBlindedForward { .. } => { return Err(InboundHTLCErr { reason: LocalHTLCFailureReason::InvalidOnionPayload, @@ -456,6 +471,7 @@ pub(super) fn create_recv_pending_htlc_info( outgoing_amt_msat: onion_amt_msat, outgoing_cltv_value: onion_cltv_expiry, skimmed_fee_msat: counterparty_skimmed_fee_msat, + incoming_accountable, }) } @@ -469,16 +485,12 @@ pub(super) fn create_recv_pending_htlc_info( /// /// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable #[rustfmt::skip] -pub fn peel_payment_onion( +pub fn peel_payment_onion( msg: &msgs::UpdateAddHTLC, node_signer: NS, logger: L, secp_ctx: &Secp256k1, cur_height: u32, allow_skimmed_fees: bool, -) -> Result -where - NS::Target: NodeSigner, - L::Target: Logger, -{ +) -> Result { let (hop, next_packet_details_opt) = - decode_incoming_update_add_htlc_onion(msg, node_signer, logger, secp_ctx + decode_incoming_update_add_htlc_onion(msg, &node_signer, &logger, secp_ctx ).map_err(|(msg, failure_reason)| { let (reason, err_data) = match msg { HTLCFailureMsg::Malformed(_) => (failure_reason, Vec::new()), @@ -516,11 +528,35 @@ where // onion here and check it. create_fwd_pending_htlc_info(msg, hop, shared_secret.secret_bytes(), Some(next_packet_pubkey))? }, + onion_utils::Hop::Dummy { dummy_hop_data, next_hop_hmac, new_packet_bytes, .. } => { + let next_packet_details = match next_packet_details_opt { + Some(next_packet_details) => next_packet_details, + // Dummy Hops should always include the next hop details + None => return Err(InboundHTLCErr { + msg: "Failed to decode update add htlc onion", + reason: LocalHTLCFailureReason::InvalidOnionPayload, + err_data: Vec::new(), + }), + }; + + let new_update_add_htlc = onion_utils::peel_dummy_hop_update_add_htlc( + msg, + dummy_hop_data, + next_hop_hmac, + new_packet_bytes, + next_packet_details, + &node_signer, + secp_ctx + ); + + peel_payment_onion(&new_update_add_htlc, node_signer, logger, secp_ctx, cur_height, allow_skimmed_fees)? 
+ }, _ => { let shared_secret = hop.shared_secret().secret_bytes(); create_recv_pending_htlc_info( hop, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, - None, allow_skimmed_fees, msg.skimmed_fee_msat, cur_height, + None, allow_skimmed_fees, msg.skimmed_fee_msat, + msg.accountable.unwrap_or(false), cur_height, )? } }) @@ -529,6 +565,8 @@ where pub(super) enum HopConnector { // scid-based routing ShortChannelId(u64), + // Dummy hop for path padding + Dummy, // Trampoline-based routing #[allow(unused)] Trampoline(PublicKey), @@ -542,13 +580,9 @@ pub(super) struct NextPacketDetails { } #[rustfmt::skip] -pub(super) fn decode_incoming_update_add_htlc_onion( +pub(super) fn decode_incoming_update_add_htlc_onion( msg: &msgs::UpdateAddHTLC, node_signer: NS, logger: L, secp_ctx: &Secp256k1, -) -> Result<(onion_utils::Hop, Option), (HTLCFailureMsg, LocalHTLCFailureReason)> -where - NS::Target: NodeSigner, - L::Target: Logger, -{ +) -> Result<(onion_utils::Hop, Option), (HTLCFailureMsg, LocalHTLCFailureReason)> { let encode_malformed_error = |message: &str, failure_reason: LocalHTLCFailureReason| { log_info!(logger, "Failed to accept/forward incoming HTLC: {}", message); let (sha256_of_onion, failure_reason) = if msg.blinding_point.is_some() || failure_reason == LocalHTLCFailureReason::InvalidOnionBlinding { @@ -633,6 +667,22 @@ where outgoing_cltv_value }) } + onion_utils::Hop::Dummy { dummy_hop_data: msgs::InboundOnionDummyPayload { ref payment_relay, ref payment_constraints, .. }, shared_secret, .. 
} => { + let (amt_to_forward, outgoing_cltv_value) = match check_blinded_forward( + msg.amount_msat, msg.cltv_expiry, &payment_relay, &payment_constraints, &BlindedHopFeatures::empty() + ) { + Ok((amt, cltv)) => (amt, cltv), + Err(()) => { + return encode_relay_error("Underflow calculating outbound amount or cltv value for blinded forward", + LocalHTLCFailureReason::InvalidOnionBlinding, shared_secret.secret_bytes(), None, &[0; 32]); + } + }; + + let next_packet_pubkey = onion_utils::next_hop_pubkey(secp_ctx, + msg.onion_routing_packet.public_key.unwrap(), &shared_secret.secret_bytes()); + + Some(NextPacketDetails { next_packet_pubkey, outgoing_connector: HopConnector::Dummy, outgoing_amt_msat: amt_to_forward, outgoing_cltv_value }) + } onion_utils::Hop::TrampolineForward { next_trampoline_hop_data: msgs::InboundTrampolineForwardPayload { amt_to_forward, outgoing_cltv_value, next_trampoline }, trampoline_shared_secret, incoming_trampoline_public_key, .. } => { let next_trampoline_packet_pubkey = onion_utils::next_hop_pubkey(secp_ctx, incoming_trampoline_public_key, &trampoline_shared_secret.secret_bytes()); @@ -700,10 +750,11 @@ pub(super) fn check_incoming_htlc_cltv( #[cfg(test)] mod tests { - use crate::ln::channelmanager::{RecipientOnionFields, MIN_CLTV_EXPIRY_DELTA}; + use crate::ln::channelmanager::MIN_CLTV_EXPIRY_DELTA; use crate::ln::functional_test_utils::TEST_FINAL_CLTV; use crate::ln::msgs; use crate::ln::onion_utils::create_payment_onion; + use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::types::ChannelId; use crate::routing::router::{Path, RouteHop}; use crate::types::features::{ChannelFeatures, NodeFeatures}; @@ -814,6 +865,7 @@ mod tests { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, } } diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index f9b4ab28e88..27e0cfafade 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ 
b/lightning/src/ln/onion_route_tests.rs @@ -16,8 +16,7 @@ use crate::events::{Event, HTLCHandlingFailureType, PathFailure, PaymentFailureR use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS; use crate::ln::channelmanager::{ FailureCode, HTLCForwardInfo, PaymentId, PendingAddHTLCInfo, PendingHTLCInfo, - PendingHTLCRouting, RecipientOnionFields, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, - MIN_CLTV_EXPIRY_DELTA, + PendingHTLCRouting, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, }; use crate::ln::functional_test_utils::test_default_channel_config; use crate::ln::msgs; @@ -28,6 +27,7 @@ use crate::ln::msgs::{ use crate::ln::onion_utils::{ self, build_onion_payloads, construct_onion_keys, LocalHTLCFailureReason, }; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::wire::Encode; use crate::routing::gossip::{NetworkUpdate, RoutingFees}; use crate::routing::router::{ @@ -133,7 +133,7 @@ fn run_onion_failure_test_with_fail_intercept( .node .send_payment_with_route(route.clone(), *payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); // temper update_add (0 => 1) let mut update_add_0 = update_0.update_add_htlcs[0].clone(); @@ -170,7 +170,7 @@ fn run_onion_failure_test_with_fail_intercept( expect_htlc_forward!(&nodes[1]); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert_eq!(update_1.update_add_htlcs.len(), 1); // tamper update_add (1 => 2) let mut update_add_1 = update_1.update_add_htlcs[0].clone(); @@ -202,7 +202,7 @@ fn run_onion_failure_test_with_fail_intercept( }, _ => {}, } - check_added_monitors!(&nodes[2], 1); + check_added_monitors(&nodes[2], 1); let update_2_1 = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); 
assert!(update_2_1.update_fail_htlcs.len() == 1); @@ -405,7 +405,7 @@ fn test_fee_failures() { .node .send_payment_with_route(route.clone(), payment_hash_success, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -456,7 +456,7 @@ fn test_fee_failures() { .node .send_payment_with_route(route, payment_hash_success, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -1548,7 +1548,7 @@ fn test_overshoot_final_cltv() { .send_payment_with_route(route, payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add_0 = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_0); @@ -1567,7 +1567,7 @@ fn test_overshoot_final_cltv() { } expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); let mut update_add_1 = update_1.update_add_htlcs[0].clone(); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_1); @@ -2285,7 +2285,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -2300,7 +2300,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { &nodes[1], 
&[HTLCHandlingFailureType::Receive { payment_hash }], ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2435,7 +2435,7 @@ fn test_phantom_onion_hmac_failure() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2470,7 +2470,7 @@ fn test_phantom_onion_hmac_failure() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2508,7 +2508,7 @@ fn test_phantom_invalid_onion_payload() { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2571,7 +2571,7 @@ fn test_phantom_invalid_onion_payload() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2607,7 +2607,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { .node 
.send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2637,7 +2637,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2676,7 +2676,7 @@ fn test_phantom_failure_too_low_cltv() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2691,7 +2691,7 @@ fn test_phantom_failure_too_low_cltv() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2729,7 +2729,7 @@ fn test_phantom_failure_modified_cltv() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = 
update_0.update_add_htlcs[0].clone(); @@ -2784,7 +2784,7 @@ fn test_phantom_failure_expires_too_soon() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2834,7 +2834,7 @@ fn test_phantom_failure_too_low_recv_amt() { .node .send_payment_with_route(route, payment_hash, recipient_onion, PaymentId(payment_hash.0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2851,7 +2851,7 @@ fn test_phantom_failure_too_low_recv_amt() { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); @@ -2904,7 +2904,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2954,7 +2954,7 @@ fn test_phantom_failure_reject_payment() { .node .send_payment_with_route(route.clone(), payment_hash, recipient_onion, payment_id) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], 
&nodes[1].node.get_our_node_id()); let mut update_add = update_0.update_add_htlcs[0].clone(); @@ -2981,7 +2981,7 @@ fn test_phantom_failure_reject_payment() { nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 18aa43e27c6..605f27e9666 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -7,14 +7,18 @@ // You may not use this file except in accordance with one or both of these // licenses. +//! Low-level onion manipulation logic and fields + use super::msgs::OnionErrorPacket; use crate::blinded_path::BlindedHop; use crate::crypto::chacha20::ChaCha20; use crate::crypto::streams::ChaChaReader; use crate::events::HTLCHandlingFailureReason; use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS; -use crate::ln::channelmanager::{HTLCSource, RecipientOnionFields}; -use crate::ln::msgs::{self, DecodeError}; +use crate::ln::channelmanager::HTLCSource; +use crate::ln::msgs::{self, DecodeError, InboundOnionDummyPayload, OnionPacket, UpdateAddHTLC}; +use crate::ln::onion_payment::{HopConnector, NextPacketDetails}; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::offers::invoice_request::InvoiceRequest; use crate::routing::gossip::NetworkUpdate; use crate::routing::router::{BlindedTail, Path, RouteHop, RouteParameters, TrampolineHop}; @@ -37,7 +41,6 @@ use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey}; use crate::io::{Cursor, Read}; -use core::ops::Deref; #[allow(unused_imports)] use crate::prelude::*; @@ -979,36 +982,79 @@ mod fuzzy_onion_utils { 
#[cfg(test)] pub(crate) attribution_failed_channel: Option, } + + pub fn process_onion_failure( + secp_ctx: &Secp256k1, logger: &L, htlc_source: &HTLCSource, + encrypted_packet: OnionErrorPacket, + ) -> DecodedOnionFailure { + let (path, session_priv) = match htlc_source { + HTLCSource::OutboundRoute { ref path, ref session_priv, .. } => (path, session_priv), + _ => unreachable!(), + }; + + process_onion_failure_inner(secp_ctx, logger, path, &session_priv, None, encrypted_packet) + } + + /// Decodes the attribution data that we got back from upstream on a payment we sent. + pub fn decode_fulfill_attribution_data( + secp_ctx: &Secp256k1, logger: &L, path: &Path, outer_session_priv: &SecretKey, + mut attribution_data: AttributionData, + ) -> Vec { + let mut hold_times = Vec::new(); + + // Only consider hops in the regular path for attribution data. Blinded path attribution data isn't accessible. + let shared_secrets = + construct_onion_keys_generic(secp_ctx, &path.hops, None, outer_session_priv) + .map(|(shared_secret, _, _, _, _)| shared_secret); + + // Path length can reach 27 hops, but attribution data can only be conveyed back to the sender from the first 20 + // hops. Determine the number of hops to be used for attribution data. + let attributable_hop_count = usize::min(path.hops.len(), MAX_HOPS); + + for (route_hop_idx, shared_secret) in + shared_secrets.enumerate().take(attributable_hop_count) + { + attribution_data.crypt(shared_secret.as_ref()); + + // Calculate position relative to the last attributable hop. The last attributable hop is at position 0. We need + // to look at the chain of HMACs that does include all data up to the last attributable hop. Hold times beyond + // the last attributable hop will not be available. 
+ let position = attributable_hop_count - route_hop_idx - 1; + let res = attribution_data.verify(&Vec::new(), shared_secret.as_ref(), position); + match res { + Ok(hold_time) => { + hold_times.push(hold_time); + + // Shift attribution data to prepare for processing the next hop. + attribution_data.shift_left(); + }, + Err(()) => { + // We will hit this if there is a node on the path that does not support fulfill attribution data. + log_debug!( + logger, + "Invalid fulfill HMAC in attribution data for node at pos {}", + route_hop_idx + ); + + break; + }, + } + } + + hold_times + } } #[cfg(fuzzing)] pub use self::fuzzy_onion_utils::*; #[cfg(not(fuzzing))] pub(crate) use self::fuzzy_onion_utils::*; -pub fn process_onion_failure( - secp_ctx: &Secp256k1, logger: &L, htlc_source: &HTLCSource, - encrypted_packet: OnionErrorPacket, -) -> DecodedOnionFailure -where - L::Target: Logger, -{ - let (path, session_priv) = match htlc_source { - HTLCSource::OutboundRoute { ref path, ref session_priv, .. } => (path, session_priv), - _ => unreachable!(), - }; - - process_onion_failure_inner(secp_ctx, logger, path, &session_priv, None, encrypted_packet) -} - /// Process failure we got back from upstream on a payment we sent (implying htlc_source is an /// OutboundRoute). -fn process_onion_failure_inner( +fn process_onion_failure_inner( secp_ctx: &Secp256k1, logger: &L, path: &Path, session_priv: &SecretKey, trampoline_session_priv_override: Option, mut encrypted_packet: OnionErrorPacket, -) -> DecodedOnionFailure -where - L::Target: Logger, -{ +) -> DecodedOnionFailure { // Check that there is at least enough data for an hmac, otherwise none of the checking that we may do makes sense. // Also prevent slice out of bounds further down. if encrypted_packet.data.len() < 32 { @@ -1449,56 +1495,6 @@ where } } -/// Decodes the attribution data that we got back from upstream on a payment we sent. 
-pub fn decode_fulfill_attribution_data( - secp_ctx: &Secp256k1, logger: &L, path: &Path, outer_session_priv: &SecretKey, - mut attribution_data: AttributionData, -) -> Vec -where - L::Target: Logger, -{ - let mut hold_times = Vec::new(); - - // Only consider hops in the regular path for attribution data. Blinded path attribution data isn't accessible. - let shared_secrets = - construct_onion_keys_generic(secp_ctx, &path.hops, None, outer_session_priv) - .map(|(shared_secret, _, _, _, _)| shared_secret); - - // Path length can reach 27 hops, but attribution data can only be conveyed back to the sender from the first 20 - // hops. Determine the number of hops to be used for attribution data. - let attributable_hop_count = usize::min(path.hops.len(), MAX_HOPS); - - for (route_hop_idx, shared_secret) in shared_secrets.enumerate().take(attributable_hop_count) { - attribution_data.crypt(shared_secret.as_ref()); - - // Calculate position relative to the last attributable hop. The last attributable hop is at position 0. We need - // to look at the chain of HMACs that does include all data up to the last attributable hop. Hold times beyond - // the last attributable hop will not be available. - let position = attributable_hop_count - route_hop_idx - 1; - let res = attribution_data.verify(&Vec::new(), shared_secret.as_ref(), position); - match res { - Ok(hold_time) => { - hold_times.push(hold_time); - - // Shift attribution data to prepare for processing the next hop. - attribution_data.shift_left(); - }, - Err(()) => { - // We will hit this if there is a node on the path that does not support fulfill attribution data. 
- log_debug!( - logger, - "Invalid fulfill HMAC in attribution data for node at pos {}", - route_hop_idx - ); - - break; - }, - } - } - - hold_times -} - const BADONION: u16 = 0x8000; const PERM: u16 = 0x4000; const NODE: u16 = 0x2000; @@ -1568,7 +1564,8 @@ pub enum LocalHTLCFailureReason { /// /// The forwarding node has tampered with this value, or has a bug in its implementation. FinalIncorrectHTLCAmount, - /// The channel has been marked as disabled because the channel peer is offline. + /// The HTLC couldn't be forwarded because the channel counterparty has been offline for some + /// time. ChannelDisabled, /// The HTLC expires too far in the future, so it is rejected to avoid the worst-case outcome /// of funds being held for extended periods of time. @@ -1946,14 +1943,14 @@ impl Readable for HTLCFailReason { impl_writeable_tlv_based_enum!(HTLCFailReasonRepr, (0, LightningError) => { - (0, data, (legacy, Vec, |us| + (0, data, (legacy, Vec, |_| Ok(()), |us| if let &HTLCFailReasonRepr::LightningError { err: msgs::OnionErrorPacket { ref data, .. }, .. } = us { Some(data) } else { None }) ), - (1, attribution_data, (legacy, AttributionData, |us| + (1, attribution_data, (legacy, AttributionData, |_| Ok(()), |us| if let &HTLCFailReasonRepr::LightningError { err: msgs::OnionErrorPacket { ref attribution_data, .. }, .. } = us { attribution_data.as_ref() } else { @@ -1964,7 +1961,7 @@ impl_writeable_tlv_based_enum!(HTLCFailReasonRepr, (_unused, err, (static_value, msgs::OnionErrorPacket { data: data.ok_or(DecodeError::InvalidValue)?, attribution_data })), }, (1, Reason) => { - (0, _failure_code, (legacy, u16, + (0, _failure_code, (legacy, u16, |_| Ok(()), |r: &HTLCFailReasonRepr| match r { HTLCFailReasonRepr::LightningError{ .. } => None, HTLCFailReasonRepr::Reason{ failure_reason, .. 
} => Some(failure_reason.failure_code()) @@ -2118,12 +2115,9 @@ impl HTLCFailReason { } } - pub(super) fn decode_onion_failure( + pub(super) fn decode_onion_failure( &self, secp_ctx: &Secp256k1, logger: &L, htlc_source: &HTLCSource, - ) -> DecodedOnionFailure - where - L::Target: Logger, - { + ) -> DecodedOnionFailure { match self.0 { HTLCFailReasonRepr::LightningError { ref err, .. } => { process_onion_failure(secp_ctx, logger, &htlc_source, err.clone()) @@ -2223,6 +2217,17 @@ pub(crate) enum Hop { /// Bytes of the onion packet we're forwarding. new_packet_bytes: [u8; ONION_DATA_LEN], }, + /// This onion payload is dummy, and needs to be peeled by us. + Dummy { + /// Blinding point for introduction-node dummy hops. + dummy_hop_data: msgs::InboundOnionDummyPayload, + /// Shared secret for decrypting the next-hop public key. + shared_secret: SharedSecret, + /// HMAC of the next hop's onion packet. + next_hop_hmac: [u8; 32], + /// Onion packet bytes after this dummy layer is peeled. + new_packet_bytes: [u8; ONION_DATA_LEN], + }, /// This onion payload was for us, not for forwarding to a next-hop. Contains information for /// verifying the incoming payment. Receive { @@ -2277,6 +2282,7 @@ impl Hop { match self { Hop::Forward { shared_secret, .. } => shared_secret, Hop::BlindedForward { shared_secret, .. } => shared_secret, + Hop::Dummy { shared_secret, .. } => shared_secret, Hop::TrampolineForward { outer_shared_secret, .. } => outer_shared_secret, Hop::TrampolineBlindedForward { outer_shared_secret, .. } => outer_shared_secret, Hop::Receive { shared_secret, .. 
} => shared_secret, @@ -2304,13 +2310,10 @@ pub(crate) enum OnionDecodeErr { }, } -pub(crate) fn decode_next_payment_hop( +pub(crate) fn decode_next_payment_hop( recipient: Recipient, hop_pubkey: &PublicKey, hop_data: &[u8], hmac_bytes: [u8; 32], payment_hash: PaymentHash, blinding_point: Option, node_signer: NS, -) -> Result -where - NS::Target: NodeSigner, -{ +) -> Result { let blinded_node_id_tweak = blinding_point.map(|bp| { let blinded_tlvs_ss = node_signer.ecdh(recipient, &bp, None).unwrap().secret_bytes(); let mut hmac = HmacEngine::::new(b"blinded_node_id"); @@ -2325,7 +2328,7 @@ where hop_data, hmac_bytes, Some(payment_hash), - (blinding_point, &(*node_signer)), + (blinding_point, &node_signer), ); match decoded_hop { Ok((next_hop_data, Some((next_hop_hmac, FixedSizeOnionPacket(new_packet_bytes))))) => { @@ -2344,6 +2347,12 @@ where new_packet_bytes, }) }, + msgs::InboundOnionPayload::Dummy(dummy_hop_data) => Ok(Hop::Dummy { + dummy_hop_data, + shared_secret, + next_hop_hmac, + new_packet_bytes, + }), _ => { if blinding_point.is_some() { return Err(OnionDecodeErr::Malformed { @@ -2393,7 +2402,7 @@ where &hop_data.trampoline_packet.hop_data, hop_data.trampoline_packet.hmac, Some(payment_hash), - (blinding_point, node_signer), + (blinding_point, &node_signer), ); match decoded_trampoline_hop { Ok(( @@ -2521,7 +2530,60 @@ where } } +/// Peels a single dummy hop from an inbound `UpdateAddHTLC` by reconstructing the next +/// onion packet and HTLC state. +/// +/// This helper is used when processing dummy hops in a blinded path. Dummy hops are not +/// forwarded on the network; instead, their onion layer is removed locally and a new +/// `UpdateAddHTLC` is constructed with the next onion packet and updated amount/CLTV +/// values. +/// +/// This function performs no validation and does not enqueue or forward the HTLC. +/// It only reconstructs the next `UpdateAddHTLC` for further local processing. 
+pub(super) fn peel_dummy_hop_update_add_htlc( + msg: &UpdateAddHTLC, dummy_hop_data: InboundOnionDummyPayload, next_hop_hmac: [u8; 32], + new_packet_bytes: [u8; ONION_DATA_LEN], next_packet_details: NextPacketDetails, + node_signer: NS, secp_ctx: &Secp256k1, +) -> UpdateAddHTLC { + let NextPacketDetails { + next_packet_pubkey, + outgoing_amt_msat, + outgoing_connector, + outgoing_cltv_value, + } = next_packet_details; + + debug_assert!( + matches!(outgoing_connector, HopConnector::Dummy), + "Dummy hop must always map to HopConnector::Dummy" + ); + + let next_blinding_point = dummy_hop_data + .intro_node_blinding_point + .or(msg.blinding_point) + .and_then(|blinding_point| { + let ss = node_signer.ecdh(Recipient::Node, &blinding_point, None).ok()?.secret_bytes(); + + next_hop_pubkey(secp_ctx, blinding_point, &ss).ok() + }); + + let new_onion_packet = OnionPacket { + version: 0, + public_key: next_packet_pubkey, + hop_data: new_packet_bytes, + hmac: next_hop_hmac, + }; + + UpdateAddHTLC { + onion_routing_packet: new_onion_packet, + blinding_point: next_blinding_point, + amount_msat: outgoing_amt_msat, + cltv_expiry: outgoing_cltv_value, + ..msg.clone() + } +} + /// Build a payment onion, returning the first hop msat and cltv values as well. +/// /// `cur_block_height` should be set to the best known block height + 1. pub fn create_payment_onion( secp_ctx: &Secp256k1, path: &Path, session_priv: &SecretKey, total_msat: u64, @@ -2711,22 +2773,28 @@ fn decode_next_hop, N: NextPacketBytes>( } } -pub const HOLD_TIME_LEN: usize = 4; -pub const MAX_HOPS: usize = 20; -pub const HMAC_LEN: usize = 4; +pub(crate) const HOLD_TIME_LEN: usize = 4; +pub(crate) const MAX_HOPS: usize = 20; +pub(crate) const HMAC_LEN: usize = 4; // Define the number of HMACs in the attributable data block. For the first node, there are 20 HMACs, and then for every // subsequent node, the number of HMACs decreases by 1. 20 + 19 + 18 + ... + 1 = 20 * 21 / 2 = 210. 
-pub const HMAC_COUNT: usize = MAX_HOPS * (MAX_HOPS + 1) / 2; +pub(crate) const HMAC_COUNT: usize = MAX_HOPS * (MAX_HOPS + 1) / 2; #[derive(Clone, Debug, Hash, PartialEq, Eq)] +/// Attribution data allows the sender of an HTLC to identify which hop failed an HTLC robustly, +/// preventing earlier hops from corrupting the HTLC failure information (or at least allowing the +/// sender to identify the earliest hop which corrupted HTLC failure information). +/// +/// Additionally, it allows a sender to identify how long each hop along a path held an HTLC, with +/// 100ms granularity. pub struct AttributionData { - pub hold_times: [u8; MAX_HOPS * HOLD_TIME_LEN], - pub hmacs: [u8; HMAC_LEN * HMAC_COUNT], + hold_times: [u8; MAX_HOPS * HOLD_TIME_LEN], + hmacs: [u8; HMAC_LEN * HMAC_COUNT], } impl AttributionData { - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self { hold_times: [0; MAX_HOPS * HOLD_TIME_LEN], hmacs: [0; HMAC_LEN * HMAC_COUNT] } } } @@ -2775,7 +2843,7 @@ impl AttributionData { /// Writes the HMACs corresponding to the given position that have been added already by downstream hops. Position is /// relative to the final node. The final node is at position 0. - pub fn write_downstream_hmacs(&self, position: usize, w: &mut HmacEngine) { + pub(crate) fn write_downstream_hmacs(&self, position: usize, w: &mut HmacEngine) { // Set the index to the first downstream HMAC that we need to include. Note that we skip the first MAX_HOPS HMACs // because this is space reserved for the HMACs that we are producing for the current node. let mut hmac_idx = MAX_HOPS + MAX_HOPS - position - 1; diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 75fe55bfeac..170e4e13830 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -7,7 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. -//! 
Utilities to send payments and manage outbound payment information. +//! This module contains various types which are used to configure or process outbound payments. use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; @@ -18,7 +18,8 @@ use crate::blinded_path::{IntroductionNode, NodeIdLookUp}; use crate::events::{self, PaidBolt12Invoice, PaymentFailureReason}; use crate::ln::channel_state::ChannelDetails; use crate::ln::channelmanager::{ - EventCompletionAction, HTLCSource, PaymentCompleteUpdate, PaymentId, + EventCompletionAction, HTLCSource, OptionalBolt11PaymentParams, PaymentCompleteUpdate, + PaymentId, }; use crate::ln::onion_utils; use crate::ln::onion_utils::{DecodedOnionFailure, HTLCFailReason}; @@ -34,13 +35,12 @@ use crate::sign::{EntropySource, NodeSigner, Recipient}; use crate::types::features::Bolt12InvoiceFeatures; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::util::errors::APIError; -use crate::util::logger::Logger; +use crate::util::logger::{Logger, WithContext}; use crate::util::ser::ReadableArgs; #[cfg(feature = "std")] use crate::util::time::Instant; use core::fmt::{self, Display, Formatter}; -use core::ops::Deref; use core::sync::atomic::{AtomicBool, Ordering}; use core::time::Duration; @@ -677,6 +677,54 @@ pub enum ProbeSendFailure { DuplicateProbe, } +/// A validated, sorted set of custom TLVs for payment recipient onion fields. +#[derive(Clone)] +pub struct RecipientCustomTlvs(Vec<(u64, Vec)>); + +impl RecipientCustomTlvs { + /// Each TLV is provided as a `(u64, Vec)` for the type number and + /// serialized value respectively. TLV type numbers must be unique and + /// within the range reserved for custom types, i.e. >= 2^16, otherwise + /// this method will return `Err(())`. + /// + /// This method will also error for TLV types in the experimental range + /// which have since been standardized within the protocol. 
This currently + /// includes 5482373484 (keysend) and 77_777 (invoice requests for async + /// payments). + pub fn new(mut tlvs: Vec<(u64, Vec)>) -> Result { + tlvs.sort_unstable_by_key(|(typ, _)| *typ); + let mut prev_type = None; + for (typ, _) in tlvs.iter() { + if *typ < 1 << 16 { + return Err(()); + } + if *typ == 5482373484 { + return Err(()); + } // keysend + if *typ == 77_777 { + return Err(()); + } // invoice requests for async payments + match prev_type { + Some(prev) if prev >= *typ => return Err(()), + _ => {}, + } + prev_type = Some(*typ); + } + + Ok(Self(tlvs)) + } + + /// Returns the inner TLV list. + pub(super) fn into_inner(self) -> Vec<(u64, Vec)> { + self.0 + } + + /// Borrow the inner TLV list. + pub fn as_slice(&self) -> &[(u64, Vec)] { + &self.0 + } +} + /// Information which is provided, encrypted, to the payment recipient when sending HTLCs. /// /// This should generally be constructed with data communicated to us from the recipient (via a @@ -739,31 +787,13 @@ impl RecipientOnionFields { Self { payment_secret: None, payment_metadata: None, custom_tlvs: Vec::new() } } - /// Creates a new [`RecipientOnionFields`] from an existing one, adding custom TLVs. Each - /// TLV is provided as a `(u64, Vec)` for the type number and serialized value - /// respectively. TLV type numbers must be unique and within the range - /// reserved for custom types, i.e. >= 2^16, otherwise this method will return `Err(())`. - /// - /// This method will also error for types in the experimental range which have been - /// standardized within the protocol, which only includes 5482373484 (keysend) for now. + /// Creates a new [`RecipientOnionFields`] from an existing one, adding validated custom TLVs. /// /// See [`Self::custom_tlvs`] for more info. 
#[rustfmt::skip] - pub fn with_custom_tlvs(mut self, mut custom_tlvs: Vec<(u64, Vec)>) -> Result { - custom_tlvs.sort_unstable_by_key(|(typ, _)| *typ); - let mut prev_type = None; - for (typ, _) in custom_tlvs.iter() { - if *typ < 1 << 16 { return Err(()); } - if *typ == 5482373484 { return Err(()); } // keysend - if *typ == 77_777 { return Err(()); } // invoice requests for async payments - match prev_type { - Some(prev) if prev >= *typ => return Err(()), - _ => {}, - } - prev_type = Some(*typ); - } - self.custom_tlvs = custom_tlvs; - Ok(self) + pub fn with_custom_tlvs(mut self, custom_tlvs: RecipientCustomTlvs) -> Self { + self.custom_tlvs = custom_tlvs.into_inner(); + self } /// Gets the custom TLVs that will be sent or have been received. @@ -837,22 +867,15 @@ pub(super) struct SendAlongPathArgs<'a> { pub hold_htlc_at_next_hop: bool, } -pub(super) struct OutboundPayments -where - L::Target: Logger, -{ +pub(super) struct OutboundPayments { pub(super) pending_outbound_payments: Mutex>, awaiting_invoice: AtomicBool, retry_lock: Mutex<()>, - logger: L, } -impl OutboundPayments -where - L::Target: Logger, -{ +impl OutboundPayments { pub(super) fn new( - pending_outbound_payments: HashMap, logger: L, + pending_outbound_payments: HashMap, ) -> Self { let has_invoice_requests = pending_outbound_payments.values().any(|payment| { matches!( @@ -867,42 +890,39 @@ where pending_outbound_payments: Mutex::new(pending_outbound_payments), awaiting_invoice: AtomicBool::new(has_invoice_requests), retry_lock: Mutex::new(()), - logger, } } +} +impl OutboundPayments { #[rustfmt::skip] - pub(super) fn send_payment( + pub(super) fn send_payment( &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, first_hops: Vec, compute_inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, pending_events: &Mutex)>>, send_payment_along_path: SP, + logger: 
&WithContext, ) -> Result<(), RetryableSendFailure> where - R::Target: Router, - ES::Target: EntropySource, - NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { self.send_payment_for_non_bolt12_invoice(payment_id, payment_hash, recipient_onion, None, retry_strategy, route_params, router, first_hops, &compute_inflight_htlcs, entropy_source, node_signer, - best_block_height, pending_events, &send_payment_along_path) + best_block_height, pending_events, &send_payment_along_path, logger) } #[rustfmt::skip] - pub(super) fn send_spontaneous_payment( + pub(super) fn send_spontaneous_payment( &self, payment_preimage: Option, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, - pending_events: &Mutex)>>, send_payment_along_path: SP + pending_events: &Mutex)>>, send_payment_along_path: SP, + logger: &WithContext, ) -> Result where - R::Target: Router, - ES::Target: EntropySource, - NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { @@ -911,29 +931,27 @@ where let payment_hash = PaymentHash(Sha256::hash(&preimage.0).to_byte_array()); self.send_payment_for_non_bolt12_invoice(payment_id, payment_hash, recipient_onion, Some(preimage), retry_strategy, route_params, router, first_hops, inflight_htlcs, entropy_source, - node_signer, best_block_height, pending_events, send_payment_along_path) - .map(|()| payment_hash) + node_signer, best_block_height, pending_events, send_payment_along_path, logger, + ) + .map(|()| payment_hash) } #[rustfmt::skip] - pub(super) fn pay_for_bolt11_invoice( + pub(super) fn pay_for_bolt11_invoice( &self, invoice: &Bolt11Invoice, payment_id: PaymentId, amount_msats: Option, - route_params_config: RouteParametersConfig, - retry_strategy: Retry, + optional_params: 
OptionalBolt11PaymentParams, router: &R, first_hops: Vec, compute_inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, pending_events: &Mutex)>>, send_payment_along_path: SP, + logger: &WithContext, ) -> Result<(), Bolt11PaymentError> where - R::Target: Router, - ES::Target: EntropySource, - NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { - let payment_hash = PaymentHash((*invoice.payment_hash()).to_byte_array()); + let payment_hash = invoice.payment_hash(); let amount = match (invoice.amount_milli_satoshis(), amount_msats) { (Some(amt), None) | (None, Some(amt)) => amt, @@ -942,41 +960,38 @@ where (None, None) => return Err(Bolt11PaymentError::InvalidAmount), }; - let mut recipient_onion = RecipientOnionFields::secret_only(*invoice.payment_secret()); + let mut recipient_onion = RecipientOnionFields::secret_only(*invoice.payment_secret()) + .with_custom_tlvs(optional_params.custom_tlvs); recipient_onion.payment_metadata = invoice.payment_metadata().map(|v| v.clone()); let payment_params = PaymentParameters::from_bolt11_invoice(invoice) - .with_user_config_ignoring_fee_limit(route_params_config); + .with_user_config_ignoring_fee_limit(optional_params.route_params_config); let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amount); - if let Some(max_fee_msat) = route_params_config.max_total_routing_fee_msat { + if let Some(max_fee_msat) = optional_params.route_params_config.max_total_routing_fee_msat { route_params.max_total_routing_fee_msat = Some(max_fee_msat); } - self.send_payment_for_non_bolt12_invoice(payment_id, payment_hash, recipient_onion, None, retry_strategy, route_params, + self.send_payment_for_non_bolt12_invoice(payment_id, payment_hash, recipient_onion, None, optional_params.retry_strategy, route_params, router, first_hops, compute_inflight_htlcs, entropy_source, node_signer, best_block_height, - pending_events, 
send_payment_along_path + pending_events, send_payment_along_path, logger, ).map_err(|err| Bolt11PaymentError::SendingFailed(err)) } #[rustfmt::skip] pub(super) fn send_payment_for_bolt12_invoice< - R: Deref, ES: Deref, NS: Deref, NL: Deref, IH, SP + R: Router, ES: EntropySource, NS: NodeSigner, NL: NodeIdLookUp, IH, SP, L: Logger, >( &self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R, first_hops: Vec, features: Bolt12InvoiceFeatures, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, best_block_height: u32, pending_events: &Mutex)>>, - send_payment_along_path: SP, + send_payment_along_path: SP, logger: &WithContext, ) -> Result<(), Bolt12PaymentError> where - R::Target: Router, - ES::Target: EntropySource, - NS::Target: NodeSigner, - NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { @@ -1002,13 +1017,13 @@ where self.send_payment_for_bolt12_invoice_internal( payment_id, payment_hash, None, None, invoice, route_params, retry_strategy, false, router, first_hops, inflight_htlcs, entropy_source, node_signer, node_id_lookup, secp_ctx, - best_block_height, pending_events, send_payment_along_path + best_block_height, pending_events, send_payment_along_path, logger, ) } #[rustfmt::skip] fn send_payment_for_bolt12_invoice_internal< - R: Deref, ES: Deref, NS: Deref, NL: Deref, IH, SP + R: Router, ES: EntropySource, NS: NodeSigner, NL: NodeIdLookUp, IH, SP, L: Logger, >( &self, payment_id: PaymentId, payment_hash: PaymentHash, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, @@ -1017,13 +1032,9 @@ where first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, best_block_height: u32, pending_events: &Mutex)>>, - send_payment_along_path: SP, + send_payment_along_path: SP, logger: &WithContext, ) -> Result<(), Bolt12PaymentError> where - R::Target: Router, - ES::Target: 
EntropySource, - NS::Target: NodeSigner, - NL::Target: NodeIdLookUp, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { @@ -1053,6 +1064,7 @@ where let route = match self.find_initial_route( payment_id, payment_hash, &recipient_onion, keysend_preimage, invoice_request, &mut route_params, router, &first_hops, &inflight_htlcs, node_signer, best_block_height, + logger, ) { Ok(route) => route, Err(e) => { @@ -1102,27 +1114,24 @@ where best_block_height, &send_payment_along_path ); log_info!( - self.logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, + logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, result ); if let Err(e) = result { self.handle_pay_route_err( e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, &inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, - &send_payment_along_path + &send_payment_along_path, logger, ); } Ok(()) } - pub(super) fn static_invoice_received( + pub(super) fn static_invoice_received( &self, invoice: &StaticInvoice, payment_id: PaymentId, features: Bolt12InvoiceFeatures, best_block_height: u32, duration_since_epoch: Duration, entropy_source: ES, pending_events: &Mutex)>>, - ) -> Result<(), Bolt12PaymentError> - where - ES::Target: EntropySource, - { + ) -> Result<(), Bolt12PaymentError> { macro_rules! 
abandon_with_entry { ($payment: expr, $reason: expr) => { assert!( @@ -1225,27 +1234,20 @@ where } pub(super) fn send_payment_for_static_invoice< - R: Deref, - ES: Deref, - NS: Deref, - NL: Deref, - IH, - SP, + R: Router, + ES: EntropySource, + NS: NodeSigner, + NL: NodeIdLookUp, + IH: Fn() -> InFlightHtlcs, + SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, + L: Logger, >( &self, payment_id: PaymentId, hold_htlcs_at_next_hop: bool, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, best_block_height: u32, pending_events: &Mutex)>>, - send_payment_along_path: SP, - ) -> Result<(), Bolt12PaymentError> - where - R::Target: Router, - ES::Target: EntropySource, - NS::Target: NodeSigner, - NL::Target: NodeIdLookUp, - IH: Fn() -> InFlightHtlcs, - SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, - { + send_payment_along_path: SP, logger: &WithContext, + ) -> Result<(), Bolt12PaymentError> { let ( payment_hash, keysend_preimage, @@ -1303,20 +1305,26 @@ where best_block_height, pending_events, send_payment_along_path, + logger, ) } // Returns whether the data changed and needs to be repersisted. 
- pub(super) fn check_retry_payments( + pub(super) fn check_retry_payments< + R: Router, + ES: EntropySource, + NS: NodeSigner, + SP, + IH, + FH, + L: Logger, + >( &self, router: &R, first_hops: FH, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, pending_events: &Mutex)>>, - send_payment_along_path: SP, + send_payment_along_path: SP, logger: &WithContext, ) -> bool where - R::Target: Router, - ES::Target: EntropySource, - NS::Target: NodeSigner, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, IH: Fn() -> InFlightHtlcs, FH: Fn() -> Vec, @@ -1357,6 +1365,9 @@ where } core::mem::drop(outbounds); if let Some((payment_hash, payment_id, route_params)) = retry_id_route_params { + let logger = + WithContext::for_payment(&logger, None, None, Some(payment_hash), payment_id); + let logger = &logger; self.find_route_and_send_payment( payment_hash, payment_id, @@ -1369,6 +1380,7 @@ where best_block_height, pending_events, &send_payment_along_path, + logger, ); should_persist = true; } else { @@ -1414,21 +1426,18 @@ where } #[rustfmt::skip] - fn find_initial_route( + fn find_initial_route( &self, payment_id: PaymentId, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, route_params: &mut RouteParameters, router: &R, first_hops: &Vec, - inflight_htlcs: &IH, node_signer: &NS, best_block_height: u32, + inflight_htlcs: &IH, node_signer: &NS, best_block_height: u32, logger: &WithContext, ) -> Result where - R::Target: Router, - NS::Target: NodeSigner, - L::Target: Logger, IH: Fn() -> InFlightHtlcs, { #[cfg(feature = "std")] { if has_expired(&route_params) { - log_error!(self.logger, "Payment with id {} and hash {} had expired before we started paying", + log_error!(logger, "Payment with id {} and hash {} had expired before we started paying", payment_id, payment_hash); return Err(RetryableSendFailure::PaymentExpired) } @@ -1438,7 +1447,7 @@ where route_params, 
recipient_onion, keysend_preimage, invoice_request, best_block_height ) .map_err(|()| { - log_error!(self.logger, "Can't construct an onion packet without exceeding 1300-byte onion \ + log_error!(logger, "Can't construct an onion packet without exceeding 1300-byte onion \ hop_data length for payment with id {} and hash {}", payment_id, payment_hash); RetryableSendFailure::OnionPacketSizeExceeded })?; @@ -1448,7 +1457,7 @@ where Some(&first_hops.iter().collect::>()), inflight_htlcs(), payment_hash, payment_id, ).map_err(|_| { - log_error!(self.logger, "Failed to find route for payment with id {} and hash {}", + log_error!(logger, "Failed to find route for payment with id {} and hash {}", payment_id, payment_hash); RetryableSendFailure::RouteNotFound })?; @@ -1469,31 +1478,28 @@ where /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed #[rustfmt::skip] - fn send_payment_for_non_bolt12_invoice( + fn send_payment_for_non_bolt12_invoice( &self, payment_id: PaymentId, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, retry_strategy: Retry, mut route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, pending_events: &Mutex)>>, send_payment_along_path: SP, + logger: &WithContext, ) -> Result<(), RetryableSendFailure> where - R::Target: Router, - ES::Target: EntropySource, - NS::Target: NodeSigner, - L::Target: Logger, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { let route = self.find_initial_route( payment_id, payment_hash, &recipient_onion, keysend_preimage, None, &mut route_params, router, - &first_hops, &inflight_htlcs, node_signer, best_block_height, + &first_hops, &inflight_htlcs, node_signer, best_block_height, logger, )?; let onion_session_privs = self.add_new_pending_payment(payment_hash, recipient_onion.clone(), 
payment_id, keysend_preimage, &route, Some(retry_strategy), Some(route_params.payment_params.clone()), entropy_source, best_block_height, None) .map_err(|_| { - log_error!(self.logger, "Payment with id {} is already pending. New payment had payment hash {}", + log_error!(logger, "Payment with id {} is already pending. New payment had payment hash {}", payment_id, payment_hash); RetryableSendFailure::DuplicatePayment })?; @@ -1501,36 +1507,33 @@ where let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, keysend_preimage, None, None, payment_id, None, &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path); - log_info!(self.logger, "Sending payment with id {} and hash {} returned {:?}", + log_info!(logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, res); if let Err(e) = res { self.handle_pay_route_err( e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, &inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, - &send_payment_along_path + &send_payment_along_path, logger, ); } Ok(()) } #[rustfmt::skip] - fn find_route_and_send_payment( + fn find_route_and_send_payment( &self, payment_hash: PaymentHash, payment_id: PaymentId, route_params: RouteParameters, router: &R, first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, - pending_events: &Mutex)>>, send_payment_along_path: &SP, + pending_events: &Mutex)>>, + send_payment_along_path: &SP, logger: &WithContext, ) where - R::Target: Router, - ES::Target: EntropySource, - NS::Target: NodeSigner, - L::Target: Logger, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { #[cfg(feature = "std")] { if has_expired(&route_params) { - log_error!(self.logger, "Payment params expired on retry, abandoning payment {}", &payment_id); + log_error!(logger, "Payment params expired on retry, abandoning payment {}", 
&payment_id); self.abandon_payment(payment_id, PaymentFailureReason::PaymentExpired, pending_events); return } @@ -1543,7 +1546,7 @@ where ) { Ok(route) => route, Err(e) => { - log_error!(self.logger, "Failed to find a route on retry, abandoning payment {}: {:#?}", &payment_id, e); + log_error!(logger, "Failed to find a route on retry, abandoning payment {}: {:#?}", &payment_id, e); self.abandon_payment(payment_id, PaymentFailureReason::RouteNotFound, pending_events); return } @@ -1557,7 +1560,7 @@ where for path in route.paths.iter() { if path.hops.len() == 0 { - log_error!(self.logger, "Unusable path in route (path.hops.len() must be at least 1"); + log_error!(logger, "Unusable path in route (path.hops.len() must be at least 1"); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); return } @@ -1590,13 +1593,13 @@ where const RETRY_OVERFLOW_PERCENTAGE: u64 = 10; let retry_amt_msat = route.get_total_amount(); if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 { - log_error!(self.logger, "retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat); + log_error!(logger, "retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat); abandon_with_entry!(payment, PaymentFailureReason::UnexpectedError); return } if !payment.get().is_retryable_now() { - log_error!(self.logger, "Retries exhausted for payment id {}", &payment_id); + log_error!(logger, "Retries exhausted for payment id {}", &payment_id); abandon_with_entry!(payment, PaymentFailureReason::RetriesExhausted); return } @@ -1625,38 +1628,38 @@ where (total_msat, recipient_onion, keysend_preimage, onion_session_privs, invoice_request, bolt12_invoice.cloned()) }, PendingOutboundPayment::Legacy { .. 
} => { - log_error!(self.logger, "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102"); + log_error!(logger, "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102"); return }, PendingOutboundPayment::AwaitingInvoice { .. } | PendingOutboundPayment::AwaitingOffer { .. } => { - log_error!(self.logger, "Payment not yet sent"); + log_error!(logger, "Payment not yet sent"); debug_assert!(false); return }, PendingOutboundPayment::InvoiceReceived { .. } => { - log_error!(self.logger, "Payment already initiating"); + log_error!(logger, "Payment already initiating"); debug_assert!(false); return }, PendingOutboundPayment::StaticInvoiceReceived { .. } => { - log_error!(self.logger, "Payment already initiating"); + log_error!(logger, "Payment already initiating"); debug_assert!(false); return }, PendingOutboundPayment::Fulfilled { .. } => { - log_error!(self.logger, "Payment already completed"); + log_error!(logger, "Payment already completed"); return }, PendingOutboundPayment::Abandoned { .. 
} => { - log_error!(self.logger, "Payment already abandoned (with some HTLCs still pending)"); + log_error!(logger, "Payment already abandoned (with some HTLCs still pending)"); return }, } }, hash_map::Entry::Vacant(_) => { - log_error!(self.logger, "Payment with ID {} not found", &payment_id); + log_error!(logger, "Payment with ID {} not found", &payment_id); return } } @@ -1664,37 +1667,34 @@ where let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, keysend_preimage, invoice_request.as_ref(), bolt12_invoice.as_ref(), payment_id, Some(total_msat), &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path); - log_info!(self.logger, "Result retrying payment id {}: {:?}", &payment_id, res); + log_info!(logger, "Result retrying payment id {}: {:?}", &payment_id, res); if let Err(e) = res { self.handle_pay_route_err( e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, - send_payment_along_path + send_payment_along_path, logger ); } } #[rustfmt::skip] - fn handle_pay_route_err( + fn handle_pay_route_err( &self, err: PaymentSendFailure, payment_id: PaymentId, payment_hash: PaymentHash, route: Route, mut route_params: RouteParameters, onion_session_privs: Vec<[u8; 32]>, router: &R, first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, pending_events: &Mutex)>>, - send_payment_along_path: &SP, + send_payment_along_path: &SP, logger: &WithContext, ) where - R::Target: Router, - ES::Target: EntropySource, - NS::Target: NodeSigner, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { match err { PaymentSendFailure::AllFailedResendSafe(errs) => { self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); - Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, 
errs.into_iter().map(|e| Err(e)), &self.logger, pending_events); - self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, send_payment_along_path); + Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, errs.into_iter().map(|e| Err(e)), pending_events, logger); + self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, send_payment_along_path, logger); }, PaymentSendFailure::PartialFailure { failed_paths_retry: Some(mut retry), results, .. } => { debug_assert_eq!(results.len(), route.paths.len()); @@ -1710,11 +1710,11 @@ where } }); self.remove_session_privs(payment_id, failed_paths); - Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), &self.logger, pending_events); + Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), pending_events, logger); // Some paths were sent, even if we failed to send the full MPP value our recipient may // misbehave and claim the funds, at which point we have to consider the payment sent, so // return `Ok()` here, ignoring any retry errors. - self.find_route_and_send_payment(payment_hash, payment_id, retry, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, send_payment_along_path); + self.find_route_and_send_payment(payment_hash, payment_id, retry, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, send_payment_along_path, logger); }, PaymentSendFailure::PartialFailure { failed_paths_retry: None, .. } => { // This may happen if we send a payment and some paths fail, but only due to a temporary @@ -1722,13 +1722,13 @@ where // initial HTLC-Add messages yet. 
}, PaymentSendFailure::PathParameterError(results) => { - log_error!(self.logger, "Failed to send to route due to parameter error in a single path. Your router is buggy"); + log_error!(logger, "Failed to send to route due to parameter error in a single path. Your router is buggy"); self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); - Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, results.into_iter(), &self.logger, pending_events); + Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, results.into_iter(), pending_events, logger); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::ParameterError(e) => { - log_error!(self.logger, "Failed to send to route due to parameter error: {:?}. Your router is buggy", e); + log_error!(logger, "Failed to send to route due to parameter error: {:?}. Your router is buggy", e); self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, @@ -1738,10 +1738,12 @@ where fn push_path_failed_evs_and_scids< I: ExactSizeIterator + Iterator>, + L: Logger, >( payment_id: PaymentId, payment_hash: PaymentHash, route_params: &mut RouteParameters, - paths: Vec, path_results: I, logger: &L, + paths: Vec, path_results: I, pending_events: &Mutex)>>, + logger: &WithContext, ) { let mut events = pending_events.lock().unwrap(); debug_assert_eq!(paths.len(), path_results.len()); @@ -1792,13 +1794,11 @@ where } #[rustfmt::skip] - pub(super) fn send_probe( + pub(super) fn send_probe( &self, path: Path, probing_cookie_secret: [u8; 32], entropy_source: &ES, node_signer: &NS, - best_block_height: u32, send_payment_along_path: F + best_block_height: u32, send_payment_along_path: F, ) -> Result<(PaymentHash, PaymentId), ProbeSendFailure> where - 
ES::Target: EntropySource, - NS::Target: NodeSigner, F: Fn(SendAlongPathArgs) -> Result<(), APIError>, { let payment_id = PaymentId(entropy_source.get_secure_random_bytes()); @@ -1867,20 +1867,20 @@ where #[cfg(any(test, feature = "_externalize_tests"))] #[rustfmt::skip] - pub(super) fn test_add_new_pending_payment( + pub(super) fn test_add_new_pending_payment( &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route, retry_strategy: Option, entropy_source: &ES, best_block_height: u32 - ) -> Result, PaymentSendFailure> where ES::Target: EntropySource { + ) -> Result, PaymentSendFailure> { self.add_new_pending_payment(payment_hash, recipient_onion, payment_id, None, route, retry_strategy, None, entropy_source, best_block_height, None) } #[rustfmt::skip] - pub(super) fn add_new_pending_payment( + pub(super) fn add_new_pending_payment( &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, keysend_preimage: Option, route: &Route, retry_strategy: Option, payment_params: Option, entropy_source: &ES, best_block_height: u32, bolt12_invoice: Option - ) -> Result, PaymentSendFailure> where ES::Target: EntropySource { + ) -> Result, PaymentSendFailure> { let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); match pending_outbounds.entry(payment_id) { hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment), @@ -1896,15 +1896,12 @@ where } #[rustfmt::skip] - fn create_pending_payment( + fn create_pending_payment( payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, invoice_request: Option, bolt12_invoice: Option, route: &Route, retry_strategy: Option, payment_params: Option, entropy_source: &ES, best_block_height: u32 - ) -> (PendingOutboundPayment, Vec<[u8; 32]>) - where - ES::Target: EntropySource, - { + ) -> (PendingOutboundPayment, Vec<[u8; 32]>) { let mut onion_session_privs = 
Vec::with_capacity(route.paths.len()); for _ in 0..route.paths.len() { onion_session_privs.push(entropy_source.get_secure_random_bytes()); @@ -2068,14 +2065,13 @@ where } #[rustfmt::skip] - fn pay_route_internal( + fn pay_route_internal( &self, route: &Route, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, bolt12_invoice: Option<&PaidBolt12Invoice>, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: &Vec<[u8; 32]>, hold_htlcs_at_next_hop: bool, node_signer: &NS, best_block_height: u32, send_payment_along_path: &F ) -> Result<(), PaymentSendFailure> where - NS::Target: NodeSigner, F: Fn(SendAlongPathArgs) -> Result<(), APIError>, { if route.paths.len() < 1 { @@ -2183,14 +2179,13 @@ where #[cfg(any(test, feature = "_externalize_tests"))] #[rustfmt::skip] - pub(super) fn test_send_payment_internal( + pub(super) fn test_send_payment_internal( &self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>, node_signer: &NS, best_block_height: u32, send_payment_along_path: F ) -> Result<(), PaymentSendFailure> where - NS::Target: NodeSigner, F: Fn(SendAlongPathArgs) -> Result<(), APIError>, { self.pay_route_internal(route, payment_hash, &recipient_onion, @@ -2216,11 +2211,14 @@ where } #[rustfmt::skip] - pub(super) fn claim_htlc( + pub(super) fn claim_htlc( &self, payment_id: PaymentId, payment_preimage: PaymentPreimage, bolt12_invoice: Option, session_priv: SecretKey, path: Path, from_onchain: bool, ev_completion_action: &mut Option, pending_events: &Mutex)>>, - ) { + logger: &WithContext, + ) + where + { let mut session_priv_bytes = [0; 32]; session_priv_bytes.copy_from_slice(&session_priv[..]); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); @@ -2228,7 +2226,7 @@ where if let hash_map::Entry::Occupied(mut payment) 
= outbounds.entry(payment_id) { if !payment.get().is_fulfilled() { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array()); - log_info!(self.logger, "Payment with id {} and hash {} sent!", payment_id, payment_hash); + log_info!(logger, "Payment with id {} and hash {} sent!", payment_id, payment_hash); let fee_paid_msat = payment.get().get_pending_fee_msat(); let amount_msat = payment.get().total_msat(); pending_events.push_back((events::Event::PaymentSent { @@ -2258,7 +2256,7 @@ where } } } else { - log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", &payment_preimage); + log_trace!(logger, "Received duplicative fulfill for HTLC with payment_preimage {}", &payment_preimage); } } @@ -2378,12 +2376,12 @@ where }); } - pub(super) fn fail_htlc( + pub(super) fn fail_htlc( &self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, path: &Path, session_priv: &SecretKey, payment_id: &PaymentId, probing_cookie_secret: [u8; 32], secp_ctx: &Secp256k1, pending_events: &Mutex)>>, - completion_action: &mut Option, + completion_action: &mut Option, logger: &WithContext, ) { #[cfg(any(test, feature = "_test_utils"))] let DecodedOnionFailure { @@ -2395,7 +2393,7 @@ where failed_within_blinded_path, hold_times, .. - } = onion_error.decode_onion_failure(secp_ctx, &self.logger, &source); + } = onion_error.decode_onion_failure(secp_ctx, &logger, &source); #[cfg(not(any(test, feature = "_test_utils")))] let DecodedOnionFailure { network_update, @@ -2404,7 +2402,7 @@ where failed_within_blinded_path, hold_times, .. 
- } = onion_error.decode_onion_failure(secp_ctx, &self.logger, &source); + } = onion_error.decode_onion_failure(secp_ctx, &logger, &source); let payment_is_probe = payment_is_probe(payment_hash, &payment_id, probing_cookie_secret); let mut session_priv_bytes = [0; 32]; @@ -2429,7 +2427,7 @@ where if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(*payment_id) { if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) { log_trace!( - self.logger, + logger, "Received duplicative fail for HTLC with payment_hash {}", &payment_hash ); @@ -2437,7 +2435,7 @@ where } if payment.get().is_fulfilled() { log_trace!( - self.logger, + logger, "Received failure of HTLC with payment_hash {} after payment completion", &payment_hash ); @@ -2485,18 +2483,13 @@ where is_retryable_now } else { log_trace!( - self.logger, - "Received duplicative fail for HTLC with payment_hash {}", - &payment_hash + logger, + "Received duplicative fail for HTLC with payment_hash {payment_hash}" ); return; }; core::mem::drop(outbounds); - log_trace!( - self.logger, - "Failing outbound payment HTLC with payment_hash {}", - &payment_hash - ); + log_trace!(logger, "Failing outbound payment HTLC with payment_hash {payment_hash}"); let path_failure = { if payment_is_probe { @@ -2618,9 +2611,9 @@ where invoice_requests } - pub(super) fn insert_from_monitor_on_startup( + pub(super) fn insert_from_monitor_on_startup( &self, payment_id: PaymentId, payment_hash: PaymentHash, session_priv_bytes: [u8; 32], - path: &Path, best_block_height: u32, + path: &Path, best_block_height: u32, logger: &WithContext, ) { let path_amt = path.final_value_msat(); let path_fee = path.fee_msat(); @@ -2670,12 +2663,12 @@ where entry.get_mut().insert(session_priv_bytes, &path) }, }; - log_info!(self.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}", + log_info!(logger, "{} a pending payment path for {} msat for session priv {} on an existing pending 
payment with payment hash {}", if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), payment_hash); }, hash_map::Entry::Vacant(entry) => { entry.insert(new_retryable!()); - log_info!(self.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}", + log_info!(logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}", path_amt, payment_hash, log_bytes!(session_priv_bytes)); }, } @@ -2738,7 +2731,7 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment, (5, AwaitingInvoice) => { (0, expiration, required), (2, retry_strategy, required), - (4, _max_total_routing_fee_msat, (legacy, u64, + (4, _max_total_routing_fee_msat, (legacy, u64, |_| Ok(()), |us: &PendingOutboundPayment| match us { PendingOutboundPayment::AwaitingInvoice { route_params_config, .. } => route_params_config.max_total_routing_fee_msat, _ => None, @@ -2755,7 +2748,7 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment, (7, InvoiceReceived) => { (0, payment_hash, required), (2, retry_strategy, required), - (4, _max_total_routing_fee_msat, (legacy, u64, + (4, _max_total_routing_fee_msat, (legacy, u64, |_| Ok(()), |us: &PendingOutboundPayment| match us { PendingOutboundPayment::InvoiceReceived { route_params_config, .. } => route_params_config.max_total_routing_fee_msat, _ => None, @@ -2786,7 +2779,7 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment, (11, AwaitingOffer) => { (0, expiration, required), (2, retry_strategy, required), - (4, _max_total_routing_fee_msat, (legacy, u64, + (4, _max_total_routing_fee_msat, (legacy, u64, |_| Ok(()), |us: &PendingOutboundPayment| match us { PendingOutboundPayment::AwaitingOffer { route_params_config, .. 
} => route_params_config.max_total_routing_fee_msat, _ => None, @@ -2812,11 +2805,12 @@ mod tests { use crate::blinded_path::EmptyNodeIdLookUp; use crate::events::{Event, PathFailure, PaymentFailureReason}; - use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; + use crate::ln::channelmanager::PaymentId; use crate::ln::inbound_payment::ExpandedKey; + use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::outbound_payment::{ - Bolt12PaymentError, OutboundPayments, PendingOutboundPayment, Retry, RetryableSendFailure, - StaleExpiration, + Bolt12PaymentError, OutboundPayments, PendingOutboundPayment, RecipientCustomTlvs, Retry, + RetryableSendFailure, StaleExpiration, }; #[cfg(feature = "std")] use crate::offers::invoice::DEFAULT_RELATIVE_EXPIRY; @@ -2834,6 +2828,7 @@ mod tests { use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::errors::APIError; use crate::util::hash_tables::new_hash_map; + use crate::util::logger::WithContext; use crate::util::test_utils; use alloc::collections::VecDeque; @@ -2843,22 +2838,23 @@ mod tests { fn test_recipient_onion_fields_with_custom_tlvs() { let onion_fields = RecipientOnionFields::spontaneous_empty(); - let bad_type_range_tlvs = vec![ + let bad_type_range_tlvs = RecipientCustomTlvs::new(vec![ (0, vec![42]), (1, vec![42; 32]), - ]; - assert!(onion_fields.clone().with_custom_tlvs(bad_type_range_tlvs).is_err()); + ]); + assert!(bad_type_range_tlvs.is_err()); - let keysend_tlv = vec![ + let keysend_tlv = RecipientCustomTlvs::new(vec![ (5482373484, vec![42; 32]), - ]; - assert!(onion_fields.clone().with_custom_tlvs(keysend_tlv).is_err()); + ]); + assert!(keysend_tlv.is_err()); - let good_tlvs = vec![ + let good_tlvs = RecipientCustomTlvs::new(vec![ ((1 << 16) + 1, vec![42]), ((1 << 16) + 3, vec![42; 32]), - ]; - assert!(onion_fields.with_custom_tlvs(good_tlvs).is_ok()); + ]); + assert!(good_tlvs.is_ok()); + onion_fields.with_custom_tlvs(good_tlvs.unwrap()); } #[test] @@ -2871,7 
+2867,9 @@ mod tests { #[rustfmt::skip] fn do_fails_paying_after_expiration(on_retry: bool) { let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -2893,7 +2891,7 @@ mod tests { outbound_payments.find_route_and_send_payment( PaymentHash([0; 32]), PaymentId([0; 32]), expired_route_params, &&router, vec![], &|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - &|_| Ok(())); + &|_| Ok(()), &log); let events = pending_events.lock().unwrap(); assert_eq!(events.len(), 1); if let Event::PaymentFailed { ref reason, .. } = events[0].0 { @@ -2903,7 +2901,7 @@ mod tests { let err = outbound_payments.send_payment( PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), Retry::Attempts(0), expired_route_params, &&router, vec![], || InFlightHtlcs::new(), - &&keys_manager, &&keys_manager, 0, &pending_events, |_| Ok(())).unwrap_err(); + &&keys_manager, &&keys_manager, 0, &pending_events, |_| Ok(()), &log).unwrap_err(); if let RetryableSendFailure::PaymentExpired = err { } else { panic!("Unexpected error"); } } } @@ -2916,7 +2914,9 @@ mod tests { #[rustfmt::skip] fn do_find_route_error(on_retry: bool) { let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = 
RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -2937,7 +2937,7 @@ mod tests { outbound_payments.find_route_and_send_payment( PaymentHash([0; 32]), PaymentId([0; 32]), route_params, &&router, vec![], &|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - &|_| Ok(())); + &|_| Ok(()), &log); let events = pending_events.lock().unwrap(); assert_eq!(events.len(), 1); if let Event::PaymentFailed { .. } = events[0].0 { } else { panic!("Unexpected event"); } @@ -2945,7 +2945,7 @@ mod tests { let err = outbound_payments.send_payment( PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), Retry::Attempts(0), route_params, &&router, vec![], || InFlightHtlcs::new(), - &&keys_manager, &&keys_manager, 0, &pending_events, |_| Ok(())).unwrap_err(); + &&keys_manager, &&keys_manager, 0, &pending_events, |_| Ok(()), &log).unwrap_err(); if let RetryableSendFailure::RouteNotFound = err { } else { panic!("Unexpected error"); } } @@ -2955,7 +2955,9 @@ mod tests { #[rustfmt::skip] fn initial_send_payment_path_failed_evs() { let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -2995,7 +2997,7 @@ mod tests { PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), Retry::Attempts(0), route_params.clone(), &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - |_| Err(APIError::ChannelUnavailable { err: "test".to_owned() })).unwrap(); + |_| 
Err(APIError::ChannelUnavailable { err: "test".to_owned() }), &log).unwrap(); let mut events = pending_events.lock().unwrap(); assert_eq!(events.len(), 2); if let Event::PaymentPathFailed { @@ -3013,7 +3015,7 @@ mod tests { PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), Retry::Attempts(0), route_params.clone(), &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - |_| Err(APIError::MonitorUpdateInProgress)).unwrap(); + |_| Err(APIError::MonitorUpdateInProgress), &log).unwrap(); assert_eq!(pending_events.lock().unwrap().len(), 0); // Ensure that any other error will result in a PaymentPathFailed event but no blamed scid. @@ -3021,7 +3023,7 @@ mod tests { PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([1; 32]), Retry::Attempts(0), route_params.clone(), &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - |_| Err(APIError::APIMisuseError { err: "test".to_owned() })).unwrap(); + |_| Err(APIError::APIMisuseError { err: "test".to_owned() }), &log).unwrap(); let events = pending_events.lock().unwrap(); assert_eq!(events.len(), 2); if let Event::PaymentPathFailed { @@ -3037,8 +3039,7 @@ mod tests { #[rustfmt::skip] fn removes_stale_awaiting_invoice_using_absolute_timeout() { let pending_events = Mutex::new(VecDeque::new()); - let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let absolute_expiry = 100; let tick_interval = 10; @@ -3093,8 +3094,7 @@ mod tests { #[rustfmt::skip] fn removes_stale_awaiting_invoice_using_timer_ticks() { let pending_events = Mutex::new(VecDeque::new()); - let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = 
OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let timer_ticks = 3; let expiration = StaleExpiration::TimerTicks(timer_ticks); @@ -3148,8 +3148,7 @@ mod tests { #[rustfmt::skip] fn removes_abandoned_awaiting_invoice() { let pending_events = Mutex::new(VecDeque::new()); - let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let expiration = StaleExpiration::AbsoluteTimeout(Duration::from_secs(100)); @@ -3180,6 +3179,8 @@ mod tests { #[rustfmt::skip] fn fails_sending_payment_for_expired_bolt12_invoice() { let logger = test_utils::TestLogger::new(); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -3189,7 +3190,7 @@ mod tests { let nonce = Nonce([0; 16]); let pending_events = Mutex::new(VecDeque::new()); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let expiration = StaleExpiration::AbsoluteTimeout(Duration::from_secs(100)); @@ -3214,7 +3215,7 @@ mod tests { outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| panic!() + &secp_ctx, 0, &pending_events, |_| panic!(), &log ), Err(Bolt12PaymentError::SendingFailed(RetryableSendFailure::PaymentExpired)), ); @@ -3235,6 +3236,8 @@ mod tests { #[rustfmt::skip] fn fails_finding_route_for_bolt12_invoice() { let logger = test_utils::TestLogger::new(); + let 
logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -3242,7 +3245,7 @@ mod tests { let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet); let pending_events = Mutex::new(VecDeque::new()); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let expanded_key = ExpandedKey::new([42; 32]); let nonce = Nonce([0; 16]); let payment_id = PaymentId([0; 32]); @@ -3277,7 +3280,7 @@ mod tests { outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| panic!() + &secp_ctx, 0, &pending_events, |_| panic!(), &log ), Err(Bolt12PaymentError::SendingFailed(RetryableSendFailure::RouteNotFound)), ); @@ -3298,6 +3301,8 @@ mod tests { #[rustfmt::skip] fn sends_payment_for_bolt12_invoice() { let logger = test_utils::TestLogger::new(); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -3305,7 +3310,7 @@ mod tests { let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet); let pending_events = Mutex::new(VecDeque::new()); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let expanded_key = ExpandedKey::new([42; 32]); let nonce = Nonce([0; 16]); let 
payment_id = PaymentId([0; 32]); @@ -3353,7 +3358,7 @@ mod tests { outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| panic!() + &secp_ctx, 0, &pending_events, |_| panic!(), &log ), Err(Bolt12PaymentError::UnexpectedInvoice), ); @@ -3373,7 +3378,7 @@ mod tests { outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| Ok(()) + &secp_ctx, 0, &pending_events, |_| Ok(()), &log ), Ok(()), ); @@ -3384,7 +3389,7 @@ mod tests { outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| panic!() + &secp_ctx, 0, &pending_events, |_| panic!(), &log ), Err(Bolt12PaymentError::DuplicateInvoice), ); @@ -3413,8 +3418,7 @@ mod tests { #[rustfmt::skip] fn time_out_unreleased_async_payments() { let pending_events = Mutex::new(VecDeque::new()); - let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let absolute_expiry = 60; @@ -3464,8 +3468,7 @@ mod tests { #[rustfmt::skip] fn abandon_unreleased_async_payment() { let pending_events = Mutex::new(VecDeque::new()); - let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let absolute_expiry = 60; diff --git a/lightning/src/ln/payment_tests.rs 
b/lightning/src/ln/payment_tests.rs index 6c982738a52..0eace2eab08 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -26,13 +26,14 @@ use crate::ln::channel::{ }; use crate::ln::channelmanager::{ HTLCForwardInfo, PaymentId, PendingAddHTLCInfo, PendingHTLCRouting, RecentPaymentDetails, - RecipientOnionFields, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MPP_TIMEOUT_TICKS, + BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MPP_TIMEOUT_TICKS, }; use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::{ - ProbeSendFailure, Retry, RetryableSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, + ProbeSendFailure, RecipientCustomTlvs, RecipientOnionFields, Retry, RetryableSendFailure, + IDEMPOTENCY_TIMEOUT_TICKS, }; use crate::ln::types::ChannelId; use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; @@ -45,6 +46,7 @@ use crate::sign::EntropySource; use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::types::string::UntrustedString; +use crate::util::config::HTLCInterceptionFlags; use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::test_utils; @@ -144,7 +146,7 @@ fn mpp_retry() { let onion = RecipientOnionFields::secret_only(pay_secret); let retry = Retry::Attempts(1); nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap(); - check_added_monitors!(nodes[0], 2); // one monitor per path + check_added_monitors(&nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); @@ -169,7 +171,7 @@ fn mpp_retry() { assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); - 
check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[2], &htlc_updates.commitment_signed, false, false); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -191,7 +193,7 @@ fn mpp_retry() { route.route_params = Some(route_params.clone()); nodes[0].router.expect_find_route(route_params, Ok(route)); expect_and_process_pending_htlcs(&nodes[0], false); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -210,7 +212,7 @@ fn mpp_retry_overpay() { let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); - let mut user_config = test_default_channel_config(); + let mut user_config = test_legacy_channel_config(); user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; let mut limited_1 = user_config.clone(); limited_1.channel_handshake_config.our_htlc_minimum_msat = 35_000_000; @@ -262,7 +264,7 @@ fn mpp_retry_overpay() { let onion = RecipientOnionFields::secret_only(pay_secret); let retry = Retry::Attempts(1); nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap(); - check_added_monitors!(nodes[0], 2); // one monitor per path + check_added_monitors(&nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); @@ -288,7 +290,7 @@ fn mpp_retry_overpay() { assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); 
do_commitment_signed_dance(&nodes[0], &nodes[2], &htlc_updates.commitment_signed, false, false); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -314,7 +316,7 @@ fn mpp_retry_overpay() { nodes[0].router.expect_find_route(route_params, Ok(route)); nodes[0].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); @@ -362,7 +364,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { // Initiate the MPP payment. let onion = RecipientOnionFields::secret_only(payment_secret); nodes[0].node.send_payment_with_route(route, hash, onion, PaymentId(hash.0)).unwrap(); - check_added_monitors!(nodes[0], 2); // one monitor per path + check_added_monitors(&nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); @@ -384,7 +386,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let htlc_fail_updates = get_htlc_update_msgs(&nodes[3], &node_b_id); assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); nodes[1].node.handle_update_fail_htlc(node_d_id, &htlc_fail_updates.update_fail_htlcs[0]); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); let commitment = &htlc_fail_updates.commitment_signed; do_commitment_signed_dance(&nodes[1], &nodes[3], commitment, false, false); @@ -397,7 +399,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let htlc_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let commitment = &htlc_fail_updates.commitment_signed; do_commitment_signed_dance(&nodes[0], &nodes[1], commitment, false, false); @@ -461,7 
+463,7 @@ fn do_test_keysend_payments(public_node: bool) { nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let send_event = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); @@ -510,7 +512,7 @@ fn test_mpp_keysend() { let id = PaymentId([42; 32]); let hash = nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -553,7 +555,7 @@ fn test_fulfill_hold_times() { let id = PaymentId([42; 32]); let hash = nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -621,7 +623,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let onion = RecipientOnionFields::spontaneous_empty(); let retry = Retry::Attempts(0); nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_0, params, retry).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &node_b_id); let update_add_0 = update_0.update_add_htlcs[0].clone(); @@ -629,7 +631,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { do_commitment_signed_dance(&nodes[1], &nodes[0], &update_0.commitment_signed, false, true); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); let update_1 = 
get_htlc_update_msgs(&nodes[1], &node_d_id); let update_add_1 = update_1.update_add_htlcs[0].clone(); nodes[3].node.handle_update_add_htlc(node_b_id, &update_add_1); @@ -670,7 +672,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let params = route.route_params.clone().unwrap(); let retry = Retry::Attempts(0); nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_1, params, retry).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_2 = get_htlc_update_msgs(&nodes[0], &node_c_id); let update_add_2 = update_2.update_add_htlcs[0].clone(); @@ -678,7 +680,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { do_commitment_signed_dance(&nodes[2], &nodes[0], &update_2.commitment_signed, false, true); expect_and_process_pending_htlcs(&nodes[2], false); - check_added_monitors!(&nodes[2], 1); + check_added_monitors(&nodes[2], 1); let update_3 = get_htlc_update_msgs(&nodes[2], &node_d_id); let update_add_3 = update_3.update_add_htlcs[0].clone(); nodes[3].node.handle_update_add_htlc(node_c_id, &update_add_3); @@ -710,7 +712,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { nodes[3].node.process_pending_htlc_forwards(); let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[3], &[fail_type]); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); // Fail back along nodes[2] let update_fail_0 = get_htlc_update_msgs(&nodes[3], &node_c_id); @@ -721,7 +723,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let fail_type = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_chan_id }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail_type]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let update_fail_1 = get_htlc_update_msgs(&nodes[2], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_c_id, 
&update_fail_1.update_fail_htlcs[0]); @@ -780,7 +782,12 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)], + ); let node_a_reload; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -806,7 +813,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -837,7 +844,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan_id)[0].clone(); if confirm_before_reload { mine_transaction(&nodes[0], &as_commitment_tx); - nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[0].tx_broadcaster.clear(); } // The ChannelMonitor should always be the latest version, as we're required to persist it @@ -862,7 +869,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { } else { assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty()); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.peer_disconnected(node_a_id); @@ -888,11 +895,16 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { } => { assert_eq!(node_id, node_b_id); nodes[1].node.handle_error(node_a_id, msg); - check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: 
UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", - &node_b_id)) }, &[node_a_id], 100000); - check_added_monitors!(nodes[1], 1); + let peer_msg = format!( + "Got a message for a channel from the wrong node! No such channel_id {} for the passed counterparty_node_id {}", + chan_id, node_b_id + ); + let reason = + ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg) }; + check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); + check_added_monitors(&nodes[1], 1); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[1].tx_broadcaster.clear(); }, _ => panic!("Unexpected event"), } @@ -901,13 +913,13 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // Now claim the first payment, which should allow nodes[1] to claim the payment on-chain when // we close in a moment. nodes[2].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); let mut htlc_fulfill = get_htlc_update_msgs(&nodes[2], &node_b_id); let fulfill_msg = htlc_fulfill.update_fulfill_htlcs.remove(0); nodes[1].node.handle_update_fulfill_htlc(node_c_id, fulfill_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); do_commitment_signed_dance(&nodes[1], &nodes[2], &htlc_fulfill.commitment_signed, false, false); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false); @@ -953,7 +965,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { } else { confirm_transaction(&nodes[0], &first_htlc_timeout_tx); } - nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[0].tx_broadcaster.clear(); let conditions = PaymentFailedConditions::new().from_mon_update(); expect_payment_failed_conditions(&nodes[0], 
payment_hash, false, conditions); @@ -990,7 +1002,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let id = PaymentId(payment_hash.0); let onion = RecipientOnionFields::secret_only(payment_secret); nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1017,8 +1029,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let mut manually_accept_config = test_default_channel_config(); - manually_accept_config.manually_accept_inbound_channels = true; + let mut legacy_cfg = test_legacy_channel_config(); let persist_1; let chain_monitor_1; @@ -1027,8 +1038,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let persist_3; let chain_monitor_3; - let node_chanmgrs = - create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(legacy_cfg), None]); let node_a_1; let node_a_2; let node_a_3; @@ -1071,7 +1081,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[0].node.has_pending_payments()); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let init_msg = msgs::Init { features: nodes[1].node.init_features(), @@ -1096,13 +1106,14 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { } => { assert_eq!(node_id, node_b_id); nodes[1].node.handle_error(node_a_id, msg); - let msg = format!( - "Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", - &node_b_id + let peer_msg = format!( + "Got a message for a channel from the wrong node! No such channel_id {} for the passed counterparty_node_id {}", + chan_id, node_b_id ); - let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(msg) }; + let reason = + ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg) }; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); }, _ => panic!("Unexpected event"), @@ -1115,7 +1126,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { nodes[2].node.fail_htlc_backwards(&hash); let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail_type]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let htlc_fulfill_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fail_htlc(node_c_id, &htlc_fulfill_updates.update_fail_htlcs[0]); @@ -1197,7 +1208,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // the payment is not (spuriously) listed as still pending. 
let onion = RecipientOnionFields::secret_only(payment_secret); nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], amt, hash, payment_secret); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); @@ -1253,7 +1264,9 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let persister; let chain_monitor; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let node_a_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1271,7 +1284,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( .force_close_broadcasting_latest_txn(&chan_id, &node_b_id, message.clone()) .unwrap(); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -1289,12 +1302,12 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( }; nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 10_000_000); mine_transaction(&nodes[1], &commitment_tx); check_closed_broadcast(&nodes[1], 1, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); let htlc_success_tx = { @@ -1450,7 +1463,7 @@ fn test_fulfill_restart_failure() { let mon_ser = get_monitor!(nodes[1], chan_id).encode(); 
nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 100_000); let mut htlc_fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id); @@ -1467,7 +1480,7 @@ fn test_fulfill_restart_failure() { nodes[1].node.fail_htlc_backwards(&payment_hash); let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let htlc_fail_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]); @@ -1517,7 +1530,7 @@ fn get_ldk_payment_preimage() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route.unwrap(), payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Make sure to use `get_payment_preimage` let preimage = Some(nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap()); @@ -1560,7 +1573,7 @@ fn sent_probe_is_probe_of_sending_node() { } get_htlc_update_msgs(&nodes[0], &node_b_id); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } #[test] @@ -1607,20 +1620,20 @@ fn failed_probe_yields_event() { let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap(); // node[0] -- update_add_htlcs -> node[1] - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); let probe_event = SendEvent::from_commitment_update(node_b_id, channel_id, updates); nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], 
&nodes[0], &probe_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); // node[0] <- update_fail_htlcs -- node[1] - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); let _events = nodes[1].node.get_and_clear_pending_events(); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -1658,15 +1671,15 @@ fn onchain_failed_probe_yields_event() { let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap(); // node[0] -- update_add_htlcs -> node[1] - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); let probe_event = SendEvent::from_commitment_update(node_b_id, chan_id, updates); nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &probe_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let _ = get_htlc_update_msgs(&nodes[1], &node_c_id); // Don't bother forwarding the HTLC onwards and just confirm the force-close transaction on @@ -1674,7 +1687,7 @@ fn onchain_failed_probe_yields_event() { let bs_txn = get_local_commitment_txn!(nodes[1], chan_id); confirm_transaction(&nodes[0], &bs_txn[0]); check_closed_broadcast!(&nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_added_monitors(&nodes[0], 0); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -1925,7 +1938,7 @@ fn 
claimed_send_payment_idempotent() { let onion = RecipientOnionFields::secret_only(second_payment_secret); nodes[0].node.send_payment_with_route(route, hash_b, onion, payment_id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, hash_b, second_payment_secret); claim_payment(&nodes[0], &[&nodes[1]], preimage_b); } @@ -1994,7 +2007,7 @@ fn abandoned_send_payment_idempotent() { // failed payment back. let onion = RecipientOnionFields::secret_only(second_payment_secret); nodes[0].node.send_payment_with_route(route, hash_b, onion, payment_id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, hash_b, second_payment_secret); claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage); } @@ -2163,12 +2176,12 @@ fn test_holding_cell_inflight_htlcs() { let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); } let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); @@ -2208,9 +2221,9 @@ fn do_test_intercepted_payment(test: InterceptTest) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut zero_conf_chan_config = test_default_channel_config(); - zero_conf_chan_config.manually_accept_inbound_channels = true; let mut intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + intercept_forwards_config.htlc_interception_flags = + 
HTLCInterceptionFlags::ToInterceptSCIDs as u8; let configs = [None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); @@ -2275,6 +2288,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { // Check that we generate the PaymentIntercepted event when an intercept forward is detected. let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); + let expected_cltv = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; let (intercept_id, outbound_amt) = match events[0] { crate::events::Event::HTLCIntercepted { intercept_id, @@ -2282,10 +2296,12 @@ fn do_test_intercepted_payment(test: InterceptTest) { payment_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id, + outgoing_htlc_expiry_block_height, } => { assert_eq!(payment_hash, hash); assert_eq!(inbound_amount_msat, route.get_total_amount() + route.get_total_fees()); assert_eq!(short_channel_id, intercept_scid); + assert_eq!(outgoing_htlc_expiry_block_height.unwrap(), expected_cltv); (intercept_id, expected_outbound_amount_msat) }, _ => panic!(), @@ -2296,7 +2312,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt); let err = format!( - "Channel with id {} not found for the passed counterparty node_id {}", + "No such channel_id {} for the passed counterparty_node_id {}", chan_id, node_c_id, ); assert_eq!(unknown_chan_id_err, Err(APIError::ChannelUnavailable { err })); @@ -2309,7 +2325,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[fail]); nodes[1].node.process_pending_htlc_forwards(); let update_fail = get_htlc_update_msgs(&nodes[1], &node_a_id); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_fail.update_fail_htlcs.len() == 1); let fail_msg = 
update_fail.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg); @@ -2354,6 +2370,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; + assert_eq!(payment_event.msgs[0].cltv_expiry, expected_cltv); nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); let commitment = &payment_event.commitment_msg; do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, true); @@ -2394,7 +2411,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { let fail_type = HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[1], &[fail_type]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let htlc_fail = get_htlc_update_msgs(&nodes[1], &node_a_id); assert!(htlc_fail.update_add_htlcs.is_empty()); @@ -2435,7 +2452,8 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let max_in_flight_percent = 10; let mut intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; intercept_forwards_config .channel_handshake_config .max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent; @@ -2490,7 +2508,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let id = PaymentId(payment_hash.0); nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(0)).unwrap(); - check_added_monitors!(nodes[0], num_mpp_parts); // one monitor per path + check_added_monitors(&nodes[0], num_mpp_parts); // one monitor per path let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), num_mpp_parts); @@ -2647,7 +2665,7 @@ fn do_automatic_retries(test: AutoRetry) { macro_rules! 
pass_failed_attempt_with_retry_along_path { ($failing_channel_id: expr, $expect_pending_htlcs_forwardable: expr) => { // Send a payment attempt that fails due to lack of liquidity on the second hop - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_0 = get_htlc_update_msgs(&nodes[0], &node_b_id); let mut update_add = update_0.update_add_htlcs[0].clone(); nodes[1].node.handle_update_add_htlc(node_a_id, &update_add); @@ -2664,7 +2682,7 @@ fn do_automatic_retries(test: AutoRetry) { ); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs(&nodes[1], &node_a_id); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg); @@ -2710,7 +2728,7 @@ fn do_automatic_retries(test: AutoRetry) { // We retry payments in `process_pending_htlc_forwards` nodes[0].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); @@ -2738,7 +2756,7 @@ fn do_automatic_retries(test: AutoRetry) { // We retry payments in `process_pending_htlc_forwards` nodes[0].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); @@ -2861,7 +2879,9 @@ fn auto_retry_partial_failure() { // Test that we'll retry appropriately on send partial failure and retry partial failure. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3008,7 +3028,7 @@ fn auto_retry_partial_failure() { } // Pass the first part of the payment along the path. - check_added_monitors!(nodes[0], 1); // only one HTLC actually made it out + check_added_monitors(&nodes[0], 1); // only one HTLC actually made it out let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); // Only one HTLC/channel update actually made it out @@ -3017,35 +3037,35 @@ fn auto_retry_partial_failure() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_2nd_htlcs = SendEvent::from_node(&nodes[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &as_2nd_htlcs.msgs[0]); nodes[1].node.handle_update_add_htlc(node_a_id, &as_2nd_htlcs.msgs[1]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, 
&as_2nd_htlcs.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); nodes[1].node.process_pending_htlc_forwards(); @@ -3058,19 +3078,19 @@ fn auto_retry_partial_failure() { nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_claim.update_fulfill_htlcs.remove(0)); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_claim.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_third_raa, as_third_cs) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_third_raa); - check_added_monitors!(nodes[1], 4); + check_added_monitors(&nodes[1], 4); let mut bs_2nd_claim = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_third_cs); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); let bs_second_fulfill_a = 
bs_2nd_claim.update_fulfill_htlcs.remove(0); @@ -3078,18 +3098,18 @@ fn auto_retry_partial_failure() { nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_second_fulfill_a); nodes[0].node.handle_update_fulfill_htlc(node_b_id, bs_second_fulfill_b); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_2nd_claim.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_fourth_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_fourth_cs); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); if let Event::PaymentPathSuccessful { .. 
} = events[0] { @@ -3106,7 +3126,9 @@ fn auto_retry_partial_failure() { fn auto_retry_zero_attempts_send_error() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); @@ -3167,7 +3189,7 @@ fn auto_retry_zero_attempts_send_error() { } else { panic!(); } - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); } #[test] @@ -3203,12 +3225,12 @@ fn fails_paying_after_rejected_by_payee() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat); @@ -3336,7 +3358,7 @@ fn retry_multi_path_single_failed_payment() { } let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(htlc_msgs.len(), 2); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); } #[test] @@ -3417,7 +3439,7 @@ fn immediate_retry_on_failure() { } let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events(); 
assert_eq!(htlc_msgs.len(), 2); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); } #[test] @@ -3541,40 +3563,40 @@ fn no_extra_retries_on_back_to_back_fail() { nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); let first_htlc = SendEvent::from_node(&nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert_eq!(first_htlc.msgs.len(), 1); nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let second_htlc = SendEvent::from_node(&nodes[0]); assert_eq!(second_htlc.msgs.len(), 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &second_htlc.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let 
as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs(&nodes[1], false); let next_hop_failure = @@ -3631,7 +3653,7 @@ fn no_extra_retries_on_back_to_back_fail() { nodes[0].node.process_pending_htlc_forwards(); let retry_htlc_updates = SendEvent::from_node(&nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]); let commitment = &retry_htlc_updates.commitment_msg; @@ -3785,26 +3807,26 @@ fn test_simple_partial_retry() { let id = PaymentId(payment_hash.0); nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); let first_htlc = SendEvent::from_node(&nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert_eq!(first_htlc.msgs.len(), 1); nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let second_htlc_updates = SendEvent::from_node(&nodes[0]); assert_eq!(second_htlc_updates.msgs.len(), 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); 
nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc_updates.msgs[0]); let commitment = &second_htlc_updates.commitment_msg; @@ -3860,14 +3882,14 @@ fn test_simple_partial_retry() { nodes[0].node.process_pending_htlc_forwards(); let retry_htlc_updates = SendEvent::from_node(&nodes[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]); let commitment = &retry_htlc_updates.commitment_msg; do_commitment_signed_dance(&nodes[1], &nodes[0], commitment, false, true); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_second_forward = get_htlc_update_msgs(&nodes[1], &node_c_id); nodes[2].node.handle_update_add_htlc(node_b_id, &bs_second_forward.update_add_htlcs[0]); @@ -3987,7 +4009,7 @@ fn test_threaded_payment_retries() { let id = PaymentId(payment_hash.0); let retry = Retry::Attempts(0xdeadbeef); nodes[0].node.send_payment(payment_hash, onion, id, route_params.clone(), retry).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(send_msg_events.len(), 2); send_msg_events.retain(|msg| { @@ -4086,7 +4108,7 @@ fn test_threaded_payment_retries() { nodes[0].node.process_pending_htlc_forwards(); send_msg_events = nodes[0].node.get_and_clear_pending_msg_events(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); if cur_time > end_time { break; @@ -4102,7 +4124,9 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let (persist_a, persist_b, persist_c); let (chain_monitor_a, chain_monitor_b, chain_monitor_c); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + 
create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let (node_a_1, node_a_2, node_a_3); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -4124,14 +4148,14 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: } nodes[1].node.claim_funds(our_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000); if at_midpoint { let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0)); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } else { let mut fulfill = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, fulfill.update_fulfill_htlcs.remove(0)); @@ -4243,7 +4267,17 @@ fn do_claim_from_closed_chan(fail_payment: bool) { // CLTVs on the paths to different value resulting in a different claim deadline. 
let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs( + 4, + &node_cfgs, + &[ + Some(legacy_cfg.clone()), + Some(legacy_cfg.clone()), + Some(legacy_cfg.clone()), + Some(legacy_cfg), + ], + ); let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4466,7 +4500,7 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { let mut payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - check_added_monitors!(&nodes[1], 0); + check_added_monitors(&nodes[1], 0); do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); @@ -4532,11 +4566,11 @@ fn test_retry_custom_tlvs() { let custom_tlvs = vec![((1 << 16) + 1, vec![0x42u8; 16])]; let onion = RecipientOnionFields::secret_only(payment_secret); - let onion = onion.with_custom_tlvs(custom_tlvs.clone()).unwrap(); + let onion = onion.with_custom_tlvs(RecipientCustomTlvs::new(custom_tlvs.clone()).unwrap()); nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); nodes[0].node.send_payment(hash, onion, id, route_params.clone(), Retry::Attempts(1)).unwrap(); - check_added_monitors!(nodes[0], 1); // one monitor per path + check_added_monitors(&nodes[0], 1); // one monitor per path // Add the HTLC along the first hop. 
let htlc_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); @@ -4550,7 +4584,7 @@ fn test_retry_custom_tlvs() { let events = nodes[1].node.get_and_clear_pending_events(); let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2_id }; expect_htlc_failure_conditions(events, &[fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let htlc_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates; @@ -4571,7 +4605,7 @@ fn test_retry_custom_tlvs() { route.route_params = Some(route_params.clone()); nodes[0].router.expect_find_route(route_params, Ok(route)); nodes[0].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1], &nodes[2]]; @@ -4673,7 +4707,7 @@ fn do_test_custom_tlvs_consistency( .node .test_send_payment_along_path(path_a, &hash, onion, amt_msat, cur_height, id, &None, priv_a) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -4695,7 +4729,7 @@ fn do_test_custom_tlvs_consistency( .node .test_send_payment_along_path(path_b, &hash, onion, amt_msat, cur_height, id, &None, priv_b) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -4707,14 +4741,14 @@ fn do_test_custom_tlvs_consistency( do_commitment_signed_dance(&nodes[2], &nodes[0], commitment, false, false); expect_and_process_pending_htlcs(&nodes[2], false); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = 
SendEvent::from_event(events.pop().unwrap()); nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]); - check_added_monitors!(nodes[3], 0); + check_added_monitors(&nodes[3], 0); do_commitment_signed_dance(&nodes[3], &nodes[2], &payment_event.commitment_msg, true, true); } expect_htlc_failure_conditions(nodes[3].node.get_and_clear_pending_events(), &[]); @@ -4743,7 +4777,7 @@ fn do_test_custom_tlvs_consistency( &nodes[3], &expected_destinations, ); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); let fail_updates_1 = get_htlc_update_msgs(&nodes[3], &node_c_id); nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]); @@ -4753,7 +4787,7 @@ fn do_test_custom_tlvs_consistency( let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }; expect_and_process_pending_htlcs_and_htlc_handling_failed(&nodes[2], &[fail]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let fail_updates_2 = get_htlc_update_msgs(&nodes[2], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]); @@ -4815,7 +4849,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { }; let retry = Retry::Attempts(1); nodes[0].node.send_payment(payment_hash, onion, payment_id, route_params, retry).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); let mut send_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(send_events.len(), 2); @@ -4951,7 +4985,6 @@ fn test_htlc_forward_considers_anchor_outputs_value() { // balance to dip below the reserve when considering the value of anchor outputs. 
let mut config = test_default_channel_config(); config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - config.manually_accept_inbound_channels = true; config.channel_config.forwarding_fee_base_msat = 0; config.channel_config.forwarding_fee_proportional_millionths = 0; @@ -5009,7 +5042,7 @@ fn test_htlc_forward_considers_anchor_outputs_value() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -5072,8 +5105,7 @@ fn peel_payment_onion_custom_tlvs() { let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap(); let mut recipient_onion = RecipientOnionFields::spontaneous_empty() - .with_custom_tlvs(vec![(414141, vec![42; 1200])]) - .unwrap(); + .with_custom_tlvs(RecipientCustomTlvs::new(vec![(414141, vec![42; 1200])]).unwrap()); let prng_seed = chanmon_cfgs[0].keys_manager.get_secure_random_bytes(); let session_priv = SecretKey::from_slice(&prng_seed[..]).expect("RNG is busted"); let keysend_preimage = PaymentPreimage([42; 32]); @@ -5103,6 +5135,7 @@ fn peel_payment_onion_custom_tlvs() { onion_routing_packet, blinding_point: None, hold_htlc: None, + accountable: None, }; let peeled_onion = crate::ln::onion_payment::peel_payment_onion( &update_add, @@ -5134,7 +5167,7 @@ fn test_non_strict_forwarding() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let mut config = test_default_channel_config(); + let mut config = test_legacy_channel_config(); config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; let configs = [Some(config.clone()), 
Some(config.clone()), Some(config)]; @@ -5169,7 +5202,7 @@ fn test_non_strict_forwarding() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); let mut send_event = SendEvent::from_event(msg_events.remove(0)); @@ -5177,7 +5210,7 @@ fn test_non_strict_forwarding() { do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); send_event = SendEvent::from_event(msg_events.remove(0)); @@ -5209,7 +5242,7 @@ fn test_non_strict_forwarding() { let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); let mut send_event = SendEvent::from_event(msg_events.remove(0)); @@ -5217,7 +5250,7 @@ fn test_non_strict_forwarding() { do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); expect_and_process_pending_htlcs(&nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let routed_scid = route.paths[0].hops[1].short_channel_id; let routed_chan_id = match routed_scid { scid if scid == chan_update_1.contents.short_channel_id => channel_id_1, @@ -5346,7 +5379,7 @@ fn pay_route_without_params() { let id = PaymentId(hash.0); nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); - 
check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); @@ -5396,11 +5429,10 @@ fn max_out_mpp_path() { ..Default::default() }; let invoice = nodes[2].node.create_bolt11_invoice(invoice_params).unwrap(); - let route_params_cfg = crate::routing::router::RouteParametersConfig::default(); + let optional_params = crate::ln::channelmanager::OptionalBolt11PaymentParams::default(); let id = PaymentId([42; 32]); - let retry = Retry::Attempts(0); - nodes[0].node.pay_for_bolt11_invoice(&invoice, id, None, route_params_cfg, retry).unwrap(); + nodes[0].node.pay_for_bolt11_invoice(&invoice, id, None, optional_params).unwrap(); assert!(nodes[0].node.list_recent_payments().len() == 1); check_added_monitors(&nodes[0], 2); // one monitor update per MPP part diff --git a/lightning/src/ln/peer_channel_encryptor.rs b/lightning/src/ln/peer_channel_encryptor.rs index 09b970a9ab2..5554c5a8c19 100644 --- a/lightning/src/ln/peer_channel_encryptor.rs +++ b/lightning/src/ln/peer_channel_encryptor.rs @@ -12,7 +12,9 @@ use crate::prelude::*; use crate::ln::msgs; use crate::ln::msgs::LightningError; use crate::ln::wire; +use crate::ln::wire::Type; use crate::sign::{NodeSigner, Recipient}; +use crate::util::ser::Writeable; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::{Hash, HashEngine}; @@ -28,8 +30,6 @@ use crate::crypto::chacha20poly1305rfc::ChaCha20Poly1305RFC; use crate::crypto::utils::hkdf_extract_expand_twice; use crate::util::ser::VecWriter; -use core::ops::Deref; - /// Maximum Lightning message data length according to /// [BOLT-8](https://github.com/lightning/bolts/blob/v1.0/08-transport.md#lightning-message-specification) /// and [BOLT-1](https://github.com/lightning/bolts/blob/master/01-messaging.md#lightning-message-format): @@ -50,10 +50,7 @@ const NOISE_H: [u8; 32] = [ 0x4b, 
0xb4, 0x20, 0xd8, 0x9d, 0x2a, 0x04, 0x8a, 0x3c, 0x4f, 0x4c, 0x09, 0x2e, 0x37, 0xb6, 0x76, ]; -enum NoiseSecretKey<'a, 'b, NS: Deref> -where - NS::Target: NodeSigner, -{ +enum NoiseSecretKey<'a, 'b, NS: NodeSigner> { InMemory(&'a SecretKey), NodeSigner(&'b NS), } @@ -128,10 +125,7 @@ impl PeerChannelEncryptor { } } - pub fn new_inbound(node_signer: &NS) -> PeerChannelEncryptor - where - NS::Target: NodeSigner, - { + pub fn new_inbound(node_signer: &NS) -> PeerChannelEncryptor { let mut sha = Sha256::engine(); sha.input(&NOISE_H); let our_node_id = node_signer.get_node_id(Recipient::Node).unwrap(); @@ -246,12 +240,9 @@ impl PeerChannelEncryptor { } #[inline] - fn inbound_noise_act<'a, 'b, NS: Deref>( + fn inbound_noise_act<'a, 'b, NS: NodeSigner>( state: &mut BidirectionalNoiseState, act: &[u8], secret_key: NoiseSecretKey<'a, 'b, NS>, - ) -> Result<(PublicKey, [u8; 32]), LightningError> - where - NS::Target: NodeSigner, - { + ) -> Result<(PublicKey, [u8; 32]), LightningError> { assert_eq!(act.len(), 50); if act[0] != 0 { @@ -325,13 +316,10 @@ impl PeerChannelEncryptor { } } - pub fn process_act_one_with_keys( + pub fn process_act_one_with_keys( &mut self, act_one: &[u8], node_signer: &NS, our_ephemeral: SecretKey, secp_ctx: &Secp256k1, - ) -> Result<[u8; 50], LightningError> - where - NS::Target: NodeSigner, - { + ) -> Result<[u8; 50], LightningError> { assert_eq!(act_one.len(), 50); match self.noise_state { @@ -370,12 +358,9 @@ impl PeerChannelEncryptor { } } - pub fn process_act_two( + pub fn process_act_two( &mut self, act_two: &[u8], node_signer: &NS, - ) -> Result<([u8; 66], PublicKey), LightningError> - where - NS::Target: NodeSigner, - { + ) -> Result<([u8; 66], PublicKey), LightningError> { assert_eq!(act_two.len(), 50); let final_hkdf; @@ -565,12 +550,14 @@ impl PeerChannelEncryptor { /// Encrypts the given message, returning the encrypted version. 
/// panics if the length of `message`, once encoded, is greater than 65535 or if the Noise /// handshake has not finished. - pub fn encrypt_message(&mut self, message: &M) -> Vec { + pub fn encrypt_message(&mut self, message: wire::Message) -> Vec { // Allocate a buffer with 2KB, fitting most common messages. Reserve the first 16+2 bytes // for the 2-byte message type prefix and its MAC. let mut res = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); res.0.resize(16 + 2, 0); - wire::write(message, &mut res).expect("In-memory messages must never fail to serialize"); + + message.type_id().write(&mut res).expect("In-memory messages must never fail to serialize"); + message.write(&mut res).expect("In-memory messages must never fail to serialize"); self.encrypt_message_with_header_0s(&mut res.0); res.0 diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index c3b490ef31a..759a1e7d887 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -29,7 +29,7 @@ use crate::ln::peer_channel_encryptor::{ }; use crate::ln::types::ChannelId; use crate::ln::wire; -use crate::ln::wire::{Encode, Type}; +use crate::ln::wire::{Encode, Message, Type}; use crate::onion_message::async_payments::{ AsyncPaymentsMessageHandler, HeldHtlcAvailable, OfferPaths, OfferPathsRequest, ReleaseHeldHtlc, ServeStaticInvoice, StaticInvoicePersisted, @@ -59,6 +59,7 @@ use core::convert::Infallible; use core::ops::Deref; use core::sync::atomic::{AtomicBool, AtomicI32, AtomicU32, Ordering}; use core::{cmp, fmt, hash, mem}; + #[cfg(not(c_bindings))] use { crate::chain::chainmonitor::ChainMonitor, @@ -124,6 +125,31 @@ pub trait CustomMessageHandler: wire::CustomMessageReader { fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures; } +impl> CustomMessageHandler for C { + fn handle_custom_message( + &self, msg: Self::CustomMessage, sender_node_id: PublicKey, + ) -> Result<(), LightningError> { + 
self.deref().handle_custom_message(msg, sender_node_id) + } + fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { + self.deref().get_and_clear_pending_msg() + } + fn peer_disconnected(&self, their_node_id: PublicKey) { + self.deref().peer_disconnected(their_node_id) + } + fn peer_connected( + &self, their_node_id: PublicKey, msg: &Init, inbound: bool, + ) -> Result<(), ()> { + self.deref().peer_connected(their_node_id, msg, inbound) + } + fn provided_node_features(&self) -> NodeFeatures { + self.deref().provided_node_features() + } + fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures { + self.deref().provided_init_features(their_node_id) + } +} + /// A dummy struct which implements `RoutingMessageHandler` without storing any routing information /// or doing any processing. You can provide one of these as the route_handler in a MessageHandler. pub struct IgnoringMessageHandler {} @@ -286,13 +312,6 @@ impl OnionMessageContents for Infallible { } } -impl Deref for IgnoringMessageHandler { - type Target = IgnoringMessageHandler; - fn deref(&self) -> &Self { - self - } -} - // Implement Type for Infallible, note that it cannot be constructed, and thus you can never call a // method that takes self for it. impl wire::Type for Infallible { @@ -566,22 +585,14 @@ impl ChannelMessageHandler for ErroringMessageHandler { fn message_received(&self) {} } -impl Deref for ErroringMessageHandler { - type Target = ErroringMessageHandler; - fn deref(&self) -> &Self { - self - } -} - /// Provides references to trait impls which handle different types of messages. 
-pub struct MessageHandler -where - CM::Target: ChannelMessageHandler, - RM::Target: RoutingMessageHandler, - OM::Target: OnionMessageHandler, - CustomM::Target: CustomMessageHandler, - SM::Target: SendOnlyMessageHandler, -{ +pub struct MessageHandler< + CM: ChannelMessageHandler, + RM: RoutingMessageHandler, + OM: OnionMessageHandler, + CustomM: CustomMessageHandler, + SM: SendOnlyMessageHandler, +> { /// A message handler which handles messages specific to channels. Usually this is just a /// [`ChannelManager`] object or an [`ErroringMessageHandler`]. /// @@ -969,20 +980,13 @@ pub type SimpleRefPeerManager< #[allow(missing_docs)] pub trait APeerManager { type Descriptor: SocketDescriptor; - type CMT: ChannelMessageHandler + ?Sized; - type CM: Deref; - type RMT: RoutingMessageHandler + ?Sized; - type RM: Deref; - type OMT: OnionMessageHandler + ?Sized; - type OM: Deref; - type LT: Logger + ?Sized; - type L: Deref; - type CMHT: CustomMessageHandler + ?Sized; - type CMH: Deref; - type NST: NodeSigner + ?Sized; - type NS: Deref; - type SMT: SendOnlyMessageHandler + ?Sized; - type SM: Deref; + type CM: ChannelMessageHandler; + type RM: RoutingMessageHandler; + type OM: OnionMessageHandler; + type Logger: Logger; + type CMH: CustomMessageHandler; + type NodeSigner: NodeSigner; + type SM: SendOnlyMessageHandler; /// Gets a reference to the underlying [`PeerManager`]. 
fn as_ref( &self, @@ -991,46 +995,31 @@ pub trait APeerManager { Self::CM, Self::RM, Self::OM, - Self::L, + Self::Logger, Self::CMH, - Self::NS, + Self::NodeSigner, Self::SM, >; } impl< Descriptor: SocketDescriptor, - CM: Deref, - RM: Deref, - OM: Deref, - L: Deref, - CMH: Deref, - NS: Deref, - SM: Deref, + CM: ChannelMessageHandler, + RM: RoutingMessageHandler, + OM: OnionMessageHandler, + L: Logger, + CMH: CustomMessageHandler, + NS: NodeSigner, + SM: SendOnlyMessageHandler, > APeerManager for PeerManager -where - CM::Target: ChannelMessageHandler, - RM::Target: RoutingMessageHandler, - OM::Target: OnionMessageHandler, - L::Target: Logger, - CMH::Target: CustomMessageHandler, - NS::Target: NodeSigner, - SM::Target: SendOnlyMessageHandler, { type Descriptor = Descriptor; - type CMT = ::Target; type CM = CM; - type RMT = ::Target; type RM = RM; - type OMT = ::Target; type OM = OM; - type LT = ::Target; - type L = L; - type CMHT = ::Target; + type Logger = L; type CMH = CMH; - type NST = ::Target; - type NS = NS; - type SMT = ::Target; + type NodeSigner = NS; type SM = SM; fn as_ref(&self) -> &PeerManager { self @@ -1058,22 +1047,14 @@ where /// [`read_event`]: PeerManager::read_event pub struct PeerManager< Descriptor: SocketDescriptor, - CM: Deref, - RM: Deref, - OM: Deref, - L: Deref, - CMH: Deref, - NS: Deref, - SM: Deref, -> where - CM::Target: ChannelMessageHandler, - RM::Target: RoutingMessageHandler, - OM::Target: OnionMessageHandler, - L::Target: Logger, - CMH::Target: CustomMessageHandler, - NS::Target: NodeSigner, - SM::Target: SendOnlyMessageHandler, -{ + CM: ChannelMessageHandler, + RM: RoutingMessageHandler, + OM: OnionMessageHandler, + L: Logger, + CMH: CustomMessageHandler, + NS: NodeSigner, + SM: SendOnlyMessageHandler, +> { message_handler: MessageHandler, /// Connection state for each connected peer - we have an outer read-write lock which is taken /// as read while we're doing processing for a peer and taken write when a peer is being added @@ 
-1121,7 +1102,7 @@ pub struct PeerManager< } enum LogicalMessage { - FromWire(wire::Message), + FromWire(Message), CommitmentSignedBatch(ChannelId, Vec), } @@ -1142,22 +1123,21 @@ impl From for MessageHandlingError { } } -macro_rules! encode_msg { - ($msg: expr) => {{ - let mut buffer = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); - wire::write($msg, &mut buffer).unwrap(); - buffer.0 - }}; +fn encode_message(message: wire::Message) -> Vec { + let mut buffer = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); + message.type_id().write(&mut buffer).expect("In-memory messages must never fail to serialize"); + message.write(&mut buffer).expect("In-memory messages must never fail to serialize"); + buffer.0 } -impl - PeerManager -where - CM::Target: ChannelMessageHandler, - OM::Target: OnionMessageHandler, - L::Target: Logger, - NS::Target: NodeSigner, - SM::Target: SendOnlyMessageHandler, +impl< + Descriptor: SocketDescriptor, + CM: ChannelMessageHandler, + OM: OnionMessageHandler, + L: Logger, + NS: NodeSigner, + SM: SendOnlyMessageHandler, + > PeerManager { /// Constructs a new `PeerManager` with the given `ChannelMessageHandler` and /// `OnionMessageHandler`. No routing message handler is used and network graph messages are @@ -1193,7 +1173,7 @@ where } } -impl +impl PeerManager< Descriptor, ErroringMessageHandler, @@ -1203,10 +1183,7 @@ impl IgnoringMessageHandler, NS, IgnoringMessageHandler, - > where - RM::Target: RoutingMessageHandler, - L::Target: Logger, - NS::Target: NodeSigner, + > { /// Constructs a new `PeerManager` with the given `RoutingMessageHandler`. 
No channel message /// handler or onion message handler is used and onion and channel messages will be ignored (or @@ -1292,22 +1269,14 @@ fn filter_addresses(ip_address: Option) -> Option impl< Descriptor: SocketDescriptor, - CM: Deref, - RM: Deref, - OM: Deref, - L: Deref, - CMH: Deref, - NS: Deref, - SM: Deref, + CM: ChannelMessageHandler, + RM: RoutingMessageHandler, + OM: OnionMessageHandler, + L: Logger, + CMH: CustomMessageHandler, + NS: NodeSigner, + SM: SendOnlyMessageHandler, > PeerManager -where - CM::Target: ChannelMessageHandler, - RM::Target: RoutingMessageHandler, - OM::Target: OnionMessageHandler, - L::Target: Logger, - CMH::Target: CustomMessageHandler, - NS::Target: NodeSigner, - SM::Target: SendOnlyMessageHandler, { /// Constructs a new `PeerManager` with the given message handlers. /// @@ -1572,7 +1541,8 @@ where if let Some(next_onion_message) = handler.next_onion_message_for_peer(peer_node_id) { - self.enqueue_message(peer, &next_onion_message); + let msg = Message::OnionMessage(next_onion_message); + self.enqueue_message(peer, msg); } } } @@ -1590,16 +1560,20 @@ where if let Some((announce, update_a_option, update_b_option)) = self.message_handler.route_handler.get_next_channel_announcement(c) { - self.enqueue_message(peer, &announce); + peer.sync_status = InitSyncTracker::ChannelsSyncing( + announce.contents.short_channel_id + 1, + ); + let msg = Message::ChannelAnnouncement(announce); + self.enqueue_message(peer, msg); + if let Some(update_a) = update_a_option { - self.enqueue_message(peer, &update_a); + let msg = Message::ChannelUpdate(update_a); + self.enqueue_message(peer, msg); } if let Some(update_b) = update_b_option { - self.enqueue_message(peer, &update_b); + let msg = Message::ChannelUpdate(update_b); + self.enqueue_message(peer, msg); } - peer.sync_status = InitSyncTracker::ChannelsSyncing( - announce.contents.short_channel_id + 1, - ); } else { peer.sync_status = InitSyncTracker::ChannelsSyncing(0xffff_ffff_ffff_ffff); @@ -1608,8 
+1582,9 @@ where InitSyncTracker::ChannelsSyncing(c) if c == 0xffff_ffff_ffff_ffff => { let handler = &self.message_handler.route_handler; if let Some(msg) = handler.get_next_node_announcement(None) { - self.enqueue_message(peer, &msg); peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id); + let msg = Message::NodeAnnouncement(msg); + self.enqueue_message(peer, msg); } else { peer.sync_status = InitSyncTracker::NoSyncRequested; } @@ -1618,8 +1593,9 @@ where InitSyncTracker::NodesSyncing(sync_node_id) => { let handler = &self.message_handler.route_handler; if let Some(msg) = handler.get_next_node_announcement(Some(&sync_node_id)) { - self.enqueue_message(peer, &msg); peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id); + let msg = Message::NodeAnnouncement(msg); + self.enqueue_message(peer, msg); } else { peer.sync_status = InitSyncTracker::NoSyncRequested; } @@ -1727,7 +1703,7 @@ where } /// Append a message to a peer's pending outbound/write buffer - fn enqueue_message(&self, peer: &mut Peer, message: &M) { + fn enqueue_message(&self, peer: &mut Peer, message: Message) { let their_node_id = peer.their_node_id.map(|p| p.0); if their_node_id.is_some() { let logger = WithContext::from(&self.logger, their_node_id, None, None); @@ -1792,12 +1768,14 @@ where }, msgs::ErrorAction::SendErrorMessage { msg } => { log_debug!(logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err); - self.enqueue_message($peer, &msg); + let msg = Message::Error(msg); + self.enqueue_message($peer, msg); continue; }, msgs::ErrorAction::SendWarningMessage { msg, log_level } => { log_given_level!(logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err); - self.enqueue_message($peer, &msg); + let msg = Message::Warning(msg); + self.enqueue_message($peer, msg); continue; }, } @@ -1892,7 +1870,8 @@ where peer.their_socket_address.clone(), ), 
}; - self.enqueue_message(peer, &resp); + let msg = Message::Init(resp); + self.enqueue_message(peer, msg); }, NextNoiseStep::ActThree => { let res = peer @@ -1912,7 +1891,8 @@ where peer.their_socket_address.clone(), ), }; - self.enqueue_message(peer, &resp); + let msg = Message::Init(resp); + self.enqueue_message(peer, msg); }, NextNoiseStep::NoiseComplete => { if peer.pending_read_is_header { @@ -1939,7 +1919,7 @@ where let message_result = wire::read( &mut &peer.pending_read_buffer [..peer.pending_read_buffer.len() - 16], - &*self.message_handler.custom_message_handler, + &self.message_handler.custom_message_handler, ); // Reset read buffer @@ -1972,8 +1952,11 @@ where let channel_id = ChannelId::new_zero(); let data = "Unsupported message compression: zlib" .to_owned(); - let msg = msgs::WarningMessage { channel_id, data }; - self.enqueue_message(peer, &msg); + let msg = Message::Warning(msgs::WarningMessage { + channel_id, + data, + }); + self.enqueue_message(peer, msg); continue; }, (_, Some(ty)) if is_gossip_msg(ty) => { @@ -1983,8 +1966,11 @@ where "Unreadable/bogus gossip message of type {}", ty ); - let msg = msgs::WarningMessage { channel_id, data }; - self.enqueue_message(peer, &msg); + let msg = Message::Warning(msgs::WarningMessage { + channel_id, + data, + }); + self.enqueue_message(peer, msg); continue; }, (msgs::DecodeError::UnknownRequiredFeature, _) => { @@ -2046,7 +2032,7 @@ where for msg in msgs_to_forward.drain(..) { self.forward_broadcast_msg( &*peers, - &msg, + msg, peer_node_id.as_ref().map(|(pk, _)| pk), false, ); @@ -2060,9 +2046,7 @@ where /// Returns the message back if it needs to be broadcasted to all other peers. 
fn handle_message( &self, peer_mutex: &Mutex, peer_lock: MutexGuard, - message: wire::Message< - <::Target as wire::CustomMessageReader>::CustomMessage, - >, + message: Message, ) -> Result, MessageHandlingError> { let their_node_id = peer_lock .their_node_id @@ -2102,21 +2086,13 @@ where // Returns `None` if the message was fully processed and otherwise returns the message back to // allow it to be subsequently processed by `do_handle_message_without_peer_lock`. fn do_handle_message_holding_peer_lock<'a>( - &self, mut peer_lock: MutexGuard, - message: wire::Message< - <::Target as wire::CustomMessageReader>::CustomMessage, - >, + &self, mut peer_lock: MutexGuard, message: Message, their_node_id: PublicKey, logger: &WithContext<'a, L>, - ) -> Result< - Option< - LogicalMessage<<::Target as wire::CustomMessageReader>::CustomMessage>, - >, - MessageHandlingError, - > { + ) -> Result>, MessageHandlingError> { peer_lock.received_message_since_timer_tick = true; // Need an Init as first message - if let wire::Message::Init(msg) = message { + if let Message::Init(msg) = message { // Check if we have any compatible chains if the `networks` field is specified. if let Some(networks) = &msg.networks { let chan_handler = &self.message_handler.chan_handler; @@ -2225,7 +2201,7 @@ where // During splicing, commitment_signed messages need to be collected into a single batch // before they are handled. 
- if let wire::Message::StartBatch(msg) = message { + if let Message::StartBatch(msg) = message { if peer_lock.message_batch.is_some() { let error = format!( "Peer {} sent start_batch for channel {} before previous batch completed", @@ -2296,7 +2272,7 @@ where return Ok(None); } - if let wire::Message::CommitmentSigned(msg) = message { + if let Message::CommitmentSigned(msg) = message { if let Some(message_batch) = &mut peer_lock.message_batch { let MessageBatchImpl::CommitmentSigned(ref mut messages) = &mut message_batch.messages; @@ -2325,7 +2301,7 @@ where return Ok(None); } } else { - return Ok(Some(LogicalMessage::FromWire(wire::Message::CommitmentSigned(msg)))); + return Ok(Some(LogicalMessage::FromWire(Message::CommitmentSigned(msg)))); } } else if let Some(message_batch) = &peer_lock.message_batch { match message_batch.messages { @@ -2341,7 +2317,7 @@ where return Err(PeerHandleError {}.into()); } - if let wire::Message::GossipTimestampFilter(_msg) = message { + if let Message::GossipTimestampFilter(_msg) = message { // When supporting gossip messages, start initial gossip sync only after we receive // a GossipTimestampFilter if peer_lock.their_features.as_ref().unwrap().supports_gossip_queries() @@ -2373,7 +2349,7 @@ where return Ok(None); } - if let wire::Message::ChannelAnnouncement(ref _msg) = message { + if let Message::ChannelAnnouncement(ref _msg) = message { peer_lock.received_channel_announce_since_backlogged = true; } @@ -2384,10 +2360,7 @@ where // // Returns the message back if it needs to be broadcasted to all other peers. 
fn do_handle_message_without_peer_lock<'a>( - &self, peer_mutex: &Mutex, - message: wire::Message< - <::Target as wire::CustomMessageReader>::CustomMessage, - >, + &self, peer_mutex: &Mutex, message: Message, their_node_id: PublicKey, logger: &WithContext<'a, L>, ) -> Result, MessageHandlingError> { if is_gossip_msg(message.type_id()) { @@ -2400,13 +2373,13 @@ where match message { // Setup and Control messages: - wire::Message::Init(_) => { + Message::Init(_) => { // Handled above }, - wire::Message::GossipTimestampFilter(_) => { + Message::GossipTimestampFilter(_) => { // Handled above }, - wire::Message::Error(msg) => { + Message::Error(msg) => { log_debug!( logger, "Got Err message from {}: {}", @@ -2418,149 +2391,150 @@ where return Err(PeerHandleError {}.into()); } }, - wire::Message::Warning(msg) => { + Message::Warning(msg) => { log_debug!(logger, "Got warning message: {}", PrintableString(&msg.data)); }, - wire::Message::Ping(msg) => { + Message::Ping(msg) => { if msg.ponglen < 65532 { let resp = msgs::Pong { byteslen: msg.ponglen }; - self.enqueue_message(&mut *peer_mutex.lock().unwrap(), &resp); + let msg = Message::Pong(resp); + self.enqueue_message(&mut *peer_mutex.lock().unwrap(), msg); } }, - wire::Message::Pong(_msg) => { + Message::Pong(_msg) => { let mut peer_lock = peer_mutex.lock().unwrap(); peer_lock.awaiting_pong_timer_tick_intervals = 0; peer_lock.msgs_sent_since_pong = 0; }, // Channel messages: - wire::Message::StartBatch(_msg) => { + Message::StartBatch(_msg) => { debug_assert!(false); }, - wire::Message::OpenChannel(msg) => { + Message::OpenChannel(msg) => { self.message_handler.chan_handler.handle_open_channel(their_node_id, &msg); }, - wire::Message::OpenChannelV2(_msg) => { + Message::OpenChannelV2(_msg) => { self.message_handler.chan_handler.handle_open_channel_v2(their_node_id, &_msg); }, - wire::Message::AcceptChannel(msg) => { + Message::AcceptChannel(msg) => { self.message_handler.chan_handler.handle_accept_channel(their_node_id, 
&msg); }, - wire::Message::AcceptChannelV2(msg) => { + Message::AcceptChannelV2(msg) => { self.message_handler.chan_handler.handle_accept_channel_v2(their_node_id, &msg); }, - wire::Message::FundingCreated(msg) => { + Message::FundingCreated(msg) => { self.message_handler.chan_handler.handle_funding_created(their_node_id, &msg); }, - wire::Message::FundingSigned(msg) => { + Message::FundingSigned(msg) => { self.message_handler.chan_handler.handle_funding_signed(their_node_id, &msg); }, - wire::Message::ChannelReady(msg) => { + Message::ChannelReady(msg) => { self.message_handler.chan_handler.handle_channel_ready(their_node_id, &msg); }, - wire::Message::PeerStorage(msg) => { + Message::PeerStorage(msg) => { self.message_handler.chan_handler.handle_peer_storage(their_node_id, msg); }, - wire::Message::PeerStorageRetrieval(msg) => { + Message::PeerStorageRetrieval(msg) => { self.message_handler.chan_handler.handle_peer_storage_retrieval(their_node_id, msg); }, // Quiescence messages: - wire::Message::Stfu(msg) => { + Message::Stfu(msg) => { self.message_handler.chan_handler.handle_stfu(their_node_id, &msg); }, // Splicing messages: - wire::Message::SpliceInit(msg) => { + Message::SpliceInit(msg) => { self.message_handler.chan_handler.handle_splice_init(their_node_id, &msg); }, - wire::Message::SpliceAck(msg) => { + Message::SpliceAck(msg) => { self.message_handler.chan_handler.handle_splice_ack(their_node_id, &msg); }, - wire::Message::SpliceLocked(msg) => { + Message::SpliceLocked(msg) => { self.message_handler.chan_handler.handle_splice_locked(their_node_id, &msg); }, // Interactive transaction construction messages: - wire::Message::TxAddInput(msg) => { + Message::TxAddInput(msg) => { self.message_handler.chan_handler.handle_tx_add_input(their_node_id, &msg); }, - wire::Message::TxAddOutput(msg) => { + Message::TxAddOutput(msg) => { self.message_handler.chan_handler.handle_tx_add_output(their_node_id, &msg); }, - wire::Message::TxRemoveInput(msg) => { + 
Message::TxRemoveInput(msg) => { self.message_handler.chan_handler.handle_tx_remove_input(their_node_id, &msg); }, - wire::Message::TxRemoveOutput(msg) => { + Message::TxRemoveOutput(msg) => { self.message_handler.chan_handler.handle_tx_remove_output(their_node_id, &msg); }, - wire::Message::TxComplete(msg) => { + Message::TxComplete(msg) => { self.message_handler.chan_handler.handle_tx_complete(their_node_id, &msg); }, - wire::Message::TxSignatures(msg) => { + Message::TxSignatures(msg) => { self.message_handler.chan_handler.handle_tx_signatures(their_node_id, &msg); }, - wire::Message::TxInitRbf(msg) => { + Message::TxInitRbf(msg) => { self.message_handler.chan_handler.handle_tx_init_rbf(their_node_id, &msg); }, - wire::Message::TxAckRbf(msg) => { + Message::TxAckRbf(msg) => { self.message_handler.chan_handler.handle_tx_ack_rbf(their_node_id, &msg); }, - wire::Message::TxAbort(msg) => { + Message::TxAbort(msg) => { self.message_handler.chan_handler.handle_tx_abort(their_node_id, &msg); }, - wire::Message::Shutdown(msg) => { + Message::Shutdown(msg) => { self.message_handler.chan_handler.handle_shutdown(their_node_id, &msg); }, - wire::Message::ClosingSigned(msg) => { + Message::ClosingSigned(msg) => { self.message_handler.chan_handler.handle_closing_signed(their_node_id, &msg); }, #[cfg(simple_close)] - wire::Message::ClosingComplete(msg) => { + Message::ClosingComplete(msg) => { self.message_handler.chan_handler.handle_closing_complete(their_node_id, msg); }, #[cfg(simple_close)] - wire::Message::ClosingSig(msg) => { + Message::ClosingSig(msg) => { self.message_handler.chan_handler.handle_closing_sig(their_node_id, msg); }, // Commitment messages: - wire::Message::UpdateAddHTLC(msg) => { + Message::UpdateAddHTLC(msg) => { self.message_handler.chan_handler.handle_update_add_htlc(their_node_id, &msg); }, - wire::Message::UpdateFulfillHTLC(msg) => { + Message::UpdateFulfillHTLC(msg) => { self.message_handler.chan_handler.handle_update_fulfill_htlc(their_node_id, 
msg); }, - wire::Message::UpdateFailHTLC(msg) => { + Message::UpdateFailHTLC(msg) => { self.message_handler.chan_handler.handle_update_fail_htlc(their_node_id, &msg); }, - wire::Message::UpdateFailMalformedHTLC(msg) => { + Message::UpdateFailMalformedHTLC(msg) => { let chan_handler = &self.message_handler.chan_handler; chan_handler.handle_update_fail_malformed_htlc(their_node_id, &msg); }, - wire::Message::CommitmentSigned(msg) => { + Message::CommitmentSigned(msg) => { self.message_handler.chan_handler.handle_commitment_signed(their_node_id, &msg); }, - wire::Message::RevokeAndACK(msg) => { + Message::RevokeAndACK(msg) => { self.message_handler.chan_handler.handle_revoke_and_ack(their_node_id, &msg); }, - wire::Message::UpdateFee(msg) => { + Message::UpdateFee(msg) => { self.message_handler.chan_handler.handle_update_fee(their_node_id, &msg); }, - wire::Message::ChannelReestablish(msg) => { + Message::ChannelReestablish(msg) => { self.message_handler.chan_handler.handle_channel_reestablish(their_node_id, &msg); }, // Routing messages: - wire::Message::AnnouncementSignatures(msg) => { + Message::AnnouncementSignatures(msg) => { let chan_handler = &self.message_handler.chan_handler; chan_handler.handle_announcement_signatures(their_node_id, &msg); }, - wire::Message::ChannelAnnouncement(msg) => { + Message::ChannelAnnouncement(msg) => { let route_handler = &self.message_handler.route_handler; if route_handler .handle_channel_announcement(Some(their_node_id), &msg) @@ -2570,7 +2544,7 @@ where } self.update_gossip_backlogged(); }, - wire::Message::NodeAnnouncement(msg) => { + Message::NodeAnnouncement(msg) => { let route_handler = &self.message_handler.route_handler; if route_handler .handle_node_announcement(Some(their_node_id), &msg) @@ -2580,7 +2554,7 @@ where } self.update_gossip_backlogged(); }, - wire::Message::ChannelUpdate(msg) => { + Message::ChannelUpdate(msg) => { let chan_handler = &self.message_handler.chan_handler; 
chan_handler.handle_channel_update(their_node_id, &msg); @@ -2594,31 +2568,31 @@ where } self.update_gossip_backlogged(); }, - wire::Message::QueryShortChannelIds(msg) => { + Message::QueryShortChannelIds(msg) => { let route_handler = &self.message_handler.route_handler; route_handler.handle_query_short_channel_ids(their_node_id, msg)?; }, - wire::Message::ReplyShortChannelIdsEnd(msg) => { + Message::ReplyShortChannelIdsEnd(msg) => { let route_handler = &self.message_handler.route_handler; route_handler.handle_reply_short_channel_ids_end(their_node_id, msg)?; }, - wire::Message::QueryChannelRange(msg) => { + Message::QueryChannelRange(msg) => { let route_handler = &self.message_handler.route_handler; route_handler.handle_query_channel_range(their_node_id, msg)?; }, - wire::Message::ReplyChannelRange(msg) => { + Message::ReplyChannelRange(msg) => { let route_handler = &self.message_handler.route_handler; route_handler.handle_reply_channel_range(their_node_id, msg)?; }, // Onion message: - wire::Message::OnionMessage(msg) => { + Message::OnionMessage(msg) => { let onion_message_handler = &self.message_handler.onion_message_handler; onion_message_handler.handle_onion_message(their_node_id, &msg); }, // Unknown messages: - wire::Message::Unknown(type_id) if message.is_even() => { + Message::Unknown(type_id) if message.is_even() => { log_debug!( logger, "Received unknown even message of type {}, disconnecting peer!", @@ -2626,10 +2600,10 @@ where ); return Err(PeerHandleError {}.into()); }, - wire::Message::Unknown(type_id) => { + Message::Unknown(type_id) => { log_trace!(logger, "Received unknown odd message of type {}, ignoring", type_id); }, - wire::Message::Custom(custom) => { + Message::Custom(custom) => { let custom_message_handler = &self.message_handler.custom_message_handler; custom_message_handler.handle_custom_message(custom, their_node_id)?; }, @@ -2644,22 +2618,24 @@ where /// unless `allow_large_buffer` is set, in which case the message will be treated as 
critical /// and delivered no matter the available buffer space. fn forward_broadcast_msg( - &self, peers: &HashMap>, msg: &BroadcastGossipMessage, + &self, peers: &HashMap>, msg: BroadcastGossipMessage, except_node: Option<&PublicKey>, allow_large_buffer: bool, ) { match msg { - BroadcastGossipMessage::ChannelAnnouncement(ref msg) => { + BroadcastGossipMessage::ChannelAnnouncement(msg) => { log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg); - let encoded_msg = encode_msg!(msg); let our_channel = self.our_node_id == msg.contents.node_id_1 || self.our_node_id == msg.contents.node_id_2; - + let scid = msg.contents.short_channel_id; + let node_id_1 = msg.contents.node_id_1; + let node_id_2 = msg.contents.node_id_2; + let msg: Message = Message::ChannelAnnouncement(msg); + let encoded_msg = encode_message(msg); for (_, peer_mutex) in peers.iter() { let mut peer = peer_mutex.lock().unwrap(); if !peer.handshake_complete() { continue; } - let scid = msg.contents.short_channel_id; if !our_channel && !peer.should_forward_channel_announcement(scid) { continue; } @@ -2676,9 +2652,7 @@ where continue; } if let Some((_, their_node_id)) = peer.their_node_id { - if their_node_id == msg.contents.node_id_1 - || their_node_id == msg.contents.node_id_2 - { + if their_node_id == node_id_1 || their_node_id == node_id_2 { continue; } } @@ -2691,23 +2665,24 @@ where peer.gossip_broadcast_buffer.push_back(encoded_message); } }, - BroadcastGossipMessage::NodeAnnouncement(ref msg) => { + BroadcastGossipMessage::NodeAnnouncement(msg) => { log_gossip!( self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg ); - let encoded_msg = encode_msg!(msg); let our_announcement = self.our_node_id == msg.contents.node_id; + let msg_node_id = msg.contents.node_id; + let msg: Message = Message::NodeAnnouncement(msg); + let encoded_msg = encode_message(msg); for (_, peer_mutex) 
in peers.iter() { let mut peer = peer_mutex.lock().unwrap(); if !peer.handshake_complete() { continue; } - let node_id = msg.contents.node_id; - if !our_announcement && !peer.should_forward_node_announcement(node_id) { + if !our_announcement && !peer.should_forward_node_announcement(msg_node_id) { continue; } debug_assert!(peer.their_node_id.is_some()); @@ -2723,7 +2698,7 @@ where continue; } if let Some((_, their_node_id)) = peer.their_node_id { - if their_node_id == msg.contents.node_id { + if their_node_id == msg_node_id { continue; } } @@ -2743,15 +2718,15 @@ where except_node, msg ); - let encoded_msg = encode_msg!(msg); - let our_channel = self.our_node_id == *node_id_1 || self.our_node_id == *node_id_2; - + let our_channel = self.our_node_id == node_id_1 || self.our_node_id == node_id_2; + let scid = msg.contents.short_channel_id; + let msg: Message = Message::ChannelUpdate(msg); + let encoded_msg = encode_message(msg); for (_, peer_mutex) in peers.iter() { let mut peer = peer_mutex.lock().unwrap(); if !peer.handshake_complete() { continue; } - let scid = msg.contents.short_channel_id; if !our_channel && !peer.should_forward_channel_announcement(scid) { continue; } @@ -2858,68 +2833,77 @@ where // robustly gossip broadcast events even if a peer's message buffer is full. 
let mut handle_event = |event, from_chan_handler| { match event { - MessageSendEvent::SendPeerStorage { ref node_id, ref msg } => { + MessageSendEvent::SendPeerStorage { ref node_id, msg } => { log_debug!( WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendPeerStorage event in peer_handler for {}", node_id, ); + let msg = Message::PeerStorage(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendPeerStorageRetrieval { ref node_id, ref msg } => { + MessageSendEvent::SendPeerStorageRetrieval { ref node_id, msg } => { log_debug!( WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendPeerStorageRetrieval event in peer_handler for {}", node_id, ); + let msg = Message::PeerStorageRetrieval(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => { + MessageSendEvent::SendAcceptChannel { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendAcceptChannel event in peer_handler for node {} for channel {}", node_id, &msg.common_fields.temporary_channel_id); + let msg = Message::AcceptChannel(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => { + MessageSendEvent::SendAcceptChannelV2 { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}", node_id, &msg.common_fields.temporary_channel_id); + let msg = Message::AcceptChannelV2(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => { + MessageSendEvent::SendOpenChannel { ref node_id, msg } => { 
log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendOpenChannel event in peer_handler for node {} for channel {}", node_id, &msg.common_fields.temporary_channel_id); + let msg = Message::OpenChannel(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => { + MessageSendEvent::SendOpenChannelV2 { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}", node_id, &msg.common_fields.temporary_channel_id); + let msg = Message::OpenChannelV2(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => { + MessageSendEvent::SendFundingCreated { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id), None), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})", node_id, &msg.temporary_channel_id, ChannelId::v1_from_funding_txid(msg.funding_txid.as_byte_array(), msg.funding_output_index)); // TODO: If the peer is gone we should generate a DiscardFunding event // indicating to the wallet that they should just throw away this funding transaction + let msg = Message::FundingCreated(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => { + MessageSendEvent::SendFundingSigned { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendFundingSigned event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::FundingSigned(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, 
msg); }, - MessageSendEvent::SendChannelReady { ref node_id, ref msg } => { + MessageSendEvent::SendChannelReady { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendChannelReady event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ChannelReady(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendStfu { ref node_id, ref msg } => { + MessageSendEvent::SendStfu { ref node_id, msg } => { let logger = WithContext::from( &self.logger, Some(*node_id), @@ -2929,9 +2913,10 @@ where log_debug!(logger, "Handling SendStfu event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::Stfu(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { + MessageSendEvent::SendSpliceInit { ref node_id, msg } => { let logger = WithContext::from( &self.logger, Some(*node_id), @@ -2941,9 +2926,10 @@ where log_debug!(logger, "Handling SendSpliceInit event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::SpliceInit(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { + MessageSendEvent::SendSpliceAck { ref node_id, msg } => { let logger = WithContext::from( &self.logger, Some(*node_id), @@ -2953,9 +2939,10 @@ where log_debug!(logger, "Handling SendSpliceAck event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::SpliceAck(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { + MessageSendEvent::SendSpliceLocked { ref node_id, msg } => { let logger = WithContext::from( &self.logger, Some(*node_id), @@ -2965,66 +2952,77 @@ where 
log_debug!(logger, "Handling SendSpliceLocked event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::SpliceLocked(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { + MessageSendEvent::SendTxAddInput { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAddInput event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxAddInput(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => { + MessageSendEvent::SendTxAddOutput { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAddOutput event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxAddOutput(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => { + MessageSendEvent::SendTxRemoveInput { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxRemoveInput(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => { + MessageSendEvent::SendTxRemoveOutput { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxRemoveOutput(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - 
MessageSendEvent::SendTxComplete { ref node_id, ref msg } => { + MessageSendEvent::SendTxComplete { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxComplete event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxComplete(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => { + MessageSendEvent::SendTxSignatures { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxSignatures event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxSignatures(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => { + MessageSendEvent::SendTxInitRbf { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxInitRbf event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxInitRbf(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => { + MessageSendEvent::SendTxAckRbf { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAckRbf event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::TxAckRbf(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendTxAbort { ref node_id, ref msg } => { + MessageSendEvent::SendTxAbort { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAbort event in peer_handler for node {} for channel {}", node_id, 
&msg.channel_id); + let msg = Message::TxAbort(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => { + MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})", node_id, &msg.channel_id); + let msg = Message::AnnouncementSignatures(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::UpdateHTLCs { @@ -3032,12 +3030,12 @@ where ref channel_id, updates: msgs::CommitmentUpdate { - ref update_add_htlcs, - ref update_fulfill_htlcs, - ref update_fail_htlcs, - ref update_fail_malformed_htlcs, - ref update_fee, - ref commitment_signed, + update_add_htlcs, + update_fulfill_htlcs, + update_fail_htlcs, + update_fail_malformed_htlcs, + update_fee, + commitment_signed, }, } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(*channel_id), None), "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails, {} commits for channel {}", @@ -3049,18 +3047,23 @@ where channel_id); let mut peer = get_peer_for_forwarding!(node_id)?; for msg in update_fulfill_htlcs { + let msg = Message::UpdateFulfillHTLC(msg); self.enqueue_message(&mut *peer, msg); } for msg in update_fail_htlcs { + let msg = Message::UpdateFailHTLC(msg); self.enqueue_message(&mut *peer, msg); } for msg in update_fail_malformed_htlcs { + let msg = Message::UpdateFailMalformedHTLC(msg); self.enqueue_message(&mut *peer, msg); } for msg in update_add_htlcs { + let msg = Message::UpdateAddHTLC(msg); self.enqueue_message(&mut *peer, msg); } - if let &Some(ref msg) = update_fee { + if let Some(msg) = update_fee { + let msg = Message::UpdateFee(msg); self.enqueue_message(&mut *peer, msg); } if commitment_signed.len() > 1 { @@ -3069,37 +3072,45 @@ 
where batch_size: commitment_signed.len() as u16, message_type: Some(msgs::CommitmentSigned::TYPE), }; - self.enqueue_message(&mut *peer, &msg); + let msg = Message::StartBatch(msg); + self.enqueue_message(&mut *peer, msg); } for msg in commitment_signed { + let msg = Message::CommitmentSigned(msg); self.enqueue_message(&mut *peer, msg); } }, - MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + MessageSendEvent::SendRevokeAndACK { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::RevokeAndACK(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { + MessageSendEvent::SendClosingSigned { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendClosingSigned event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ClosingSigned(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendClosingComplete { ref node_id, ref msg } => { + #[cfg(simple_close)] + MessageSendEvent::SendClosingComplete { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendClosingComplete event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ClosingComplete(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendClosingSig { ref node_id, ref msg } => { + #[cfg(simple_close)] + MessageSendEvent::SendClosingSig { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendClosingSig event in peer_handler for node {} for channel 
{}", node_id, &msg.channel_id); + let msg = Message::ClosingSig(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendShutdown { ref node_id, ref msg } => { + MessageSendEvent::SendShutdown { ref node_id, msg } => { log_debug!( WithContext::from( &self.logger, @@ -3109,23 +3120,27 @@ where ), "Handling Shutdown event in peer_handler", ); + let msg = Message::Shutdown(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { + MessageSendEvent::SendChannelReestablish { ref node_id, msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendChannelReestablish event in peer_handler for node {} for channel {}", node_id, &msg.channel_id); + let msg = Message::ChannelReestablish(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::SendChannelAnnouncement { ref node_id, - ref msg, - ref update_msg, + msg, + update_msg, } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}", node_id, msg.contents.short_channel_id); + let msg = Message::ChannelAnnouncement(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); + let update_msg = Message::ChannelUpdate(update_msg); self.enqueue_message( &mut *get_peer_for_forwarding!(node_id)?, update_msg, @@ -3144,7 +3159,7 @@ where let forward = BroadcastGossipMessage::ChannelAnnouncement(msg); self.forward_broadcast_msg( peers, - &forward, + forward, None, from_chan_handler, ); @@ -3165,7 +3180,7 @@ where }; self.forward_broadcast_msg( peers, - &forward, + forward, None, from_chan_handler, ); @@ -3189,7 +3204,7 @@ where }; self.forward_broadcast_msg( peers, - &forward, + forward, None, from_chan_handler, ); @@ -3208,7 +3223,7 @@ where let forward = 
BroadcastGossipMessage::NodeAnnouncement(msg); self.forward_broadcast_msg( peers, - &forward, + forward, None, from_chan_handler, ); @@ -3216,12 +3231,13 @@ where _ => {}, } }, - MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { + MessageSendEvent::SendChannelUpdate { ref node_id, msg } => { log_trace!( WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelUpdate event in peer_handler for channel {}", msg.contents.short_channel_id ); + let msg = Message::ChannelUpdate(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::HandleError { node_id, action } => { @@ -3238,9 +3254,8 @@ where // We do not have the peers write lock, so we just store that we're // about to disconnect the peer and do it after we finish // processing most messages. - let msg = msg.map(|msg| { - wire::Message::<<::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg) - }); + let msg = + msg.map(|msg| Message::::Error(msg)); peers_to_disconnect.insert(node_id, msg); }, msgs::ErrorAction::DisconnectPeerWithWarning { msg } => { @@ -3250,7 +3265,7 @@ where // about to disconnect the peer and do it after we finish // processing most messages. 
peers_to_disconnect - .insert(node_id, Some(wire::Message::Warning(msg))); + .insert(node_id, Some(Message::Warning(msg))); }, msgs::ErrorAction::IgnoreAndLog(level) => { log_given_level!( @@ -3266,22 +3281,21 @@ where "Received a HandleError event to be ignored", ); }, - msgs::ErrorAction::SendErrorMessage { ref msg } => { + msgs::ErrorAction::SendErrorMessage { msg } => { log_trace!(logger, "Handling SendErrorMessage HandleError event in peer_handler with message {}", msg.data); + let msg = Message::Error(msg); self.enqueue_message( &mut *get_peer_for_forwarding!(&node_id)?, msg, ); }, - msgs::ErrorAction::SendWarningMessage { - ref msg, - ref log_level, - } => { + msgs::ErrorAction::SendWarningMessage { msg, ref log_level } => { log_given_level!(logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler with message {}", msg.data); + let msg = Message::Warning(msg); self.enqueue_message( &mut *get_peer_for_forwarding!(&node_id)?, msg, @@ -3289,33 +3303,37 @@ where }, } }, - MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => { + MessageSendEvent::SendChannelRangeQuery { ref node_id, msg } => { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelRangeQuery event in peer_handler with first_blocknum={}, number_of_blocks={}", msg.first_blocknum, msg.number_of_blocks); + let msg = Message::QueryChannelRange(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => { + MessageSendEvent::SendShortIdsQuery { ref node_id, msg } => { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendShortIdsQuery event in peer_handler with num_scids={}", msg.short_channel_ids.len()); + let msg = Message::QueryShortChannelIds(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => { + 
MessageSendEvent::SendReplyChannelRange { ref node_id, msg } => { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendReplyChannelRange event in peer_handler with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}", msg.short_channel_ids.len(), msg.first_blocknum, msg.number_of_blocks, msg.sync_complete); + let msg = Message::ReplyChannelRange(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, - MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => { + MessageSendEvent::SendGossipTimestampFilter { ref node_id, msg } => { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendGossipTimestampFilter event in peer_handler with first_timestamp={}, timestamp_range={}", msg.first_timestamp, msg.timestamp_range); + let msg = Message::GossipTimestampFilter(msg); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, } @@ -3351,7 +3369,8 @@ where } else { continue; }; - self.enqueue_message(&mut peer, &msg); + let msg = Message::Custom(msg); + self.enqueue_message(&mut peer, msg); } for (descriptor, peer_mutex) in peers.iter() { @@ -3381,7 +3400,7 @@ where if let Some(peer_mutex) = peers.remove(&descriptor) { let mut peer = peer_mutex.lock().unwrap(); if let Some(msg) = msg { - self.enqueue_message(&mut *peer, &msg); + self.enqueue_message(&mut *peer, msg); // This isn't guaranteed to work, but if there is enough free // room in the send buffer, put the error message there... 
self.do_attempt_write_data(&mut descriptor, &mut *peer, false); @@ -3506,7 +3525,8 @@ where if peer.awaiting_pong_timer_tick_intervals == 0 { peer.awaiting_pong_timer_tick_intervals = -1; let ping = msgs::Ping { ponglen: 0, byteslen: 64 }; - self.enqueue_message(peer, &ping); + let msg: Message = Message::Ping(ping); + self.enqueue_message(peer, msg); } } @@ -3577,7 +3597,8 @@ where peer.awaiting_pong_timer_tick_intervals = 1; let ping = msgs::Ping { ponglen: 0, byteslen: 64 }; - self.enqueue_message(&mut *peer, &ping); + let msg = Message::Ping(ping); + self.enqueue_message(&mut *peer, msg); break; } self.do_attempt_write_data( @@ -3677,7 +3698,7 @@ where let _ = self.message_handler.route_handler.handle_node_announcement(None, &msg); self.forward_broadcast_msg( &*self.peers.read().unwrap(), - &BroadcastGossipMessage::NodeAnnouncement(msg), + BroadcastGossipMessage::NodeAnnouncement(msg), None, true, ); @@ -4226,7 +4247,7 @@ mod tests { .push(MessageSendEvent::SendShutdown { node_id: their_id, msg: msg.clone() }); peers[0].message_handler.chan_handler = &a_chan_handler; - b_chan_handler.expect_receive_msg(wire::Message::Shutdown(msg)); + b_chan_handler.expect_receive_msg(Message::Shutdown(msg)); peers[1].message_handler.chan_handler = &b_chan_handler; peers[0].process_events(); @@ -4261,7 +4282,8 @@ mod tests { peers[0].read_event(&mut fd_dup, &act_three).unwrap(); let not_init_msg = msgs::Ping { ponglen: 4, byteslen: 0 }; - let msg_bytes = dup_encryptor.encrypt_message(¬_init_msg); + let msg: Message<()> = Message::Ping(not_init_msg); + let msg_bytes = dup_encryptor.encrypt_message(msg); assert!(peers[0].read_event(&mut fd_dup, &msg_bytes).is_err()); } @@ -4491,7 +4513,8 @@ mod tests { assert_eq!(peer.gossip_broadcast_buffer.len(), 1); let pending_msg = &peer.gossip_broadcast_buffer[0]; - let expected = encode_msg!(&msg_100); + let msg: Message<()> = Message::ChannelUpdate(msg_100); + let expected = encode_message(msg); assert_eq!(expected, 
pending_msg.fetch_encoded_msg_with_type_pfx()); } } @@ -4639,13 +4662,12 @@ mod tests { { let peers = peer_a.peers.read().unwrap(); let mut peer_b = peers.get(&fd_a).unwrap().lock().unwrap(); - peer_a.enqueue_message( - &mut peer_b, - &msgs::WarningMessage { - channel_id: ChannelId([0; 32]), - data: "no disconnect plz".to_string(), - }, - ); + let warning = msgs::WarningMessage { + channel_id: ChannelId([0; 32]), + data: "no disconnect plz".to_string(), + }; + let msg = Message::Warning(warning); + peer_a.enqueue_message(&mut peer_b, msg); } peer_a.process_events(); let msg = fd_a.outbound_data.lock().unwrap().split_off(0); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index ea34e88f619..a5ccac780f9 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -12,13 +12,15 @@ //! LSP). use crate::chain::ChannelMonitorUpdateStatus; -use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; -use crate::ln::channelmanager::{PaymentId, RecipientOnionFields, MIN_CLTV_EXPIRY_DELTA}; +use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentFailureReason}; +use crate::ln::channel::CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY; +use crate::ln::channelmanager::{PaymentId, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::msgs; use crate::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent, RoutingMessageHandler, }; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::types::ChannelId; use crate::routing::gossip::RoutingFees; use crate::routing::router::{PaymentParameters, RouteHint, RouteHintHop}; @@ -82,7 +84,7 @@ fn test_priv_forwarding_rejection() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap(); - 
check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); @@ -165,7 +167,7 @@ fn test_priv_forwarding_rejection() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route( &nodes[0], &[&[&nodes[1], &nodes[2]]], @@ -349,7 +351,7 @@ fn test_routed_scid_alias() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret); @@ -446,7 +448,7 @@ fn test_scid_privacy_negotiation() { .as_ref() .unwrap() .supports_scid_privacy()); - nodes[1].node.handle_open_channel(node_a_id, &second_open_channel); + handle_and_accept_open_channel(&nodes[1], node_a_id, &second_open_channel); nodes[0].node.handle_accept_channel( node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), @@ -499,7 +501,7 @@ fn test_inbound_scid_privacy() { assert!(open_channel.common_fields.channel_type.as_ref().unwrap().requires_scid_privacy()); - nodes[2].node.handle_open_channel(node_b_id, &open_channel); + handle_and_accept_open_channel(&nodes[2], node_b_id, &open_channel); let accept_channel = get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, node_b_id); nodes[1].node.handle_accept_channel(node_c_id, &accept_channel); @@ -513,7 +515,7 @@ fn test_inbound_scid_privacy() { node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, node_c_id), ); - 
check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let cs_funding_signed = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_b_id); @@ -521,7 +523,7 @@ fn test_inbound_scid_privacy() { nodes[1].node.handle_funding_signed(node_c_id, &cs_funding_signed); expect_channel_pending_event(&nodes[1], &node_c_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let conf_height = core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[2].best_block_info().1 + 1); @@ -579,7 +581,7 @@ fn test_inbound_scid_privacy() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); @@ -600,7 +602,7 @@ fn test_inbound_scid_privacy() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route_2, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let payment_event = SendEvent::from_node(&nodes[0]); assert_eq!(node_b_id, payment_event.node_id); @@ -697,7 +699,7 @@ fn test_scid_alias_returned() { let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &as_updates.commitment_signed, false, true); @@ -709,7 +711,7 @@ fn test_scid_alias_returned() { channel_id: chan.0.channel_id, }]; 
expect_htlc_failure_conditions(events, &expected_failures); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); @@ -734,7 +736,7 @@ fn test_scid_alias_returned() { let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &as_updates.commitment_signed, false, true); @@ -779,7 +781,6 @@ fn test_simple_0conf_channel() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -798,7 +799,6 @@ fn test_0conf_channel_with_async_monitor() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(chan_config.clone()), None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -844,7 +844,7 @@ fn test_0conf_channel_with_async_monitor() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_funding_created(node_a_id, &funding_created); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let channel_id = 
ChannelId::v1_from_funding_outpoint(funding_output); @@ -859,7 +859,7 @@ fn test_0conf_channel_with_async_monitor() { MessageSendEvent::SendFundingSigned { node_id, msg } => { assert_eq!(*node_id, node_a_id); nodes[0].node.handle_funding_signed(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -937,26 +937,26 @@ fn test_0conf_channel_with_async_monitor() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_send = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(node_a_id, &as_send.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_send.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_raa, bs_commitment_signed) = get_revoke_commit_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_revoke_and_ack( node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -970,10 +970,10 @@ fn test_0conf_channel_with_async_monitor() { .chain_monitor .channel_monitor_updated(bs_raa.channel_id, latest_update) .unwrap(); - check_added_monitors!(nodes[1], 0); + 
check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); expect_and_process_pending_htlcs(&nodes[1], false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_send = SendEvent::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(node_b_id, &bs_send.msgs[0]); @@ -995,7 +995,6 @@ fn test_0conf_close_no_early_chan_update() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1010,7 +1009,7 @@ fn test_0conf_close_no_early_chan_update() { send_payment(&nodes[0], &[&nodes[1]], 100_000); nodes[0].node.force_close_all_channels_broadcasting_latest_txn(message.clone()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); let _ = get_err_msg(&nodes[0], &node_b_id); @@ -1022,7 +1021,6 @@ fn test_public_0conf_channel() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1078,66 +1076,232 @@ fn test_public_0conf_channel() { #[test] fn test_0conf_channel_reorg() { // If we accept a 0conf channel, which is then confirmed, but then changes SCID in a reorg, we - // have to make sure we handle this correctly (or, currently, just force-close the channel). 
+ // have to ensure we still accept relays to the previous SCID, at least for some time, as well + // as send a fresh channel announcement. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let node_a_id = nodes[0].node.get_our_node_id(); + let node_chanmgrs = + create_node_chanmgrs(3, &node_cfgs, &[None, None, Some(chan_config.clone())]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + create_chan_between_nodes(&nodes[0], &nodes[1]); + + // Make sure all nodes are at the same starting height + connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); // This is the default but we force it on anyway chan_config.channel_handshake_config.announce_for_forwarding = true; - let (tx, ..) = open_zero_conf_channel(&nodes[0], &nodes[1], Some(chan_config)); + let (tx, ..) = open_zero_conf_channel(&nodes[1], &nodes[2], Some(chan_config)); // We can use the channel immediately, but we can't announce it until we get 6+ confirmations - send_payment(&nodes[0], &[&nodes[1]], 100_000); + send_payment(&nodes[1], &[&nodes[2]], 100_000); - mine_transaction(&nodes[0], &tx); mine_transaction(&nodes[1], &tx); + mine_transaction(&nodes[2], &tx); // Send a payment using the channel's real SCID, which will be public in a few blocks once we // can generate a channel_announcement. 
- let real_scid = nodes[0].node.list_usable_channels()[0].short_channel_id.unwrap(); - assert_eq!(nodes[1].node.list_usable_channels()[0].short_channel_id.unwrap(), real_scid); + let bs_chans = nodes[1].node.list_usable_channels(); + let bs_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + let original_scid = bs_chan.short_channel_id.unwrap(); + assert_eq!(nodes[2].node.list_usable_channels()[0].short_channel_id.unwrap(), original_scid); let (mut route, payment_hash, payment_preimage, payment_secret) = - get_route_and_payment_hash!(nodes[0], nodes[1], 10_000); - assert_eq!(route.paths[0].hops[0].short_channel_id, real_scid); + get_route_and_payment_hash!(nodes[1], nodes[2], 10_000); + assert_eq!(route.paths[0].hops[0].short_channel_id, original_scid); + send_along_route_with_secret( + &nodes[1], + route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + // Check that we can forward a payment over the channel's SCID as well (i.e. as if node C + // generated an invoice with a route hint through the 0-conf channel). + let mut forwarded_route = route.clone(); + let (ab_route, ..) = get_route_and_payment_hash!(nodes[0], nodes[1], 10_000); + forwarded_route.paths[0].hops.insert(0, ab_route.paths[0].hops[0].clone()); + forwarded_route.paths[0].hops[0].fee_msat = 1000; + forwarded_route.paths[0].hops[0].cltv_expiry_delta = MIN_CLTV_EXPIRY_DELTA.into(); send_along_route_with_secret( &nodes[0], - route, - &[&[&nodes[1]]], + forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], 10_000, payment_hash, payment_secret, ); - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); - disconnect_blocks(&nodes[0], 1); + // Now disconnect blocks, checking that the SCID was wiped but that it still works both for a + // forwarded HTLC and a directly-sent one. 
disconnect_blocks(&nodes[1], 1); + disconnect_blocks(&nodes[2], 1); - // At this point the channel no longer has an SCID again. In the future we should likely - // support simply un-setting the SCID and waiting until the channel gets re-confirmed, but for - // now we force-close the channel here. - let reason = ClosureReason::ProcessingError { - err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." - .to_owned(), - }; - check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); - check_closed_broadcast!(nodes[0], true); + let bs_chans = nodes[1].node.list_usable_channels(); + let bs_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert!(bs_chan.short_channel_id.is_none()); + assert!(nodes[2].node.list_usable_channels()[0].short_channel_id.is_none()); + + send_along_route_with_secret( + &nodes[1], + route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + send_along_route_with_secret( + &nodes[0], + forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + // Finally, connect an extra block then re-mine the funding tx, giving the channel a new SCID. + connect_blocks(&nodes[1], 1); + connect_blocks(&nodes[2], 1); + + mine_transaction(&nodes[1], &tx); + mine_transaction(&nodes[2], &tx); + + let bs_chans = nodes[1].node.list_usable_channels(); + let bs_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + let new_scid = bs_chan.short_channel_id.unwrap(); + assert_ne!(original_scid, new_scid); + assert_eq!(nodes[2].node.list_usable_channels()[0].short_channel_id.unwrap(), new_scid); + + // At this point, the channel should happily forward or send payments with either the old SCID + // or the new SCID... 
+ send_along_route_with_secret( + &nodes[1], + route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + send_along_route_with_secret( + &nodes[0], + forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + let mut new_scid_route = route.clone(); + new_scid_route.paths[0].hops[0].short_channel_id = new_scid; + send_along_route_with_secret( + &nodes[1], + new_scid_route.clone(), + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + let mut new_scid_forwarded_route = forwarded_route.clone(); + new_scid_forwarded_route.paths[0].hops[1].short_channel_id = new_scid; + send_along_route_with_secret( + &nodes[0], + new_scid_forwarded_route.clone(), + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + // However after CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY blocks, the old SCID should be removed + // and will no longer work for sent or forwarded payments (but the new one still will). + connect_blocks(&nodes[1], 5); + let bs_announcement_sigs = + get_event_msg!(nodes[1], MessageSendEvent::SendAnnouncementSignatures, node_c_id); + + connect_blocks(&nodes[2], 5); + let cs_announcement_sigs = + get_event_msg!(nodes[2], MessageSendEvent::SendAnnouncementSignatures, node_b_id); + + nodes[2].node.handle_announcement_signatures(node_b_id, &bs_announcement_sigs); + let cs_broadcast = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(cs_broadcast.len(), 1); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. 
} = cs_broadcast[0] { + } else { + panic!("Expected broadcast"); + } + + nodes[1].node.handle_announcement_signatures(node_c_id, &cs_announcement_sigs); + let bs_broadcast = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(bs_broadcast.len(), 1); + if let MessageSendEvent::BroadcastChannelAnnouncement { .. } = bs_broadcast[0] { + } else { + panic!("Expected broadcast"); + } + + connect_blocks(&nodes[0], CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY); + connect_blocks(&nodes[1], CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY - 5); + connect_blocks(&nodes[2], CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY - 5); + + send_along_route_with_secret( + &nodes[1], + new_scid_route, + &[&[&nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[1], &[&nodes[2]], payment_preimage); + + send_along_route_with_secret( + &nodes[0], + new_scid_forwarded_route, + &[&[&nodes[1], &nodes[2]]], + 10_000, + payment_hash, + payment_secret, + ); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId([0; 32]); + nodes[1].node.send_payment_with_route(route, payment_hash, onion.clone(), id).unwrap(); + let mut conditions = PaymentFailedConditions::new(); + conditions.reason = Some(PaymentFailureReason::RouteNotFound); + expect_payment_failed_conditions(&nodes[1], payment_hash, false, conditions); + + nodes[0].node.send_payment_with_route(forwarded_route, payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); - let reason = ClosureReason::ProcessingError { - err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." 
- .to_owned(), - }; - check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); - check_closed_broadcast!(nodes[1], true); - check_added_monitors(&nodes[1], 1); + let mut ev = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(ev.len(), 1); + let ev = ev.pop().unwrap(); + let path = &[&nodes[1]]; + let failure = HTLCHandlingFailureType::InvalidForward { requested_forward_scid: original_scid }; + let args = + PassAlongPathArgs::new(&nodes[0], path, 10_000, payment_hash, ev).expect_failure(failure); + do_pass_along_path(args); + fail_payment_along_path(&[&nodes[0], &nodes[1]]); + expect_payment_failed!(nodes[0], payment_hash, false); } #[test] @@ -1145,7 +1309,7 @@ fn test_zero_conf_accept_reject() { let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key(); channel_type_features.set_zero_conf_required(); - // 1. Check we reject zero conf channels by default + // Check we can accept zero conf channels via the right method let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -1153,6 +1317,7 @@ fn test_zero_conf_accept_reject() { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); + // 1. First try the non-0conf method to manually accept nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); @@ -1161,41 +1326,6 @@ fn test_zero_conf_accept_reject() { nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); - let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); - match msg_events[0] { - MessageSendEvent::HandleError { - action: ErrorAction::SendErrorMessage { ref msg, .. }, - .. - } => { - assert_eq!(msg.data, "No zero confirmation channels accepted".to_owned()); - }, - _ => panic!(), - } - - // 2. 
Check we can manually accept zero conf channels via the right method - let mut manually_accept_conf = UserConfig::default(); - manually_accept_conf.manually_accept_inbound_channels = true; - - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - // 2.1 First try the non-0conf method to manually accept - nodes[0] - .node - .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf.clone())) - .unwrap(); - let mut open_channel_msg = - get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - - open_channel_msg.common_fields.channel_type = Some(channel_type_features.clone()); - - nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); - // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in the `msg_events`. assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1223,11 +1353,8 @@ fn test_zero_conf_accept_reject() { _ => panic!(), } - // 2.2 Try again with the 0conf method to manually accept - nodes[0] - .node - .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)) - .unwrap(); + // 2. 
Try again with the 0conf method to manually accept + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); @@ -1269,10 +1396,7 @@ fn test_connect_before_funding() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let mut manually_accept_conf = test_default_channel_config(); - manually_accept_conf.manually_accept_inbound_channels = true; - - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); @@ -1323,7 +1447,6 @@ fn test_0conf_ann_sigs_racing_conf() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut chan_config = test_default_channel_config(); - chan_config.manually_accept_inbound_channels = true; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1352,3 +1475,133 @@ fn test_0conf_ann_sigs_racing_conf() { let as_announcement = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_announcement.len(), 1); } + +#[test] +fn test_channel_update_dont_forward_flag() { + // Test that the `dont_forward` bit (bit 1 of message_flags) is set correctly: + // - For private channels: message_flags should have bit 1 set (value 3 = must_be_one + dont_forward) + // - For public channels: message_flags should NOT have bit 1 set (value 1 = must_be_one only) + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, 
&node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + // Create a public (announced) channel between nodes[0] and nodes[1] + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); + + // Create a private (unannounced) channel between nodes[1] and nodes[2] + create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000); + + // Get the channel details for both channels + let public_channel = nodes[0] + .node + .list_channels() + .into_iter() + .find(|c| c.counterparty.node_id == node_b_id) + .unwrap(); + let private_channel = nodes[1] + .node + .list_channels() + .into_iter() + .find(|c| c.counterparty.node_id == node_c_id) + .unwrap(); + + // Verify is_announced correctly reflects the channel type + assert!(public_channel.is_announced, "Public channel should have is_announced = true"); + assert!(!private_channel.is_announced, "Private channel should have is_announced = false"); + + // Trigger channel_update by changing config on the public channel + let mut new_config = public_channel.config.unwrap(); + new_config.forwarding_fee_base_msat += 10; + nodes[0] + .node + .update_channel_config(&node_b_id, &[public_channel.channel_id], &new_config) + .unwrap(); + + // Get the channel_update for the public channel and verify dont_forward is NOT set + let events = nodes[0].node.get_and_clear_pending_msg_events(); + let public_channel_update = events + .iter() + .find_map(|e| { + if let MessageSendEvent::BroadcastChannelUpdate { ref msg, .. 
} = e { + Some(msg.clone()) + } else { + None + } + }) + .expect("Expected BroadcastChannelUpdate for public channel"); + // message_flags should be 1 (only must_be_one bit set, dont_forward NOT set) + assert_eq!( + public_channel_update.contents.message_flags & (1 << 1), + 0, + "Public channel update should NOT have dont_forward bit set" + ); + assert_eq!( + public_channel_update.contents.message_flags & 1, + 1, + "Public channel update should have must_be_one bit set" + ); + + // Trigger channel_update by changing config on the private channel + let mut new_config = private_channel.config.unwrap(); + new_config.forwarding_fee_base_msat += 10; + nodes[1] + .node + .update_channel_config(&node_c_id, &[private_channel.channel_id], &new_config) + .unwrap(); + + // Get the channel_update for the private channel and verify dont_forward IS set + let private_channel_update = + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_c_id); + // message_flags should have dont_forward bit set + assert_ne!( + private_channel_update.contents.message_flags & (1 << 1), + 0, + "Private channel update should have dont_forward bit set" + ); + assert_eq!( + private_channel_update.contents.message_flags & 1, + 1, + "Private channel update should have must_be_one bit set" + ); +} + +#[test] +fn test_unknown_channel_update_with_dont_forward_logs_debug() { + use bitcoin::constants::ChainHash; + use bitcoin::secp256k1::ecdsa::Signature; + use bitcoin::secp256k1::ffi::Signature as FFISignature; + use bitcoin::Network; + + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let unknown_scid = 42; + let msg = msgs::ChannelUpdate { + signature: Signature::from(unsafe { FFISignature::new() }), + contents: msgs::UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + 
short_channel_id: unknown_scid, + timestamp: 0, + message_flags: 1 | (1 << 1), // must_be_one + dont_forward + channel_flags: 0, + cltv_expiry_delta: 0, + htlc_minimum_msat: 0, + htlc_maximum_msat: msgs::MAX_VALUE_MSAT, + fee_base_msat: 0, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }, + }; + + nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &msg); + nodes[0].logger.assert_log_contains( + "lightning::ln::channelmanager", + "Received channel_update for unknown channel", + 1, + ); +} diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index 6daf4d65b9d..d972fb6a5c5 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -2,10 +2,10 @@ use crate::chain::ChannelMonitorUpdateStatus; use crate::events::{Event, HTLCHandlingFailureType}; use crate::ln::channel::DISCONNECT_PEER_AWAITING_RESPONSE_TICKS; use crate::ln::channelmanager::PaymentId; -use crate::ln::channelmanager::RecipientOnionFields; use crate::ln::functional_test_utils::*; use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent}; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::test_channel_signer::SignerOp; @@ -101,7 +101,7 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) { let onion = RecipientOnionFields::secret_only(payment_secret); let payment_id = PaymentId(payment_hash.0); local_node.node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap(); - check_added_monitors!(local_node, 1); + check_added_monitors(&local_node, 1); // Attempt to send an HTLC, but don't fully commit it yet. 
let update_add = get_htlc_update_msgs(&local_node, &remote_node_id); @@ -373,7 +373,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { let onion1 = RecipientOnionFields::secret_only(payment_secret1); let payment_id1 = PaymentId(payment_hash1.0); nodes[1].node.send_payment_with_route(route1, payment_hash1, onion1, payment_id1).unwrap(); - check_added_monitors!(&nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Send a payment in the opposite direction. Since nodes[0] hasn't sent its own `stfu` yet, it's @@ -383,7 +383,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { let onion2 = RecipientOnionFields::secret_only(payment_secret2); let payment_id2 = PaymentId(payment_hash2.0); nodes[0].node.send_payment_with_route(route2, payment_hash2, onion2, payment_id2).unwrap(); - check_added_monitors!(&nodes[0], 1); + check_added_monitors(&nodes[0], 1); let update_add = get_htlc_update_msgs(&nodes[0], &node_id_1); nodes[1].node.handle_update_add_htlc(node_id_0, &update_add.update_add_htlcs[0]); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 2e9471a787d..2e9b47725db 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -18,7 +18,8 @@ use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::sign::EntropySource; use crate::chain::transaction::OutPoint; use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; -use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields, RAACommitmentOrder}; +use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder}; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::msgs; use crate::ln::types::ChannelId; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, ErrorAction, MessageSendEvent}; @@ -26,7 +27,7 
@@ use crate::util::test_channel_signer::TestChannelSigner; use crate::util::test_utils; use crate::util::errors::APIError; use crate::util::ser::{Writeable, ReadableArgs}; -use crate::util::config::UserConfig; +use crate::util::config::{HTLCInterceptionFlags, UserConfig}; use bitcoin::hashes::Hash; use bitcoin::hash_types::BlockHash; @@ -253,13 +254,13 @@ fn test_manager_serialize_deserialize_events() { let node_a = nodes.remove(0); let node_b = nodes.remove(0); node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None, None).unwrap(); - node_b.node.handle_open_channel(node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())); + handle_and_accept_open_channel(&node_b, node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())); node_a.node.handle_accept_channel(node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())); let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, &node_b.node.get_our_node_id(), channel_value, 42); node_a.node.funding_transaction_generated(temporary_channel_id, node_b.node.get_our_node_id(), tx.clone()).unwrap(); - check_added_monitors!(node_a, 0); + check_added_monitors(&node_a, 0); let funding_created = get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()); let channel_id = ChannelId::v1_from_funding_txid( @@ -367,7 +368,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes_0_deserialized; let mut 
nodes = create_network(4, &node_cfgs, &node_chanmgrs); @@ -437,6 +439,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), + reconstruct_manager_from_monitors: None, }) { } else { panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return"); }; @@ -455,6 +458,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { tx_broadcaster: nodes[0].tx_broadcaster, logger: &logger, channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.channel_id(), monitor) }).collect(), + reconstruct_manager_from_monitors: None, }).unwrap(); nodes_0_deserialized = nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); @@ -462,7 +466,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { for monitor in node_0_monitors.drain(..) { assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.channel_id(), monitor), Ok(ChannelMonitorUpdateStatus::Completed)); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } nodes[0].node = &nodes_0_deserialized; @@ -474,7 +478,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { check_spends!(txn[0], funding_tx); assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.compute_txid()); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // nodes[1] and nodes[2] have no lost state with nodes[0]... 
reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); @@ -508,7 +512,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { #[cfg(feature = "std")] fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, not_stale: bool) { - use crate::ln::channelmanager::Retry; + use crate::ln::outbound_payment::Retry; use crate::types::string::UntrustedString; // When we get a data_loss_protect proving we're behind, we immediately panic as the // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The @@ -522,7 +526,8 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes_0_deserialized; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -647,7 +652,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, .node .force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone()) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1000000); @@ -686,7 +691,11 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[1] { match action { &ErrorAction::SendErrorMessage { ref msg } => { - assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())); + let peer_msg = format!( + "Got a message for a channel from the wrong node! No such channel_id {} for the passed counterparty_node_id {}", + chan.2, nodes[1].node.get_our_node_id() + ); + assert_eq!(msg.data, peer_msg); err_msgs_0.push(msg.clone()); }, _ => panic!("Unexpected event!"), @@ -697,9 +706,13 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, assert_eq!(err_msgs_0.len(), 1); nodes[1].node.handle_error(nodes[0].node.get_our_node_id(), &err_msgs_0[0]); assert!(nodes[1].node.list_usable_channels().is_empty()); - check_added_monitors!(nodes[1], 1); - check_closed_event(&nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) } - , &[nodes[0].node.get_our_node_id()], 1000000); + check_added_monitors(&nodes[1], 1); + let peer_msg = format!( + "Got a message for a channel from the wrong node! 
No such channel_id {} for the passed counterparty_node_id {}", + chan.2, nodes[1].node.get_our_node_id() + ); + let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg) }; + check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 1000000); check_closed_broadcast!(nodes[1], false); } } @@ -754,7 +767,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); // Send the payment through to nodes[3] *without* clearing the PaymentClaimable event let mut send_events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -785,7 +798,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest expect_payment_claimable!(nodes[3], payment_hash, payment_secret, 15_000_000); nodes[3].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[3], 2); + check_added_monitors(&nodes[3], 2); expect_payment_claimed!(nodes[3], payment_hash, 15_000_000); // Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we @@ -881,7 +894,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest // Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC // claim should fly. let mut ds_msgs = nodes[3].node.get_and_clear_pending_msg_events(); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); assert_eq!(ds_msgs.len(), 2); if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); } @@ -889,7 +902,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest MessageSendEvent::UpdateHTLCs { mut updates, .. 
} => { let mut fulfill = updates.update_fulfill_htlcs.remove(0); nodes[2].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), fulfill); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id()); expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false); do_commitment_signed_dance(&nodes[2], &nodes[3], &updates.commitment_signed, false, true); @@ -930,8 +943,9 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let persister; let new_chain_monitor; - let mut intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + let mut intercept_forwards_config = test_legacy_channel_config(); + intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); let nodes_1_deserialized; @@ -951,7 +965,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV; nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let payment_event = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); @@ -985,7 +999,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let payment_event = SendEvent::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); - check_added_monitors!(nodes[2], 1); + 
check_added_monitors(&nodes[2], 1); if claim_htlc { get_monitor!(nodes[2], chan_id_2).provide_payment_preimage_unsafe_legacy( @@ -1005,7 +1019,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 }); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[2], 1, reason, &[nodes[1].node.get_our_node_id()], 100000); check_closed_broadcast!(nodes[2], true); @@ -1031,7 +1045,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht nodes[1].node.timer_tick_occurred(); let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_commitment_tx.len(), 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); @@ -1064,7 +1078,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht } else { expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true); } - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut update = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); if claim_htlc { @@ -1107,7 +1121,8 @@ fn removed_payment_no_manager_persistence() { let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes_1_deserialized; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -1124,7 
+1139,7 @@ fn removed_payment_no_manager_persistence() { &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash }] ); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match &events[0] { @@ -1159,7 +1174,7 @@ fn removed_payment_no_manager_persistence() { &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }] ); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match &events[0] { @@ -1173,6 +1188,281 @@ fn removed_payment_no_manager_persistence() { expect_payment_failed!(nodes[0], payment_hash, false); } +#[test] +fn manager_persisted_pre_outbound_edge_forward() { + do_manager_persisted_pre_outbound_edge_forward(false); +} + +#[test] +fn manager_persisted_pre_outbound_edge_intercept_forward() { + do_manager_persisted_pre_outbound_edge_forward(true); +} + +fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let mut intercept_forwards_config = test_default_channel_config(); + intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + + // Lock in the HTLC from node_a <> node_b. 
+ let amt_msat = 5000; + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + if intercept_htlc { + route.paths[0].hops[1].short_channel_id = nodes[1].node.get_intercept_scid(); + } + nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + // While an inbound HTLC is committed in a channel but not yet forwarded, we store its onion in + // the `Channel` in case we need to remember it on restart. Once it's irrevocably forwarded to the + // outbound edge, we can prune it on the inbound edge. + assert_eq!( + nodes[1].node.test_get_inbound_committed_htlcs_with_onion(nodes[0].node.get_our_node_id(), chan_id_1), + 1 + ); + + // Decode the HTLC onion but don't forward it to the next hop, such that the HTLC ends up in + // `ChannelManager::forward_htlcs` or `ChannelManager::pending_intercepted_htlcs`. + nodes[1].node.test_process_pending_update_add_htlcs(); + + // Disconnect peers and reload the forwarding node_b. 
+ nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + + let node_b_encoded = nodes[1].node.encode(); + + let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); + reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + + reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0])); + let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]); + args_b_c.send_channel_ready = (true, true); + args_b_c.send_announcement_sigs = (true, true); + reconnect_nodes(args_b_c); + + // Before an inbound HTLC is irrevocably forwarded, its onion should still be persisted within the + // inbound edge channel. + assert_eq!( + nodes[1].node.test_get_inbound_committed_htlcs_with_onion(nodes[0].node.get_our_node_id(), chan_id_1), + 1 + ); + + // Forward the HTLC and ensure we can claim it post-reload. + nodes[1].node.process_pending_htlc_forwards(); + + if intercept_htlc { + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + let (intercept_id, expected_outbound_amt_msat) = match events[0] { + Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. 
} => { + (intercept_id, expected_outbound_amount_msat) + }, + _ => panic!() + }; + nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id_2, + nodes[2].node.get_our_node_id(), expected_outbound_amt_msat).unwrap(); + nodes[1].node.process_pending_htlc_forwards(); + } + check_added_monitors(&nodes[1], 1); + + let updates = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); + nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false); + expect_and_process_pending_htlcs(&nodes[2], false); + // After an inbound HTLC is irrevocably forwarded, its onion should be pruned within the inbound + // edge channel. + assert_eq!( + nodes[1].node.test_get_inbound_committed_htlcs_with_onion(nodes[0].node.get_our_node_id(), chan_id_1), + 0 + ); + + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); + let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage)); + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} + +#[test] +fn test_manager_persisted_post_outbound_edge_forward() { + // Test that we will not double-forward an HTLC after restart if it has already been forwarded to + // the outbound edge, which was previously broken. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + + // Lock in the HTLC from node_a <> node_b. 
+ let amt_msat = 5000; + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + // Add the HTLC to the outbound edge, node_b <> node_c. + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 1); + + // Disconnect peers and reload the forwarding node_b. + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + + let node_b_encoded = nodes[1].node.encode(); + let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); + reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + + reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0])); + let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]); + args_b_c.send_channel_ready = (true, true); + args_b_c.send_announcement_sigs = (true, true); + args_b_c.pending_htlc_adds = (0, 1); + // While reconnecting, we re-send node_b's outbound update_add and commit the HTLC to the b<>c + // channel. + reconnect_nodes(args_b_c); + + // Ensure node_b won't double-forward the outbound HTLC (this was previously broken). + nodes[1].node.process_pending_htlc_forwards(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Claim the HTLC backwards to node_a. 
+ expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); + let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage)); + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} + +#[test] +fn test_manager_persisted_post_outbound_edge_holding_cell() { + // Test that we will not double-forward an HTLC after restart if it is already in the outbound + // edge's holding cell, which was previously broken. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); + + // Lock in the HTLC from node_a <> node_b. + let amt_msat = 1000; + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + // Send a 2nd HTLC node_c -> node_b, to force the first HTLC into the holding cell. 
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[1], amt_msat); + nodes[2].node.send_payment_with_route(route_2, payment_hash_2, RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + let send_event = + SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &send_event.commitment_msg); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Add the HTLC to the outbound edge, node_b <> node_c. Force the outbound HTLC into the b<>c + // holding cell. + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 0); + assert_eq!( + nodes[1].node.test_holding_cell_outbound_htlc_forwards_count(nodes[2].node.get_our_node_id(), chan_id_2), + 1 + ); + + // Disconnect peers and reload the forwarding node_b. 
+ nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + + let node_b_encoded = nodes[1].node.encode(); + let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); + reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_id_2); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id_2, latest_update); + + reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0])); + + // Reconnect b<>c. Node_b has pending RAA + commitment_signed from the incomplete c->b + // commitment dance, plus an HTLC in the holding cell that will be released after the dance. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_args.pending_raa = (false, true); + reconnect_args.pending_responding_commitment_signed = (false, true); + // Node_c needs a monitor update to catch up after processing node_b's reestablish. + reconnect_args.expect_renegotiated_funding_locked_monitor_update = (false, true); + // The holding cell HTLC will be released after the commitment dance - handle it below. + reconnect_args.allow_post_commitment_dance_msgs = (false, true); + reconnect_nodes(reconnect_args); + + // The holding cell HTLC was released during the reconnect. Complete its commitment dance. + let holding_cell_htlc_msgs = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(holding_cell_htlc_msgs.len(), 1); + match &holding_cell_htlc_msgs[0] { + MessageSendEvent::UpdateHTLCs { node_id, updates, .. 
} => { + assert_eq!(*node_id, nodes[2].node.get_our_node_id()); + assert_eq!(updates.update_add_htlcs.len(), 1); + nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false); + } + _ => panic!("Unexpected message: {:?}", holding_cell_htlc_msgs[0]), + } + + // Ensure node_b won't double-forward the outbound HTLC (this was previously broken). + nodes[1].node.process_pending_htlc_forwards(); + let msgs = nodes[1].node.get_and_clear_pending_msg_events(); + assert!(msgs.is_empty(), "Expected 0 messages, got {:?}", msgs); + + // The a->b->c HTLC is now committed on node_c. The c->b HTLC is committed on node_b. + // Both payments should now be claimable. + expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); + expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, amt_msat, None, nodes[1].node.get_our_node_id()); + + // Claim the a->b->c payment on node_c. + let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage)); + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); + + // Claim the c->b payment on node_b. 
+ nodes[1].node.claim_funds(payment_preimage_2); + expect_payment_claimed!(nodes[1], payment_hash_2, amt_msat); + check_added_monitors(&nodes[1], 1); + let mut update = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); + nodes[2].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), update.update_fulfill_htlcs.remove(0)); + do_commitment_signed_dance(&nodes[2], &nodes[1], &update.commitment_signed, false, false); + expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); +} + #[test] fn test_reload_partial_funding_batch() { let chanmon_cfgs = create_chanmon_cfgs(3); @@ -1180,7 +1470,8 @@ fn test_reload_partial_funding_batch() { let new_persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg.clone()), Some(legacy_cfg)]); let new_channel_manager; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -1266,7 +1557,7 @@ fn test_htlc_localremoved_persistence() { RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap(); nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash, RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); @@ -1315,7 +1606,8 @@ fn test_peer_storage() { let (persister, chain_monitor); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let nodes_0_deserialized; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = 
create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1366,7 +1658,7 @@ fn test_peer_storage() { // TODO: Handle the case where we've completely forgotten about an active channel. reload_node!( nodes[0], - test_default_channel_config(), + test_legacy_channel_config(), &nodes_0_serialized, &[&old_state_monitor[..]], persister, @@ -1420,3 +1712,526 @@ fn test_peer_storage() { assert!(res.is_err()); } +#[test] +fn test_hold_completed_inflight_monitor_updates_upon_manager_reload() { + // Test that if a `ChannelMonitorUpdate` completes after the `ChannelManager` is serialized, + // but before it is deserialized, we hold any completed in-flight updates until background event + // processing. Previously, we would remove completed monitor updates from + // `in_flight_monitor_updates` during deserialization, relying on + // [`ChannelManager::process_background_events`] to eventually be called before the + // `ChannelManager` is serialized again such that the channel is resumed and further updates can + // be made. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let (persister_a, persister_b); + let (chain_monitor_a, chain_monitor_b); + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes_0_deserialized_a; + let nodes_0_deserialized_b; + + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; + + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); + + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + + // Send a payment that will be pending due to an async monitor update. 
+ let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); + let payment_id = PaymentId(payment_hash.0); + let onion = RecipientOnionFields::secret_only(payment_secret); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap(); + check_added_monitors(&nodes[0], 1); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Serialize the ChannelManager while the monitor update is still in-flight. + let node_0_serialized = nodes[0].node.encode(); + + // Now complete the monitor update by calling force_channel_monitor_updated. + // This updates the monitor's state, but the ChannelManager still thinks it's pending. + let (_, latest_update_id) = nodes[0].chain_monitor.get_latest_mon_update_id(chan_id); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update_id); + let monitor_serialized_updated = get_monitor!(nodes[0], chan_id).encode(); + + // Reload the node with the updated monitor. Upon deserialization, the ChannelManager will + // detect that the monitor update completed (monitor's update_id >= the in-flight update_id) + // and queue a `BackgroundEvent::MonitorUpdatesComplete`. + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + reload_node!( + nodes[0], + test_default_channel_config(), + &node_0_serialized, + &[&monitor_serialized_updated[..]], + persister_a, + chain_monitor_a, + nodes_0_deserialized_a + ); + + // If we serialize again, even though we haven't processed any background events yet, we should + // still see the `BackgroundEvent::MonitorUpdatesComplete` be regenerated on startup. 
+ let node_0_serialized = nodes[0].node.encode(); + reload_node!( + nodes[0], + test_default_channel_config(), + &node_0_serialized, + &[&monitor_serialized_updated[..]], + persister_b, + chain_monitor_b, + nodes_0_deserialized_b + ); + + // Reconnect the nodes. We should finally see the `update_add_htlc` go out, as the reconnection + // should first process `BackgroundEvent::MonitorUpdatesComplete`, allowing the channel to be + // resumed. + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.pending_htlc_adds = (0, 1); + reconnect_nodes(reconnect_args); +} + +#[test] +fn outbound_removed_holding_cell_resolved_no_double_forward() { + // Test that if a forwarding node has an HTLC that is fully removed on the outbound edge + // but where the inbound edge resolution is in the holding cell, and we reload the node in this + // state, that node will not double-forward the HTLC. + + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2 = chan_1_2.2; + + // Send a payment from nodes[0] to nodes[2] via nodes[1]. + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); + send_along_route_with_secret( + &nodes[0], route, &[&[&nodes[1], &nodes[2]]], 1_000_000, payment_hash, payment_secret, + ); + + // Claim the payment on nodes[2]. 
+ nodes[2].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); + + // Disconnect nodes[0] from nodes[1] BEFORE processing the fulfill. + // This forces the inbound fulfill resolution to go into nodes[1]'s holding cell for the inbound + // channel. + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + // Process the fulfill from nodes[2] to nodes[1]. + let updates_2_1 = get_htlc_update_msgs(&nodes[2], &node_1_id); + nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates_2_1.update_fulfill_htlcs[0].clone()); + check_added_monitors(&nodes[1], 1); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, false, false); + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); + + // At this point: + // - The outbound HTLC nodes[1]->nodes[2] is resolved and removed + // - The inbound HTLC nodes[0]->nodes[1] is still in a Committed state, with the fulfill + // resolution in nodes[1]'s chan_0_1 holding cell + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_serialized = get_monitor!(nodes[1], chan_id_1_2).encode(); + + // Reload nodes[1]. + // During deserialization, we previously would have not noticed that the nodes[0]<>nodes[1] HTLC + // had a resolution pending in the holding cell, and reconstructed the ChannelManager's pending + // HTLC state indicating that the HTLC still needed to be forwarded to the outbound edge. + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized + ); + + // Check that nodes[1] doesn't double-forward the HTLC. + nodes[1].node.process_pending_htlc_forwards(); + + // Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell. 
+ let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_claims = (0, 1); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received the fulfill and generate PaymentSent. + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} + +#[test] +fn test_reload_node_with_preimage_in_monitor_claims_htlc() { + // Test that if a forwarding node has an HTLC that was irrevocably removed on the outbound edge + // via claim but is still forwarded-and-unresolved in the inbound edge, that HTLC will not be + // failed back on the inbound edge on reload. + // + // For context, the ChannelManager is moving towards reconstructing the pending inbound HTLC set + // from Channel data on startup. If we find an inbound HTLC that is flagged as already-forwarded, + // we then check that the HTLC is either (a) still present in the outbound edge or (b) removed + // from the outbound edge but with a preimage present in the corresponding ChannelMonitor, + // indicating that it was removed from the outbound edge via claim. If neither of those are the + // case, we infer that the HTLC was removed from the outbound edge via failure and fail the HTLC + // backwards. + // + // Here we ensure that inbound HTLCs in case (b) above will not be failed backwards on manager + // reload. 
+ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2 = chan_1_2.2; + + // Send a payment from nodes[0] to nodes[2] via nodes[1]. + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); + send_along_route_with_secret( + &nodes[0], route, &[&[&nodes[1], &nodes[2]]], 1_000_000, payment_hash, payment_secret, + ); + + // Claim the payment on nodes[2]. + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); + + // Disconnect nodes[0] from nodes[1] BEFORE processing the fulfill. + // This prevents the claim from propagating back, leaving the inbound HTLC in ::Forwarded state. + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + // Process the fulfill from nodes[2] to nodes[1]. + // This stores the preimage in nodes[1]'s monitor for chan_1_2. 
+ let updates_2_1 = get_htlc_update_msgs(&nodes[2], &node_1_id); + nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates_2_1.update_fulfill_htlcs[0].clone()); + check_added_monitors(&nodes[1], 1); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, false, false); + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); + + // Clear the holding cell's claim entry on chan_0_1 before serialization. + // This simulates a crash where the HTLC was fully removed from the outbound edge but is still + // present on the inbound edge without a resolution. + nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + // At this point: + // - The inbound HTLC on nodes[1] (from nodes[0]) is in ::Forwarded state + // - The preimage IS in nodes[1]'s monitor for chan_1_2 + // - The outbound HTLC to nodes[2] is resolved + // + // Serialize nodes[1] state and monitors before reloading. + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_serialized = get_monitor!(nodes[1], chan_id_1_2).encode(); + + // Reload nodes[1]. + // During deserialization, we track inbound HTLCs that purport to already be forwarded on the + // outbound edge. If any are entirely missing from the outbound edge with no preimage available, + // they will be failed backwards. Otherwise, as in this case where a preimage is available, the + // payment should be claimed backwards. + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized, + Some(true) + ); + + // When the claim is reconstructed during reload, a PaymentForwarded event is generated. + // Fetching events triggers the pending monitor update (adding preimage) to be applied. 
+ expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); + check_added_monitors(&nodes[1], 1); + + // Reconnect nodes[1] to nodes[0]. The claim should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_claims = (0, 1); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received the fulfill and generate PaymentSent. + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} + +#[test] +fn test_reload_node_without_preimage_fails_htlc() { + // Test that if a forwarding node has an HTLC that was removed on the outbound edge via failure + // but is still forwarded-and-unresolved in the inbound edge, that HTLC will be correctly + // failed back on reload via the already_forwarded_htlcs mechanism. + // + // For context, the ChannelManager reconstructs the pending inbound HTLC set from Channel data + // on startup. If an inbound HTLC is present but flagged as already-forwarded, we check that + // the HTLC is either (a) still present in the outbound edge or (b) removed from the outbound + // edge but with a preimage present in the corresponding ChannelMonitor, indicating it was + // removed via claim. If neither, we infer the HTLC was removed via failure and fail it back. + // + // Here we test the failure case: no preimage is present, so the HTLC should be failed back. 
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2 = chan_1_2.2; + + // Send a payment from nodes[0] to nodes[2] via nodes[1]. + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); + send_along_route_with_secret( + &nodes[0], route, &[&[&nodes[1], &nodes[2]]], 1_000_000, payment_hash, payment_secret, + ); + + // Disconnect nodes[0] from nodes[1] BEFORE processing the failure. + // This prevents the fail from propagating back, leaving the inbound HTLC in ::Forwarded state. + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + // Fail the payment on nodes[2] and process the failure to nodes[1]. + // This removes the outbound HTLC and queues a fail in the holding cell. 
+ nodes[2].node.fail_htlc_backwards(&payment_hash); + expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[2], &[HTLCHandlingFailureType::Receive { payment_hash }] + ); + check_added_monitors(&nodes[2], 1); + + let updates_2_1 = get_htlc_update_msgs(&nodes[2], &node_1_id); + nodes[1].node.handle_update_fail_htlc(node_2_id, &updates_2_1.update_fail_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, false, false); + expect_and_process_pending_htlcs_and_htlc_handling_failed( + &nodes[1], &[HTLCHandlingFailureType::Forward { node_id: Some(node_2_id), channel_id: chan_id_1_2 }] + ); + + // Clear the holding cell's fail entry on chan_0_1 before serialization. + // This simulates a crash where the HTLC was fully removed from the outbound edge but is still + // present on the inbound edge without a resolution. Otherwise, we would not be able to exercise + // the desired failure paths due to the holding cell failure resolution being present. + nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + // Now serialize. The state has: + // - Inbound HTLC on chan_0_1 in ::Forwarded state + // - Outbound HTLC on chan_1_2 resolved (not present) + // - No preimage in monitors (it was a failure) + // - No holding cell entry for the fail (we cleared it) + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_serialized = get_monitor!(nodes[1], chan_id_1_2).encode(); + + // Reload nodes[1]. + // The already_forwarded_htlcs mechanism should detect: + // - Inbound HTLC is in ::Forwarded state + // - Outbound HTLC is not present in outbound channel + // - No preimage in monitors + // Therefore it should fail the HTLC backwards. 
+ reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized, + Some(true) + ); + + // After reload, nodes[1] should have generated an HTLCHandlingFailed event. + let events = nodes[1].node.get_and_clear_pending_events(); + assert!(!events.is_empty(), "Expected HTLCHandlingFailed event"); + for event in events { + match event { + Event::HTLCHandlingFailed { .. } => {}, + _ => panic!("Unexpected event {:?}", event), + } + } + + // Process the failure so it goes back into chan_0_1's holding cell. + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 0); // No monitor update yet (peer disconnected) + + // Reconnect nodes[1] to nodes[0]. The fail should be in nodes[1]'s holding cell. + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_fails = (0, 1); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received the failure and generate PaymentFailed. + expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); +} + +#[test] +fn test_reload_with_mpp_claims_on_same_channel() { + // Test that if a forwarding node has two HTLCs for the same MPP payment that were both + // irrevocably removed on the outbound edge via claim but are still forwarded-and-unresolved + // on the inbound edge, both HTLCs will be claimed backwards on restart. 
+ // + // Topology: + // nodes[0] ----chan_0_1----> nodes[1] ----chan_1_2_a----> nodes[2] + // \----chan_1_2_b---/ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 2_000_000, 0); + let chan_1_2_a = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + let chan_1_2_b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + + let chan_id_0_1 = chan_0_1.2; + let chan_id_1_2_a = chan_1_2_a.2; + let chan_id_1_2_b = chan_1_2_b.2; + + // Send an MPP payment large enough that the router must split it across both outbound channels. + // Each 1M sat outbound channel has 100M msat max in-flight, so 150M msat requires splitting. + let amt_msat = 150_000_000; + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + + let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); + nodes[0].node.send_payment_with_route( + route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id, + ).unwrap(); + check_added_monitors(&nodes[0], 1); + + // Forward the first HTLC nodes[0] -> nodes[1] -> nodes[2]. Note that the second HTLC is released + // from the holding cell during the first HTLC's commitment_signed_dance. 
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event_1 = SendEvent::from_event(events.remove(0)); + + nodes[1].node.handle_update_add_htlc(node_0_id, &payment_event_1.msgs[0]); + check_added_monitors(&nodes[1], 0); + nodes[1].node.handle_commitment_signed_batch_test(node_0_id, &payment_event_1.commitment_msg); + check_added_monitors(&nodes[1], 1); + let (_, raa, holding_cell_htlcs) = + do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + assert_eq!(holding_cell_htlcs.len(), 1); + let payment_event_2 = holding_cell_htlcs.into_iter().next().unwrap(); + nodes[1].node.handle_revoke_and_ack(node_0_id, &raa); + check_added_monitors(&nodes[1], 1); + + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 1); + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev_1_2 = events.remove(0); + pass_along_path( + &nodes[1], &[&nodes[2]], amt_msat, payment_hash, Some(payment_secret), ev_1_2, false, None, + ); + + // Second HTLC: full path nodes[0] -> nodes[1] -> nodes[2]. PaymentClaimable expected at end. + pass_along_path( + &nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret), + payment_event_2, true, None, + ); + + // Claim the HTLCs such that they're fully removed from the outbound edge, but disconnect + // node_0<>node_1 so that they can't be claimed backwards by node_1. + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[2], 2); + expect_payment_claimed!(nodes[2], payment_hash, amt_msat); + + nodes[0].node.peer_disconnected(node_1_id); + nodes[1].node.peer_disconnected(node_0_id); + + let mut events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + for ev in events { + match ev { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates, .. 
} => { + assert_eq!(*node_id, node_1_id); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[1].node.handle_update_fulfill_htlc(node_2_id, updates.update_fulfill_htlcs[0].clone()); + check_added_monitors(&nodes[1], 1); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); + }, + _ => panic!("Unexpected event"), + } + } + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + for event in events { + expect_payment_forwarded( + event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false, + ); + } + + // Clear the holding cell's claim entries on chan_0_1 before serialization. + // This simulates a crash where both HTLCs were fully removed on the outbound edges but are + // still present on the inbound edge without a resolution. + nodes[1].node.test_clear_channel_holding_cell(node_0_id, chan_id_0_1); + + let node_1_serialized = nodes[1].node.encode(); + let mon_0_1_serialized = get_monitor!(nodes[1], chan_id_0_1).encode(); + let mon_1_2_a_serialized = get_monitor!(nodes[1], chan_id_1_2_a).encode(); + let mon_1_2_b_serialized = get_monitor!(nodes[1], chan_id_1_2_b).encode(); + + reload_node!( + nodes[1], + node_1_serialized, + &[&mon_0_1_serialized, &mon_1_2_a_serialized, &mon_1_2_b_serialized], + persister, + new_chain_monitor, + nodes_1_deserialized, + Some(true) + ); + + // When the claims are reconstructed during reload, PaymentForwarded events are regenerated. + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + for event in events { + expect_payment_forwarded( + event, &nodes[1], &nodes[0], &nodes[2], Some(1000), None, false, false, false, + ); + } + // Fetching events triggers the pending monitor updates (one for each HTLC preimage) to be applied. + check_added_monitors(&nodes[1], 2); + + // Reconnect nodes[1] to nodes[0]. Both claims should be in nodes[1]'s holding cell. 
+ let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[0]); + reconnect_args.pending_cell_htlc_claims = (0, 2); + reconnect_nodes(reconnect_args); + + // nodes[0] should now have received both fulfills and generate PaymentSent. + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 97e4429fbd6..b39e8d31a75 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -49,7 +49,8 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { // before they otherwise would and reorg them out, confirming an HTLC-Success tx instead. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(legacy_cfg), None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1); @@ -65,7 +66,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { // Provide preimage to node 2 by claiming payment nodes[2].node.claim_funds(our_payment_preimage); expect_payment_claimed!(nodes[2], our_payment_hash, 1_000_000); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let claim_txn = if local_commitment { @@ -79,7 +80,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { // Give node 2 node 1's transactions and get its response (claiming the HTLC instead). 
connect_block(&nodes[2], &create_dummy_block(nodes[2].best_block_hash(), 42, node_1_commitment_txn.clone())); check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate) - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); check_closed_event(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 100000); let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_2_commitment_txn.len(), 1); // ChannelMonitor: 1 offered HTLC-Claim @@ -113,11 +114,11 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { vec![node_2_commitment_txn.pop().unwrap()] }; check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate) - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[2].node.get_our_node_id()], 100000); // Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1. connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0); if claim { @@ -139,7 +140,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { ); } - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Which should result in an immediate claim/fail of the HTLC: let mut htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); if claim { @@ -182,7 +183,8 @@ fn test_counterparty_revoked_reorg() { // still be claim-from-able after the reorg. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); @@ -199,7 +201,7 @@ fn test_counterparty_revoked_reorg() { nodes[0].node.claim_funds(payment_preimage_3); let _ = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_claimed!(nodes[0], payment_hash_3, 4_000_000); let mut unrevoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2); @@ -211,7 +213,7 @@ fn test_counterparty_revoked_reorg() { // on any of the HTLCs, at least until we get six confirmations (which we won't get). mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); // Connect up to one block before the revoked transaction would be considered final, then do a @@ -255,7 +257,8 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ let persister; let new_chain_monitor; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let nodes_0_deserialized; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -313,7 +316,15 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0); } - 
check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); + } + + let expected_err = "Funding transaction was un-confirmed, originally locked at 6 confs."; + if reload_node && !reorg_after_reload { + handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed, originally locked at 6 confs."); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) }; + check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000); } if reload_node { @@ -380,16 +391,15 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ // we were already running. nodes[0].node.test_process_background_events(); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(txn.len(), 1); } - let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs."; if reorg_after_reload || !reload_node { - handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. 
Locked at 6 confs, now have 0 confs."); - check_added_monitors!(nodes[1], 1); + handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed, originally locked at 6 confs."); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) }; check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000); } @@ -404,7 +414,11 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_ nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &Init { features: nodes[1].node.init_features(), networks: None, remote_network_address: None }, true).unwrap(); + nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); } + create_announced_chan_between_nodes(&nodes, 0, 1); send_payment(&nodes[0], &[&nodes[1]], 8000000); } @@ -455,7 +469,8 @@ fn test_set_outpoints_partial_claiming() { // - disconnect tx, see no tx anymore let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); @@ -477,14 +492,14 @@ fn test_set_outpoints_partial_claiming() { expect_payment_claimed!(nodes[0], payment_hash_1, 3_000_000); nodes[0].node.claim_funds(payment_preimage_2); expect_payment_claimed!(nodes[0], payment_hash_2, 3_000_000); - check_added_monitors!(nodes[0], 2); + check_added_monitors(&nodes[0], 2); 
nodes[0].node.get_and_clear_pending_msg_events(); // Connect blocks on node A commitment transaction mine_transaction(&nodes[0], &remote_txn[0]); check_closed_broadcast!(nodes[0], true); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Verify node A broadcast tx claiming both HTLCs { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -508,7 +523,7 @@ fn test_set_outpoints_partial_claiming() { channel_funding_txo: None, user_channel_id: None, }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Verify node B broadcast 2 HTLC-timeout txn let partial_claim_tx = { let mut node_txn = nodes[1].tx_broadcaster.unique_txn_broadcast(); @@ -583,11 +598,11 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) { check_closed_broadcast!(nodes[0], true); assert!(nodes[0].node.list_channels().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); check_closed_broadcast!(nodes[1], true); assert!(nodes[1].node.list_channels().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); @@ -622,7 +637,7 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) { let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(node_a_spendable.len(), 1); - if let Event::SpendableOutputs { outputs, channel_id } = node_a_spendable.pop().unwrap() { + if let Event::SpendableOutputs { outputs, channel_id, counterparty_node_id: _ } = 
node_a_spendable.pop().unwrap() { assert_eq!(outputs.len(), 1); assert_eq!(channel_id, Some(chan_id)); let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(), @@ -643,7 +658,7 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) { let mut node_b_spendable = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); assert_eq!(node_b_spendable.len(), 1); - if let Event::SpendableOutputs { outputs, channel_id } = node_b_spendable.pop().unwrap() { + if let Event::SpendableOutputs { outputs, channel_id, counterparty_node_id: _ } = node_b_spendable.pop().unwrap() { assert_eq!(outputs.len(), 1); assert_eq!(channel_id, Some(chan_id)); let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(), @@ -670,7 +685,8 @@ fn test_htlc_preimage_claim_holder_commitment_after_counterparty_commitment_reor // test that we only claim the currently confirmed commitment. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -745,7 +761,8 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa // confirmed commitment. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -828,7 +845,6 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(keyed_anchors: bool, p2a let mut config = test_default_channel_config(); config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = keyed_anchors; config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; - config.manually_accept_inbound_channels = keyed_anchors || p2a_anchor; let persister; let new_chain_monitor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config.clone())]); @@ -985,7 +1001,6 @@ fn do_test_split_htlc_expiry_tracking(use_third_htlc: bool, reorg_out: bool, p2a // This test relies on being able to consolidate HTLC claims into a single transaction, which // requires anchors: let mut config = test_default_channel_config(); - config.manually_accept_inbound_channels = true; config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 5e7c7d9fd35..870f00ee9df 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -14,10 +14,11 @@ use crate::chain::transaction::OutPoint; use crate::chain::ChannelMonitorUpdateStatus; use crate::events::{ClosureReason, Event, HTLCHandlingFailureReason, HTLCHandlingFailureType}; use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState}; -use crate::ln::channelmanager::{self, PaymentId, 
RecipientOnionFields, Retry}; +use crate::ln::channelmanager::{self, PaymentId}; use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent}; use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::ln::outbound_payment::{RecipientOnionFields, Retry}; use crate::ln::script::ShutdownScript; use crate::ln::types::ChannelId; use crate::prelude::*; @@ -175,7 +176,7 @@ fn expect_channel_shutdown_state_with_htlc() { // Claim Funds on Node2 nodes[2].node.claim_funds(payment_preimage_0); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_0, 100_000); // Fulfil HTLCs on node1 and node0 @@ -187,7 +188,7 @@ fn expect_channel_shutdown_state_with_htlc() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -361,7 +362,7 @@ fn expect_channel_shutdown_state_with_force_closure() { .force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, message.clone()) .unwrap(); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown); assert!(nodes[1].node.list_channels().is_empty()); @@ -371,7 +372,7 @@ fn expect_channel_shutdown_state_with_force_closure() { check_spends!(node_txn[0], chan_1.3); mine_transaction(&nodes[0], &node_txn[0]); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.list_channels().is_empty()); 
assert!(nodes[1].node.list_channels().is_empty()); @@ -452,7 +453,7 @@ fn updates_shutdown_wait() { unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. }, {}); nodes[2].node.claim_funds(payment_preimage_0); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_0, 100_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -463,7 +464,7 @@ fn updates_shutdown_wait() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -495,7 +496,7 @@ fn updates_shutdown_wait() { assert!(nodes[0].node.list_channels().is_empty()); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[1].tx_broadcaster.clear(); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); assert!(nodes[1].node.list_channels().is_empty()); @@ -549,7 +550,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { .node .send_payment(our_payment_hash, onion, id, route_params, Retry::Attempts(0)) .unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs(&nodes[0], &node_b_id); assert_eq!(updates.update_add_htlcs.len(), 1); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -564,7 +565,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &updates.commitment_signed); - 
check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_shutdown(node_a_id, &node_0_shutdown); assert!(commitment_signed_dance_through_cp_raa(&nodes[1], &nodes[0], false, false).is_none()); expect_and_process_pending_htlcs(&nodes[1], false); @@ -625,7 +626,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { assert!(nodes[0].node.list_channels().is_empty()); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[1].tx_broadcaster.clear(); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[2].node.list_channels().is_empty()); @@ -718,7 +719,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 100_000); let mut updates = get_htlc_update_msgs(&nodes[2], &node_b_id); @@ -729,7 +730,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(node_c_id, updates.update_fulfill_htlcs.remove(0)); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut updates_2 = get_htlc_update_msgs(&nodes[1], &node_a_id); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, false, false); @@ -834,15 +835,19 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { // checks it, but in this case nodes[1] didn't ever get a chance to receive a // closing_signed so we do it ourselves check_closed_broadcast!(nodes[1], false); - check_added_monitors!(nodes[1], 1); - let reason = ClosureReason::CounterpartyForceClosed { 
peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &node_b_id)) }; + check_added_monitors(&nodes[1], 1); + let peer_msg = format!( + "Got a message for a channel from the wrong node! No such channel_id {} for the passed counterparty_node_id {}", + chan_1.2, node_b_id + ); + let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg) }; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); } assert!(nodes[0].node.list_channels().is_empty()); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); + nodes[1].tx_broadcaster.clear(); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); assert!(nodes[1].node.list_channels().is_empty()); @@ -920,7 +925,7 @@ fn test_upfront_shutdown_script() { nodes[0].node.close_channel(&chan.2, &node_b_id).unwrap(); let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, node_b_id); nodes[1].node.handle_shutdown(node_a_id, &node_1_shutdown); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { @@ -935,7 +940,7 @@ fn test_upfront_shutdown_script() { *nodes[0].override_init_features.borrow_mut() = None; let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); nodes[0].node.handle_shutdown(node_b_id, &node_0_shutdown); let events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -951,7 +956,7 @@ fn test_upfront_shutdown_script() { //// channel smoothly let chan = 
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); nodes[0].node.handle_shutdown(node_b_id, &node_0_shutdown); let events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -991,6 +996,17 @@ fn test_unsupported_anysegwit_upfront_shutdown_script() { let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); open_channel.common_fields.shutdown_scriptpubkey = Some(anysegwit_shutdown_script.clone()); nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => { + assert!(nodes[1] + .node + .accept_inbound_channel(temporary_channel_id, counterparty_node_id, 42, None,) + .is_err()); + }, + _ => panic!("Unexpected event"), + }; let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1019,7 +1035,8 @@ fn test_unsupported_anysegwit_upfront_shutdown_script() { // Check script when handling an accept_channel message nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(node_a_id, &open_channel); + handle_and_accept_open_channel(&nodes[1], node_a_id, &open_channel); + let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); accept_channel.common_fields.shutdown_scriptpubkey = Some(anysegwit_shutdown_script.clone()); @@ -1057,6 +1074,17 @@ fn test_invalid_upfront_shutdown_script() { open_channel.common_fields.shutdown_scriptpubkey = 
Some(Builder::new().push_int(0).push_slice(&[0, 0]).into_script()); nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => { + assert!(nodes[1] + .node + .accept_inbound_channel(temporary_channel_id, counterparty_node_id, 42, None,) + .is_err()); + }, + _ => panic!("Unexpected event"), + }; let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1088,7 +1116,7 @@ fn test_segwit_v0_shutdown_script() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Use a segwit v0 script supported even without option_shutdown_anysegwit let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1127,7 +1155,7 @@ fn test_anysegwit_shutdown_script() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Use a non-v0 segwit script supported by option_shutdown_anysegwit let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1188,7 +1216,7 @@ fn test_unsupported_anysegwit_shutdown_script() { Ok(_) => panic!("Expected error"), } nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Use a non-v0 segwit script unsupported without option_shutdown_anysegwit let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1217,7 +1245,7 @@ fn test_invalid_shutdown_script() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); 
nodes[1].node.close_channel(&chan.2, &node_a_id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Use a segwit v0 script with an unsupported witness program let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1253,7 +1281,7 @@ fn test_user_shutdown_script() { .node .close_channel_with_feerate_and_script(&chan.2, &node_a_id, None, Some(shutdown_script)) .unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id); @@ -1311,7 +1339,8 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { // it manually. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg), None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); @@ -1390,7 +1419,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { && txn[0].output[0].script_pubkey.is_p2wsh()) ); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: "closing_signed negotiation failed to finish within two timer ticks".to_string(), }; @@ -1819,7 +1848,7 @@ fn test_force_closure_on_low_stale_fee() { // Finally, connect one more block and check the force-close happened. 
connect_blocks(&nodes[1], 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_broadcast(&nodes[1], 1, true); let reason = ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw: 253, diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index a96af7bbc5d..4846f7137cc 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -9,7 +9,7 @@ #![cfg_attr(not(test), allow(unused_imports))] -use crate::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW; +use crate::chain::chaininterface::{TransactionType, FEERATE_FLOOR_SATS_PER_KW}; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::chain::transaction::OutPoint; use crate::chain::ChannelMonitorUpdateStatus; @@ -17,18 +17,79 @@ use crate::events::bump_transaction::sync::WalletSourceSync; use crate::events::{ClosureReason, Event, FundingInfo, HTLCHandlingFailureType}; use crate::ln::chan_utils; use crate::ln::channel::CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY; -use crate::ln::channelmanager::{PaymentId, RecipientOnionFields, BREAKDOWN_TIMEOUT}; +use crate::ln::channelmanager::{provided_init_features, PaymentId, BREAKDOWN_TIMEOUT}; use crate::ln::functional_test_utils::*; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; +use crate::ln::outbound_payment::RecipientOnionFields; use crate::ln::types::ChannelId; use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::util::errors::APIError; use crate::util::ser::Writeable; -use crate::util::test_channel_signer::SignerOp; +use bitcoin::hashes::Hash; +use bitcoin::secp256k1::ecdsa::Signature; use bitcoin::secp256k1::PublicKey; -use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut}; +use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut, WPubkeyHash}; + +#[test] 
+fn test_splicing_not_supported_api_error() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut features = provided_init_features(&test_default_channel_config()); + features.clear_splicing(); + *node_cfgs[0].override_init_features.borrow_mut() = Some(features); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_id_0 = nodes[0].node.get_our_node_id(); + let node_id_1 = nodes[1].node.get_our_node_id(); + + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + let bs_contribution = SpliceContribution::splice_in(Amount::ZERO, Vec::new(), None); + + let res = nodes[1].node.splice_channel( + &channel_id, + &node_id_0, + bs_contribution.clone(), + 0, // funding_feerate_per_kw, + None, // locktime + ); + match res { + Err(APIError::ChannelUnavailable { err }) => { + assert!(err.contains("Peer does not support splicing")) + }, + _ => panic!("Wrong error {:?}", res.err().unwrap()), + } + + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + + let mut features = nodes[0].node.init_features(); + features.set_splicing_optional(); + features.clear_quiescence(); + *nodes[0].override_init_features.borrow_mut() = Some(features); + + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_channel_ready = (true, true); + reconnect_args.send_announcement_sigs = (true, true); + reconnect_nodes(reconnect_args); + + let res = nodes[1].node.splice_channel( + &channel_id, + &node_id_0, + bs_contribution, + 0, // funding_feerate_per_kw, + None, // locktime + ); + match res { + Err(APIError::ChannelUnavailable { err }) => { + assert!(err.contains("Peer does not support quiescence, a splicing prerequisite")) + }, + _ => panic!("Wrong error {:?}", res.err().unwrap()), + } +} #[test] fn test_v1_splice_in_negative_insufficient_inputs() { @@ 
-48,11 +109,8 @@ fn test_v1_splice_in_negative_insufficient_inputs() { let funding_inputs = create_dual_funding_utxos_with_prev_txs(&nodes[0], &[extra_splice_funding_input_sats]); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_sats), - inputs: funding_inputs, - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(splice_in_sats), funding_inputs, None); // Initiate splice-in, with insufficient input contribution let res = nodes[0].node.splice_channel( @@ -73,7 +131,7 @@ fn test_v1_splice_in_negative_insufficient_inputs() { pub fn negotiate_splice_tx<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, initiator_contribution: SpliceContribution, -) -> msgs::CommitmentSigned { +) { let new_funding_script = complete_splice_handshake(initiator, acceptor, channel_id, initiator_contribution.clone()); complete_interactive_funding_negotiation( @@ -82,7 +140,7 @@ pub fn negotiate_splice_tx<'a, 'b, 'c, 'd>( channel_id, initiator_contribution, new_funding_script, - ) + ); } pub fn complete_splice_handshake<'a, 'b, 'c, 'd>( @@ -125,7 +183,7 @@ pub fn complete_splice_handshake<'a, 'b, 'c, 'd>( pub fn complete_interactive_funding_negotiation<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, initiator_contribution: SpliceContribution, new_funding_script: ScriptBuf, -) -> msgs::CommitmentSigned { +) { let node_id_initiator = initiator.node.get_our_node_id(); let node_id_acceptor = acceptor.node.get_our_node_id(); @@ -181,22 +239,15 @@ pub fn complete_interactive_funding_negotiation<'a, 'b, 'c, 'd>( ); acceptor.node.handle_tx_add_output(node_id_initiator, &tx_add_output); } else { - let mut msg_events = initiator.node.get_and_clear_pending_msg_events(); - assert_eq!( - msg_events.len(), - if acceptor_sent_tx_complete { 2 } else { 1 }, - "{msg_events:?}" - ); - if let 
MessageSendEvent::SendTxComplete { ref msg, .. } = msg_events.remove(0) { + let msg_events = initiator.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::SendTxComplete { ref msg, .. } = &msg_events[0] { acceptor.node.handle_tx_complete(node_id_initiator, msg); } else { panic!(); } if acceptor_sent_tx_complete { - if let MessageSendEvent::UpdateHTLCs { mut updates, .. } = msg_events.remove(0) { - return updates.commitment_signed.remove(0); - } - panic!(); + break; } } @@ -212,13 +263,38 @@ pub fn complete_interactive_funding_negotiation<'a, 'b, 'c, 'd>( } pub fn sign_interactive_funding_tx<'a, 'b, 'c, 'd>( - initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, - initial_commit_sig_for_acceptor: msgs::CommitmentSigned, is_0conf: bool, + initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, is_0conf: bool, ) -> (Transaction, Option<(msgs::SpliceLocked, PublicKey)>) { let node_id_initiator = initiator.node.get_our_node_id(); let node_id_acceptor = acceptor.node.get_our_node_id(); assert!(initiator.node.get_and_clear_pending_msg_events().is_empty()); + + let event = get_event!(initiator, Event::FundingTransactionReadyForSigning); + if let Event::FundingTransactionReadyForSigning { + channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } = event + { + let partially_signed_tx = initiator.wallet_source.sign_tx(unsigned_transaction).unwrap(); + initiator + .node + .funding_transaction_signed(&channel_id, &counterparty_node_id, partially_signed_tx) + .unwrap(); + } else { + panic!(); + } + + let msg_events = initiator.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + let initial_commit_sig_for_acceptor = + if let MessageSendEvent::UpdateHTLCs { ref updates, .. 
} = &msg_events[0] { + updates.commitment_signed[0].clone() + } else { + panic!(); + }; acceptor.node.handle_commitment_signed(node_id_initiator, &initial_commit_sig_for_acceptor); let msg_events = acceptor.node.get_and_clear_pending_msg_events(); @@ -235,20 +311,6 @@ pub fn sign_interactive_funding_tx<'a, 'b, 'c, 'd>( panic!(); } - let event = get_event!(initiator, Event::FundingTransactionReadyForSigning); - if let Event::FundingTransactionReadyForSigning { - channel_id, - counterparty_node_id, - unsigned_transaction, - .. - } = event - { - let partially_signed_tx = initiator.wallet_source.sign_tx(unsigned_transaction).unwrap(); - initiator - .node - .funding_transaction_signed(&channel_id, &counterparty_node_id, partially_signed_tx) - .unwrap(); - } let mut msg_events = initiator.node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), if is_0conf { 2 } else { 1 }, "{msg_events:?}"); if let MessageSendEvent::SendTxSignatures { ref msg, .. } = &msg_events[0] { @@ -270,11 +332,26 @@ pub fn sign_interactive_funding_tx<'a, 'b, 'c, 'd>( check_added_monitors(&acceptor, 1); let tx = { - let mut initiator_txn = initiator.tx_broadcaster.txn_broadcast(); + let mut initiator_txn = initiator.tx_broadcaster.txn_broadcast_with_types(); assert_eq!(initiator_txn.len(), 1); - let acceptor_txn = acceptor.tx_broadcaster.txn_broadcast(); - assert_eq!(initiator_txn, acceptor_txn,); - initiator_txn.remove(0) + let mut acceptor_txn = acceptor.tx_broadcaster.txn_broadcast_with_types(); + assert_eq!(acceptor_txn.len(), 1); + // Compare transactions only (not types, as counterparty_node_id differs per perspective) + assert_eq!(initiator_txn[0].0, acceptor_txn[0].0); + let (tx, initiator_tx_type) = initiator_txn.remove(0); + let (_, acceptor_tx_type) = acceptor_txn.remove(0); + // Verify transaction types are Splice for both nodes + assert!( + matches!(initiator_tx_type, TransactionType::Splice { .. 
}), + "Expected TransactionType::Splice, got {:?}", + initiator_tx_type + ); + assert!( + matches!(acceptor_tx_type, TransactionType::Splice { .. }), + "Expected TransactionType::Splice, got {:?}", + acceptor_tx_type + ); + tx }; (tx, splice_locked) } @@ -289,15 +366,14 @@ pub fn splice_channel<'a, 'b, 'c, 'd>( let new_funding_script = complete_splice_handshake(initiator, acceptor, channel_id, initiator_contribution.clone()); - let initial_commit_sig_for_acceptor = complete_interactive_funding_negotiation( + complete_interactive_funding_negotiation( initiator, acceptor, channel_id, initiator_contribution, new_funding_script, ); - let (splice_tx, splice_locked) = - sign_interactive_funding_tx(initiator, acceptor, initial_commit_sig_for_acceptor, false); + let (splice_tx, splice_locked) = sign_interactive_funding_tx(initiator, acceptor, false); assert!(splice_locked.is_none()); expect_splice_pending_event(initiator, &node_id_acceptor); @@ -425,12 +501,10 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { let (_, _, channel_id, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 50_000_000); - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); nodes[0] .node .splice_channel( @@ -585,15 +659,13 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { nodes[0].node.handle_tx_complete(node_id_1, &tx_complete); let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 2); + assert_eq!(msg_events.len(), 1); if let MessageSendEvent::SendTxComplete { .. } = &msg_events[0] { } else { panic!("Unexpected event"); } - if let MessageSendEvent::UpdateHTLCs { .. 
} = &msg_events[1] { - } else { - panic!("Unexpected event"); - } + + let _event = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); if reload { let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); @@ -614,6 +686,8 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { chain_monitor_1c, node_1c ); + // We should have another signing event generated upon reload as they're not persisted. + let _ = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); } else { nodes[0].node.peer_disconnected(node_id_1); nodes[1].node.peer_disconnected(node_id_0); @@ -683,12 +757,10 @@ fn test_config_reject_inbound_splices() { let (_, _, channel_id, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 50_000_000); - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); nodes[0] .node .splice_channel( @@ -746,16 +818,27 @@ fn test_splice_in() { let coinbase_tx1 = provide_anchor_reserves(&nodes); let coinbase_tx2 = provide_anchor_reserves(&nodes); - let initiator_contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(initial_channel_value_sat * 2), - inputs: vec![ + + let added_value = Amount::from_sat(initial_channel_value_sat * 2); + let change_script = ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()); + let fees = Amount::from_sat(321); + + let initiator_contribution = SpliceContribution::splice_in( + added_value, + vec![ FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), ], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + Some(change_script.clone()), + ); let splice_tx = splice_channel(&nodes[0], 
&nodes[1], channel_id, initiator_contribution); + let expected_change = Amount::ONE_BTC * 2 - added_value - fees; + assert_eq!( + splice_tx.output.iter().find(|txout| txout.script_pubkey == change_script).unwrap().value, + expected_change, + ); + mine_transaction(&nodes[0], &splice_tx); mine_transaction(&nodes[1], &splice_tx); @@ -785,32 +868,194 @@ fn test_splice_out() { let _ = send_payment(&nodes[0], &[&nodes[1]], 100_000); - let initiator_contribution = SpliceContribution::SpliceOut { - outputs: vec![ + let initiator_contribution = SpliceContribution::splice_out(vec![ + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ]); + + let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); + let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); + + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); + + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); + let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); +} + +#[test] +fn test_splice_in_and_out() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut config = test_default_channel_config(); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]); + let nodes = create_network(2, &node_cfgs, 
&node_chanmgrs); + + let initial_channel_value_sat = 100_000; + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_value_sat, 0); + + let _ = send_payment(&nodes[0], &[&nodes[1]], 100_000); + + let coinbase_tx1 = provide_anchor_reserves(&nodes); + let coinbase_tx2 = provide_anchor_reserves(&nodes); + + // Contribute a net negative value, with fees taken from the contributed inputs and the + // remaining value sent to change + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + let added_value = Amount::from_sat(htlc_limit_msat / 1000); + let removed_value = added_value * 2; + let change_script = ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()); + let fees = if cfg!(feature = "grind_signatures") { + Amount::from_sat(383) + } else { + Amount::from_sat(384) + }; + + assert!(htlc_limit_msat > initial_channel_value_sat / 2 * 1000); + + let initiator_contribution = SpliceContribution::splice_in_and_out( + added_value, + vec![ + FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), + FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), + ], + vec![ TxOut { - value: Amount::from_sat(initial_channel_value_sat / 4), + value: removed_value / 2, script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), }, TxOut { - value: Amount::from_sat(initial_channel_value_sat / 4), + value: removed_value / 2, script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), }, ], - }; + Some(change_script.clone()), + ); let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); + let expected_change = Amount::ONE_BTC * 2 - added_value - fees; + assert_eq!( + splice_tx.output.iter().find(|txout| txout.script_pubkey == change_script).unwrap().value, + expected_change, + ); + mine_transaction(&nodes[0], &splice_tx); mine_transaction(&nodes[1], &splice_tx); let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; - 
assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); + assert!(htlc_limit_msat < added_value.to_sat() * 1000); let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; - assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); + assert!(htlc_limit_msat < added_value.to_sat() * 1000); + let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); + + let coinbase_tx1 = provide_anchor_reserves(&nodes); + let coinbase_tx2 = provide_anchor_reserves(&nodes); + + // Contribute a net positive value, with fees taken from the contributed inputs and the + // remaining value sent to change + let added_value = Amount::from_sat(initial_channel_value_sat * 2); + let removed_value = added_value / 2; + let change_script = ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()); + let fees = if cfg!(feature = "grind_signatures") { + Amount::from_sat(383) + } else { + Amount::from_sat(384) + }; + + let initiator_contribution = SpliceContribution::splice_in_and_out( + added_value, + vec![ + FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), + FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), + ], + vec![ + TxOut { + value: removed_value / 2, + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: removed_value / 2, + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ], + Some(change_script.clone()), + ); + + let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); + let expected_change = Amount::ONE_BTC * 2 - added_value - fees; + assert_eq!( + splice_tx.output.iter().find(|txout| txout.script_pubkey == change_script).unwrap().value, + expected_change, + ); + + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + + let htlc_limit_msat = 
nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + assert_eq!(htlc_limit_msat, 0); + + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); + + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + assert!(htlc_limit_msat > initial_channel_value_sat / 2 * 1000); let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); + + let coinbase_tx1 = provide_anchor_reserves(&nodes); + let coinbase_tx2 = provide_anchor_reserves(&nodes); + + // Fail adding a net contribution value of zero + let added_value = Amount::from_sat(initial_channel_value_sat * 2); + let removed_value = added_value; + let change_script = ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()); + + let initiator_contribution = SpliceContribution::splice_in_and_out( + added_value, + vec![ + FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), + FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), + ], + vec![ + TxOut { + value: removed_value / 2, + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: removed_value / 2, + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ], + Some(change_script), + ); + + assert_eq!( + nodes[0].node.splice_channel( + &channel_id, + &nodes[1].node.get_our_node_id(), + initiator_contribution, + FEERATE_FLOOR_SATS_PER_KW, + None, + ), + Err(APIError::APIMisuseError { + err: format!("Channel {} cannot be spliced; contribution cannot be zero", channel_id), + }), + ); } #[cfg(test)] @@ -836,8 +1081,7 @@ fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: // Tests that we're able to enforce HTLCs onchain during the different stages of a splice. 
let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_anchors_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_id_0 = nodes[0].node.get_our_node_id(); @@ -854,11 +1098,11 @@ fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: let payment_amount = 1_000_000; let (preimage1, payment_hash1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_amount); let splice_in_amount = initial_channel_capacity / 2; - let initiator_contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let initiator_contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); let (preimage2, payment_hash2, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_amount); let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS; @@ -1052,37 +1296,27 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { route_payment(&nodes[0], &[&nodes[1]], 1_000_000); // Negotiate the splice up until the nodes exchange `tx_complete`. 
- let initiator_contribution = SpliceContribution::SpliceOut { - outputs: vec![ - TxOut { - value: Amount::from_sat(initial_channel_value_sat / 4), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::from_sat(initial_channel_value_sat / 4), - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - let initial_commit_sig_for_acceptor = - negotiate_splice_tx(&nodes[0], &nodes[1], channel_id, initiator_contribution); - assert_eq!(initial_commit_sig_for_acceptor.htlc_signatures.len(), 1); - let initial_commit_sig_for_initiator = get_htlc_update_msgs(&nodes[1], &node_id_0); - assert_eq!(initial_commit_sig_for_initiator.commitment_signed.len(), 1); - assert_eq!(initial_commit_sig_for_initiator.commitment_signed[0].htlc_signatures.len(), 1); + let initiator_contribution = SpliceContribution::splice_out(vec![ + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ]); + negotiate_splice_tx(&nodes[0], &nodes[1], channel_id, initiator_contribution); - macro_rules! reconnect_nodes { - ($f: expr) => { - nodes[0].node.peer_disconnected(node_id_1); - nodes[1].node.peer_disconnected(node_id_0); - let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); - $f(&mut reconnect_args); - reconnect_nodes(reconnect_args); - }; - } + // Node 0 should have a signing event to handle since they had a contribution in the splice. + // Node 1 won't and will immediately try to send their initial `commitment_signed`. 
+ let signing_event = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - // Reestablishing now should force both nodes to retransmit their initial `commitment_signed` - // message as they were never delivered. + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let _ = get_htlc_update_msgs(&nodes[1], &node_id_0); + + // Disconnect them, and handle the signing event on the initiator side. if reload { let encoded_monitor_0 = get_monitor!(nodes[0], channel_id).encode(); reload_node!( @@ -1102,6 +1336,8 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { chain_monitor_1a, node_1a ); + // We should have another signing event generated upon reload as they're not persisted. + let _ = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); if async_monitor_update { persister_0a.set_update_ret(ChannelMonitorUpdateStatus::InProgress); persister_1a.set_update_ret(ChannelMonitorUpdateStatus::InProgress); @@ -1115,6 +1351,17 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { } } + if let Event::FundingTransactionReadyForSigning { unsigned_transaction, .. } = signing_event { + let tx = nodes[0].wallet_source.sign_tx(unsigned_transaction).unwrap(); + nodes[0].node.funding_transaction_signed(&channel_id, &node_id_1, tx).unwrap(); + } + + // Since they're not connected, no messages should be sent. + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Reestablishing now should force both nodes to retransmit their initial `commitment_signed` + // message as they were never delivered. 
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_interactive_tx_commit_sig = (true, true); reconnect_nodes(reconnect_args); @@ -1124,6 +1371,16 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { check_added_monitors(&nodes[0], 1); check_added_monitors(&nodes[1], 1); + macro_rules! reconnect_nodes { + ($f: expr) => { + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + $f(&mut reconnect_args); + reconnect_nodes(reconnect_args); + }; + } + if async_monitor_update { // Reconnecting again should result in no messages/events being generated as the monitor // update is pending. @@ -1138,11 +1395,9 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); } - // Node 0 should have a signing event to handle since they had a contribution in the splice. - // Node 1 won't and will immediately send `tx_signatures`. - let _ = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); + // Both nodes should have their `tx_signatures` ready after completing the monitor update, but + // node 0 has to wait for node 1 to send theirs first. 
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let _ = get_event_msg!(nodes[1], MessageSendEvent::SendTxSignatures, node_id_0); // Reconnecting now should force node 1 to retransmit their `tx_signatures` since it was never @@ -1151,18 +1406,6 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { reconnect_nodes!(|reconnect_args: &mut ReconnectArgs| { reconnect_args.send_interactive_tx_sigs = (true, false); }); - let _ = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); - - // Reconnect again to make sure node 1 doesn't retransmit `tx_signatures` unnecessarily as it - // was delivered in the previous reestablishment. - reconnect_nodes!(|_| {}); - - // Have node 0 sign, we should see its `tx_signatures` go out. - let event = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); - if let Event::FundingTransactionReadyForSigning { unsigned_transaction, .. 
} = event { - let tx = nodes[0].wallet_source.sign_tx(unsigned_transaction).unwrap(); - nodes[0].node.funding_transaction_signed(&channel_id, &node_id_1, tx).unwrap(); - } let _ = get_event_msg!(nodes[0], MessageSendEvent::SendTxSignatures, node_id_1); expect_splice_pending_event(&nodes[0], &node_id_1); @@ -1301,7 +1544,6 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { let (chain_monitor_0a, chain_monitor_0b, chain_monitor_1a, chain_monitor_1b); let mut config = test_default_channel_config(); if use_0conf { - config.manually_accept_inbound_channels = true; config.channel_handshake_limits.trust_own_funding_0conf = true; } let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); @@ -1340,12 +1582,10 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { nodes[1].node.peer_disconnected(node_id_0); let splice_out_sat = initial_channel_value_sat / 4; - let node_0_contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(splice_out_sat), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let node_0_contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(splice_out_sat), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); nodes[0] .node .splice_channel( @@ -1358,12 +1598,10 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { .unwrap(); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - let node_1_contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(splice_out_sat), - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }], - }; + let node_1_contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(splice_out_sat), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }]); nodes[1] .node 
.splice_channel( @@ -1417,25 +1655,22 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { .unwrap(); // Negotiate the first splice to completion. - let initial_commit_sig = { - nodes[1].node.handle_splice_init(node_id_0, &splice_init); - let splice_ack = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceAck, node_id_0); - nodes[0].node.handle_splice_ack(node_id_1, &splice_ack); - let new_funding_script = chan_utils::make_funding_redeemscript( - &splice_init.funding_pubkey, - &splice_ack.funding_pubkey, - ) - .to_p2wsh(); - complete_interactive_funding_negotiation( - &nodes[0], - &nodes[1], - channel_id, - node_0_contribution, - new_funding_script, - ) - }; - let (splice_tx, splice_locked) = - sign_interactive_funding_tx(&nodes[0], &nodes[1], initial_commit_sig, use_0conf); + nodes[1].node.handle_splice_init(node_id_0, &splice_init); + let splice_ack = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceAck, node_id_0); + nodes[0].node.handle_splice_ack(node_id_1, &splice_ack); + let new_funding_script = chan_utils::make_funding_redeemscript( + &splice_init.funding_pubkey, + &splice_ack.funding_pubkey, + ) + .to_p2wsh(); + complete_interactive_funding_negotiation( + &nodes[0], + &nodes[1], + channel_id, + node_0_contribution, + new_funding_script, + ); + let (splice_tx, splice_locked) = sign_interactive_funding_tx(&nodes[0], &nodes[1], use_0conf); expect_splice_pending_event(&nodes[0], &node_id_1); expect_splice_pending_event(&nodes[1], &node_id_0); @@ -1559,25 +1794,22 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { } let splice_init = get_event_msg!(nodes[1], MessageSendEvent::SendSpliceInit, node_id_0); - let initial_commit_sig = { - nodes[0].node.handle_splice_init(node_id_1, &splice_init); - let splice_ack = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceAck, node_id_1); - nodes[1].node.handle_splice_ack(node_id_0, &splice_ack); - let new_funding_script = 
chan_utils::make_funding_redeemscript( - &splice_init.funding_pubkey, - &splice_ack.funding_pubkey, - ) - .to_p2wsh(); - complete_interactive_funding_negotiation( - &nodes[1], - &nodes[0], - channel_id, - node_1_contribution, - new_funding_script, - ) - }; - let (splice_tx, splice_locked) = - sign_interactive_funding_tx(&nodes[1], &nodes[0], initial_commit_sig, use_0conf); + nodes[0].node.handle_splice_init(node_id_1, &splice_init); + let splice_ack = get_event_msg!(nodes[0], MessageSendEvent::SendSpliceAck, node_id_1); + nodes[1].node.handle_splice_ack(node_id_0, &splice_ack); + let new_funding_script = chan_utils::make_funding_redeemscript( + &splice_init.funding_pubkey, + &splice_ack.funding_pubkey, + ) + .to_p2wsh(); + complete_interactive_funding_negotiation( + &nodes[1], + &nodes[0], + channel_id, + node_1_contribution, + new_funding_script, + ); + let (splice_tx, splice_locked) = sign_interactive_funding_tx(&nodes[1], &nodes[0], use_0conf); expect_splice_pending_event(&nodes[0], &node_id_1); expect_splice_pending_event(&nodes[1], &node_id_0); @@ -1600,14 +1832,13 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { fn disconnect_on_unexpected_interactive_tx_message() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_anchors_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let initiator = &nodes[0]; let acceptor = &nodes[1]; - let _node_id_initiator = initiator.node.get_our_node_id(); + let node_id_initiator = initiator.node.get_our_node_id(); let node_id_acceptor = acceptor.node.get_our_node_id(); let initial_channel_capacity = 100_000; @@ -1616,19 +1847,18 @@ fn disconnect_on_unexpected_interactive_tx_message() { let coinbase_tx = 
provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Complete interactive-tx construction, but fail by having the acceptor send a duplicate // tx_complete instead of commitment_signed. - let _ = negotiate_splice_tx(initiator, acceptor, channel_id, contribution.clone()); + negotiate_splice_tx(initiator, acceptor, channel_id, contribution.clone()); - let mut msg_events = acceptor.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - assert!(matches!(msg_events.remove(0), MessageSendEvent::UpdateHTLCs { .. 
})); + let _ = get_event!(initiator, Event::FundingTransactionReadyForSigning); + let _ = get_htlc_update_msgs(acceptor, &node_id_initiator); let tx_complete = msgs::TxComplete { channel_id }; initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); @@ -1640,8 +1870,7 @@ fn disconnect_on_unexpected_interactive_tx_message() { fn fail_splice_on_interactive_tx_error() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_anchors_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let initiator = &nodes[0]; @@ -1656,11 +1885,11 @@ fn fail_splice_on_interactive_tx_error() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Fail during interactive-tx construction by having the acceptor echo back tx_add_input instead // of sending tx_complete. The failure occurs because the serial id will have the wrong parity. @@ -1688,66 +1917,13 @@ fn fail_splice_on_interactive_tx_error() { let tx_abort = get_event_msg!(acceptor, MessageSendEvent::SendTxAbort, node_id_initiator); initiator.node.handle_tx_abort(node_id_acceptor, &tx_abort); - - // Fail signing the commitment transaction, which prevents the initiator from sending - // tx_complete. 
- initiator.disable_channel_signer_op( - &node_id_acceptor, - &channel_id, - SignerOp::SignCounterpartyCommitment, - ); - let _ = complete_splice_handshake(initiator, acceptor, channel_id, contribution.clone()); - - let tx_add_input = - get_event_msg!(initiator, MessageSendEvent::SendTxAddInput, node_id_acceptor); - acceptor.node.handle_tx_add_input(node_id_initiator, &tx_add_input); - - let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); - initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); - - let tx_add_input = - get_event_msg!(initiator, MessageSendEvent::SendTxAddInput, node_id_acceptor); - acceptor.node.handle_tx_add_input(node_id_initiator, &tx_add_input); - - let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); - initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); - - let tx_add_output = - get_event_msg!(initiator, MessageSendEvent::SendTxAddOutput, node_id_acceptor); - acceptor.node.handle_tx_add_output(node_id_initiator, &tx_add_output); - - let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); - initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); - - let tx_add_output = - get_event_msg!(initiator, MessageSendEvent::SendTxAddOutput, node_id_acceptor); - acceptor.node.handle_tx_add_output(node_id_initiator, &tx_add_output); - - let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); - initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); - - let event = get_event!(initiator, Event::SpliceFailed); - match event { - Event::SpliceFailed { contributed_inputs, .. 
} => { - assert_eq!(contributed_inputs.len(), 1); - assert_eq!(contributed_inputs[0], contribution.inputs()[0].outpoint()); - }, - _ => panic!("Expected Event::SpliceFailed"), - } - - let tx_abort = get_event_msg!(initiator, MessageSendEvent::SendTxAbort, node_id_acceptor); - acceptor.node.handle_tx_abort(node_id_initiator, &tx_abort); - - let tx_abort = get_event_msg!(acceptor, MessageSendEvent::SendTxAbort, node_id_initiator); - initiator.node.handle_tx_abort(node_id_acceptor, &tx_abort); } #[test] fn fail_splice_on_tx_abort() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_anchors_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let initiator = &nodes[0]; @@ -1762,11 +1938,11 @@ fn fail_splice_on_tx_abort() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Fail during interactive-tx construction by having the acceptor send tx_abort instead of // tx_complete. 
@@ -1800,8 +1976,7 @@ fn fail_splice_on_tx_abort() { fn fail_splice_on_channel_close() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_anchors_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let initiator = &nodes[0]; @@ -1816,11 +1991,11 @@ fn fail_splice_on_channel_close() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Close the channel before completion of interactive-tx construction. 
let _ = complete_splice_handshake(initiator, acceptor, channel_id, contribution.clone()); @@ -1851,8 +2026,7 @@ fn fail_splice_on_channel_close() { fn fail_quiescent_action_on_channel_close() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let config = test_default_anchors_channel_config(); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let initiator = &nodes[0]; @@ -1867,11 +2041,11 @@ fn fail_quiescent_action_on_channel_close() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Close the channel before completion of STFU handshake. initiator @@ -1960,23 +2134,19 @@ fn do_test_splice_with_inflight_htlc_forward_and_resolution(expire_scid_pre_forw // Splice both channels, lock them, and connect enough blocks to trigger the legacy SCID pruning // logic while the HTLC is still pending. 
- let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); let splice_tx_0_1 = splice_channel(&nodes[0], &nodes[1], channel_id_0_1, contribution); for node in &nodes { mine_transaction(node, &splice_tx_0_1); } - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }]); let splice_tx_1_2 = splice_channel(&nodes[1], &nodes[2], channel_id_1_2, contribution); for node in &nodes { mine_transaction(node, &splice_tx_1_2); @@ -2059,3 +2229,233 @@ fn test_splice_with_inflight_htlc_forward_and_resolution() { do_test_splice_with_inflight_htlc_forward_and_resolution(true); do_test_splice_with_inflight_htlc_forward_and_resolution(false); } + +#[test] +fn test_splice_buffer_commitment_signed_until_funding_tx_signed() { + // Test that when the counterparty sends their initial `commitment_signed` before the user has + // called `funding_transaction_signed`, we buffer the message and process it at the end of + // `funding_transaction_signed`. This allows the user to cancel the splice negotiation if + // desired without having queued an irreversible monitor update. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_id_0 = nodes[0].node.get_our_node_id(); + let node_id_1 = nodes[1].node.get_our_node_id(); + + let initial_channel_value_sat = 100_000; + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_value_sat, 0); + + // Negotiate a splice-out where only the initiator (node 0) has a contribution. + // This means node 1 will send their commitment_signed immediately after tx_complete. + let initiator_contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); + negotiate_splice_tx(&nodes[0], &nodes[1], channel_id, initiator_contribution); + + // Node 0 (initiator with contribution) should have a signing event to handle. + let signing_event = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); + + // Node 1 (acceptor with no contribution) won't have a signing event and will immediately + // send their initial commitment_signed. + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let acceptor_commit_sig = get_htlc_update_msgs(&nodes[1], &node_id_0); + + // Deliver the acceptor's commitment_signed to the initiator BEFORE the initiator has called + // funding_transaction_signed. The message should be buffered, not processed. + nodes[0].node.handle_commitment_signed(node_id_1, &acceptor_commit_sig.commitment_signed[0]); + + // No monitor update should have happened since the message is buffered. + check_added_monitors(&nodes[0], 0); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + // Now handle the signing event and call `funding_transaction_signed`. 
+ if let Event::FundingTransactionReadyForSigning { + channel_id: event_channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } = signing_event + { + assert_eq!(event_channel_id, channel_id); + assert_eq!(counterparty_node_id, node_id_1); + + let partially_signed_tx = nodes[0].wallet_source.sign_tx(unsigned_transaction).unwrap(); + nodes[0] + .node + .funding_transaction_signed(&channel_id, &node_id_1, partially_signed_tx) + .unwrap(); + } else { + panic!("Expected FundingTransactionReadyForSigning event"); + } + + // After funding_transaction_signed: + // 1. The initiator should send their commitment_signed + // 2. The buffered commitment_signed from the acceptor should be processed (monitor update) + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + let initiator_commit_sig = + if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = &msg_events[0] { + updates.commitment_signed[0].clone() + } else { + panic!("Expected UpdateHTLCs message"); + }; + + // The buffered commitment_signed should have been processed, resulting in a monitor update. + check_added_monitors(&nodes[0], 1); + + // Complete the rest of the flow normally. + nodes[1].node.handle_commitment_signed(node_id_0, &initiator_commit_sig); + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::SendTxSignatures { ref msg, .. } = &msg_events[0] { + nodes[0].node.handle_tx_signatures(node_id_1, msg); + } else { + panic!("Expected SendTxSignatures message"); + } + check_added_monitors(&nodes[1], 1); + + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1, "{msg_events:?}"); + if let MessageSendEvent::SendTxSignatures { ref msg, .. 
} = &msg_events[0] { + nodes[1].node.handle_tx_signatures(node_id_0, msg); + } else { + panic!("Expected SendTxSignatures message"); + } + + expect_splice_pending_event(&nodes[0], &node_id_1); + expect_splice_pending_event(&nodes[1], &node_id_0); + + // Both nodes should broadcast the splice transaction. + let splice_tx = { + let mut txn_0 = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn_0.len(), 1); + let txn_1 = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(txn_0, txn_1); + txn_0.remove(0) + }; + + // Verify the channel is operational by sending a payment. + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); + + // Lock the splice by confirming the transaction. + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); + + // Verify the channel is still operational by sending another payment. + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); +} + +#[test] +fn test_splice_buffer_invalid_commitment_signed_closes_channel() { + // Test that when the counterparty sends an invalid `commitment_signed` (with a bad signature) + // before the user has called `funding_transaction_signed`, the channel is closed with an error + // when `ChannelManager::funding_transaction_signed` processes the buffered message. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_id_0 = nodes[0].node.get_our_node_id(); + let node_id_1 = nodes[1].node.get_our_node_id(); + + let initial_channel_value_sat = 100_000; + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_value_sat, 0); + + // Negotiate a splice-out where only the initiator (node 0) has a contribution. 
+ // This means node 1 will send their commitment_signed immediately after tx_complete. + let initiator_contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); + negotiate_splice_tx(&nodes[0], &nodes[1], channel_id, initiator_contribution); + + // Node 0 (initiator with contribution) should have a signing event to handle. + let signing_event = get_event!(nodes[0], Event::FundingTransactionReadyForSigning); + + // Node 1 (acceptor with no contribution) won't have a signing event and will immediately + // send their initial commitment_signed. + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let mut acceptor_commit_sig = get_htlc_update_msgs(&nodes[1], &node_id_0); + + // Invalidate the signature by modifying one byte. This will cause signature verification + // to fail when the buffered message is processed. + let original_sig = acceptor_commit_sig.commitment_signed[0].signature; + let mut sig_bytes = original_sig.serialize_compact(); + sig_bytes[0] ^= 0x01; // Flip a bit to corrupt the signature + acceptor_commit_sig.commitment_signed[0].signature = + Signature::from_compact(&sig_bytes).unwrap(); + + // Deliver the acceptor's invalid commitment_signed to the initiator BEFORE the initiator has + // called funding_transaction_signed. The message should be buffered, not processed. + nodes[0].node.handle_commitment_signed(node_id_1, &acceptor_commit_sig.commitment_signed[0]); + + // No monitor update should have happened since the message is buffered. + check_added_monitors(&nodes[0], 0); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + // Now handle the signing event and call `funding_transaction_signed`. + // This should process the buffered invalid commitment_signed and close the channel. 
+ if let Event::FundingTransactionReadyForSigning { + channel_id: event_channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } = signing_event + { + assert_eq!(event_channel_id, channel_id); + assert_eq!(counterparty_node_id, node_id_1); + + let partially_signed_tx = nodes[0].wallet_source.sign_tx(unsigned_transaction).unwrap(); + nodes[0] + .node + .funding_transaction_signed(&channel_id, &node_id_1, partially_signed_tx) + .unwrap(); + } else { + panic!("Expected FundingTransactionReadyForSigning event"); + } + + // After funding_transaction_signed: + // 1. The initiator sends its commitment_signed (UpdateHTLCs message). + // 2. The buffered invalid commitment_signed from the acceptor is processed, causing the + // channel to close due to the invalid signature. + // We expect 3 message events: UpdateHTLCs, BroadcastChannelUpdate, and HandleError. + let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 3, "{msg_events:?}"); + match &msg_events[0] { + MessageSendEvent::UpdateHTLCs { ref updates, .. } => { + assert!(!updates.commitment_signed.is_empty()); + }, + _ => panic!("Expected UpdateHTLCs message, got {:?}", msg_events[0]), + } + match &msg_events[1] { + MessageSendEvent::HandleError { + action: msgs::ErrorAction::SendErrorMessage { ref msg }, + .. + } => { + assert!(msg.data.contains("Invalid commitment tx signature from peer")); + }, + _ => panic!("Expected HandleError with SendErrorMessage, got {:?}", msg_events[1]), + } + match &msg_events[2] { + MessageSendEvent::BroadcastChannelUpdate { ref msg, .. 
} => { + assert_eq!(msg.contents.channel_flags & 2, 2); + }, + _ => panic!("Expected BroadcastChannelUpdate, got {:?}", msg_events[2]), + } + + let err = "Invalid commitment tx signature from peer".to_owned(); + let reason = ClosureReason::ProcessingError { err }; + check_closed_events( + &nodes[0], + &[ExpectedCloseEvent::from_id_reason(channel_id, false, reason)], + ); + check_added_monitors(&nodes[0], 1); +} diff --git a/lightning/src/ln/types.rs b/lightning/src/ln/types.rs index 5d72ba685cb..fd8ccbae382 100644 --- a/lightning/src/ln/types.rs +++ b/lightning/src/ln/types.rs @@ -24,7 +24,6 @@ use bitcoin::hashes::{sha256::Hash as Sha256, Hash as _, HashEngine as _}; use bitcoin::hex::display::impl_fmt_traits; use core::borrow::Borrow; -use core::ops::Deref; /// A unique 32-byte identifier for a channel. /// Depending on how the ID is generated, several varieties are distinguished @@ -53,10 +52,7 @@ impl ChannelId { } /// Create a _temporary_ channel ID randomly, based on an entropy source. 
- pub fn temporary_from_entropy_source(entropy_source: &ES) -> Self - where - ES::Target: EntropySource, - { + pub fn temporary_from_entropy_source(entropy_source: &ES) -> Self { Self(entropy_source.get_secure_random_bytes()) } diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs index 060496d3bee..24ae8525450 100644 --- a/lightning/src/ln/update_fee_tests.rs +++ b/lightning/src/ln/update_fee_tests.rs @@ -385,18 +385,13 @@ pub fn do_test_update_fee_that_funder_cannot_afford(channel_type_features: Chann let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let mut default_config = test_default_channel_config(); + let mut cfg = test_legacy_channel_config(); if channel_type_features == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() { - default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - // this setting is also needed to create an anchor channel - default_config.manually_accept_inbound_channels = true; + cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; } - let node_chanmgrs = create_node_chanmgrs( - 2, - &node_cfgs, - &[Some(default_config.clone()), Some(default_config.clone())], - ); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(cfg.clone()), Some(cfg.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -413,8 +408,7 @@ pub fn do_test_update_fee_that_funder_cannot_afford(channel_type_features: Chann ); let channel_id = chan.2; let secp_ctx = Secp256k1::new(); - let bs_channel_reserve_sats = - get_holder_selected_channel_reserve_satoshis(channel_value, &default_config); + let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &cfg); let (anchor_outputs_value_sats, outputs_num_no_htlcs) = if channel_type_features.supports_anchors_zero_fee_htlc_tx() { (ANCHOR_OUTPUT_VALUE_SATOSHI * 2, 4) @@ -548,13 
+542,12 @@ pub fn test_update_fee_that_saturates_subs() { // on the commitment transaction that is greater than her balance, we saturate the subtractions, // and force close the channel. - let mut default_config = test_default_channel_config(); + let mut cfg = test_legacy_channel_config(); let secp_ctx = Secp256k1::new(); let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(cfg.clone()), Some(cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -870,7 +863,9 @@ pub fn test_chan_init_feerate_unaffordability() { let mut chanmon_cfgs = create_chanmon_cfgs(2); let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -898,6 +893,17 @@ pub fn test_chan_init_feerate_unaffordability() { get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); open_channel_msg.push_msat += 1; nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. 
} => { + assert!(nodes[1] + .node + .accept_inbound_channel(temporary_channel_id, counterparty_node_id, 42, None,) + .is_err()); + }, + _ => panic!("Unexpected event"), + } let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); @@ -1024,17 +1030,14 @@ pub fn do_cannot_afford_on_holding_cell_release( // update_fee from its holding cell, we do not generate any msg events let chanmon_cfgs = create_chanmon_cfgs(2); - let mut default_config = test_default_channel_config(); - default_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = - 100; + let mut cfg = test_legacy_channel_config(); + cfg.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; if channel_type_features.supports_anchors_zero_fee_htlc_tx() { - default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - default_config.manually_accept_inbound_channels = true; + cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; } let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(cfg.clone()), Some(cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1124,7 +1127,7 @@ pub fn do_cannot_afford_on_holding_cell_release( if let MessageSendEvent::SendRevokeAndACK { node_id, msg } = events.pop().unwrap() { assert_eq!(node_id, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } else { panic!(); } @@ -1211,13 +1214,12 @@ pub fn do_can_afford_given_trimmed_htlcs(inequality_regions: core::cmp::Ordering let chanmon_cfgs = create_chanmon_cfgs(2); - let mut default_config = test_default_channel_config(); - 
default_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = - 100; + let mut legacy_cfg = test_legacy_channel_config(); + legacy_cfg.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = - create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1372,7 +1374,6 @@ pub fn test_zero_fee_commitments_no_update_fee() { // they'll disconnect and warn if they receive them. let mut cfg = test_default_channel_config(); cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; - cfg.manually_accept_inbound_channels = true; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); diff --git a/lightning/src/ln/wire.rs b/lightning/src/ln/wire.rs index bc1d83adb68..a2078ce4256 100644 --- a/lightning/src/ln/wire.rs +++ b/lightning/src/ln/wire.rs @@ -15,6 +15,7 @@ use crate::io; use crate::ln::msgs; use crate::util::ser::{LengthLimitedRead, LengthReadable, Readable, Writeable, Writer}; +use core::ops::Deref; /// Trait to be implemented by custom message (unrelated to the channel/gossip LN layers) /// decoders. @@ -30,6 +31,15 @@ pub trait CustomMessageReader { ) -> Result, msgs::DecodeError>; } +impl> CustomMessageReader for C { + type CustomMessage = T::CustomMessage; + fn read( + &self, message_type: u16, buffer: &mut R, + ) -> Result, msgs::DecodeError> { + self.deref().read(message_type, buffer) + } +} + // TestEq is a dummy trait which requires PartialEq when built in testing, and otherwise is // blanket-implemented for all types. @@ -244,23 +254,21 @@ impl Message { /// # Errors /// /// Returns an error if the message payload could not be decoded as the specified type. 
-pub(crate) fn read( +pub(crate) fn read>( buffer: &mut R, custom_reader: H, ) -> Result, (msgs::DecodeError, Option)> where T: core::fmt::Debug + Type + Writeable, - H::Target: CustomMessageReader, { let message_type = ::read(buffer).map_err(|e| (e, None))?; do_read(buffer, message_type, custom_reader).map_err(|e| (e, Some(message_type))) } -fn do_read( +fn do_read>( buffer: &mut R, message_type: u16, custom_reader: H, ) -> Result, msgs::DecodeError> where T: core::fmt::Debug + Type + Writeable, - H::Target: CustomMessageReader, { match message_type { msgs::Init::TYPE => { @@ -425,19 +433,6 @@ where } } -/// Writes a message to the data buffer encoded as a 2-byte big-endian type and a variable-length -/// payload. -/// -/// # Errors -/// -/// Returns an I/O error if the write could not be completed. -pub(crate) fn write( - message: &M, buffer: &mut W, -) -> Result<(), io::Error> { - message.type_id().write(buffer)?; - message.write(buffer) -} - mod encode { /// Defines a constant type identifier for reading messages from the wire. 
pub trait Encode { @@ -737,34 +732,6 @@ mod tests { } } - #[test] - fn write_message_with_type() { - let message = msgs::Pong { byteslen: 2u16 }; - let mut buffer = Vec::new(); - assert!(write(&message, &mut buffer).is_ok()); - - let type_length = ::core::mem::size_of::(); - let (type_bytes, payload_bytes) = buffer.split_at(type_length); - assert_eq!(u16::from_be_bytes(type_bytes.try_into().unwrap()), msgs::Pong::TYPE); - assert_eq!(payload_bytes, &ENCODED_PONG[type_length..]); - } - - #[test] - fn read_message_encoded_with_write() { - let message = msgs::Pong { byteslen: 2u16 }; - let mut buffer = Vec::new(); - assert!(write(&message, &mut buffer).is_ok()); - - let decoded_message = read(&mut &buffer[..], &IgnoringMessageHandler {}).unwrap(); - match decoded_message { - Message::Pong(msgs::Pong { byteslen: 2u16 }) => (), - Message::Pong(msgs::Pong { byteslen }) => { - panic!("Expected byteslen {}; found: {}", message.byteslen, byteslen); - }, - _ => panic!("Expected pong message; found message type: {}", decoded_message.type_id()), - } - } - #[test] fn is_even_message_type() { let message = Message::<()>::Unknown(42); @@ -917,7 +884,7 @@ mod tests { #[test] fn read_custom_message() { let buffer = [35, 40]; - let decoded_msg = read(&mut &buffer[..], &TestCustomMessageReader {}).unwrap(); + let decoded_msg = read(&mut &buffer[..], TestCustomMessageReader {}).unwrap(); match decoded_msg { Message::Custom(custom) => { assert_eq!(custom.type_id(), CUSTOM_MESSAGE_TYPE); @@ -930,7 +897,7 @@ mod tests { #[test] fn read_with_custom_reader_unknown_message_type() { let buffer = [35, 42]; - let decoded_msg = read(&mut &buffer[..], &TestCustomMessageReader {}).unwrap(); + let decoded_msg = read(&mut &buffer[..], TestCustomMessageReader {}).unwrap(); match decoded_msg { Message::Unknown(_) => {}, _ => panic!("Expected unknown message, found message type: {}", decoded_msg.type_id()), diff --git a/lightning/src/ln/zero_fee_commitment_tests.rs 
b/lightning/src/ln/zero_fee_commitment_tests.rs index f94066789c1..d287b6e3de1 100644 --- a/lightning/src/ln/zero_fee_commitment_tests.rs +++ b/lightning/src/ln/zero_fee_commitment_tests.rs @@ -18,7 +18,6 @@ fn test_p2a_anchor_values_under_trims_and_rounds() { let mut user_cfg = test_default_channel_config(); user_cfg.channel_handshake_config.our_htlc_minimum_msat = 1; user_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; - user_cfg.manually_accept_inbound_channels = true; let configs = [Some(user_cfg.clone()), Some(user_cfg)]; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &configs); @@ -125,7 +124,6 @@ fn test_htlc_claim_chunking() { user_cfg.channel_handshake_config.our_htlc_minimum_msat = 1; user_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; user_cfg.channel_handshake_config.our_max_accepted_htlcs = 114; - user_cfg.manually_accept_inbound_channels = true; let configs = [Some(user_cfg.clone()), Some(user_cfg)]; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &configs); @@ -158,7 +156,7 @@ fn test_htlc_claim_chunking() { for (preimage, payment_hash) in node_1_preimages { nodes[1].node.claim_funds(preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, NONDUST_HTLC_AMT_MSAT); } nodes[0].node.get_and_clear_pending_msg_events(); @@ -188,12 +186,12 @@ fn test_htlc_claim_chunking() { assert_eq!(htlc_claims[1].output.len(), 24); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], CHAN_CAPACITY); assert!(nodes[0].node.list_channels().is_empty()); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; 
check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], CHAN_CAPACITY); assert!(nodes[1].node.list_channels().is_empty()); @@ -314,7 +312,6 @@ fn test_anchor_tx_too_big() { user_cfg.channel_handshake_config.our_htlc_minimum_msat = 1; user_cfg.channel_handshake_config.negotiate_anchor_zero_fee_commitments = true; user_cfg.channel_handshake_config.our_max_accepted_htlcs = 114; - user_cfg.manually_accept_inbound_channels = true; let configs = [Some(user_cfg.clone()), Some(user_cfg)]; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &configs); diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 94a4534c61a..0bb98777227 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -10,7 +10,6 @@ //! Provides data structures and functions for creating and managing Offers messages, //! facilitating communication, and handling BOLT12 messages and payments. -use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; use core::time::Duration; @@ -52,7 +51,7 @@ use crate::onion_message::async_payments::{ StaticInvoicePersisted, }; use crate::onion_message::messenger::{ - Destination, MessageRouter, MessageSendInstructions, Responder, PADDED_PATH_LENGTH, + Destination, MessageRouter, MessageSendInstructions, Responder, DUMMY_HOPS_PATH_LENGTH, }; use crate::onion_message::offers::OffersMessage; use crate::onion_message::packet::OnionMessageContents; @@ -74,11 +73,7 @@ use { /// /// [`OffersMessageFlow`] is parameterized by a [`MessageRouter`], which is responsible /// for finding message paths when initiating and retrying onion messages. 
-pub struct OffersMessageFlow -where - MR::Target: MessageRouter, - L::Target: Logger, -{ +pub struct OffersMessageFlow { chain_hash: ChainHash, best_block: RwLock, @@ -107,11 +102,7 @@ where logger: L, } -impl OffersMessageFlow -where - MR::Target: MessageRouter, - L::Target: Logger, -{ +impl OffersMessageFlow { /// Creates a new [`OffersMessageFlow`] pub fn new( chain_hash: ChainHash, best_block: BestBlock, our_network_pubkey: PublicKey, @@ -266,11 +257,7 @@ const DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration = Duration::from_secs(365 * 2 pub(crate) const TEST_DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration = DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY; -impl OffersMessageFlow -where - MR::Target: MessageRouter, - L::Target: Logger, -{ +impl OffersMessageFlow { /// [`BlindedMessagePath`]s for an async recipient to communicate with this node and interactively /// build [`Offer`]s and [`StaticInvoice`]s for receiving async payments. /// @@ -317,14 +304,11 @@ where /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to /// [`Router::create_blinded_payment_paths`]. - fn create_blinded_payment_paths( + fn create_blinded_payment_paths( &self, router: &R, usable_channels: Vec, amount_msats: Option, payment_secret: PaymentSecret, payment_context: PaymentContext, relative_expiry_seconds: u32, - ) -> Result, ()> - where - R::Target: Router, - { + ) -> Result, ()> { let secp_ctx = &self.secp_ctx; let receive_auth_key = self.receive_auth_key; @@ -356,14 +340,11 @@ where #[cfg(test)] /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to /// [`Router::create_blinded_payment_paths`]. 
- pub(crate) fn test_create_blinded_payment_paths( + pub(crate) fn test_create_blinded_payment_paths( &self, router: &R, usable_channels: Vec, amount_msats: Option, payment_secret: PaymentSecret, payment_context: PaymentContext, relative_expiry_seconds: u32, - ) -> Result, ()> - where - R::Target: Router, - { + ) -> Result, ()> { self.create_blinded_payment_paths( router, usable_channels, @@ -436,11 +417,7 @@ pub enum HeldHtlcReplyPath { }, } -impl OffersMessageFlow -where - MR::Target: MessageRouter, - L::Target: Logger, -{ +impl OffersMessageFlow { /// Verifies an [`InvoiceRequest`] using the provided [`OffersContext`] or the [`InvoiceRequest::metadata`]. /// /// - If an [`OffersContext::InvoiceRequest`] with a `nonce` is provided, verification is performed using recipient context data. @@ -490,11 +467,12 @@ where Ok(InvreqResponseInstructions::SendInvoice(invoice_request)) } - /// Verifies a [`Bolt12Invoice`] using the provided [`OffersContext`] or the invoice's payer metadata, - /// returning the corresponding [`PaymentId`] if successful. + /// Verifies a [`Bolt12Invoice`] using the provided [`OffersContext`] or the invoice's payer + /// metadata, returning the corresponding [`PaymentId`] if successful. /// - /// - If an [`OffersContext::OutboundPayment`] with a `nonce` is provided, verification is performed - /// using this to form the payer metadata. + /// - If an [`OffersContext::OutboundPaymentForOffer`] or + /// [`OffersContext::OutboundPaymentForRefund`] with a `nonce` is provided, verification is + /// performed using this to form the payer metadata. /// - If no context is provided and the invoice corresponds to a [`Refund`] without blinded paths, /// verification is performed using the [`Bolt12Invoice::payer_metadata`]. /// - If neither condition is met, verification fails. 
@@ -508,8 +486,19 @@ where None if invoice.is_for_refund_without_paths() => { invoice.verify_using_metadata(expanded_key, secp_ctx) }, - Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) => { - invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx) + Some(&OffersContext::OutboundPaymentForOffer { payment_id, nonce, .. }) => { + if invoice.is_for_offer() { + invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx) + } else { + Err(()) + } + }, + Some(&OffersContext::OutboundPaymentForRefund { payment_id, nonce, .. }) => { + if invoice.is_for_refund() { + invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx) + } else { + Err(()) + } }, _ => Err(()), } @@ -538,11 +527,10 @@ where } } - fn create_offer_builder_intern( + fn create_offer_builder_intern( &self, entropy_source: ES, make_paths: PF, ) -> Result<(OfferBuilder<'_, DerivedMetadata, secp256k1::All>, Nonce), Bolt12SemanticError> where - ES::Target: EntropySource, PF: FnOnce( PublicKey, MessageContext, @@ -595,13 +583,10 @@ where /// This is not exported to bindings users as builder patterns don't map outside of move semantics. /// /// [`DefaultMessageRouter`]: crate::onion_message::messenger::DefaultMessageRouter - pub fn create_offer_builder( + pub fn create_offer_builder( &self, entropy_source: ES, peers: Vec, - ) -> Result, Bolt12SemanticError> - where - ES::Target: EntropySource, - { - self.create_offer_builder_intern(&*entropy_source, |_, context, _| { + ) -> Result, Bolt12SemanticError> { + self.create_offer_builder_intern(&entropy_source, |_, context, _| { self.create_blinded_paths(peers, context) .map(|paths| paths.into_iter().take(1)) .map_err(|_| Bolt12SemanticError::MissingPaths) @@ -618,15 +603,11 @@ where /// This is not exported to bindings users as builder patterns don't map outside of move semantics. /// /// See [`Self::create_offer_builder`] for more details on usage. 
- pub fn create_offer_builder_using_router( + pub fn create_offer_builder_using_router( &self, router: ME, entropy_source: ES, peers: Vec, - ) -> Result, Bolt12SemanticError> - where - ME::Target: MessageRouter, - ES::Target: EntropySource, - { + ) -> Result, Bolt12SemanticError> { let receive_key = self.get_receive_auth_key(); - self.create_offer_builder_intern(&*entropy_source, |node_id, context, secp_ctx| { + self.create_offer_builder_intern(&entropy_source, |node_id, context, secp_ctx| { router .create_blinded_paths(node_id, receive_key, context, peers, secp_ctx) .map(|paths| paths.into_iter().take(1)) @@ -645,23 +626,19 @@ where /// aforementioned always-online node. /// /// This is not exported to bindings users as builder patterns don't map outside of move semantics. - pub fn create_async_receive_offer_builder( + pub fn create_async_receive_offer_builder( &self, entropy_source: ES, message_paths_to_always_online_node: Vec, - ) -> Result<(OfferBuilder<'_, DerivedMetadata, secp256k1::All>, Nonce), Bolt12SemanticError> - where - ES::Target: EntropySource, - { - self.create_offer_builder_intern(&*entropy_source, |_, _, _| { + ) -> Result<(OfferBuilder<'_, DerivedMetadata, secp256k1::All>, Nonce), Bolt12SemanticError> { + self.create_offer_builder_intern(&entropy_source, |_, _, _| { Ok(message_paths_to_always_online_node) }) } - fn create_refund_builder_intern( + fn create_refund_builder_intern( &self, entropy_source: ES, make_paths: PF, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, ) -> Result, Bolt12SemanticError> where - ES::Target: EntropySource, PF: FnOnce( PublicKey, MessageContext, @@ -671,11 +648,12 @@ where { let node_id = self.get_our_node_id(); let expanded_key = &self.inbound_payment_key; - let entropy = &*entropy_source; + let entropy = &entropy_source; let secp_ctx = &self.secp_ctx; let nonce = Nonce::from_entropy_source(entropy); - let context = MessageContext::Offers(OffersContext::OutboundPayment { payment_id, nonce }); + 
let context = + MessageContext::Offers(OffersContext::OutboundPaymentForRefund { payment_id, nonce }); // Create the base builder with common properties let mut builder = RefundBuilder::deriving_signing_pubkey( @@ -731,15 +709,12 @@ where /// /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed /// [`RouteParameters::from_payment_params_and_value`]: crate::routing::router::RouteParameters::from_payment_params_and_value - pub fn create_refund_builder( + pub fn create_refund_builder( &self, entropy_source: ES, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, peers: Vec, - ) -> Result, Bolt12SemanticError> - where - ES::Target: EntropySource, - { + ) -> Result, Bolt12SemanticError> { self.create_refund_builder_intern( - &*entropy_source, + &entropy_source, |_, context, _| { self.create_blinded_paths(peers, context) .map(|paths| paths.into_iter().take(1)) @@ -772,17 +747,13 @@ where /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed /// [`RouteParameters::from_payment_params_and_value`]: crate::routing::router::RouteParameters::from_payment_params_and_value - pub fn create_refund_builder_using_router( + pub fn create_refund_builder_using_router( &self, router: ME, entropy_source: ES, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId, peers: Vec, - ) -> Result, Bolt12SemanticError> - where - ME::Target: MessageRouter, - ES::Target: EntropySource, - { + ) -> Result, Bolt12SemanticError> { let receive_key = self.get_receive_auth_key(); self.create_refund_builder_intern( - &*entropy_source, + &entropy_source, |node_id, context, secp_ctx| { router .create_blinded_paths(node_id, receive_key, context, peers, secp_ctx) @@ -821,14 +792,11 @@ where /// created via [`Self::create_async_receive_offer_builder`]. /// /// This is not exported to bindings users as builder patterns don't map outside of move semantics. 
- pub fn create_static_invoice_builder<'a, R: Deref>( + pub fn create_static_invoice_builder<'a, R: Router>( &self, router: &R, offer: &'a Offer, offer_nonce: Nonce, payment_secret: PaymentSecret, relative_expiry_secs: u32, usable_channels: Vec, peers: Vec, - ) -> Result, Bolt12SemanticError> - where - R::Target: Router, - { + ) -> Result, Bolt12SemanticError> { let expanded_key = &self.inbound_payment_key; let secp_ctx = &self.secp_ctx; @@ -892,13 +860,11 @@ where /// blinded path can be constructed. /// /// This is not exported to bindings users as builder patterns don't map outside of move semantics. - pub fn create_invoice_builder_from_refund<'a, ES: Deref, R: Deref, F>( + pub fn create_invoice_builder_from_refund<'a, ES: EntropySource, R: Router, F>( &'a self, router: &R, entropy_source: ES, refund: &'a Refund, usable_channels: Vec, get_payment_info: F, ) -> Result, Bolt12SemanticError> where - ES::Target: EntropySource, - R::Target: Router, F: Fn(u64, u32) -> Result<(PaymentHash, PaymentSecret), Bolt12SemanticError>, { if refund.chain() != self.chain_hash { @@ -906,7 +872,7 @@ where } let expanded_key = &self.inbound_payment_key; - let entropy = &*entropy_source; + let entropy = &entropy_source; let amount_msats = refund.amount_msats(); let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32; @@ -961,12 +927,11 @@ where /// Returns a [`Bolt12SemanticError`] if: /// - Valid blinded payment paths could not be generated for the [`Bolt12Invoice`]. /// - The [`InvoiceBuilder`] could not be created from the [`InvoiceRequest`]. 
- pub fn create_invoice_builder_from_invoice_request_with_keys<'a, R: Deref, F>( + pub fn create_invoice_builder_from_invoice_request_with_keys<'a, R: Router, F>( &self, router: &R, invoice_request: &'a VerifiedInvoiceRequest, usable_channels: Vec, get_payment_info: F, ) -> Result<(InvoiceBuilder<'a, DerivedSigningPubkey>, MessageContext), Bolt12SemanticError> where - R::Target: Router, F: Fn(u64, u32) -> Result<(PaymentHash, PaymentSecret), Bolt12SemanticError>, { let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32; @@ -1021,12 +986,11 @@ where /// Returns a [`Bolt12SemanticError`] if: /// - Valid blinded payment paths could not be generated for the [`Bolt12Invoice`]. /// - The [`InvoiceBuilder`] could not be created from the [`InvoiceRequest`]. - pub fn create_invoice_builder_from_invoice_request_without_keys<'a, R: Deref, F>( + pub fn create_invoice_builder_from_invoice_request_without_keys<'a, R: Router, F>( &self, router: &R, invoice_request: &'a VerifiedInvoiceRequest, usable_channels: Vec, get_payment_info: F, ) -> Result<(InvoiceBuilder<'a, ExplicitSigningPubkey>, MessageContext), Bolt12SemanticError> where - R::Target: Router, F: Fn(u64, u32) -> Result<(PaymentHash, PaymentSecret), Bolt12SemanticError>, { let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32; @@ -1098,7 +1062,8 @@ where &self, invoice_request: InvoiceRequest, payment_id: PaymentId, nonce: Nonce, peers: Vec, ) -> Result<(), Bolt12SemanticError> { - let context = MessageContext::Offers(OffersContext::OutboundPayment { payment_id, nonce }); + let context = + MessageContext::Offers(OffersContext::OutboundPaymentForOffer { payment_id, nonce }); let reply_paths = self .create_blinded_paths(peers, context) .map_err(|_| Bolt12SemanticError::MissingPaths)?; @@ -1268,26 +1233,24 @@ where /// received to our node. 
/// /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc - pub fn path_for_release_held_htlc( + pub fn path_for_release_held_htlc( &self, intercept_id: InterceptId, prev_outbound_scid_alias: u64, htlc_id: u64, entropy: ES, - ) -> BlindedMessagePath - where - ES::Target: EntropySource, - { + ) -> BlindedMessagePath { // In the future, we should support multi-hop paths here. let context = MessageContext::AsyncPayments(AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id, prev_outbound_scid_alias, htlc_id, }); - let num_dummy_hops = PADDED_PATH_LENGTH.saturating_sub(1); + let num_dummy_hops = DUMMY_HOPS_PATH_LENGTH.saturating_sub(1); BlindedMessagePath::new_with_dummy_hops( &[], self.get_our_node_id(), num_dummy_hops, self.receive_auth_key, context, - &*entropy, + false, + &entropy, &self.secp_ctx, ) } @@ -1374,13 +1337,10 @@ where /// the cache can self-regulate the number of messages sent out. /// /// Errors if we failed to create blinded reply paths when sending an [`OfferPathsRequest`] message. - pub fn check_refresh_async_receive_offer_cache( + pub fn check_refresh_async_receive_offer_cache( &self, peers: Vec, usable_channels: Vec, router: R, timer_tick_occurred: bool, - ) -> Result<(), ()> - where - R::Target: Router, - { + ) -> Result<(), ()> { // Terminate early if this node does not intend to receive async payments. { let cache = self.async_receive_offer_cache.lock().unwrap(); @@ -1449,11 +1409,9 @@ where /// Enqueue onion messages that will used to request invoice refresh from the static invoice /// server, based on the offers provided by the cache. 
- fn check_refresh_static_invoices( + fn check_refresh_static_invoices( &self, peers: Vec, usable_channels: Vec, router: R, - ) where - R::Target: Router, - { + ) { let mut serve_static_invoice_msgs = Vec::new(); { let duration_since_epoch = self.duration_since_epoch(); @@ -1466,7 +1424,7 @@ where offer_nonce, peers.clone(), usable_channels.clone(), - &*router, + &router, ) { Ok((invoice, path)) => (invoice, path), Err(()) => continue, @@ -1574,15 +1532,11 @@ where /// /// Returns `None` if we have enough offers cached already, verification of `message` fails, or we /// fail to create blinded paths. - pub fn handle_offer_paths( + pub fn handle_offer_paths( &self, message: OfferPaths, context: AsyncPaymentsContext, responder: Responder, peers: Vec, usable_channels: Vec, entropy: ES, router: R, - ) -> Option<(ServeStaticInvoice, MessageContext)> - where - ES::Target: EntropySource, - R::Target: Router, - { + ) -> Option<(ServeStaticInvoice, MessageContext)> { let duration_since_epoch = self.duration_since_epoch(); let invoice_slot = match context { AsyncPaymentsContext::OfferPaths { invoice_slot, path_absolute_expiry } => { @@ -1609,7 +1563,7 @@ where } let (mut offer_builder, offer_nonce) = - match self.create_async_receive_offer_builder(&*entropy, message.paths) { + match self.create_async_receive_offer_builder(&entropy, message.paths) { Ok((builder, nonce)) => (builder, nonce), Err(_) => return None, // Only reachable if OfferPaths::paths is empty }; @@ -1665,13 +1619,10 @@ where /// Creates a [`StaticInvoice`] and a blinded path for the server to forward invoice requests from /// payers to our node. 
- fn create_static_invoice_for_server( + fn create_static_invoice_for_server( &self, offer: &Offer, offer_nonce: Nonce, peers: Vec, usable_channels: Vec, router: R, - ) -> Result<(StaticInvoice, BlindedMessagePath), ()> - where - R::Target: Router, - { + ) -> Result<(StaticInvoice, BlindedMessagePath), ()> { let expanded_key = &self.inbound_payment_key; let duration_since_epoch = self.duration_since_epoch(); let secp_ctx = &self.secp_ctx; diff --git a/lightning/src/offers/invoice.rs b/lightning/src/offers/invoice.rs index 6dfd6eac508..8d83225f117 100644 --- a/lightning/src/offers/invoice.rs +++ b/lightning/src/offers/invoice.rs @@ -778,6 +778,19 @@ struct InvoiceFields { } macro_rules! invoice_accessors { ($self: ident, $contents: expr) => { + /// Whether the invoice was created in response to a [`Refund`]. + pub fn is_for_refund(&$self) -> bool { + $contents.is_for_refund() + } + + /// Whether the invoice was created in response to an [`InvoiceRequest`] created from an + /// [`Offer`]. + /// + /// [`Offer`]: crate::offers::offer::Offer + pub fn is_for_offer(&$self) -> bool { + $contents.is_for_offer() + } + /// The chains that may be used when paying a requested invoice. /// /// From [`Offer::chains`]; `None` if the invoice was created in response to a [`Refund`]. @@ -1093,6 +1106,20 @@ impl InvoiceContents { } } + fn is_for_refund(&self) -> bool { + match self { + InvoiceContents::ForRefund { .. } => true, + InvoiceContents::ForOffer { .. } => false, + } + } + + fn is_for_offer(&self) -> bool { + match self { + InvoiceContents::ForRefund { .. } => false, + InvoiceContents::ForOffer { .. } => true, + } + } + fn offer_chains(&self) -> Option> { match self { InvoiceContents::ForOffer { invoice_request, .. 
} => { diff --git a/lightning/src/offers/nonce.rs b/lightning/src/offers/nonce.rs index 0675414125f..8c99a464abc 100644 --- a/lightning/src/offers/nonce.rs +++ b/lightning/src/offers/nonce.rs @@ -13,7 +13,6 @@ use crate::io::{self, Read}; use crate::ln::msgs::DecodeError; use crate::sign::EntropySource; use crate::util::ser::{Readable, Writeable, Writer}; -use core::ops::Deref; #[allow(unused_imports)] use crate::prelude::*; @@ -34,10 +33,7 @@ impl Nonce { pub const LENGTH: usize = 16; /// Creates a `Nonce` from the given [`EntropySource`]. - pub fn from_entropy_source(entropy_source: ES) -> Self - where - ES::Target: EntropySource, - { + pub fn from_entropy_source(entropy_source: ES) -> Self { let mut bytes = [0u8; Self::LENGTH]; let rand_bytes = entropy_source.get_secure_random_bytes(); bytes.copy_from_slice(&rand_bytes[..Self::LENGTH]); diff --git a/lightning/src/offers/offer.rs b/lightning/src/offers/offer.rs index 7ad3c282c77..5592c50a264 100644 --- a/lightning/src/offers/offer.rs +++ b/lightning/src/offers/offer.rs @@ -2528,5 +2528,12 @@ mod bolt12_tests { "lno1pgx9getnwss8vetrw3hhyucsespjgef743p5fzqq9nqxh0ah7y87rzv3ud0eleps9kl2d5348hq2k8qzqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgqpqqqqqqqqqqqqqqqqqqqqqqqqqqqzqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqqzq3zyg3zyg3zygszqqqqyqqqqsqqvpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqszqgpqyqsq".parse::(), Err(Bolt12ParseError::Decode(DecodeError::InvalidValue)), ); + + // Bech32 padding exceeds 4-bit limit (BOLT 12 test vector) + // See: https://github.com/lightning/bolts/pull/1312 + assert!(matches!( + "lno1zcss9mk8y3wkklfvevcrszlmu23kfrxh49px20665dqwmn4p72pkseseq".parse::(), + Err(Bolt12ParseError::InvalidPadding(_)) + )); } } diff --git a/lightning/src/offers/parse.rs b/lightning/src/offers/parse.rs index 99dd1bb938d..df71e860d2d 100644 --- a/lightning/src/offers/parse.rs +++ b/lightning/src/offers/parse.rs @@ -12,7 +12,7 @@ use crate::io; 
use crate::ln::msgs::DecodeError; use crate::util::ser::CursorReadable; -use bech32::primitives::decode::CheckedHrpstringError; +use bech32::primitives::decode::{CheckedHrpstringError, PaddingError}; use bitcoin::secp256k1; #[allow(unused_imports)] @@ -76,6 +76,10 @@ mod sealed { return Err(Bolt12ParseError::InvalidBech32Hrp); } + // Validate that bech32 padding is valid per BIP-173: + // "Any incomplete group at the end MUST be 4 bits or less, MUST be all zeroes" + parsed.validate_segwit_padding()?; + let data = parsed.byte_iter().collect::>(); Self::try_from(data) } @@ -146,6 +150,11 @@ pub enum Bolt12ParseError { /// This is not exported to bindings users as the details don't matter much CheckedHrpstringError, ), + /// The bech32 data has invalid padding per BIP-173 (more than 4 bits or non-zero padding). + InvalidPadding( + /// This is not exported to bindings users as the details don't matter much + PaddingError, + ), /// The bech32 decoded string could not be decoded as the expected message type. Decode(DecodeError), /// The parsed message has invalid semantics. 
@@ -232,6 +241,12 @@ impl From for Bolt12ParseError { } } +impl From for Bolt12ParseError { + fn from(error: PaddingError) -> Self { + Self::InvalidPadding(error) + } +} + impl From for Bolt12ParseError { fn from(error: DecodeError) -> Self { Self::Decode(error) @@ -326,7 +341,7 @@ mod bolt12_tests { #[cfg(test)] mod tests { - use super::Bolt12ParseError; + use super::{Bolt12ParseError, PaddingError}; use crate::ln::msgs::DecodeError; use crate::offers::offer::Offer; use bech32::primitives::decode::{CharError, CheckedHrpstringError, UncheckedHrpstringError}; @@ -371,4 +386,15 @@ mod tests { Err(e) => assert_eq!(e, Bolt12ParseError::Decode(DecodeError::InvalidValue)), } } + + #[test] + fn fails_parsing_bech32_encoded_offer_with_invalid_padding() { + // BOLT 12 test vector for invalid bech32 padding + // See: https://github.com/lightning/bolts/pull/1312 + let encoded_offer = "lno1zcss9mk8y3wkklfvevcrszlmu23kfrxh49px20665dqwmn4p72pkseseq"; + match encoded_offer.parse::() { + Ok(_) => panic!("Valid offer: {}", encoded_offer), + Err(e) => assert_eq!(e, Bolt12ParseError::InvalidPadding(PaddingError::TooMuch)), + } + } } diff --git a/lightning/src/offers/refund.rs b/lightning/src/offers/refund.rs index dd2c3e2a92e..c0fd9dfdd3e 100644 --- a/lightning/src/offers/refund.rs +++ b/lightning/src/offers/refund.rs @@ -110,7 +110,6 @@ use bitcoin::constants::ChainHash; use bitcoin::network::Network; use bitcoin::secp256k1::{self, PublicKey, Secp256k1}; use core::hash::{Hash, Hasher}; -use core::ops::Deref; use core::str::FromStr; use core::time::Duration; @@ -624,13 +623,10 @@ macro_rules! 
respond_with_derived_signing_pubkey_methods { ($self: ident, $build /// /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice #[cfg(feature = "std")] - pub fn respond_using_derived_keys( + pub fn respond_using_derived_keys( &$self, payment_paths: Vec, payment_hash: PaymentHash, expanded_key: &ExpandedKey, entropy_source: ES - ) -> Result<$builder, Bolt12SemanticError> - where - ES::Target: EntropySource, - { + ) -> Result<$builder, Bolt12SemanticError> { let created_at = std::time::SystemTime::now() .duration_since(std::time::SystemTime::UNIX_EPOCH) .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH"); @@ -648,13 +644,10 @@ macro_rules! respond_with_derived_signing_pubkey_methods { ($self: ident, $build /// This is not exported to bindings users as builder patterns don't map outside of move semantics. /// /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice - pub fn respond_using_derived_keys_no_std( + pub fn respond_using_derived_keys_no_std( &$self, payment_paths: Vec, payment_hash: PaymentHash, created_at: core::time::Duration, expanded_key: &ExpandedKey, entropy_source: ES - ) -> Result<$builder, Bolt12SemanticError> - where - ES::Target: EntropySource, - { + ) -> Result<$builder, Bolt12SemanticError> { if $self.features().requires_unknown_bits() { return Err(Bolt12SemanticError::UnknownRequiredFeatures); } diff --git a/lightning/src/onion_message/async_payments.rs b/lightning/src/onion_message/async_payments.rs index 127126e150f..41108cdccd7 100644 --- a/lightning/src/onion_message/async_payments.rs +++ b/lightning/src/onion_message/async_payments.rs @@ -17,6 +17,7 @@ use crate::onion_message::messenger::{MessageSendInstructions, Responder, Respon use crate::onion_message::packet::OnionMessageContents; use crate::prelude::*; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; +use core::ops::Deref; // TLV record types for the `onionmsg_tlv` TLV stream as defined in BOLT 4. 
const OFFER_PATHS_REQ_TLV_TYPE: u64 = 75540; @@ -89,6 +90,45 @@ pub trait AsyncPaymentsMessageHandler { } } +impl> AsyncPaymentsMessageHandler + for A +{ + fn handle_offer_paths_request( + &self, message: OfferPathsRequest, context: AsyncPaymentsContext, + responder: Option, + ) -> Option<(OfferPaths, ResponseInstruction)> { + self.deref().handle_offer_paths_request(message, context, responder) + } + fn handle_offer_paths( + &self, message: OfferPaths, context: AsyncPaymentsContext, responder: Option, + ) -> Option<(ServeStaticInvoice, ResponseInstruction)> { + self.deref().handle_offer_paths(message, context, responder) + } + fn handle_serve_static_invoice( + &self, message: ServeStaticInvoice, context: AsyncPaymentsContext, + responder: Option, + ) { + self.deref().handle_serve_static_invoice(message, context, responder) + } + fn handle_static_invoice_persisted( + &self, message: StaticInvoicePersisted, context: AsyncPaymentsContext, + ) { + self.deref().handle_static_invoice_persisted(message, context) + } + fn handle_held_htlc_available( + &self, message: HeldHtlcAvailable, context: AsyncPaymentsContext, + responder: Option, + ) -> Option<(ReleaseHeldHtlc, ResponseInstruction)> { + self.deref().handle_held_htlc_available(message, context, responder) + } + fn handle_release_held_htlc(&self, message: ReleaseHeldHtlc, context: AsyncPaymentsContext) { + self.deref().handle_release_held_htlc(message, context) + } + fn release_pending_messages(&self) -> Vec<(AsyncPaymentsMessage, MessageSendInstructions)> { + self.deref().release_pending_messages() + } +} + /// Possible async payment messages sent and received via an [`OnionMessage`]. 
/// /// [`OnionMessage`]: crate::ln::msgs::OnionMessage diff --git a/lightning/src/onion_message/dns_resolution.rs b/lightning/src/onion_message/dns_resolution.rs index 47d4bc09e04..e857a359c78 100644 --- a/lightning/src/onion_message/dns_resolution.rs +++ b/lightning/src/onion_message/dns_resolution.rs @@ -37,6 +37,7 @@ use dnssec_prover::rr::Name; use lightning_types::features::NodeFeatures; use core::fmt; +use core::ops::Deref; use crate::blinded_path::message::DNSResolverContext; use crate::io; @@ -89,6 +90,23 @@ pub trait DNSResolverMessageHandler { } } +impl> DNSResolverMessageHandler for D { + fn handle_dnssec_query( + &self, message: DNSSECQuery, responder: Option, + ) -> Option<(DNSResolverMessage, ResponseInstruction)> { + self.deref().handle_dnssec_query(message, responder) + } + fn handle_dnssec_proof(&self, message: DNSSECProof, context: DNSResolverContext) { + self.deref().handle_dnssec_proof(message, context) + } + fn provided_node_features(&self) -> NodeFeatures { + self.deref().provided_node_features() + } + fn release_pending_messages(&self) -> Vec<(DNSResolverMessage, MessageSendInstructions)> { + self.deref().release_pending_messages() + } +} + #[derive(Clone, Debug, Hash, PartialEq, Eq)] /// An enum containing the possible onion messages which are used uses to request and receive /// DNSSEC proofs. 
diff --git a/lightning/src/onion_message/functional_tests.rs b/lightning/src/onion_message/functional_tests.rs index 605a81a4f95..75e2aaf3c5f 100644 --- a/lightning/src/onion_message/functional_tests.rs +++ b/lightning/src/onion_message/functional_tests.rs @@ -436,8 +436,9 @@ fn one_blinded_hop() { let context = MessageContext::Custom(Vec::new()); let entropy = &*nodes[1].entropy_source; let receive_key = nodes[1].messenger.node_signer.get_receive_auth_key(); + let node_id = nodes[1].node_id; let blinded_path = - BlindedMessagePath::new(&[], nodes[1].node_id, receive_key, context, entropy, &secp_ctx); + BlindedMessagePath::new(&[], node_id, receive_key, context, false, entropy, &secp_ctx); let destination = Destination::BlindedPath(blinded_path); let instructions = MessageSendInstructions::WithoutReplyPath { destination }; nodes[0].messenger.send_onion_message(test_msg, instructions).unwrap(); @@ -450,18 +451,15 @@ fn blinded_path_with_dummy_hops() { let nodes = create_nodes(2); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[1].entropy_source; - let receive_key = nodes[1].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new_with_dummy_hops( &[], nodes[1].node_id, TEST_DUMMY_HOP_COUNT, - receive_key, - context, - entropy, - &secp_ctx, + nodes[1].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[1].entropy_source, + &Secp256k1::new(), ); // Ensure that dummy hops are added to the blinded path. 
assert_eq!(blinded_path.blinded_hops().len(), 6); @@ -477,19 +475,16 @@ fn two_unblinded_two_blinded() { let nodes = create_nodes(5); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [MessageForwardNode { node_id: nodes[3].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[4].entropy_source; - let receive_key = nodes[4].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[4].node_id, - receive_key, - context, - entropy, - &secp_ctx, + nodes[4].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[4].entropy_source, + &Secp256k1::new(), ); let path = OnionMessagePath { intermediate_nodes: vec![nodes[1].node_id, nodes[2].node_id], @@ -507,21 +502,18 @@ fn three_blinded_hops() { let nodes = create_nodes(4); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [ MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[3].entropy_source; - let receive_key = nodes[3].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[3].node_id, - receive_key, - context, - entropy, - &secp_ctx, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[3].entropy_source, + &Secp256k1::new(), ); let destination = Destination::BlindedPath(blinded_path); let instructions = MessageSendInstructions::WithoutReplyPath { destination }; @@ -548,8 +540,9 @@ fn async_response_over_one_blinded_hop() { let context = MessageContext::Custom(Vec::new()); let entropy = &*nodes[1].entropy_source; let receive_key = 
nodes[1].messenger.node_signer.get_receive_auth_key(); + let node_id = nodes[1].node_id; let reply_path = - BlindedMessagePath::new(&[], nodes[1].node_id, receive_key, context, entropy, &secp_ctx); + BlindedMessagePath::new(&[], node_id, receive_key, context, false, entropy, &secp_ctx); // 4. Create a responder using the reply path for Alice. let responder = Some(Responder::new(reply_path)); @@ -590,7 +583,7 @@ fn async_response_with_reply_path_succeeds() { let entropy = &*bob.entropy_source; let receive_key = bob.messenger.node_signer.get_receive_auth_key(); let reply_path = - BlindedMessagePath::new(&[], bob.node_id, receive_key, context, entropy, &secp_ctx); + BlindedMessagePath::new(&[], bob.node_id, receive_key, context, false, entropy, &secp_ctx); // Alice asynchronously responds to Bob, expecting a response back from him. let responder = Responder::new(reply_path); @@ -632,7 +625,7 @@ fn async_response_with_reply_path_fails() { let entropy = &*bob.entropy_source; let receive_key = bob.messenger.node_signer.get_receive_auth_key(); let reply_path = - BlindedMessagePath::new(&[], bob.node_id, receive_key, context, entropy, &secp_ctx); + BlindedMessagePath::new(&[], bob.node_id, receive_key, context, false, entropy, &secp_ctx); // Alice tries to asynchronously respond to Bob, but fails because the nodes are unannounced and // disconnected. Thus, a reply path could no be created for the response. @@ -668,28 +661,26 @@ fn too_big_packet_error() { #[test] fn test_blinded_path_padding_for_full_length_path() { - // Check that for a full blinded path, all encrypted payload are padded to rounded-off length. + // Check that for a full blinded path without compact padding, all encrypted payload are padded + // to rounded-off length. 
let nodes = create_nodes(4); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [ MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, ]; - // Update the context to create a larger final receive TLVs, ensuring that - // the hop sizes vary before padding. - let context = MessageContext::Custom(vec![0u8; 42]); - let entropy = &*nodes[3].entropy_source; - let receive_key = nodes[3].messenger.node_signer.get_receive_auth_key(); + // Build with a larger context to create a larger final receive TLVs, ensuring that the hop + // sizes vary before padding. let blinded_path = BlindedMessagePath::new_with_dummy_hops( &intermediate_nodes, nodes[3].node_id, TEST_DUMMY_HOP_COUNT, - receive_key, - context, - entropy, - &secp_ctx, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(vec![0u8; 42]), + false, + &*nodes[3].entropy_source, + &Secp256k1::new(), ); assert!(is_padded(&blinded_path.blinded_hops(), MESSAGE_PADDING_ROUND_OFF)); @@ -703,32 +694,72 @@ fn test_blinded_path_padding_for_full_length_path() { } #[test] -fn test_blinded_path_no_padding_for_compact_path() { - // Check that for a compact blinded path, no padding is applied. +fn test_blinded_path_compact_padding() { + // Check that for a blinded path with non-SCID intermediate hops with compact padding, no extra + // padding is applied. let nodes = create_nodes(4); - let secp_ctx = Secp256k1::new(); - // Include some short_channel_id, so that MessageRouter uses this to create compact blinded paths. + let intermediate_nodes = [ + MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, + MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, + ]; + // Build with a larger context to create a larger final receive TLVs, ensuring that the hop + // sizes vary before padding. 
+ let blinded_path = BlindedMessagePath::new_with_dummy_hops( + &intermediate_nodes, + nodes[3].node_id, + TEST_DUMMY_HOP_COUNT, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(vec![0u8; 42]), + true, + &*nodes[3].entropy_source, + &Secp256k1::new(), + ); + + let hops = blinded_path.blinded_hops(); + assert!(!is_padded(&hops, MESSAGE_PADDING_ROUND_OFF)); + assert_eq!(hops.len(), TEST_DUMMY_HOP_COUNT + 3); + for hop in hops.iter().take(TEST_DUMMY_HOP_COUNT + 2) { + assert_eq!(hops[0].encrypted_payload.len(), hop.encrypted_payload.len()); + } + // Check the actual encrypted payload lengths, which may change in the future but serves to + // ensure that this and test_compact_blinded_path_compact_padding, below, differ. + assert_eq!(hops[0].encrypted_payload.len(), 51); +} + +#[test] +fn test_compact_blinded_path_compact_padding() { + // Check that for a blinded path with SCID intermediate hops with compact padding, no extra + // padding is applied. + let nodes = create_nodes(4); + + // Include some short_channel_id, so that MessageRouter uses this to create compact blinded paths let intermediate_nodes = [ MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: Some(24) }, MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: Some(25) }, ]; - // Update the context to create a larger final receive TLVs, ensuring that - // the hop sizes vary before padding. - let context = MessageContext::Custom(vec![0u8; 42]); - let entropy = &*nodes[3].entropy_source; - let receive_key = nodes[3].messenger.node_signer.get_receive_auth_key(); + // Build with a larger context to create a larger final receive TLVs, ensuring that the hop + // sizes vary before padding. 
let blinded_path = BlindedMessagePath::new_with_dummy_hops( &intermediate_nodes, nodes[3].node_id, TEST_DUMMY_HOP_COUNT, - receive_key, - context, - entropy, - &secp_ctx, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(vec![0u8; 42]), + true, + &*nodes[3].entropy_source, + &Secp256k1::new(), ); - assert!(!is_padded(&blinded_path.blinded_hops(), MESSAGE_PADDING_ROUND_OFF)); + let hops = blinded_path.blinded_hops(); + assert!(!is_padded(&hops, MESSAGE_PADDING_ROUND_OFF)); + assert_eq!(hops.len(), TEST_DUMMY_HOP_COUNT + 3); + for hop in hops.iter().take(TEST_DUMMY_HOP_COUNT + 2) { + assert_eq!(hops[0].encrypted_payload.len(), hop.encrypted_payload.len()); + } + // Check the actual encrypted payload lengths, which may change in the future but serves to + // ensure that this and test_blinded_path_compact_padding, above, differ. + assert_eq!(hops[0].encrypted_payload.len(), 26); } #[test] @@ -743,15 +774,13 @@ fn we_are_intro_node() { MessageForwardNode { node_id: nodes[0].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[2].entropy_source; - let receive_key = nodes[2].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, + nodes[2].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[2].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -764,15 +793,13 @@ fn we_are_intro_node() { // Try with a two-hop blinded path where we are the introduction node. 
let intermediate_nodes = [MessageForwardNode { node_id: nodes[0].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[1].entropy_source; - let receive_key = nodes[1].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[1].node_id, - receive_key, - context, - entropy, + nodes[1].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[1].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -790,19 +817,16 @@ fn invalid_blinded_path_error() { let nodes = create_nodes(3); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[2].entropy_source; - let receive_key = nodes[2].messenger.node_signer.get_receive_auth_key(); let mut blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, - &secp_ctx, + nodes[2].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[2].entropy_source, + &Secp256k1::new(), ); blinded_path.clear_blinded_hops(); let destination = Destination::BlindedPath(blinded_path); @@ -828,15 +852,13 @@ fn reply_path() { MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[0].entropy_source; - let receive_key = nodes[0].messenger.node_signer.get_receive_auth_key(); let reply_path = BlindedMessagePath::new( &intermediate_nodes, nodes[0].node_id, - receive_key, - context, - entropy, + nodes[0].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + 
&*nodes[0].entropy_source, &secp_ctx, ); nodes[0] @@ -855,15 +877,13 @@ fn reply_path() { MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[3].entropy_source; - let receive_key = nodes[3].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[3].node_id, - receive_key, - context, - entropy, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[3].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -871,15 +891,13 @@ fn reply_path() { MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[0].entropy_source; - let receive_key = nodes[0].messenger.node_signer.get_receive_auth_key(); let reply_path = BlindedMessagePath::new( &intermediate_nodes, nodes[0].node_id, - receive_key, - context, - entropy, + nodes[0].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[0].entropy_source, &secp_ctx, ); let instructions = MessageSendInstructions::WithSpecifiedReplyPath { destination, reply_path }; @@ -975,15 +993,13 @@ fn requests_peer_connection_for_buffered_messages() { let intermediate_nodes = [MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[0].entropy_source; - let receive_key = nodes[0].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, + nodes[0].messenger.node_signer.get_receive_auth_key(), + 
MessageContext::Custom(Vec::new()), + false, + &*nodes[0].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -1046,15 +1062,13 @@ fn drops_buffered_messages_waiting_for_peer_connection() { let intermediate_nodes = [MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[0].entropy_source; - let receive_key = nodes[0].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, + nodes[0].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[0].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -1107,19 +1121,16 @@ fn intercept_offline_peer_oms() { } let message = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[2].entropy_source; - let receive_key = nodes[2].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, - &secp_ctx, + nodes[2].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[2].entropy_source, + &Secp256k1::new(), ); let destination = Destination::BlindedPath(blinded_path); let instructions = MessageSendInstructions::WithoutReplyPath { destination }; diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index 9a2c06bb72f..e688c020ac6 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -66,50 +66,32 @@ pub(super) const MAX_TIMER_TICKS: usize = 2; /// languages. 
pub trait AOnionMessenger { /// A type implementing [`EntropySource`] - type EntropySource: EntropySource + ?Sized; - /// A type that may be dereferenced to [`Self::EntropySource`] - type ES: Deref; + type EntropySource: EntropySource; /// A type implementing [`NodeSigner`] - type NodeSigner: NodeSigner + ?Sized; - /// A type that may be dereferenced to [`Self::NodeSigner`] - type NS: Deref; + type NodeSigner: NodeSigner; /// A type implementing [`Logger`] - type Logger: Logger + ?Sized; - /// A type that may be dereferenced to [`Self::Logger`] - type L: Deref; + type Logger: Logger; /// A type implementing [`NodeIdLookUp`] - type NodeIdLookUp: NodeIdLookUp + ?Sized; - /// A type that may be dereferenced to [`Self::NodeIdLookUp`] - type NL: Deref; + type NL: NodeIdLookUp; /// A type implementing [`MessageRouter`] - type MessageRouter: MessageRouter + ?Sized; - /// A type that may be dereferenced to [`Self::MessageRouter`] - type MR: Deref; + type MessageRouter: MessageRouter; /// A type implementing [`OffersMessageHandler`] - type OffersMessageHandler: OffersMessageHandler + ?Sized; - /// A type that may be dereferenced to [`Self::OffersMessageHandler`] - type OMH: Deref; + type OMH: OffersMessageHandler; /// A type implementing [`AsyncPaymentsMessageHandler`] - type AsyncPaymentsMessageHandler: AsyncPaymentsMessageHandler + ?Sized; - /// A type that may be dereferenced to [`Self::AsyncPaymentsMessageHandler`] - type APH: Deref; + type APH: AsyncPaymentsMessageHandler; /// A type implementing [`DNSResolverMessageHandler`] - type DNSResolverMessageHandler: DNSResolverMessageHandler + ?Sized; - /// A type that may be dereferenced to [`Self::DNSResolverMessageHandler`] - type DRH: Deref; + type DRH: DNSResolverMessageHandler; /// A type implementing [`CustomOnionMessageHandler`] - type CustomOnionMessageHandler: CustomOnionMessageHandler + ?Sized; - /// A type that may be dereferenced to [`Self::CustomOnionMessageHandler`] - type CMH: Deref; + type CMH: 
CustomOnionMessageHandler; /// Returns a reference to the actual [`OnionMessenger`] object. fn get_om( &self, ) -> &OnionMessenger< - Self::ES, - Self::NS, - Self::L, + Self::EntropySource, + Self::NodeSigner, + Self::Logger, Self::NL, - Self::MR, + Self::MessageRouter, Self::OMH, Self::APH, Self::DRH, @@ -118,44 +100,25 @@ pub trait AOnionMessenger { } impl< - ES: Deref, - NS: Deref, - L: Deref, - NL: Deref, - MR: Deref, - OMH: Deref, - APH: Deref, - DRH: Deref, - CMH: Deref, + ES: EntropySource, + NS: NodeSigner, + L: Logger, + NL: NodeIdLookUp, + MR: MessageRouter, + OMH: OffersMessageHandler, + APH: AsyncPaymentsMessageHandler, + DRH: DNSResolverMessageHandler, + CMH: CustomOnionMessageHandler, > AOnionMessenger for OnionMessenger -where - ES::Target: EntropySource, - NS::Target: NodeSigner, - L::Target: Logger, - NL::Target: NodeIdLookUp, - MR::Target: MessageRouter, - OMH::Target: OffersMessageHandler, - APH::Target: AsyncPaymentsMessageHandler, - DRH::Target: DNSResolverMessageHandler, - CMH::Target: CustomOnionMessageHandler, { - type EntropySource = ES::Target; - type ES = ES; - type NodeSigner = NS::Target; - type NS = NS; - type Logger = L::Target; - type L = L; - type NodeIdLookUp = NL::Target; + type EntropySource = ES; + type NodeSigner = NS; + type Logger = L; type NL = NL; - type MessageRouter = MR::Target; - type MR = MR; - type OffersMessageHandler = OMH::Target; + type MessageRouter = MR; type OMH = OMH; - type AsyncPaymentsMessageHandler = APH::Target; type APH = APH; - type DNSResolverMessageHandler = DRH::Target; type DRH = DRH; - type CustomOnionMessageHandler = CMH::Target; type CMH = CMH; fn get_om(&self) -> &OnionMessenger { self @@ -272,7 +235,7 @@ where /// ]; /// let context = MessageContext::Custom(Vec::new()); /// let receive_key = keys_manager.get_receive_auth_key(); -/// let blinded_path = BlindedMessagePath::new(&hops, your_node_id, receive_key, context, &keys_manager, &secp_ctx); +/// let blinded_path = 
BlindedMessagePath::new(&hops, your_node_id, receive_key, context, false, &keys_manager, &secp_ctx); /// /// // Send a custom onion message to a blinded path. /// let destination = Destination::BlindedPath(blinded_path); @@ -284,26 +247,16 @@ where /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice pub struct OnionMessenger< - ES: Deref, - NS: Deref, - L: Deref, - NL: Deref, - MR: Deref, - OMH: Deref, - APH: Deref, - DRH: Deref, - CMH: Deref, -> where - ES::Target: EntropySource, - NS::Target: NodeSigner, - L::Target: Logger, - NL::Target: NodeIdLookUp, - MR::Target: MessageRouter, - OMH::Target: OffersMessageHandler, - APH::Target: AsyncPaymentsMessageHandler, - DRH::Target: DNSResolverMessageHandler, - CMH::Target: CustomOnionMessageHandler, -{ + ES: EntropySource, + NS: NodeSigner, + L: Logger, + NL: NodeIdLookUp, + MR: MessageRouter, + OMH: OffersMessageHandler, + APH: AsyncPaymentsMessageHandler, + DRH: DNSResolverMessageHandler, + CMH: CustomOnionMessageHandler, +> { entropy_source: ES, #[cfg(test)] pub(super) node_signer: NS, @@ -522,11 +475,34 @@ pub trait MessageRouter { ) -> Result, ()>; } +impl> MessageRouter for R { + fn find_path( + &self, sender: PublicKey, peers: Vec, destination: Destination, + ) -> Result { + self.deref().find_path(sender, peers, destination) + } + + fn create_blinded_paths( + &self, recipient: PublicKey, local_node_receive_key: ReceiveAuthKey, + context: MessageContext, peers: Vec, secp_ctx: &Secp256k1, + ) -> Result, ()> { + self.deref().create_blinded_paths( + recipient, + local_node_receive_key, + context, + peers, + secp_ctx, + ) + } +} + /// A [`MessageRouter`] that can only route to a directly connected [`Destination`]. /// -/// [`DefaultMessageRouter`] constructs compact [`BlindedMessagePath`]s on a best-effort basis. 
-/// That is, if appropriate SCID information is available for the intermediate peers, it will -/// default to creating compact paths. +/// [`DefaultMessageRouter`] tries to construct compact or private [`BlindedMessagePath`]s based on +/// the [`MessageContext`] given to [`MessageRouter::create_blinded_paths`]. That is, if the +/// provided context implies the path may be used in a BOLT 12 object which might appear in a QR +/// code, it reduces the amount of padding and dummy hops and prefers building compact paths when +/// short channel IDs (SCIDs) are available for intermediate peers. /// /// # Compact Blinded Paths /// @@ -545,28 +521,26 @@ pub trait MessageRouter { /// Creating [`BlindedMessagePath`]s may affect privacy since, if a suitable path cannot be found, /// it will create a one-hop path using the recipient as the introduction node if it is an announced /// node. Otherwise, there is no way to find a path to the introduction node in order to send a -/// message, and thus an `Err` is returned. -pub struct DefaultMessageRouter>, L: Deref, ES: Deref> -where - L::Target: Logger, - ES::Target: EntropySource, -{ +/// message, and thus an `Err` is returned. The impact of this may be somewhat muted when +/// additional dummy hops are added to the blinded path, but this protection is not complete. +pub struct DefaultMessageRouter>, L: Logger, ES: EntropySource> { network_graph: G, entropy_source: ES, } -// Target total length (in hops) for non-compact blinded paths. -// We pad with dummy hops until the path reaches this length, -// obscuring the recipient's true position. +// Target total length (in hops) for blinded paths used outside of QR codes. // -// Compact paths are optimized for minimal size, so we avoid -// adding dummy hops to them. 
-pub(crate) const PADDED_PATH_LENGTH: usize = 4; - -impl>, L: Deref, ES: Deref> DefaultMessageRouter -where - L::Target: Logger, - ES::Target: EntropySource, +// We add dummy hops until the path reaches this length (including the recipient). +pub(crate) const DUMMY_HOPS_PATH_LENGTH: usize = 4; + +// Target total length (in hops) for blinded paths included in objects which may appear in a QR +// code. +// +// We add dummy hops until the path reaches this length (including the recipient). +pub(crate) const QR_CODED_DUMMY_HOPS_PATH_LENGTH: usize = 2; + +impl>, L: Logger, ES: EntropySource> + DefaultMessageRouter { /// Creates a [`DefaultMessageRouter`] using the given [`NetworkGraph`]. pub fn new(network_graph: G, entropy_source: ES) -> Self { @@ -574,12 +548,12 @@ where } pub(crate) fn create_blinded_paths_from_iter< - I: ExactSizeIterator, + I: ExactSizeIterator + Clone, T: secp256k1::Signing + secp256k1::Verification, >( network_graph: &G, recipient: PublicKey, local_node_receive_key: ReceiveAuthKey, context: MessageContext, peers: I, entropy_source: &ES, secp_ctx: &Secp256k1, - compact_paths: bool, + never_compact_path: bool, ) -> Result, ()> { // Limit the number of blinded paths that are computed. const MAX_PATHS: usize = 3; @@ -592,6 +566,31 @@ where let is_recipient_announced = network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)); + let (size_constrained, path_len_incl_dummys) = match &context { + MessageContext::Offers(OffersContext::InvoiceRequest { .. }) + | MessageContext::Offers(OffersContext::OutboundPaymentForRefund { .. }) => { + // When including blinded paths within BOLT 12 objects that appear in QR codes, we + // sadly need to be conservative about size, especially if the QR code ultimately + // also includes an on-chain address. + (true, QR_CODED_DUMMY_HOPS_PATH_LENGTH) + }, + MessageContext::Offers(OffersContext::StaticInvoiceRequested { .. 
}) => { + // Async Payments aggressively embeds the entire `InvoiceRequest` in the payment + // onion. In a future version it should likely move to embedding only the + // `InvoiceRequest`-specific fields instead, but until then we have to be + // incredibly strict in the size of the blinded path we include in a static payment + // `Offer`. + (true, 0) + }, + _ => { + // If there's no need to be small, add additional dummy hops and never use + // SCID-based next-hops as they carry additional expiry risk. + (false, DUMMY_HOPS_PATH_LENGTH) + }, + }; + + let compact_paths = !never_compact_path && size_constrained; + let has_one_peer = peers.len() == 1; let mut peer_info = peers .map(|peer| MessageForwardNode { @@ -619,12 +618,8 @@ where }); let build_path = |intermediate_hops: &[MessageForwardNode]| { - let dummy_hops_count = if compact_paths { - 0 - } else { - // Add one for the final recipient TLV - PADDED_PATH_LENGTH.saturating_sub(intermediate_hops.len() + 1) - }; + // Calculate the dummy hops given the total hop count target (including the recipient). + let dummy_hops_count = path_len_incl_dummys.saturating_sub(intermediate_hops.len() + 1); BlindedMessagePath::new_with_dummy_hops( intermediate_hops, @@ -632,7 +627,8 @@ where dummy_hops_count, local_node_receive_key, context.clone(), - &**entropy_source, + size_constrained, + &entropy_source, secp_ctx, ) }; @@ -651,12 +647,6 @@ where } } - // Sanity check: Ones the paths are created for the non-compact case, ensure - // each of them are of the length `PADDED_PATH_LENGTH`. 
- if !compact_paths { - debug_assert!(paths.iter().all(|path| path.blinded_hops().len() == PADDED_PATH_LENGTH)); - } - if compact_paths { for path in &mut paths { path.use_compact_introduction_node(&network_graph); @@ -716,11 +706,8 @@ where } } -impl>, L: Deref, ES: Deref> MessageRouter +impl>, L: Logger, ES: EntropySource> MessageRouter for DefaultMessageRouter -where - L::Target: Logger, - ES::Target: EntropySource, { fn find_path( &self, sender: PublicKey, peers: Vec, destination: Destination, @@ -740,13 +727,18 @@ where peers.into_iter(), &self.entropy_source, secp_ctx, - true, + false, ) } } /// This message router is similar to [`DefaultMessageRouter`], but it always creates -/// full-length blinded paths, using the peer's [`NodeId`]. +/// non-compact blinded paths, using the peer's [`NodeId`]. It uses the same heuristics as +/// [`DefaultMessageRouter`] for deciding when to add additional dummy hops to the generated blinded +/// paths. +/// +/// This may be useful in cases where you want a long-lived blinded path and anticipate channel(s) +/// may close, but connections to specific peers will remain stable. /// /// This message router can only route to a directly connected [`Destination`]. /// @@ -755,20 +747,15 @@ where /// Creating [`BlindedMessagePath`]s may affect privacy since, if a suitable path cannot be found, /// it will create a one-hop path using the recipient as the introduction node if it is an announced /// node. Otherwise, there is no way to find a path to the introduction node in order to send a -/// message, and thus an `Err` is returned. -pub struct NodeIdMessageRouter>, L: Deref, ES: Deref> -where - L::Target: Logger, - ES::Target: EntropySource, -{ +/// message, and thus an `Err` is returned. The impact of this may be somewhat muted when +/// additional dummy hops are added to the blinded path, but this protection is not complete. 
+pub struct NodeIdMessageRouter>, L: Logger, ES: EntropySource> { network_graph: G, entropy_source: ES, } -impl>, L: Deref, ES: Deref> NodeIdMessageRouter -where - L::Target: Logger, - ES::Target: EntropySource, +impl>, L: Logger, ES: EntropySource> + NodeIdMessageRouter { /// Creates a [`NodeIdMessageRouter`] using the given [`NetworkGraph`]. pub fn new(network_graph: G, entropy_source: ES) -> Self { @@ -776,11 +763,8 @@ where } } -impl>, L: Deref, ES: Deref> MessageRouter +impl>, L: Logger, ES: EntropySource> MessageRouter for NodeIdMessageRouter -where - L::Target: Logger, - ES::Target: EntropySource, { fn find_path( &self, sender: PublicKey, peers: Vec, destination: Destination, @@ -790,8 +774,11 @@ where fn create_blinded_paths( &self, recipient: PublicKey, local_node_receive_key: ReceiveAuthKey, - context: MessageContext, peers: Vec, secp_ctx: &Secp256k1, + context: MessageContext, mut peers: Vec, secp_ctx: &Secp256k1, ) -> Result, ()> { + for peer in peers.iter_mut() { + peer.short_channel_id = None; + } DefaultMessageRouter::create_blinded_paths_from_iter( &self.network_graph, recipient, @@ -800,7 +787,7 @@ where peers.into_iter(), &self.entropy_source, secp_ctx, - false, + true, ) } } @@ -998,6 +985,25 @@ pub trait CustomOnionMessageHandler { ) -> Vec<(Self::CustomMessage, MessageSendInstructions)>; } +impl> CustomOnionMessageHandler for C { + type CustomMessage = T::CustomMessage; + fn handle_custom_message( + &self, message: Self::CustomMessage, context: Option>, responder: Option, + ) -> Option<(Self::CustomMessage, ResponseInstruction)> { + self.deref().handle_custom_message(message, context, responder) + } + fn read_custom_message( + &self, message_type: u64, buffer: &mut R, + ) -> Result, msgs::DecodeError> { + self.deref().read_custom_message(message_type, buffer) + } + fn release_pending_custom_messages( + &self, + ) -> Vec<(Self::CustomMessage, MessageSendInstructions)> { + self.deref().release_pending_custom_messages() + } +} + /// A processed 
incoming onion message, containing either a Forward (another onion message) /// or a Receive payload with decrypted contents. #[derive(Clone, Debug)] @@ -1021,20 +1027,15 @@ pub enum PeeledOnion { /// Returns the node id of the peer to send the message to, the message itself, and any addresses /// needed to connect to the first node. pub fn create_onion_message_resolving_destination< - ES: Deref, - NS: Deref, - NL: Deref, + ES: EntropySource, + NS: NodeSigner, + NL: NodeIdLookUp, T: OnionMessageContents, >( entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, network_graph: &ReadOnlyNetworkGraph, secp_ctx: &Secp256k1, mut path: OnionMessagePath, contents: T, reply_path: Option, -) -> Result<(PublicKey, OnionMessage, Vec), SendError> -where - ES::Target: EntropySource, - NS::Target: NodeSigner, - NL::Target: NodeIdLookUp, -{ +) -> Result<(PublicKey, OnionMessage, Vec), SendError> { path.destination.resolve(network_graph); create_onion_message( entropy_source, @@ -1058,16 +1059,16 @@ where /// - unless it can be resolved by [`NodeIdLookUp::next_node_id`]. /// Use [`create_onion_message_resolving_destination`] instead to resolve the introduction node /// first with a [`ReadOnlyNetworkGraph`]. 
-pub fn create_onion_message( +pub fn create_onion_message< + ES: EntropySource, + NS: NodeSigner, + NL: NodeIdLookUp, + T: OnionMessageContents, +>( entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, path: OnionMessagePath, contents: T, reply_path: Option, -) -> Result<(PublicKey, OnionMessage, Vec), SendError> -where - ES::Target: EntropySource, - NS::Target: NodeSigner, - NL::Target: NodeIdLookUp, -{ +) -> Result<(PublicKey, OnionMessage, Vec), SendError> { let OnionMessagePath { intermediate_nodes, mut destination, first_node_addresses } = path; if let Destination::BlindedPath(ref path) = destination { if path.blinded_hops().is_empty() { @@ -1140,15 +1141,10 @@ where /// /// Returns either the next layer of the onion for forwarding or the decrypted content for the /// receiver. -pub fn peel_onion_message( +pub fn peel_onion_message( msg: &OnionMessage, secp_ctx: &Secp256k1, node_signer: NS, logger: L, custom_handler: CMH, -) -> Result::Target as CustomOnionMessageHandler>::CustomMessage>, ()> -where - NS::Target: NodeSigner, - L::Target: Logger, - CMH::Target: CustomOnionMessageHandler, -{ +) -> Result, ()> { let control_tlvs_ss = match node_signer.ecdh(Recipient::Node, &msg.blinding_point, None) { Ok(ss) => ss, Err(e) => { @@ -1177,7 +1173,7 @@ where onion_decode_ss, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, - (control_tlvs_ss, custom_handler.deref(), receiving_context_auth_key, logger.deref()), + (control_tlvs_ss, &custom_handler, receiving_context_auth_key, &logger), ); // Constructs the next onion message using packet data and blinding logic. @@ -1363,26 +1359,16 @@ macro_rules! 
drop_handled_events_and_abort { } impl< - ES: Deref, - NS: Deref, - L: Deref, - NL: Deref, - MR: Deref, - OMH: Deref, - APH: Deref, - DRH: Deref, - CMH: Deref, + ES: EntropySource, + NS: NodeSigner, + L: Logger, + NL: NodeIdLookUp, + MR: MessageRouter, + OMH: OffersMessageHandler, + APH: AsyncPaymentsMessageHandler, + DRH: DNSResolverMessageHandler, + CMH: CustomOnionMessageHandler, > OnionMessenger -where - ES::Target: EntropySource, - NS::Target: NodeSigner, - L::Target: Logger, - NL::Target: NodeIdLookUp, - MR::Target: MessageRouter, - OMH::Target: OffersMessageHandler, - APH::Target: AsyncPaymentsMessageHandler, - DRH::Target: DNSResolverMessageHandler, - CMH::Target: CustomOnionMessageHandler, { /// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to /// their respective handlers. @@ -1773,13 +1759,13 @@ where pub(crate) fn peel_onion_message( &self, msg: &OnionMessage, - ) -> Result::Target as CustomOnionMessageHandler>::CustomMessage>, ()> { + ) -> Result, ()> { peel_onion_message( msg, &self.secp_ctx, - &*self.node_signer, - &*self.logger, - &*self.custom_handler, + &self.node_signer, + &self.logger, + &self.custom_handler, ) } @@ -2007,26 +1993,16 @@ fn outbound_buffer_full( } impl< - ES: Deref, - NS: Deref, - L: Deref, - NL: Deref, - MR: Deref, - OMH: Deref, - APH: Deref, - DRH: Deref, - CMH: Deref, + ES: EntropySource, + NS: NodeSigner, + L: Logger, + NL: NodeIdLookUp, + MR: MessageRouter, + OMH: OffersMessageHandler, + APH: AsyncPaymentsMessageHandler, + DRH: DNSResolverMessageHandler, + CMH: CustomOnionMessageHandler, > EventsProvider for OnionMessenger -where - ES::Target: EntropySource, - NS::Target: NodeSigner, - L::Target: Logger, - NL::Target: NodeIdLookUp, - MR::Target: MessageRouter, - OMH::Target: OffersMessageHandler, - APH::Target: AsyncPaymentsMessageHandler, - DRH::Target: DNSResolverMessageHandler, - CMH::Target: CustomOnionMessageHandler, { fn process_pending_events(&self, handler: H) where @@ 
-2128,26 +2104,16 @@ where } impl< - ES: Deref, - NS: Deref, - L: Deref, - NL: Deref, - MR: Deref, - OMH: Deref, - APH: Deref, - DRH: Deref, - CMH: Deref, + ES: EntropySource, + NS: NodeSigner, + L: Logger, + NL: NodeIdLookUp, + MR: MessageRouter, + OMH: OffersMessageHandler, + APH: AsyncPaymentsMessageHandler, + DRH: DNSResolverMessageHandler, + CMH: CustomOnionMessageHandler, > BaseMessageHandler for OnionMessenger -where - ES::Target: EntropySource, - NS::Target: NodeSigner, - L::Target: Logger, - NL::Target: NodeIdLookUp, - MR::Target: MessageRouter, - OMH::Target: OffersMessageHandler, - APH::Target: AsyncPaymentsMessageHandler, - DRH::Target: DNSResolverMessageHandler, - CMH::Target: CustomOnionMessageHandler, { fn provided_node_features(&self) -> NodeFeatures { let mut features = NodeFeatures::empty(); @@ -2200,26 +2166,16 @@ where } impl< - ES: Deref, - NS: Deref, - L: Deref, - NL: Deref, - MR: Deref, - OMH: Deref, - APH: Deref, - DRH: Deref, - CMH: Deref, + ES: EntropySource, + NS: NodeSigner, + L: Logger, + NL: NodeIdLookUp, + MR: MessageRouter, + OMH: OffersMessageHandler, + APH: AsyncPaymentsMessageHandler, + DRH: DNSResolverMessageHandler, + CMH: CustomOnionMessageHandler, > OnionMessageHandler for OnionMessenger -where - ES::Target: EntropySource, - NS::Target: NodeSigner, - L::Target: Logger, - NL::Target: NodeIdLookUp, - MR::Target: MessageRouter, - OMH::Target: OffersMessageHandler, - APH::Target: AsyncPaymentsMessageHandler, - DRH::Target: DNSResolverMessageHandler, - CMH::Target: CustomOnionMessageHandler, { fn handle_onion_message(&self, peer_node_id: PublicKey, msg: &OnionMessage) { let logger = WithContext::from(&self.logger, Some(peer_node_id), None, None); diff --git a/lightning/src/onion_message/offers.rs b/lightning/src/onion_message/offers.rs index 06988d4db8f..8e3afdfa977 100644 --- a/lightning/src/onion_message/offers.rs +++ b/lightning/src/onion_message/offers.rs @@ -22,6 +22,7 @@ use crate::onion_message::packet::OnionMessageContents; 
use crate::util::logger::Logger; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use core::fmt; +use core::ops::Deref; use crate::prelude::*; @@ -63,6 +64,17 @@ pub trait OffersMessageHandler { } } +impl> OffersMessageHandler for O { + fn handle_message( + &self, message: OffersMessage, context: Option, responder: Option, + ) -> Option<(OffersMessage, ResponseInstruction)> { + self.deref().handle_message(message, context, responder) + } + fn release_pending_messages(&self) -> Vec<(OffersMessage, MessageSendInstructions)> { + self.deref().release_pending_messages() + } +} + /// Possible BOLT 12 Offers messages sent and received via an [`OnionMessage`]. /// /// [`OnionMessage`]: crate::ln::msgs::OnionMessage diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index ae317ad1ac3..3794c381817 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -43,6 +43,7 @@ use crate::util::indexed_map::{ use crate::util::logger::{Level, Logger}; use crate::util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK}; use crate::util::ser::{MaybeReadable, Readable, ReadableArgs, RequiredWrapper, Writeable, Writer}; +use crate::util::wakers::Future; use crate::io; use crate::io_extras::{copy, sink}; @@ -183,10 +184,7 @@ impl FromStr for NodeId { } /// Represents the network as nodes and channels between them -pub struct NetworkGraph -where - L::Target: Logger, -{ +pub struct NetworkGraph { secp_ctx: Secp256k1, last_rapid_gossip_sync_timestamp: Mutex>, chain_hash: ChainHash, @@ -321,44 +319,33 @@ impl MaybeReadable for NetworkUpdate { /// This network graph is then used for routing payments. /// Provides interface to help with initial routing sync by /// serving historical announcements. 
-pub struct P2PGossipSync>, U: Deref, L: Deref> -where - U::Target: UtxoLookup, - L::Target: Logger, -{ +pub struct P2PGossipSync>, U: UtxoLookup, L: Logger> { network_graph: G, - utxo_lookup: RwLock>, + #[cfg(any(feature = "_test_utils", test))] + pub(super) utxo_lookup: Option, + #[cfg(not(any(feature = "_test_utils", test)))] + utxo_lookup: Option, full_syncs_requested: AtomicUsize, pending_events: Mutex>, logger: L, } -impl>, U: Deref, L: Deref> P2PGossipSync -where - U::Target: UtxoLookup, - L::Target: Logger, -{ +impl>, U: UtxoLookup, L: Logger> P2PGossipSync { /// Creates a new tracker of the actual state of the network of channels and nodes, /// assuming an existing [`NetworkGraph`]. + /// /// UTXO lookup is used to make sure announced channels exist on-chain, channel data is /// correct, and the announcement is signed with channel owners' keys. pub fn new(network_graph: G, utxo_lookup: Option, logger: L) -> Self { P2PGossipSync { network_graph, full_syncs_requested: AtomicUsize::new(0), - utxo_lookup: RwLock::new(utxo_lookup), + utxo_lookup, pending_events: Mutex::new(vec![]), logger, } } - /// Adds a provider used to check new announcements. Does not affect - /// existing announcements unless they are updated. - /// Add, update or remove the provider would replace the current one. - pub fn add_utxo_lookup(&self, utxo_lookup: Option) { - *self.utxo_lookup.write().unwrap() = utxo_lookup; - } - /// Gets a reference to the underlying [`NetworkGraph`] which was provided in /// [`P2PGossipSync::new`]. /// @@ -367,6 +354,17 @@ where &self.network_graph } + /// Gets a [`Future`] which will resolve the next time an async validation of gossip data + /// completes. 
+ /// + /// If the [`UtxoLookup`] provided in [`P2PGossipSync::new`] does not return + /// [`UtxoResult::Async`] values, the returned [`Future`] will never resolve + /// + /// [`UtxoResult::Async`]: crate::routing::utxo::UtxoResult::Async + pub fn validation_completion_future(&self) -> Future { + self.network_graph.pending_checks.completion_notifier.get_future() + } + /// Returns true when a full routing table sync should be performed with a peer. fn should_request_full_sync(&self) -> bool { const FULL_SYNCS_TO_REQUEST: usize = 5; @@ -378,46 +376,46 @@ where } } - /// Used to broadcast forward gossip messages which were validated async. - /// - /// Note that this will ignore events other than `Broadcast*` or messages with too much excess - /// data. - pub(super) fn forward_gossip_msg(&self, mut ev: MessageSendEvent) { - match &mut ev { - MessageSendEvent::BroadcastChannelAnnouncement { msg, ref mut update_msg } => { - if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { - return; - } - if update_msg.as_ref().map(|msg| msg.contents.excess_data.len()).unwrap_or(0) - > MAX_EXCESS_BYTES_FOR_RELAY - { - *update_msg = None; - } - }, - MessageSendEvent::BroadcastChannelUpdate { msg, .. } => { - if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { - return; - } - }, - MessageSendEvent::BroadcastNodeAnnouncement { msg } => { - if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY - || msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY - || msg.contents.excess_data.len() + msg.contents.excess_address_data.len() + /// Walks the list of pending UTXO validations and removes completed ones, adding any messages + /// we should forward as a result to [`Self::pending_events`]. 
+ fn process_completed_checks(&self) { + let msgs = self.network_graph.pending_checks.check_resolved_futures(&*self.network_graph); + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.reserve(msgs.len()); + for mut message in msgs { + match &mut message { + MessageSendEvent::BroadcastChannelAnnouncement { msg, ref mut update_msg } => { + if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { + continue; + } + if update_msg.as_ref().map(|msg| msg.contents.excess_data.len()).unwrap_or(0) > MAX_EXCESS_BYTES_FOR_RELAY - { - return; - } - }, - _ => return, + { + *update_msg = None; + } + }, + MessageSendEvent::BroadcastChannelUpdate { msg, .. } => { + if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { + continue; + } + }, + MessageSendEvent::BroadcastNodeAnnouncement { msg } => { + if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY + || msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY + || msg.contents.excess_data.len() + msg.contents.excess_address_data.len() + > MAX_EXCESS_BYTES_FOR_RELAY + { + continue; + } + }, + _ => continue, + } + pending_events.push(message); } - self.pending_events.lock().unwrap().push(ev); } } -impl NetworkGraph -where - L::Target: Logger, -{ +impl NetworkGraph { /// Handles any network updates originating from [`Event`]s. 
/// /// [`Event`]: crate::events::Event @@ -530,11 +528,8 @@ pub fn verify_channel_announcement( Ok(()) } -impl>, U: Deref, L: Deref> RoutingMessageHandler +impl>, U: UtxoLookup, L: Logger> RoutingMessageHandler for P2PGossipSync -where - U::Target: UtxoLookup, - L::Target: Logger, { fn handle_node_announcement( &self, _their_node_id: Option, msg: &msgs::NodeAnnouncement, @@ -549,14 +544,21 @@ where fn handle_channel_announcement( &self, _their_node_id: Option, msg: &msgs::ChannelAnnouncement, ) -> Result { - self.network_graph - .update_channel_from_announcement(msg, &*self.utxo_lookup.read().unwrap())?; + self.network_graph.update_channel_from_announcement(msg, &self.utxo_lookup)?; Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } fn handle_channel_update( &self, _their_node_id: Option, msg: &msgs::ChannelUpdate, ) -> Result, LightningError> { + // Ignore channel updates with dont_forward bit set - these are for private channels + // and shouldn't be gossiped or stored in the network graph + if msg.contents.message_flags & (1 << 1) != 0 { + return Err(LightningError { + err: "Ignoring channel_update with dont_forward bit set".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug), + }); + } match self.network_graph.update_channel(msg) { Ok(nodes) if msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY => Ok(nodes), Ok(_) => Ok(None), @@ -759,11 +761,8 @@ where } } -impl>, U: Deref, L: Deref> BaseMessageHandler +impl>, U: UtxoLookup, L: Logger> BaseMessageHandler for P2PGossipSync -where - U::Target: UtxoLookup, - L::Target: Logger, { /// Initiates a stateless sync of routing gossip information with a peer /// using [`gossip_queries`]. 
The default strategy used by this implementation @@ -884,6 +883,7 @@ where } fn get_and_clear_pending_msg_events(&self) -> Vec { + self.process_completed_checks(); let mut ret = Vec::new(); let mut pending_events = self.pending_events.lock().unwrap(); core::mem::swap(&mut ret, &mut pending_events); @@ -1632,10 +1632,7 @@ impl Readable for NodeInfo { const SERIALIZATION_VERSION: u8 = 1; const MIN_SERIALIZATION_VERSION: u8 = 1; -impl Writeable for NetworkGraph -where - L::Target: Logger, -{ +impl Writeable for NetworkGraph { fn write(&self, writer: &mut W) -> Result<(), io::Error> { self.test_node_counter_consistency(); @@ -1663,16 +1660,21 @@ where } } -impl ReadableArgs for NetworkGraph -where - L::Target: Logger, -{ +impl ReadableArgs for NetworkGraph { fn read(reader: &mut R, logger: L) -> Result, DecodeError> { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); + const MAX_CHAN_COUNT_LIMIT: usize = 100_000_000; + const MAX_NODE_COUNT_LIMIT: usize = 10_000_000; + let chain_hash: ChainHash = Readable::read(reader)?; let channels_count: u64 = Readable::read(reader)?; - let mut channels = IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE); + // Pre-allocate 115% of the known channel count to avoid unnecessary reallocations. + let channels_map_capacity = (channels_count as u128 * 115 / 100) + .try_into() + .map(|v: usize| v.min(MAX_CHAN_COUNT_LIMIT)) + .map_err(|_| DecodeError::InvalidValue)?; + let mut channels = IndexedMap::with_capacity(channels_map_capacity); for _ in 0..channels_count { let chan_id: u64 = Readable::read(reader)?; let chan_info: ChannelInfo = Readable::read(reader)?; @@ -1684,7 +1686,12 @@ where if nodes_count > u32::max_value() as u64 / 2 { return Err(DecodeError::InvalidValue); } - let mut nodes = IndexedMap::with_capacity(NODE_COUNT_ESTIMATE); + // Pre-allocate 115% of the known channel count to avoid unnecessary reallocations. 
+ let nodes_map_capacity: usize = (nodes_count as u128 * 115 / 100) + .try_into() + .map(|v: usize| v.min(MAX_NODE_COUNT_LIMIT)) + .map_err(|_| DecodeError::InvalidValue)?; + let mut nodes = IndexedMap::with_capacity(nodes_map_capacity); for i in 0..nodes_count { let node_id = Readable::read(reader)?; let mut node_info: NodeInfo = Readable::read(reader)?; @@ -1720,10 +1727,7 @@ where } } -impl fmt::Display for NetworkGraph -where - L::Target: Logger, -{ +impl fmt::Display for NetworkGraph { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { writeln!(f, "Network map\n[Channels]")?; for (key, val) in self.channels.read().unwrap().unordered_iter() { @@ -1737,11 +1741,8 @@ where } } -impl Eq for NetworkGraph where L::Target: Logger {} -impl PartialEq for NetworkGraph -where - L::Target: Logger, -{ +impl Eq for NetworkGraph {} +impl PartialEq for NetworkGraph { fn eq(&self, other: &Self) -> bool { // For a total lockorder, sort by position in memory and take the inner locks in that order. // (Assumes that we can't move within memory while a lock is held). @@ -1760,27 +1761,32 @@ where } } -// In Jan, 2025 there were about 49K channels. -// We over-allocate by a bit because 20% more is better than the double we get if we're slightly -// too low -const CHAN_COUNT_ESTIMATE: usize = 60_000; -// In Jan, 2025 there were about 15K nodes -// We over-allocate by a bit because 33% more is better than the double we get if we're slightly -// too low +/// In Jan, 2026 there were about 54K channels. +/// +/// We over-allocate by a bit because ~15% more is better than the double we get if we're slightly +/// too low. +const CHAN_COUNT_ESTIMATE: usize = 63_000; +/// In Jan, 2026 there were about 17K nodes +/// +/// We over-allocate by a bit because 15% more is better than the double we get if we're slightly +/// too low. 
const NODE_COUNT_ESTIMATE: usize = 20_000; -impl NetworkGraph -where - L::Target: Logger, -{ +impl NetworkGraph { /// Creates a new, empty, network graph. pub fn new(network: Network, logger: L) -> NetworkGraph { + let (node_map_cap, chan_map_cap) = if matches!(network, Network::Bitcoin) { + (NODE_COUNT_ESTIMATE, CHAN_COUNT_ESTIMATE) + } else { + (0, 0) + }; + Self { secp_ctx: Secp256k1::verification_only(), chain_hash: ChainHash::using_genesis_block(network), logger, - channels: RwLock::new(IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE)), - nodes: RwLock::new(IndexedMap::with_capacity(NODE_COUNT_ESTIMATE)), + channels: RwLock::new(IndexedMap::with_capacity(chan_map_cap)), + nodes: RwLock::new(IndexedMap::with_capacity(node_map_cap)), next_node_counter: AtomicUsize::new(0), removed_node_counters: Mutex::new(Vec::new()), last_rapid_gossip_sync_timestamp: Mutex::new(None), @@ -1964,12 +1970,9 @@ where /// /// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify /// the corresponding UTXO exists on chain and is correctly-formatted. - pub fn update_channel_from_announcement( + pub fn update_channel_from_announcement( &self, msg: &msgs::ChannelAnnouncement, utxo_lookup: &Option, - ) -> Result<(), LightningError> - where - U::Target: UtxoLookup, - { + ) -> Result<(), LightningError> { self.pre_channel_announcement_validation_check(&msg.contents, utxo_lookup)?; verify_channel_announcement(msg, &self.secp_ctx)?; self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), utxo_lookup) @@ -1994,12 +1997,9 @@ where /// /// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify /// the corresponding UTXO exists on chain and is correctly-formatted. 
- pub fn update_channel_from_unsigned_announcement( + pub fn update_channel_from_unsigned_announcement( &self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option, - ) -> Result<(), LightningError> - where - U::Target: UtxoLookup, - { + ) -> Result<(), LightningError> { self.pre_channel_announcement_validation_check(&msg, utxo_lookup)?; self.update_channel_from_unsigned_announcement_intern(msg, None, utxo_lookup) } @@ -2118,12 +2118,9 @@ where /// /// In those cases, this will return an `Err` that we can return immediately. Otherwise it will /// return an `Ok(())`. - fn pre_channel_announcement_validation_check( + fn pre_channel_announcement_validation_check( &self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option, - ) -> Result<(), LightningError> - where - U::Target: UtxoLookup, - { + ) -> Result<(), LightningError> { let channels = self.channels.read().unwrap(); if let Some(chan) = channels.get(&msg.short_channel_id) { @@ -2162,13 +2159,10 @@ where /// /// Generally [`Self::pre_channel_announcement_validation_check`] should have been called /// first. 
- fn update_channel_from_unsigned_announcement_intern( + fn update_channel_from_unsigned_announcement_intern( &self, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, utxo_lookup: &Option, - ) -> Result<(), LightningError> - where - U::Target: UtxoLookup, - { + ) -> Result<(), LightningError> { if msg.node_id_1 == msg.node_id_2 || msg.bitcoin_key_1 == msg.bitcoin_key_2 { return Err(LightningError { err: "Channel announcement node had a channel with itself".to_owned(), @@ -3308,6 +3302,65 @@ pub(crate) mod tests { }; } + #[test] + fn handling_channel_update_with_dont_forward_flag() { + // Test that channel updates with the dont_forward bit set are rejected + let secp_ctx = Secp256k1::new(); + let logger = test_utils::TestLogger::new(); + let chain_source = test_utils::TestChainSource::new(Network::Testnet); + let network_graph = NetworkGraph::new(Network::Testnet, &logger); + let gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger); + + let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); + let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); + let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); + + // First announce a channel so we have something to update + let good_script = get_channel_script(&secp_ctx); + *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Ok(TxOut { + value: Amount::from_sat(1000_000), + script_pubkey: good_script.clone(), + })); + + let valid_channel_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + gossip_sync + .handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement) + .unwrap(); + + // Create a channel update with dont_forward bit set (bit 1 of message_flags) + let dont_forward_update = get_signed_channel_update( + |unsigned_channel_update| { + unsigned_channel_update.message_flags = 1 | (1 << 1); // must_be_one + dont_forward + }, + node_1_privkey, + 
&secp_ctx, + ); + + // The update should be rejected because dont_forward is set + match gossip_sync.handle_channel_update(Some(node_1_pubkey), &dont_forward_update) { + Ok(_) => panic!("Expected channel update with dont_forward to be rejected"), + Err(e) => { + assert_eq!(e.err, "Ignoring channel_update with dont_forward bit set"); + match e.action { + crate::ln::msgs::ErrorAction::IgnoreAndLog(level) => { + assert_eq!(level, crate::util::logger::Level::Debug) + }, + _ => panic!("Expected IgnoreAndLog action"), + } + }, + }; + + // Verify the update was not applied to the network graph + let channels = network_graph.read_only(); + let channel = + channels.channels().get(&valid_channel_announcement.contents.short_channel_id).unwrap(); + assert!( + channel.one_to_two.is_none(), + "Channel update with dont_forward should not be stored in network graph" + ); + } + #[test] fn handling_network_update() { let logger = test_utils::TestLogger::new(); diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index c06e5174263..b27dee1a450 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -13,15 +13,16 @@ use bitcoin::secp256k1::{self, PublicKey, Secp256k1}; use lightning_invoice::Bolt11Invoice; use crate::blinded_path::payment::{ - BlindedPaymentPath, ForwardTlvs, PaymentConstraints, PaymentForwardNode, PaymentRelay, - ReceiveTlvs, + BlindedPaymentPath, DummyTlvs, ForwardTlvs, PaymentConstraints, PaymentForwardNode, + PaymentRelay, ReceiveTlvs, }; use crate::blinded_path::{BlindedHop, Direction, IntroductionNode}; use crate::crypto::chacha20::ChaCha20; use crate::ln::channel_state::ChannelDetails; -use crate::ln::channelmanager::{PaymentId, RecipientOnionFields, MIN_FINAL_CLTV_EXPIRY_DELTA}; +use crate::ln::channelmanager::{PaymentId, MIN_FINAL_CLTV_EXPIRY_DELTA}; use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT}; use crate::ln::onion_utils; +use crate::ln::outbound_payment::RecipientOnionFields; use 
crate::offers::invoice::Bolt12Invoice; use crate::offers::static_invoice::StaticInvoice; use crate::routing::gossip::{ @@ -57,15 +58,13 @@ pub use lightning_types::routing::{RouteHint, RouteHintHop}; /// payment, and thus an `Err` is returned. pub struct DefaultRouter< G: Deref>, - L: Deref, - ES: Deref, + L: Logger, + ES: EntropySource, S: Deref, SP: Sized, Sc: ScoreLookUp, > where - L::Target: Logger, S::Target: for<'a> LockableScore<'a, ScoreLookUp = Sc>, - ES::Target: EntropySource, { network_graph: G, logger: L, @@ -74,18 +73,19 @@ pub struct DefaultRouter< score_params: SP, } +/// The number of dummy hops included in [`BlindedPaymentPath`]s created by [`DefaultRouter`]. +pub const DEFAULT_PAYMENT_DUMMY_HOPS: usize = 3; + impl< G: Deref>, - L: Deref, - ES: Deref, + L: Logger, + ES: EntropySource, S: Deref, SP: Sized, Sc: ScoreLookUp, > DefaultRouter where - L::Target: Logger, S::Target: for<'a> LockableScore<'a, ScoreLookUp = Sc>, - ES::Target: EntropySource, { /// Creates a new router. 
pub fn new( @@ -97,16 +97,14 @@ where impl< G: Deref>, - L: Deref, - ES: Deref, + L: Logger, + ES: EntropySource, S: Deref, SP: Sized, Sc: ScoreLookUp, > Router for DefaultRouter where - L::Target: Logger, S::Target: for<'a> LockableScore<'a, ScoreLookUp = Sc>, - ES::Target: EntropySource, { #[rustfmt::skip] fn find_route( @@ -118,7 +116,7 @@ where ) -> Result { let random_seed_bytes = self.entropy_source.get_secure_random_bytes(); find_route( - payer, params, &self.network_graph, first_hops, &*self.logger, + payer, params, &self.network_graph, first_hops, &self.logger, &ScorerAccountingForInFlightHtlcs::new(self.scorer.read_lock(), &inflight_htlcs), &self.score_params, &random_seed_bytes @@ -198,9 +196,9 @@ where }) }) .map(|forward_node| { - BlindedPaymentPath::new( - &[forward_node], recipient, local_node_receive_key, tlvs.clone(), u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, - &*self.entropy_source, secp_ctx + BlindedPaymentPath::new_with_dummy_hops( + &[forward_node], recipient, &[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS], + local_node_receive_key, tlvs.clone(), u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &self.entropy_source, secp_ctx ) }) .take(MAX_PAYMENT_PATHS) @@ -210,9 +208,9 @@ where Ok(paths) if !paths.is_empty() => Ok(paths), _ => { if network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)) { - BlindedPaymentPath::new( - &[], recipient, local_node_receive_key, tlvs, u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &*self.entropy_source, - secp_ctx + BlindedPaymentPath::new_with_dummy_hops( + &[], recipient, &[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS], + local_node_receive_key, tlvs, u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &self.entropy_source, secp_ctx ).map(|path| vec![path]) } else { Err(()) @@ -292,6 +290,45 @@ pub trait Router { ) -> Result, ()>; } +impl> Router for R { + fn find_route( + &self, payer: &PublicKey, route_params: &RouteParameters, + first_hops: Option<&[&ChannelDetails]>, inflight_htlcs: InFlightHtlcs, + ) -> Result { + 
self.deref().find_route(payer, route_params, first_hops, inflight_htlcs) + } + + fn find_route_with_id( + &self, payer: &PublicKey, route_params: &RouteParameters, + first_hops: Option<&[&ChannelDetails]>, inflight_htlcs: InFlightHtlcs, + payment_hash: PaymentHash, payment_id: PaymentId, + ) -> Result { + self.deref().find_route_with_id( + payer, + route_params, + first_hops, + inflight_htlcs, + payment_hash, + payment_id, + ) + } + + fn create_blinded_payment_paths( + &self, recipient: PublicKey, local_node_receive_key: ReceiveAuthKey, + first_hops: Vec, tlvs: ReceiveTlvs, amount_msats: Option, + secp_ctx: &Secp256k1, + ) -> Result, ()> { + self.deref().create_blinded_payment_paths( + recipient, + local_node_receive_key, + first_hops, + tlvs, + amount_msats, + secp_ctx, + ) + } +} + /// [`ScoreLookUp`] implementation that factors in in-flight HTLC liquidity. /// /// Useful for custom [`Router`] implementations to wrap their [`ScoreLookUp`] on-the-fly when calling @@ -1000,8 +1037,6 @@ impl PaymentParameters { /// whether your router will be allowed to find a multi-part route for this payment. If you /// set `allow_mpp` to true, you should ensure a payment secret is set on send, likely via /// [`RecipientOnionFields::secret_only`]. - /// - /// [`RecipientOnionFields::secret_only`]: crate::ln::channelmanager::RecipientOnionFields::secret_only #[rustfmt::skip] pub fn for_keysend(payee_pubkey: PublicKey, final_cltv_expiry_delta: u32, allow_mpp: bool) -> Self { Self::from_node_id(payee_pubkey, final_cltv_expiry_delta) @@ -1945,12 +1980,11 @@ impl<'a> NodeCounters<'a> { /// Calculates the introduction point for each blinded path in the given [`PaymentParameters`], if /// they can be found. 
#[rustfmt::skip] -fn calculate_blinded_path_intro_points<'a, L: Deref>( +fn calculate_blinded_path_intro_points<'a, L: Logger>( payment_params: &PaymentParameters, node_counters: &'a NodeCounters, network_graph: &ReadOnlyNetworkGraph, logger: &L, our_node_id: NodeId, first_hop_targets: &HashMap, u32)>, -) -> Result>, &'static str> -where L::Target: Logger { +) -> Result>, &'static str> { let introduction_node_id_cache = payment_params.payee.blinded_route_hints().iter() .map(|path| { match path.introduction_node() { @@ -2451,12 +2485,11 @@ fn sort_first_hop_channels( /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph #[rustfmt::skip] -pub fn find_route( +pub fn find_route( our_node_pubkey: &PublicKey, route_params: &RouteParameters, network_graph: &NetworkGraph, first_hops: Option<&[&ChannelDetails]>, logger: L, scorer: &S, score_params: &S::ScoreParams, random_seed_bytes: &[u8; 32] -) -> Result -where L::Target: Logger, GL::Target: Logger { +) -> Result { let graph_lock = network_graph.read_only(); let mut route = get_route(our_node_pubkey, &route_params, &graph_lock, first_hops, logger, scorer, score_params, random_seed_bytes)?; @@ -2465,12 +2498,11 @@ where L::Target: Logger, GL::Target: Logger { } #[rustfmt::skip] -pub(crate) fn get_route( +pub(crate) fn get_route( our_node_pubkey: &PublicKey, route_params: &RouteParameters, network_graph: &ReadOnlyNetworkGraph, first_hops: Option<&[&ChannelDetails]>, logger: L, scorer: &S, score_params: &S::ScoreParams, _random_seed_bytes: &[u8; 32] -) -> Result -where L::Target: Logger { +) -> Result { let payment_params = &route_params.payment_params; let max_path_length = core::cmp::min(payment_params.max_path_length, MAX_PATH_LENGTH_ESTIMATE); @@ -3854,11 +3886,10 @@ fn add_random_cltv_offset(route: &mut Route, payment_params: &PaymentParameters, /// /// Re-uses logic from `find_route`, so the restrictions described there also apply here. 
#[rustfmt::skip] -pub fn build_route_from_hops( +pub fn build_route_from_hops( our_node_pubkey: &PublicKey, hops: &[PublicKey], route_params: &RouteParameters, network_graph: &NetworkGraph, logger: L, random_seed_bytes: &[u8; 32] -) -> Result -where L::Target: Logger, GL::Target: Logger { +) -> Result { let graph_lock = network_graph.read_only(); let mut route = build_route_from_hops_internal(our_node_pubkey, hops, &route_params, &graph_lock, logger, random_seed_bytes)?; @@ -3867,10 +3898,10 @@ where L::Target: Logger, GL::Target: Logger { } #[rustfmt::skip] -fn build_route_from_hops_internal( +fn build_route_from_hops_internal( our_node_pubkey: &PublicKey, hops: &[PublicKey], route_params: &RouteParameters, network_graph: &ReadOnlyNetworkGraph, logger: L, random_seed_bytes: &[u8; 32], -) -> Result where L::Target: Logger { +) -> Result { struct HopScorer { our_node_id: NodeId, @@ -3943,10 +3974,7 @@ mod tests { ChannelUsage, FixedPenaltyScorer, ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters, ScoreLookUp, }; - use crate::routing::test_utils::{ - add_channel, add_or_update_node, build_graph, build_line_graph, get_nodes, - id_to_feature_flags, update_channel, - }; + use crate::routing::test_utils::*; use crate::routing::utxo::UtxoResult; use crate::types::features::{BlindedHopFeatures, ChannelFeatures, InitFeatures, NodeFeatures}; use crate::util::config::UserConfig; @@ -5368,7 +5396,7 @@ mod tests { fn available_amount_while_routing_test() { // Tests whether we choose the correct available channel amount while routing. 
- let (secp_ctx, network_graph, gossip_sync, chain_monitor, logger) = build_graph(); + let (secp_ctx, network_graph, gossip_sync, chain_monitor, logger) = build_graph_with_gossip_validation(); let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx); let scorer = ln_test_utils::TestScorer::new(); let random_seed_bytes = [42; 32]; @@ -5588,11 +5616,10 @@ mod tests { .push_opcode(opcodes::all::OP_PUSHNUM_2) .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_p2wsh(); + *chain_monitor.utxo_ret.lock().unwrap() = UtxoResult::Sync(Ok(TxOut { value: Amount::from_sat(15), script_pubkey: good_script.clone() })); - gossip_sync.add_utxo_lookup(Some(chain_monitor)); - - add_channel(&gossip_sync, &secp_ctx, &privkeys[0], &privkeys[2], ChannelFeatures::from_le_bytes(id_to_feature_flags(3)), 333); + add_channel_skipping_utxo_update(&gossip_sync, &secp_ctx, &privkeys[0], &privkeys[2], ChannelFeatures::from_le_bytes(id_to_feature_flags(3)), 333); update_channel(&gossip_sync, &secp_ctx, &privkeys[0], UnsignedChannelUpdate { chain_hash: ChainHash::using_genesis_block(Network::Testnet), short_channel_id: 333, diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index d741adf58d3..47621e37380 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -479,10 +479,7 @@ impl ReadableArgs for FixedPenaltyScorer { /// [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life /// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_multiplier_msat /// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat -pub struct ProbabilisticScorer>, L: Deref> -where - L::Target: Logger, -{ +pub struct ProbabilisticScorer>, L: Logger> { decay_params: ProbabilisticScoringDecayParameters, network_graph: G, logger: L, @@ -964,10 +961,7 @@ struct 
DirectedChannelLiquidity< last_datapoint_time: T, } -impl>, L: Deref> ProbabilisticScorer -where - L::Target: Logger, -{ +impl>, L: Logger> ProbabilisticScorer { /// Creates a new scorer using the given scoring parameters for sending payments from a node /// through a network graph. pub fn new( @@ -1593,9 +1587,9 @@ impl< { /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`. #[rustfmt::skip] - fn failed_at_channel( + fn failed_at_channel( &mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log - ) where Log::Target: Logger { + ) { let existing_max_msat = self.max_liquidity_msat(); if amount_msat < existing_max_msat { log_debug!(logger, "Setting max liquidity of {} from {} to {}", chan_descr, existing_max_msat, amount_msat); @@ -1610,9 +1604,9 @@ impl< /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream. #[rustfmt::skip] - fn failed_downstream( + fn failed_downstream( &mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log - ) where Log::Target: Logger { + ) { let existing_min_msat = self.min_liquidity_msat(); if amount_msat > existing_min_msat { log_debug!(logger, "Setting min liquidity of {} from {} to {}", existing_min_msat, chan_descr, amount_msat); @@ -1627,9 +1621,9 @@ impl< /// Adjusts the channel liquidity balance bounds when successfully routing `amount_msat`. 
#[rustfmt::skip] - fn successful(&mut self, + fn successful(&mut self, amount_msat: u64, duration_since_epoch: Duration, chan_descr: fmt::Arguments, logger: &Log - ) where Log::Target: Logger { + ) { let max_liquidity_msat = self.max_liquidity_msat().checked_sub(amount_msat).unwrap_or(0); log_debug!(logger, "Subtracting {} from max liquidity of {} (setting it to {})", amount_msat, chan_descr, max_liquidity_msat); self.set_max_liquidity_msat(max_liquidity_msat, duration_since_epoch); @@ -1669,10 +1663,7 @@ impl< } } -impl>, L: Deref> ScoreLookUp for ProbabilisticScorer -where - L::Target: Logger, -{ +impl>, L: Logger> ScoreLookUp for ProbabilisticScorer { type ScoreParams = ProbabilisticScoringFeeParameters; #[rustfmt::skip] fn channel_penalty_msat( @@ -1735,10 +1726,7 @@ where } } -impl>, L: Deref> ScoreUpdate for ProbabilisticScorer -where - L::Target: Logger, -{ +impl>, L: Logger> ScoreUpdate for ProbabilisticScorer { #[rustfmt::skip] fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration) { let amount_msat = path.final_value_msat(); @@ -1836,18 +1824,12 @@ where /// /// Note that only the locally acquired data is persisted. After a restart, the external scores will be lost and must be /// resupplied. -pub struct CombinedScorer>, L: Deref> -where - L::Target: Logger, -{ +pub struct CombinedScorer>, L: Logger> { local_only_scorer: ProbabilisticScorer, scorer: ProbabilisticScorer, } -impl> + Clone, L: Deref + Clone> CombinedScorer -where - L::Target: Logger, -{ +impl> + Clone, L: Logger + Clone> CombinedScorer { /// Create a new combined scorer with the given local scorer. 
#[rustfmt::skip] pub fn new(local_scorer: ProbabilisticScorer) -> Self { @@ -1889,10 +1871,7 @@ where } } -impl>, L: Deref> ScoreLookUp for CombinedScorer -where - L::Target: Logger, -{ +impl>, L: Logger> ScoreLookUp for CombinedScorer { type ScoreParams = ProbabilisticScoringFeeParameters; fn channel_penalty_msat( @@ -1903,10 +1882,7 @@ where } } -impl>, L: Deref> ScoreUpdate for CombinedScorer -where - L::Target: Logger, -{ +impl>, L: Logger> ScoreUpdate for CombinedScorer { fn payment_path_failed( &mut self, path: &Path, short_channel_id: u64, duration_since_epoch: Duration, ) { @@ -1935,20 +1911,14 @@ where } } -impl>, L: Deref> Writeable for CombinedScorer -where - L::Target: Logger, -{ +impl>, L: Logger> Writeable for CombinedScorer { fn write(&self, writer: &mut W) -> Result<(), crate::io::Error> { self.local_only_scorer.write(writer) } } #[cfg(c_bindings)] -impl>, L: Deref> Score for ProbabilisticScorer where - L::Target: Logger -{ -} +impl>, L: Logger> Score for ProbabilisticScorer {} #[cfg(feature = "std")] #[inline] @@ -2520,20 +2490,15 @@ mod bucketed_history { } } -impl>, L: Deref> Writeable for ProbabilisticScorer -where - L::Target: Logger, -{ +impl>, L: Logger> Writeable for ProbabilisticScorer { #[inline] fn write(&self, w: &mut W) -> Result<(), io::Error> { self.channel_liquidities.write(w) } } -impl>, L: Deref> +impl>, L: Logger> ReadableArgs<(ProbabilisticScoringDecayParameters, G, L)> for ProbabilisticScorer -where - L::Target: Logger, { #[inline] #[rustfmt::skip] diff --git a/lightning/src/routing/test_utils.rs b/lightning/src/routing/test_utils.rs index c5c35c9ce77..a433fa30c5b 100644 --- a/lightning/src/routing/test_utils.rs +++ b/lightning/src/routing/test_utils.rs @@ -10,7 +10,9 @@ // licenses. 
use crate::routing::gossip::{NetworkGraph, NodeAlias, P2PGossipSync}; +use crate::routing::utxo::UtxoResult; use crate::types::features::{ChannelFeatures, NodeFeatures}; +use crate::ln::chan_utils::make_funding_redeemscript; use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, MAX_VALUE_MSAT, NodeAnnouncement, RoutingMessageHandler, SocketAddress, UnsignedChannelAnnouncement, UnsignedChannelUpdate, UnsignedNodeAnnouncement}; use crate::util::test_utils; use crate::util::ser::Writeable; @@ -22,6 +24,7 @@ use bitcoin::hex::FromHex; use bitcoin::network::Network; use bitcoin::secp256k1::{PublicKey,SecretKey}; use bitcoin::secp256k1::{Secp256k1, All}; +use bitcoin::{Amount, TxOut}; #[allow(unused)] use crate::prelude::*; @@ -58,19 +61,34 @@ pub(crate) fn channel_announcement( } // Using the same keys for LN and BTC ids -pub(crate) fn add_channel( +pub(crate) fn add_channel_skipping_utxo_update( gossip_sync: &P2PGossipSync>>, Arc, Arc>, - secp_ctx: &Secp256k1, node_1_privkey: &SecretKey, node_2_privkey: &SecretKey, features: ChannelFeatures, short_channel_id: u64 + secp_ctx: &Secp256k1, node_1_privkey: &SecretKey, node_2_privkey: &SecretKey, features: ChannelFeatures, short_channel_id: u64, ) { let valid_announcement = channel_announcement(node_1_privkey, node_2_privkey, features, short_channel_id, secp_ctx); - let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); + + let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey); match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(res) => assert!(res), - _ => panic!() + Err(e) => panic!("{:?}", e), }; } +pub(crate) fn add_channel( + gossip_sync: &P2PGossipSync>>, Arc, Arc>, + secp_ctx: &Secp256k1, node_1_privkey: &SecretKey, node_2_privkey: &SecretKey, features: ChannelFeatures, short_channel_id: u64, +) { + gossip_sync.utxo_lookup.as_ref().map(|checker| { + let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey); + let 
node_2_pubkey = PublicKey::from_secret_key(&secp_ctx, &node_2_privkey); + let script_pubkey = make_funding_redeemscript(&node_1_pubkey, &node_2_pubkey).to_p2wsh(); + *checker.utxo_ret.lock().unwrap() = + UtxoResult::Sync(Ok(TxOut { value: Amount::from_sat(21_000_000_0000_0000), script_pubkey })); + }); + add_channel_skipping_utxo_update(gossip_sync, secp_ctx, node_1_privkey, node_2_privkey, features, short_channel_id); +} + pub(crate) fn add_or_update_node( gossip_sync: &P2PGossipSync>>, Arc, Arc>, secp_ctx: &Secp256k1, node_privkey: &SecretKey, features: NodeFeatures, timestamp: u32 @@ -197,18 +215,43 @@ pub(super) fn build_line_graph() -> ( (secp_ctx, network_graph, gossip_sync, chain_monitor, logger) } +pub(super) fn build_graph_with_gossip_validation() -> ( + Secp256k1, + sync::Arc>>, + P2PGossipSync>>, sync::Arc, sync::Arc>, + sync::Arc, + sync::Arc, +) { + do_build_graph(true) +} + pub(super) fn build_graph() -> ( Secp256k1, sync::Arc>>, P2PGossipSync>>, sync::Arc, sync::Arc>, sync::Arc, sync::Arc, +) { + do_build_graph(false) +} + +fn do_build_graph(with_validation: bool) -> ( + Secp256k1, + sync::Arc>>, + P2PGossipSync>>, sync::Arc, sync::Arc>, + sync::Arc, + sync::Arc, ) { let secp_ctx = Secp256k1::new(); let logger = Arc::new(test_utils::TestLogger::new()); let chain_monitor = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger))); - let gossip_sync = P2PGossipSync::new(Arc::clone(&network_graph), None, Arc::clone(&logger)); + let checker = if with_validation { + Some(Arc::clone(&chain_monitor)) + } else { + None + }; + let gossip_sync = P2PGossipSync::new(Arc::clone(&network_graph), checker, Arc::clone(&logger)); // Build network from our_id to node6: // // -1(1)2- node0 -1(3)2- diff --git a/lightning/src/routing/utxo.rs b/lightning/src/routing/utxo.rs index 4299dffb90f..466b9416f41 100644 --- a/lightning/src/routing/utxo.rs +++ b/lightning/src/routing/utxo.rs 
@@ -21,8 +21,9 @@ use bitcoin::hex::DisplayHex; use crate::ln::chan_utils::make_funding_redeemscript_from_slices; use crate::ln::msgs::{self, ErrorAction, LightningError, MessageSendEvent}; -use crate::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync}; +use crate::routing::gossip::{NetworkGraph, NodeId}; use crate::util::logger::{Level, Logger}; +use crate::util::wakers::Notifier; use crate::prelude::*; @@ -64,8 +65,23 @@ pub trait UtxoLookup { /// Returns an error if `chain_hash` is for a different chain or if such a transaction output is /// unknown. /// + /// An `async_completion_notifier` is provided which should be [`Notifier::notify`]ed upon + /// resolution of the [`UtxoFuture`] in case this method returns [`UtxoResult::Async`]. + /// /// [`short_channel_id`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#definition-of-short_channel_id - fn get_utxo(&self, chain_hash: &ChainHash, short_channel_id: u64) -> UtxoResult; + fn get_utxo( + &self, chain_hash: &ChainHash, short_channel_id: u64, + async_completion_notifier: Arc, + ) -> UtxoResult; +} + +impl> UtxoLookup for U { + fn get_utxo( + &self, chain_hash: &ChainHash, short_channel_id: u64, + async_completion_notifier: Arc, + ) -> UtxoResult { + self.deref().get_utxo(chain_hash, short_channel_id, async_completion_notifier) + } } enum ChannelAnnouncement { @@ -108,6 +124,7 @@ impl ChannelUpdate { } struct UtxoMessages { + notifier: Arc, complete: Option>, channel_announce: Option, latest_node_announce_a: Option, @@ -128,214 +145,63 @@ pub struct UtxoFuture { /// once we have a concrete resolution of a request. pub(crate) struct UtxoResolver(Result); impl UtxoLookup for UtxoResolver { - fn get_utxo(&self, _chain_hash: &ChainHash, _short_channel_id: u64) -> UtxoResult { + fn get_utxo(&self, _hash: &ChainHash, _scid: u64, _notifier: Arc) -> UtxoResult { UtxoResult::Sync(self.0.clone()) } } impl UtxoFuture { /// Builds a new future for later resolution. 
- #[rustfmt::skip] - pub fn new() -> Self { - Self { state: Arc::new(Mutex::new(UtxoMessages { - complete: None, - channel_announce: None, - latest_node_announce_a: None, - latest_node_announce_b: None, - latest_channel_update_a: None, - latest_channel_update_b: None, - }))} - } - - /// Resolves this future against the given `graph` and with the given `result`. - /// - /// This is identical to calling [`UtxoFuture::resolve`] with a dummy `gossip`, disabling - /// forwarding the validated gossip message onwards to peers. - /// - /// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order - /// to allow us to interact with peers again, you should call [`PeerManager::process_events`] - /// after this. - /// - /// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high - /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events - pub fn resolve_without_forwarding( - &self, graph: &NetworkGraph, result: Result, - ) where - L::Target: Logger, - { - self.do_resolve(graph, result); - } - - /// Resolves this future against the given `graph` and with the given `result`. - /// - /// The given `gossip` is used to broadcast any validated messages onwards to all peers which - /// have available buffer space. - /// - /// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order - /// to allow us to interact with peers again, you should call [`PeerManager::process_events`] - /// after this. 
- /// - /// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high - /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events - pub fn resolve< - L: Deref, - G: Deref>, - U: Deref, - GS: Deref>, - >( - &self, graph: &NetworkGraph, gossip: GS, result: Result, - ) where - L::Target: Logger, - U::Target: UtxoLookup, - { - let mut res = self.do_resolve(graph, result); - for msg_opt in res.iter_mut() { - if let Some(msg) = msg_opt.take() { - gossip.forward_gossip_msg(msg); - } + pub fn new(notifier: Arc) -> Self { + Self { + state: Arc::new(Mutex::new(UtxoMessages { + notifier, + complete: None, + channel_announce: None, + latest_node_announce_a: None, + latest_node_announce_b: None, + latest_channel_update_a: None, + latest_channel_update_b: None, + })), } } - #[rustfmt::skip] - fn do_resolve(&self, graph: &NetworkGraph, result: Result) - -> [Option; 5] where L::Target: Logger { - let (announcement, node_a, node_b, update_a, update_b) = { - let mut pending_checks = graph.pending_checks.internal.lock().unwrap(); - let mut async_messages = self.state.lock().unwrap(); - - if async_messages.channel_announce.is_none() { - // We raced returning to `check_channel_announcement` which hasn't updated - // `channel_announce` yet. That's okay, we can set the `complete` field which it will - // check once it gets control again. 
- async_messages.complete = Some(result); - return [None, None, None, None, None]; - } - - let announcement_msg = match async_messages.channel_announce.as_ref().unwrap() { - ChannelAnnouncement::Full(signed_msg) => &signed_msg.contents, - ChannelAnnouncement::Unsigned(msg) => &msg, - }; - - pending_checks.lookup_completed(announcement_msg, &Arc::downgrade(&self.state)); - - (async_messages.channel_announce.take().unwrap(), - async_messages.latest_node_announce_a.take(), - async_messages.latest_node_announce_b.take(), - async_messages.latest_channel_update_a.take(), - async_messages.latest_channel_update_b.take()) - }; - - let mut res = [None, None, None, None, None]; - let mut res_idx = 0; - - // Now that we've updated our internal state, pass the pending messages back through the - // network graph with a different `UtxoLookup` which will resolve immediately. - // Note that we ignore errors as we don't disconnect peers anyway, so there's nothing to do - // with them. - let resolver = UtxoResolver(result); - let (node_id_1, node_id_2) = match &announcement { - ChannelAnnouncement::Full(signed_msg) => (signed_msg.contents.node_id_1, signed_msg.contents.node_id_2), - ChannelAnnouncement::Unsigned(msg) => (msg.node_id_1, msg.node_id_2), - }; - match announcement { - ChannelAnnouncement::Full(signed_msg) => { - if graph.update_channel_from_announcement(&signed_msg, &Some(&resolver)).is_ok() { - res[res_idx] = Some(MessageSendEvent::BroadcastChannelAnnouncement { - msg: signed_msg, update_msg: None, - }); - res_idx += 1; - } - }, - ChannelAnnouncement::Unsigned(msg) => { - let _ = graph.update_channel_from_unsigned_announcement(&msg, &Some(&resolver)); - }, - } - - for announce in core::iter::once(node_a).chain(core::iter::once(node_b)) { - match announce { - Some(NodeAnnouncement::Full(signed_msg)) => { - if graph.update_node_from_announcement(&signed_msg).is_ok() { - res[res_idx] = Some(MessageSendEvent::BroadcastNodeAnnouncement { - msg: signed_msg, - }); - res_idx 
+= 1; - } - }, - Some(NodeAnnouncement::Unsigned(msg)) => { - let _ = graph.update_node_from_unsigned_announcement(&msg); - }, - None => {}, - } - } - - for update in core::iter::once(update_a).chain(core::iter::once(update_b)) { - match update { - Some(ChannelUpdate::Full(signed_msg)) => { - if graph.update_channel(&signed_msg).is_ok() { - res[res_idx] = Some(MessageSendEvent::BroadcastChannelUpdate { - msg: signed_msg, - node_id_1, - node_id_2, - }); - res_idx += 1; - } - }, - Some(ChannelUpdate::Unsigned(msg)) => { - let _ = graph.update_channel_unsigned(&msg); - }, - None => {}, - } - } - - res + /// Resolves this future with the given result. + pub fn resolve(&self, result: Result) { + let mut state = self.state.lock().unwrap(); + state.complete = Some(result); + state.notifier.notify(); } } struct PendingChecksContext { + pending_states: Vec>>, channels: HashMap>>, nodes: HashMap>>>, } -impl PendingChecksContext { - #[rustfmt::skip] - fn lookup_completed(&mut self, - msg: &msgs::UnsignedChannelAnnouncement, completed_state: &Weak> - ) { - if let hash_map::Entry::Occupied(e) = self.channels.entry(msg.short_channel_id) { - if Weak::ptr_eq(e.get(), &completed_state) { - e.remove(); - } - } - - if let hash_map::Entry::Occupied(mut e) = self.nodes.entry(msg.node_id_1) { - e.get_mut().retain(|elem| !Weak::ptr_eq(&elem, &completed_state)); - if e.get().is_empty() { e.remove(); } - } - if let hash_map::Entry::Occupied(mut e) = self.nodes.entry(msg.node_id_2) { - e.get_mut().retain(|elem| !Weak::ptr_eq(&elem, &completed_state)); - if e.get().is_empty() { e.remove(); } - } - } -} - /// A set of messages which are pending UTXO lookups for processing. 
pub(super) struct PendingChecks { internal: Mutex, + pub(super) completion_notifier: Arc, } impl PendingChecks { - #[rustfmt::skip] pub(super) fn new() -> Self { - PendingChecks { internal: Mutex::new(PendingChecksContext { - channels: new_hash_map(), nodes: new_hash_map(), - }) } + PendingChecks { + internal: Mutex::new(PendingChecksContext { + pending_states: Vec::new(), + channels: new_hash_map(), + nodes: new_hash_map(), + }), + completion_notifier: Arc::new(Notifier::new()), + } } /// Checks if there is a pending `channel_update` UTXO validation for the given channel, /// and, if so, stores the channel message for handling later and returns an `Err`. - #[rustfmt::skip] pub(super) fn check_hold_pending_channel_update( - &self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate> + &self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, ) -> Result<(), LightningError> { let mut pending_checks = self.internal.lock().unwrap(); if let hash_map::Entry::Occupied(e) = pending_checks.channels.entry(msg.short_channel_id) { @@ -344,25 +210,32 @@ impl PendingChecks { Some(msgs_ref) => { let mut messages = msgs_ref.lock().unwrap(); let latest_update = if is_from_a { - &mut messages.latest_channel_update_a - } else { - &mut messages.latest_channel_update_b - }; - if latest_update.is_none() || latest_update.as_ref().unwrap().timestamp() < msg.timestamp { + &mut messages.latest_channel_update_a + } else { + &mut messages.latest_channel_update_b + }; + if latest_update.is_none() + || latest_update.as_ref().unwrap().timestamp() < msg.timestamp + { // If the messages we got has a higher timestamp, just blindly assume the // signatures on the new message are correct and drop the old message. This // may cause us to end up dropping valid `channel_update`s if a peer is // malicious, but we should get the correct ones when the node updates them. 
- *latest_update = Some( - if let Some(msg) = full_msg { ChannelUpdate::Full(msg.clone()) } - else { ChannelUpdate::Unsigned(msg.clone()) }); + *latest_update = Some(if let Some(msg) = full_msg { + ChannelUpdate::Full(msg.clone()) + } else { + ChannelUpdate::Unsigned(msg.clone()) + }); } return Err(LightningError { - err: "Awaiting channel_announcement validation to accept channel_update".to_owned(), + err: "Awaiting channel_announcement validation to accept channel_update" + .to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip), }); }, - None => { e.remove(); }, + None => { + e.remove(); + }, } } Ok(()) @@ -370,45 +243,49 @@ impl PendingChecks { /// Checks if there is a pending `node_announcement` UTXO validation for a channel with the /// given node and, if so, stores the channel message for handling later and returns an `Err`. - #[rustfmt::skip] pub(super) fn check_hold_pending_node_announcement( - &self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement> + &self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>, ) -> Result<(), LightningError> { let mut pending_checks = self.internal.lock().unwrap(); if let hash_map::Entry::Occupied(mut e) = pending_checks.nodes.entry(msg.node_id) { let mut found_at_least_one_chan = false; - e.get_mut().retain(|node_msgs| { - match Weak::upgrade(&node_msgs) { - Some(chan_mtx) => { - let mut chan_msgs = chan_mtx.lock().unwrap(); - if let Some(chan_announce) = &chan_msgs.channel_announce { - let latest_announce = - if *chan_announce.node_id_1() == msg.node_id { - &mut chan_msgs.latest_node_announce_a - } else { - &mut chan_msgs.latest_node_announce_b - }; - if latest_announce.is_none() || - latest_announce.as_ref().unwrap().timestamp() < msg.timestamp - { - *latest_announce = Some( - if let Some(msg) = full_msg { NodeAnnouncement::Full(msg.clone()) } - else { NodeAnnouncement::Unsigned(msg.clone()) }); - } - found_at_least_one_chan = true; - true + 
e.get_mut().retain(|node_msgs| match Weak::upgrade(&node_msgs) { + Some(chan_mtx) => { + let mut chan_msgs = chan_mtx.lock().unwrap(); + if let Some(chan_announce) = &chan_msgs.channel_announce { + let latest_announce = if *chan_announce.node_id_1() == msg.node_id { + &mut chan_msgs.latest_node_announce_a } else { - debug_assert!(false, "channel_announce is set before struct is added to node map"); - false + &mut chan_msgs.latest_node_announce_b + }; + if latest_announce.is_none() + || latest_announce.as_ref().unwrap().timestamp() < msg.timestamp + { + *latest_announce = Some(if let Some(msg) = full_msg { + NodeAnnouncement::Full(msg.clone()) + } else { + NodeAnnouncement::Unsigned(msg.clone()) + }); } - }, - None => false, - } + found_at_least_one_chan = true; + true + } else { + debug_assert!( + false, + "channel_announce is set before struct is added to node map" + ); + false + } + }, + None => false, }); - if e.get().is_empty() { e.remove(); } + if e.get().is_empty() { + e.remove(); + } if found_at_least_one_chan { return Err(LightningError { - err: "Awaiting channel_announcement validation to accept node_announcement".to_owned(), + err: "Awaiting channel_announcement validation to accept node_announcement" + .to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip), }); } @@ -416,10 +293,10 @@ impl PendingChecks { Ok(()) } - #[rustfmt::skip] - fn check_replace_previous_entry(msg: &msgs::UnsignedChannelAnnouncement, - full_msg: Option<&msgs::ChannelAnnouncement>, replacement: Option>>, - pending_channels: &mut HashMap>> + fn check_replace_previous_entry( + msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, + replacement: Option>>, + pending_channels: &mut HashMap>>, ) -> Result<(), msgs::LightningError> { match pending_channels.entry(msg.short_channel_id) { hash_map::Entry::Occupied(mut e) => { @@ -431,8 +308,13 @@ impl PendingChecks { // This may be called with the mutex held on a different UtxoMessages // struct, 
however in that case we have a global lockorder of new messages // -> old messages, which makes this safe. - let pending_matches = match &pending_msgs.unsafe_well_ordered_double_lock_self().channel_announce { - Some(ChannelAnnouncement::Full(pending_msg)) => Some(pending_msg) == full_msg, + let pending_matches = match &pending_msgs + .unsafe_well_ordered_double_lock_self() + .channel_announce + { + Some(ChannelAnnouncement::Full(pending_msg)) => { + Some(pending_msg) == full_msg + }, Some(ChannelAnnouncement::Unsigned(pending_msg)) => pending_msg == msg, None => { // This shouldn't actually be reachable. We set the @@ -464,54 +346,63 @@ impl PendingChecks { // so just remove/replace it and move on. if let Some(item) = replacement { *e.get_mut() = item; - } else { e.remove(); } + } else { + e.remove(); + } }, } }, hash_map::Entry::Vacant(v) => { - if let Some(item) = replacement { v.insert(item); } + if let Some(item) = replacement { + v.insert(item); + } }, } Ok(()) } - #[rustfmt::skip] - pub(super) fn check_channel_announcement(&self, - utxo_lookup: &Option, msg: &msgs::UnsignedChannelAnnouncement, - full_msg: Option<&msgs::ChannelAnnouncement> - ) -> Result, msgs::LightningError> where U::Target: UtxoLookup { - let handle_result = |res| { - match res { - Ok(TxOut { value, script_pubkey }) => { - let expected_script = - make_funding_redeemscript_from_slices(msg.bitcoin_key_1.as_array(), msg.bitcoin_key_2.as_array()).to_p2wsh(); - if script_pubkey != expected_script { - return Err(LightningError{ - err: format!("Channel announcement key ({}) didn't match on-chain script ({})", - expected_script.to_hex_string(), script_pubkey.to_hex_string()), - action: ErrorAction::IgnoreError - }); - } - Ok(Some(value)) - }, - Err(UtxoLookupError::UnknownChain) => { - Err(LightningError { - err: format!("Channel announced on an unknown chain ({})", - msg.chain_hash.to_bytes().as_hex()), - action: ErrorAction::IgnoreError - }) - }, - Err(UtxoLookupError::UnknownTx) => { - 
Err(LightningError { - err: "Channel announced without corresponding UTXO entry".to_owned(), - action: ErrorAction::IgnoreError - }) - }, - } + pub(super) fn check_channel_announcement( + &self, utxo_lookup: &Option, msg: &msgs::UnsignedChannelAnnouncement, + full_msg: Option<&msgs::ChannelAnnouncement>, + ) -> Result, msgs::LightningError> { + let handle_result = |res| match res { + Ok(TxOut { value, script_pubkey }) => { + let expected_script = make_funding_redeemscript_from_slices( + msg.bitcoin_key_1.as_array(), + msg.bitcoin_key_2.as_array(), + ) + .to_p2wsh(); + if script_pubkey != expected_script { + return Err(LightningError { + err: format!( + "Channel announcement key ({}) didn't match on-chain script ({})", + expected_script.to_hex_string(), + script_pubkey.to_hex_string() + ), + action: ErrorAction::IgnoreError, + }); + } + Ok(Some(value)) + }, + Err(UtxoLookupError::UnknownChain) => Err(LightningError { + err: format!( + "Channel announced on an unknown chain ({})", + msg.chain_hash.to_bytes().as_hex() + ), + action: ErrorAction::IgnoreError, + }), + Err(UtxoLookupError::UnknownTx) => Err(LightningError { + err: "Channel announced without corresponding UTXO entry".to_owned(), + action: ErrorAction::IgnoreError, + }), }; - Self::check_replace_previous_entry(msg, full_msg, None, - &mut self.internal.lock().unwrap().channels)?; + Self::check_replace_previous_entry( + msg, + full_msg, + None, + &mut self.internal.lock().unwrap().channels, + )?; match utxo_lookup { &None => { @@ -519,7 +410,8 @@ impl PendingChecks { Ok(None) }, &Some(ref utxo_lookup) => { - match utxo_lookup.get_utxo(&msg.chain_hash, msg.short_channel_id) { + let notifier = Arc::clone(&self.completion_notifier); + match utxo_lookup.get_utxo(&msg.chain_hash, msg.short_channel_id, notifier) { UtxoResult::Sync(res) => handle_result(res), UtxoResult::Async(future) => { let mut pending_checks = self.internal.lock().unwrap(); @@ -529,15 +421,41 @@ impl PendingChecks { // handle the result 
in-line. handle_result(res) } else { - Self::check_replace_previous_entry(msg, full_msg, - Some(Arc::downgrade(&future.state)), &mut pending_checks.channels)?; - async_messages.channel_announce = Some( - if let Some(msg) = full_msg { ChannelAnnouncement::Full(msg.clone()) } - else { ChannelAnnouncement::Unsigned(msg.clone()) }); - pending_checks.nodes.entry(msg.node_id_1) - .or_default().push(Arc::downgrade(&future.state)); - pending_checks.nodes.entry(msg.node_id_2) - .or_default().push(Arc::downgrade(&future.state)); + // To avoid cases where we drop the resolved data before it can be + // collected by `check_resolved_futures`, we here track all pending + // states at least until the next call of `check_resolved_futures`. + let pending_states = &mut pending_checks.pending_states; + if pending_states + .iter() + .find(|s| Arc::ptr_eq(s, &future.state)) + .is_none() + { + // We're not already tracking the future state, keep the `Arc` + // around. + pending_states.push(Arc::clone(&future.state)); + } + + Self::check_replace_previous_entry( + msg, + full_msg, + Some(Arc::downgrade(&future.state)), + &mut pending_checks.channels, + )?; + async_messages.channel_announce = Some(if let Some(msg) = full_msg { + ChannelAnnouncement::Full(msg.clone()) + } else { + ChannelAnnouncement::Unsigned(msg.clone()) + }); + pending_checks + .nodes + .entry(msg.node_id_1) + .or_default() + .push(Arc::downgrade(&future.state)); + pending_checks + .nodes + .entry(msg.node_id_2) + .or_default() + .push(Arc::downgrade(&future.state)); Err(LightningError { err: "Channel being checked async".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Gossip), @@ -545,7 +463,7 @@ impl PendingChecks { } }, } - } + }, } } @@ -562,16 +480,13 @@ impl PendingChecks { /// Returns true if there are a large number of async checks pending and future /// `channel_announcement` messages should be delayed. 
Note that this is only a hint and /// messages already in-flight may still have to be handled for various reasons. - #[rustfmt::skip] pub(super) fn too_many_checks_pending(&self) -> bool { let mut pending_checks = self.internal.lock().unwrap(); if pending_checks.channels.len() > Self::MAX_PENDING_LOOKUPS { // If we have many channel checks pending, ensure we don't have any dangling checks // (i.e. checks where the user told us they'd call back but drop'd the `UtxoFuture` // instead) before we commit to applying backpressure. - pending_checks.channels.retain(|_, chan| { - Weak::upgrade(&chan).is_some() - }); + pending_checks.channels.retain(|_, chan| Weak::upgrade(&chan).is_some()); pending_checks.nodes.retain(|_, channels| { channels.retain(|chan| Weak::upgrade(&chan).is_some()); !channels.is_empty() @@ -581,6 +496,152 @@ impl PendingChecks { false } } + + fn resolve_single_future( + &self, graph: &NetworkGraph, entry: Arc>, + new_messages: &mut Vec, + ) { + let (announcement, result, announce_a, announce_b, update_a, update_b); + { + let mut state = entry.lock().unwrap(); + announcement = if let Some(announcement) = state.channel_announce.take() { + announcement + } else { + // We raced returning to `check_channel_announcement` which hasn't updated + // `channel_announce` yet. That's okay, we can set the `complete` field which it will + // check once it gets control again. + return; + }; + + result = if let Some(result) = state.complete.take() { + result + } else { + debug_assert!(false, "Future should have been resolved"); + return; + }; + + announce_a = state.latest_node_announce_a.take(); + announce_b = state.latest_node_announce_b.take(); + update_a = state.latest_channel_update_a.take(); + update_b = state.latest_channel_update_b.take(); + } + + // Now that we've updated our internal state, pass the pending messages back through the + // network graph with a different `UtxoLookup` which will resolve immediately. 
+ // Note that we ignore errors as we don't disconnect peers anyway, so there's nothing to do + // with them. + let resolver = UtxoResolver(result); + let (node_id_1, node_id_2) = match &announcement { + ChannelAnnouncement::Full(signed_msg) => { + (signed_msg.contents.node_id_1, signed_msg.contents.node_id_2) + }, + ChannelAnnouncement::Unsigned(msg) => (msg.node_id_1, msg.node_id_2), + }; + match announcement { + ChannelAnnouncement::Full(signed_msg) => { + if graph.update_channel_from_announcement(&signed_msg, &Some(&resolver)).is_ok() { + new_messages.push(MessageSendEvent::BroadcastChannelAnnouncement { + msg: signed_msg, + update_msg: None, + }); + } + }, + ChannelAnnouncement::Unsigned(msg) => { + let _ = graph.update_channel_from_unsigned_announcement(&msg, &Some(&resolver)); + }, + } + + for announce in [announce_a, announce_b] { + match announce { + Some(NodeAnnouncement::Full(signed_msg)) => { + if graph.update_node_from_announcement(&signed_msg).is_ok() { + new_messages + .push(MessageSendEvent::BroadcastNodeAnnouncement { msg: signed_msg }); + } + }, + Some(NodeAnnouncement::Unsigned(msg)) => { + let _ = graph.update_node_from_unsigned_announcement(&msg); + }, + None => {}, + } + } + + for update in [update_a, update_b] { + match update { + Some(ChannelUpdate::Full(signed_msg)) => { + if graph.update_channel(&signed_msg).is_ok() { + new_messages.push(MessageSendEvent::BroadcastChannelUpdate { + msg: signed_msg, + node_id_1, + node_id_2, + }); + } + }, + Some(ChannelUpdate::Unsigned(msg)) => { + let _ = graph.update_channel_unsigned(&msg); + }, + None => {}, + } + } + } + + pub(super) fn check_resolved_futures( + &self, graph: &NetworkGraph, + ) -> Vec { + let mut completed_states = Vec::new(); + { + let mut lck = self.internal.lock().unwrap(); + lck.pending_states.retain(|state| { + if state.lock().unwrap().complete.is_some() { + // We're done, collect the result and clean up. 
+ completed_states.push(Arc::clone(&state)); + false + } else { + if Arc::strong_count(state) == 1 { + // The future has been dropped. + false + } else { + // It's still inflight. + true + } + } + }); + lck.channels.retain(|_, state| { + if let Some(state) = state.upgrade() { + if state.lock().unwrap().complete.is_some() { + completed_states.push(state); + false + } else { + true + } + } else { + // The UtxoFuture has been dropped, drop the pending-lookup state. + false + } + }); + lck.nodes.retain(|_, lookups| { + lookups.retain(|state| { + if let Some(state) = state.upgrade() { + if state.lock().unwrap().complete.is_some() { + completed_states.push(state); + false + } else { + true + } + } else { + // The UtxoFuture has been dropped, drop the pending-lookup state. + false + } + }); + !lookups.is_empty() + }); + } + let mut res = Vec::with_capacity(completed_states.len() * 5); + for state in completed_states { + self.resolve_single_future(graph, state, &mut res); + } + res + } } #[cfg(test)] @@ -602,11 +663,17 @@ mod tests { (chain_source, network_graph) } - #[rustfmt::skip] - fn get_test_objects() -> (msgs::ChannelAnnouncement, TestChainSource, - NetworkGraph>, bitcoin::ScriptBuf, msgs::NodeAnnouncement, - msgs::NodeAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, msgs::ChannelUpdate) - { + fn get_test_objects() -> ( + msgs::ChannelAnnouncement, + TestChainSource, + NetworkGraph>, + bitcoin::ScriptBuf, + msgs::NodeAnnouncement, + msgs::NodeAnnouncement, + msgs::ChannelUpdate, + msgs::ChannelUpdate, + msgs::ChannelUpdate, + ) { let secp_ctx = Secp256k1::new(); let (chain_source, network_graph) = get_network(); @@ -614,203 +681,318 @@ mod tests { let good_script = get_channel_script(&secp_ctx); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); - let valid_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + let valid_announcement = 
+ get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); let node_a_announce = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx); let node_b_announce = get_signed_node_announcement(|_| {}, node_2_privkey, &secp_ctx); - // Note that we have to set the "direction" flag correctly on both messages - let chan_update_a = get_signed_channel_update(|msg| msg.channel_flags = 0, node_1_privkey, &secp_ctx); - let chan_update_b = get_signed_channel_update(|msg| msg.channel_flags = 1, node_2_privkey, &secp_ctx); - let chan_update_c = get_signed_channel_update(|msg| { - msg.channel_flags = 1; msg.timestamp += 1; }, node_2_privkey, &secp_ctx); - - (valid_announcement, chain_source, network_graph, good_script, node_a_announce, - node_b_announce, chan_update_a, chan_update_b, chan_update_c) + ( + valid_announcement, + chain_source, + network_graph, + good_script, + node_a_announce, + node_b_announce, + get_signed_channel_update(|msg| msg.channel_flags = 0, node_1_privkey, &secp_ctx), + get_signed_channel_update(|msg| msg.channel_flags = 1, node_2_privkey, &secp_ctx), + // Note that we have to set the "direction" flag correctly on both messages + get_signed_channel_update( + |msg| { + msg.channel_flags = 1; + msg.timestamp += 1; + }, + node_2_privkey, + &secp_ctx, + ), + ) } #[test] - #[rustfmt::skip] fn test_fast_async_lookup() { // Check that async lookups which resolve quicker than the future is returned to the // `get_utxo` call can read it still resolve properly. let (valid_announcement, chain_source, network_graph, good_script, ..) 
= get_test_objects(); - - let future = UtxoFuture::new(); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + let scid = valid_announcement.contents.short_channel_id; + + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); + future + .resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap(); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_some()); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap(); + assert!(network_graph.read_only().channels().get(&scid).is_some()); } #[test] - #[rustfmt::skip] fn test_async_lookup() { // Test a simple async lookup - let (valid_announcement, chain_source, network_graph, good_script, - node_a_announce, node_b_announce, ..) 
= get_test_objects(); - - let future = UtxoFuture::new(); + let ( + valid_announcement, + chain_source, + network_graph, + good_script, + node_a_announce, + node_b_announce, + .., + ) = get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; + let node_id_1 = valid_announcement.contents.node_id_1; + + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); - - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script })); - network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).unwrap(); - network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).unwrap(); - - assert!(network_graph.read_only().nodes().get(&valid_announcement.contents.node_id_1) - .unwrap().announcement_info.is_none()); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); + assert!(network_graph.read_only().channels().get(&scid).is_none()); + + future.resolve(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script })); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); + network_graph.read_only().channels().get(&scid).unwrap(); + network_graph.read_only().channels().get(&scid).unwrap(); + + #[rustfmt::skip] + let is_node_a_announced = network_graph.read_only().nodes().get(&node_id_1).unwrap() + .announcement_info.is_some(); + assert!(!is_node_a_announced); network_graph.update_node_from_announcement(&node_a_announce).unwrap(); 
network_graph.update_node_from_announcement(&node_b_announce).unwrap(); - assert!(network_graph.read_only().nodes().get(&valid_announcement.contents.node_id_1) - .unwrap().announcement_info.is_some()); + #[rustfmt::skip] + let is_node_a_announced = network_graph.read_only().nodes().get(&node_id_1).unwrap() + .announcement_info.is_some(); + assert!(is_node_a_announced); } #[test] - #[rustfmt::skip] fn test_invalid_async_lookup() { // Test an async lookup which returns an incorrect script let (valid_announcement, chain_source, network_graph, ..) = get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); - - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: bitcoin::ScriptBuf::new() })); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); + assert!(network_graph.read_only().channels().get(&scid).is_none()); + + let value = Amount::from_sat(1_000_000); + future.resolve(Ok(TxOut { value, script_pubkey: bitcoin::ScriptBuf::new() })); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); + assert!(network_graph.read_only().channels().get(&scid).is_none()); } #[test] - #[rustfmt::skip] fn test_failing_async_lookup() { // Test an async lookup which returns an 
error let (valid_announcement, chain_source, network_graph, ..) = get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); - - future.resolve_without_forwarding(&network_graph, Err(UtxoLookupError::UnknownTx)); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); + assert!(network_graph.read_only().channels().get(&scid).is_none()); + + future.resolve(Err(UtxoLookupError::UnknownTx)); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); + assert!(network_graph.read_only().channels().get(&scid).is_none()); } #[test] - #[rustfmt::skip] fn test_updates_async_lookup() { // Test async lookups will process pending channel_update/node_announcements once they // complete. - let (valid_announcement, chain_source, network_graph, good_script, node_a_announce, - node_b_announce, chan_update_a, chan_update_b, ..) 
= get_test_objects(); - - let future = UtxoFuture::new(); + let ( + valid_announcement, + chain_source, + network_graph, + good_script, + node_a_announce, + node_b_announce, + chan_update_a, + chan_update_b, + .., + ) = get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; + + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); + assert!(network_graph.read_only().channels().get(&scid).is_none()); assert_eq!( network_graph.update_node_from_announcement(&node_a_announce).unwrap_err().err, - "Awaiting channel_announcement validation to accept node_announcement"); + "Awaiting channel_announcement validation to accept node_announcement" + ); assert_eq!( network_graph.update_node_from_announcement(&node_b_announce).unwrap_err().err, - "Awaiting channel_announcement validation to accept node_announcement"); - - assert_eq!(network_graph.update_channel(&chan_update_a).unwrap_err().err, - "Awaiting channel_announcement validation to accept channel_update"); - assert_eq!(network_graph.update_channel(&chan_update_b).unwrap_err().err, - "Awaiting channel_announcement validation to accept channel_update"); + "Awaiting channel_announcement validation to accept node_announcement" + ); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); - - assert!(network_graph.read_only().channels() - 
.get(&valid_announcement.contents.short_channel_id).unwrap().one_to_two.is_some()); - assert!(network_graph.read_only().channels() - .get(&valid_announcement.contents.short_channel_id).unwrap().two_to_one.is_some()); - - assert!(network_graph.read_only().nodes().get(&valid_announcement.contents.node_id_1) - .unwrap().announcement_info.is_some()); - assert!(network_graph.read_only().nodes().get(&valid_announcement.contents.node_id_2) - .unwrap().announcement_info.is_some()); + assert_eq!( + network_graph.update_channel(&chan_update_a).unwrap_err().err, + "Awaiting channel_announcement validation to accept channel_update" + ); + assert_eq!( + network_graph.update_channel(&chan_update_b).unwrap_err().err, + "Awaiting channel_announcement validation to accept channel_update" + ); + + assert!(!notifier.notify_pending()); + future + .resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); + + assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_some()); + assert!(network_graph.read_only().channels().get(&scid).unwrap().two_to_one.is_some()); + + assert!(network_graph + .read_only() + .nodes() + .get(&valid_announcement.contents.node_id_1) + .unwrap() + .announcement_info + .is_some()); + assert!(network_graph + .read_only() + .nodes() + .get(&valid_announcement.contents.node_id_2) + .unwrap() + .announcement_info + .is_some()); } #[test] - #[rustfmt::skip] fn test_latest_update_async_lookup() { // Test async lookups will process the latest channel_update if two are received while // awaiting an async UTXO lookup. - let (valid_announcement, chain_source, network_graph, good_script, _, - _, chan_update_a, chan_update_b, chan_update_c, ..) 
= get_test_objects(); - - let future = UtxoFuture::new(); + let ( + valid_announcement, + chain_source, + network_graph, + good_script, + _, + _, + chan_update_a, + chan_update_b, + chan_update_c, + .., + ) = get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; + + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); - assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); + assert!(network_graph.read_only().channels().get(&scid).is_none()); - assert_eq!(network_graph.update_channel(&chan_update_a).unwrap_err().err, - "Awaiting channel_announcement validation to accept channel_update"); - assert_eq!(network_graph.update_channel(&chan_update_b).unwrap_err().err, - "Awaiting channel_announcement validation to accept channel_update"); - assert_eq!(network_graph.update_channel(&chan_update_c).unwrap_err().err, - "Awaiting channel_announcement validation to accept channel_update"); + assert_eq!( + network_graph.update_channel(&chan_update_a).unwrap_err().err, + "Awaiting channel_announcement validation to accept channel_update" + ); + assert_eq!( + network_graph.update_channel(&chan_update_b).unwrap_err().err, + "Awaiting channel_announcement validation to accept channel_update" + ); + assert_eq!( + network_graph.update_channel(&chan_update_c).unwrap_err().err, + "Awaiting channel_announcement validation to accept channel_update" + ); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + 
assert!(!notifier.notify_pending()); + future + .resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert_eq!(chan_update_a.contents.timestamp, chan_update_b.contents.timestamp); let graph_lock = network_graph.read_only(); - assert!(graph_lock.channels() - .get(&valid_announcement.contents.short_channel_id).as_ref().unwrap() - .one_to_two.as_ref().unwrap().last_update != - graph_lock.channels() - .get(&valid_announcement.contents.short_channel_id).as_ref().unwrap() - .two_to_one.as_ref().unwrap().last_update); + #[rustfmt::skip] + let one_to_two_update = + graph_lock.channels().get(&scid).as_ref().unwrap().one_to_two.as_ref().unwrap().last_update; + #[rustfmt::skip] + let two_to_one_update = + graph_lock.channels().get(&scid).as_ref().unwrap().two_to_one.as_ref().unwrap().last_update; + assert!(one_to_two_update != two_to_one_update); } #[test] - #[rustfmt::skip] fn test_no_double_lookups() { // Test that a pending async lookup will prevent a second async lookup from flying, but // only if the channel_announcement message is identical. let (valid_announcement, chain_source, network_graph, good_script, ..) 
= get_test_objects(); + let scid = valid_announcement.contents.short_channel_id; - let future = UtxoFuture::new(); + let notifier_a = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier_a)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 1); // If we make a second request with the same message, the call count doesn't increase... - let future_b = UtxoFuture::new(); + let notifier_b = Arc::new(Notifier::new()); + let future_b = UtxoFuture::new(Arc::clone(¬ifier_b)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future_b.clone()); assert_eq!( - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel announcement is already being checked"); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel announcement is already being checked" + ); assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 1); // But if we make a third request with a tweaked message, we should get a second call @@ -818,23 +1000,31 @@ mod tests { let secp_ctx = Secp256k1::new(); let replacement_pk_1 = &SecretKey::from_slice(&[99; 32]).unwrap(); let replacement_pk_2 = &SecretKey::from_slice(&[98; 32]).unwrap(); - let invalid_announcement = get_signed_channel_announcement(|_| {}, replacement_pk_1, replacement_pk_2, &secp_ctx); + let invalid_announcement = + get_signed_channel_announcement(|_| {}, replacement_pk_1, replacement_pk_2, &secp_ctx); assert_eq!( - 
network_graph.update_channel_from_announcement(&invalid_announcement, &Some(&chain_source)).unwrap_err().err, - "Channel being checked async"); + network_graph + .update_channel_from_announcement(&invalid_announcement, &Some(&chain_source)) + .unwrap_err() + .err, + "Channel being checked async" + ); assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 2); // Still, if we resolve the original future, the original channel will be accepted. - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); - assert!(!network_graph.read_only().channels() - .get(&valid_announcement.contents.short_channel_id).unwrap() - .announcement_message.as_ref().unwrap() - .contents.features.supports_unknown_test_feature()); + future + .resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(notifier_a.notify_pending()); + assert!(!notifier_b.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); + #[rustfmt::skip] + let is_test_feature_set = + network_graph.read_only().channels().get(&scid).unwrap().announcement_message + .as_ref().unwrap().contents.features.supports_unknown_test_feature(); + assert!(!is_test_feature_set); } #[test] - #[rustfmt::skip] fn test_checks_backpressure() { // Test that too_many_checks_pending returns true when there are many checks pending, and // returns false once they complete. @@ -842,7 +1032,8 @@ mod tests { let (chain_source, network_graph) = get_network(); // We cheat and use a single future for all the lookups to complete them all at once. 
- let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); @@ -850,23 +1041,32 @@ mod tests { for i in 0..PendingChecks::MAX_PENDING_LOOKUPS { let valid_announcement = get_signed_channel_announcement( - |msg| msg.short_channel_id += 1 + i as u64, node_1_privkey, node_2_privkey, &secp_ctx); - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err(); + |msg| msg.short_channel_id += 1 + i as u64, + node_1_privkey, + node_2_privkey, + &secp_ctx, + ); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err(); assert!(!network_graph.pending_checks.too_many_checks_pending()); } - let valid_announcement = get_signed_channel_announcement( - |_| {}, node_1_privkey, node_2_privkey, &secp_ctx); - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err(); + let valid_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err(); assert!(network_graph.pending_checks.too_many_checks_pending()); // Once the future completes the "too many checks" flag should reset. 
- future.resolve_without_forwarding(&network_graph, Err(UtxoLookupError::UnknownTx)); + future.resolve(Err(UtxoLookupError::UnknownTx)); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(!network_graph.pending_checks.too_many_checks_pending()); } #[test] - #[rustfmt::skip] fn test_checks_backpressure_drop() { // Test that too_many_checks_pending returns true when there are many checks pending, and // returns false if we drop some of the futures without completion. @@ -874,26 +1074,39 @@ mod tests { let (chain_source, network_graph) = get_network(); // We cheat and use a single future for all the lookups to complete them all at once. - *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(UtxoFuture::new()); + let notifier = Arc::new(Notifier::new()); + *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(UtxoFuture::new(notifier)); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); for i in 0..PendingChecks::MAX_PENDING_LOOKUPS { let valid_announcement = get_signed_channel_announcement( - |msg| msg.short_channel_id += 1 + i as u64, node_1_privkey, node_2_privkey, &secp_ctx); - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err(); + |msg| msg.short_channel_id += 1 + i as u64, + node_1_privkey, + node_2_privkey, + &secp_ctx, + ); + network_graph + .update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err(); assert!(!network_graph.pending_checks.too_many_checks_pending()); } - let valid_announcement = get_signed_channel_announcement( - |_| {}, node_1_privkey, node_2_privkey, &secp_ctx); - network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err(); + let valid_announcement = + get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx); + network_graph + 
.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)) + .unwrap_err(); assert!(network_graph.pending_checks.too_many_checks_pending()); // Once the future is drop'd (by resetting the `utxo_ret` value) the "too many checks" flag - // should reset to false. + // should not yet reset to false. *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)); + assert!(network_graph.pending_checks.too_many_checks_pending()); + + // .. but it should once we called check_resolved_futures clearing the `pending_states`. + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(!network_graph.pending_checks.too_many_checks_pending()); } } diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 77076a408e4..84bfbb902ea 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -58,7 +58,7 @@ use crate::ln::script::ShutdownScript; use crate::offers::invoice::UnsignedBolt12Invoice; use crate::types::features::ChannelTypeFeatures; use crate::types::payment::PaymentPreimage; -use crate::util::async_poll::AsyncResult; +use crate::util::async_poll::MaybeSend; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::transaction_utils; @@ -68,7 +68,9 @@ use crate::sign::ecdsa::EcdsaChannelSigner; #[cfg(taproot)] use crate::sign::taproot::TaprootChannelSigner; use crate::util::atomic_counter::AtomicCounter; + use core::convert::TryInto; +use core::future::Future; use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; #[cfg(taproot)] @@ -876,7 +878,20 @@ pub trait EntropySource { fn get_secure_random_bytes(&self) -> [u8; 32]; } +impl> EntropySource for E { + fn get_secure_random_bytes(&self) -> [u8; 32] { + self.deref().get_secure_random_bytes() + } +} + /// A trait that can handle cryptographic operations at the scope level of a node. 
+/// +/// Instantiations of this trait should generally be shared by reference across the lightning +/// node's components, e.g. the [`NodeSigner`]s provided to [`PeerManager`] and [`ChannelManager`], +/// etc. MUST all return the same value for a given input. +/// +/// [`PeerManager`]: crate::ln::peer_handler::PeerManager +/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager pub trait NodeSigner { /// Get the [`ExpandedKey`] which provides cryptographic material for various Lightning Network operations. /// @@ -990,6 +1005,42 @@ pub trait NodeSigner { fn sign_message(&self, msg: &[u8]) -> Result; } +impl> NodeSigner for N { + fn get_expanded_key(&self) -> ExpandedKey { + self.deref().get_expanded_key() + } + fn get_peer_storage_key(&self) -> PeerStorageKey { + self.deref().get_peer_storage_key() + } + fn get_receive_auth_key(&self) -> ReceiveAuthKey { + self.deref().get_receive_auth_key() + } + fn get_node_id(&self, recipient: Recipient) -> Result { + self.deref().get_node_id(recipient) + } + fn ecdh( + &self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>, + ) -> Result { + self.deref().ecdh(recipient, other_key, tweak) + } + fn sign_invoice( + &self, invoice: &RawBolt11Invoice, recipient: Recipient, + ) -> Result { + self.deref().sign_invoice(invoice, recipient) + } + fn sign_bolt12_invoice( + &self, invoice: &UnsignedBolt12Invoice, + ) -> Result { + self.deref().sign_bolt12_invoice(invoice) + } + fn sign_gossip_message(&self, msg: UnsignedGossipMessage) -> Result { + self.deref().sign_gossip_message(msg) + } + fn sign_message(&self, msg: &[u8]) -> Result { + self.deref().sign_message(msg) + } +} + /// A trait that describes a wallet capable of creating a spending [`Transaction`] from a set of /// [`SpendableOutputDescriptor`]s. 
pub trait OutputSpender { @@ -1012,6 +1063,23 @@ pub trait OutputSpender { ) -> Result; } +impl> OutputSpender for O { + fn spend_spendable_outputs( + &self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec, + change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32, + locktime: Option, secp_ctx: &Secp256k1, + ) -> Result { + self.deref().spend_spendable_outputs( + descriptors, + outputs, + change_destination_script, + feerate_sat_per_1000_weight, + locktime, + secp_ctx, + ) + } +} + // Primarily needed in doctests because of https://github.com/rust-lang/rust/issues/67295 /// A dynamic [`SignerProvider`] temporarily needed for doc tests. /// @@ -1031,6 +1099,13 @@ pub type DynSignerProvider = pub type DynSignerProvider = dyn SignerProvider; /// A trait that can return signer instances for individual channels. +/// +/// Instantiations of this trait should generally be shared by reference across the lightning +/// node's components. E.g., it would be unsafe to provide a different [`SignerProvider`] to +/// [`ChannelManager`] vs [`MonitorUpdatingPersister`]. +/// +/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager +/// [`MonitorUpdatingPersister`]: crate::util::persist::MonitorUpdatingPersister pub trait SignerProvider { /// A type which implements [`EcdsaChannelSigner`] which will be returned by [`Self::derive_channel_signer`]. 
type EcdsaSigner: EcdsaChannelSigner; @@ -1074,6 +1149,28 @@ pub trait SignerProvider { fn get_shutdown_scriptpubkey(&self) -> Result; } +impl> SignerProvider for SP { + type EcdsaSigner = T::EcdsaSigner; + #[cfg(taproot)] + type TaprootSigner = T::TaprootSigner; + + fn generate_channel_keys_id(&self, inbound: bool, user_channel_id: u128) -> [u8; 32] { + self.deref().generate_channel_keys_id(inbound, user_channel_id) + } + + fn derive_channel_signer(&self, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner { + self.deref().derive_channel_signer(channel_keys_id) + } + + fn get_destination_script(&self, channel_keys_id: [u8; 32]) -> Result { + self.deref().get_destination_script(channel_keys_id) + } + + fn get_shutdown_scriptpubkey(&self) -> Result { + self.deref().get_shutdown_scriptpubkey() + } +} + /// A helper trait that describes an on-chain wallet capable of returning a (change) destination /// script. /// @@ -1084,7 +1181,9 @@ pub trait ChangeDestinationSource { /// /// This method should return a different value each time it is called, to avoid linking /// on-chain funds controlled to the same user. - fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()>; + fn get_change_destination_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a; } /// A synchronous helper trait that describes an on-chain wallet capable of returning a (change) destination script. 
@@ -1119,9 +1218,11 @@ impl ChangeDestinationSource for ChangeDestinationSourceSyncWrapper where T::Target: ChangeDestinationSourceSync, { - fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()> { + fn get_change_destination_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a { let script = self.0.get_change_destination_script(); - Box::pin(async move { script }) + async move { script } } } diff --git a/lightning/src/sign/tx_builder.rs b/lightning/src/sign/tx_builder.rs index 74941ec8a87..27b8b1a9a2b 100644 --- a/lightning/src/sign/tx_builder.rs +++ b/lightning/src/sign/tx_builder.rs @@ -2,7 +2,6 @@ #![allow(dead_code)] use core::cmp; -use core::ops::Deref; use bitcoin::secp256k1::{self, PublicKey, Secp256k1}; @@ -169,14 +168,12 @@ pub(crate) trait TxBuilder { &self, is_outbound_from_holder: bool, value_to_self_after_htlcs: u64, value_to_remote_after_htlcs: u64, channel_type: &ChannelTypeFeatures, ) -> (u64, u64); - fn build_commitment_transaction( + fn build_commitment_transaction( &self, local: bool, commitment_number: u64, per_commitment_point: &PublicKey, channel_parameters: &ChannelTransactionParameters, secp_ctx: &Secp256k1, value_to_self_msat: u64, htlcs_in_tx: Vec, feerate_per_kw: u32, broadcaster_dust_limit_satoshis: u64, logger: &L, - ) -> (CommitmentTransaction, CommitmentStats) - where - L::Target: Logger; + ) -> (CommitmentTransaction, CommitmentStats); } pub(crate) struct SpecTxBuilder {} @@ -322,15 +319,12 @@ impl TxBuilder for SpecTxBuilder { (local_balance_before_fee_msat, remote_balance_before_fee_msat) } - fn build_commitment_transaction( + fn build_commitment_transaction( &self, local: bool, commitment_number: u64, per_commitment_point: &PublicKey, channel_parameters: &ChannelTransactionParameters, secp_ctx: &Secp256k1, value_to_self_msat: u64, mut htlcs_in_tx: Vec, feerate_per_kw: u32, broadcaster_dust_limit_satoshis: u64, logger: &L, - ) -> (CommitmentTransaction, CommitmentStats) - where - L::Target: 
Logger, - { + ) -> (CommitmentTransaction, CommitmentStats) { let mut local_htlc_total_msat = 0; let mut remote_htlc_total_msat = 0; let channel_type = &channel_parameters.channel_type_features; diff --git a/lightning/src/sign/type_resolver.rs b/lightning/src/sign/type_resolver.rs index a84886cdee0..405e346dda6 100644 --- a/lightning/src/sign/type_resolver.rs +++ b/lightning/src/sign/type_resolver.rs @@ -1,32 +1,21 @@ use crate::sign::{ChannelSigner, SignerProvider}; -use core::ops::Deref; -pub(crate) enum ChannelSignerType -where - SP::Target: SignerProvider, -{ +pub(crate) enum ChannelSignerType { // in practice, this will only ever be an EcdsaChannelSigner (specifically, Writeable) - Ecdsa(::EcdsaSigner), + Ecdsa(SP::EcdsaSigner), #[cfg(taproot)] #[allow(unused)] - Taproot(::TaprootSigner), + Taproot(SP::TaprootSigner), } #[cfg(test)] -impl std::fmt::Debug for ChannelSignerType -where - SP: Deref, - SP::Target: SignerProvider, -{ +impl std::fmt::Debug for ChannelSignerType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ChannelSignerType").finish() } } -impl ChannelSignerType -where - SP::Target: SignerProvider, -{ +impl ChannelSignerType { pub(crate) fn as_ref(&self) -> &dyn ChannelSigner { match self { ChannelSignerType::Ecdsa(ecs) => ecs, @@ -37,7 +26,7 @@ where } #[allow(unused)] - pub(crate) fn as_ecdsa(&self) -> Option<&::EcdsaSigner> { + pub(crate) fn as_ecdsa(&self) -> Option<&SP::EcdsaSigner> { match self { ChannelSignerType::Ecdsa(ecs) => Some(ecs), _ => None, @@ -45,9 +34,7 @@ where } #[allow(unused)] - pub(crate) fn as_mut_ecdsa( - &mut self, - ) -> Option<&mut ::EcdsaSigner> { + pub(crate) fn as_mut_ecdsa(&mut self) -> Option<&mut SP::EcdsaSigner> { match self { ChannelSignerType::Ecdsa(ecs) => Some(ecs), _ => None, diff --git a/lightning/src/util/anchor_channel_reserves.rs b/lightning/src/util/anchor_channel_reserves.rs index e50e103211f..8026af03d58 100644 --- a/lightning/src/util/anchor_channel_reserves.rs 
+++ b/lightning/src/util/anchor_channel_reserves.rs @@ -272,35 +272,20 @@ pub fn get_supportable_anchor_channels( pub fn can_support_additional_anchor_channel< AChannelManagerRef: Deref, ChannelSigner: EcdsaChannelSigner, - FilterRef: Deref, - BroadcasterRef: Deref, - EstimatorRef: Deref, - LoggerRef: Deref, + FI: Filter, + B: BroadcasterInterface, + FE: FeeEstimator, + L: Logger, PersistRef: Deref, - EntropySourceRef: Deref, - ChainMonitorRef: Deref< - Target = ChainMonitor< - ChannelSigner, - FilterRef, - BroadcasterRef, - EstimatorRef, - LoggerRef, - PersistRef, - EntropySourceRef, - >, - >, + ES: EntropySource, + ChainMonitorRef: Deref>, >( context: &AnchorChannelReserveContext, utxos: &[Utxo], a_channel_manager: AChannelManagerRef, chain_monitor: ChainMonitorRef, ) -> bool where AChannelManagerRef::Target: AChannelManager, - FilterRef::Target: Filter, - BroadcasterRef::Target: BroadcasterInterface, - EstimatorRef::Target: FeeEstimator, - LoggerRef::Target: Logger, PersistRef::Target: Persist, - EntropySourceRef::Target: EntropySource, { let mut anchor_channels = new_hash_set(); // Calculate the number of in-progress anchor channels by inspecting ChannelMonitors with balance. diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index eefa40d1055..57df5b26cb0 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -9,33 +9,106 @@ //! 
Some utilities to make working with the standard library's [`Future`]s easier -use alloc::boxed::Box; use alloc::vec::Vec; use core::future::Future; use core::marker::Unpin; use core::pin::Pin; use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; -pub(crate) enum ResultFuture>, E: Unpin> { +pub(crate) enum ResultFuture + Unpin, O> { Pending(F), - Ready(Result<(), E>), + Ready(O), } -pub(crate) struct MultiResultFuturePoller> + Unpin, E: Unpin> { - futures_state: Vec>, +pub(crate) struct TwoFutureJoiner< + AO, + BO, + AF: Future + Unpin, + BF: Future + Unpin, +> { + a: Option>, + b: Option>, } -impl> + Unpin, E: Unpin> MultiResultFuturePoller { - pub fn new(futures_state: Vec>) -> Self { +impl + Unpin, BF: Future + Unpin> + TwoFutureJoiner +{ + pub fn new(future_a: AF, future_b: BF) -> Self { + Self { a: Some(ResultFuture::Pending(future_a)), b: Some(ResultFuture::Pending(future_b)) } + } +} + +impl + Unpin, BF: Future + Unpin> Future + for TwoFutureJoiner +{ + type Output = (AO, BO); + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<(AO, BO)> { + let mut have_pending_futures = false; + // SAFETY: While we are pinned, we can't get direct access to our internal state because we + // aren't `Unpin`. However, we don't actually need the `Pin` - we only use it below on the + // `Future` in the `ResultFuture::Pending` case, and the `Future` is bound by `Unpin`. + // Thus, the `Pin` is not actually used, and its safe to bypass it and access the inner + // reference directly. + let state = unsafe { &mut self.get_unchecked_mut() }; + macro_rules! 
poll_future { + ($future: ident) => { + match state.$future { + Some(ResultFuture::Pending(ref mut fut)) => match Pin::new(fut).poll(cx) { + Poll::Ready(res) => { + state.$future = Some(ResultFuture::Ready(res)); + }, + Poll::Pending => { + have_pending_futures = true; + }, + }, + Some(ResultFuture::Ready(_)) => {}, + None => { + debug_assert!(false, "Future polled after Ready"); + return Poll::Pending; + }, + } + }; + } + poll_future!(a); + poll_future!(b); + + if have_pending_futures { + Poll::Pending + } else { + Poll::Ready(( + match state.a.take() { + Some(ResultFuture::Ready(a)) => a, + _ => unreachable!(), + }, + match state.b.take() { + Some(ResultFuture::Ready(b)) => b, + _ => unreachable!(), + }, + )) + } + } +} + +pub(crate) struct MultiResultFuturePoller + Unpin, O> { + futures_state: Vec>, +} + +impl + Unpin, O> MultiResultFuturePoller { + pub fn new(futures_state: Vec>) -> Self { Self { futures_state } } } -impl> + Unpin, E: Unpin> Future for MultiResultFuturePoller { - type Output = Vec>; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>> { +impl + Unpin, O> Future for MultiResultFuturePoller { + type Output = Vec; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut have_pending_futures = false; - let futures_state = &mut self.get_mut().futures_state; + // SAFETY: While we are pinned, we can't get direct access to `futures_state` because we + // aren't `Unpin`. However, we don't actually need the `Pin` - we only use it below on the + // `Future` in the `ResultFuture::Pending` case, and the `Future` is bound by `Unpin`. + // Thus, the `Pin` is not actually used, and its safe to bypass it and access the inner + // reference directly. 
+ let futures_state = unsafe { &mut self.get_unchecked_mut().futures_state }; for state in futures_state.iter_mut() { match state { ResultFuture::Pending(ref mut fut) => match Pin::new(fut).poll(cx) { @@ -92,17 +165,6 @@ pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } } -#[cfg(feature = "std")] -/// A type alias for a future that returns a result of type `T` or error `E`. -/// -/// This is not exported to bindings users as async is only supported in Rust. -pub type AsyncResult<'a, T, E> = Pin> + 'a + Send>>; -#[cfg(not(feature = "std"))] -/// A type alias for a future that returns a result of type `T` or error `E`. -/// -/// This is not exported to bindings users as async is only supported in Rust. -pub type AsyncResult<'a, T, E> = Pin> + 'a>>; - /// Marker trait to optionally implement `Sync` under std. /// /// This is not exported to bindings users as async is only supported in Rust. diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index dd1aaa40424..dd55d5c2130 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -10,7 +10,6 @@ //! Various user-configurable channel limits and settings which ChannelManager //! applies for you. -use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO; use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT}; #[cfg(fuzzing)] @@ -30,14 +29,14 @@ pub struct ChannelHandshakeConfig { /// both parties have exchanged `splice_locked`. /// /// A lower-bound of `1` is applied, requiring all channels to have a confirmed commitment - /// transaction before operation. If you wish to accept channels with zero confirmations, see - /// [`UserConfig::manually_accept_inbound_channels`] and + /// transaction before operation. 
If you wish to accept channels with zero confirmations, + /// manually accept them via [`Event::OpenChannelRequest`] using /// [`ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`]. /// /// Default value: `6` /// - /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel /// [`ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel_from_trusted_peer_0conf + /// [`Event::OpenChannelRequest`]: crate::events::Event::OpenChannelRequest pub minimum_depth: u32, /// Set to the number of blocks we require our counterparty to wait to claim their money (ie /// the number of blocks we have to punish our counterparty if they broadcast a revoked @@ -162,15 +161,13 @@ pub struct ChannelHandshakeConfig { /// will be treated as one million instead, although channel negotiations will /// fail in that case.) pub their_channel_reserve_proportional_millionths: u32, - /// If set, we attempt to negotiate the `anchors_zero_fee_htlc_tx`option for all future + /// If set, we attempt to negotiate the `anchors_zero_fee_htlc_tx` option for all future /// channels. This feature requires having a reserve of onchain funds readily available to bump /// transactions in the event of a channel force close to avoid the possibility of losing funds. /// - /// Note that if you wish accept inbound channels with anchor outputs, you must enable - /// [`UserConfig::manually_accept_inbound_channels`] and manually accept them with - /// [`ChannelManager::accept_inbound_channel`]. This is done to give you the chance to check - /// whether your reserve of onchain funds is enough to cover the fees for all existing and new - /// channels featuring anchor outputs in the event of a force close. 
+ /// Upon receiving an [`Event::OpenChannelRequest`] for a channel of this type, you must + /// check whether your reserve of onchain funds is enough to cover the fees for all existing + /// and new channels featuring anchor outputs in the event of a force close. /// /// If this option is set, channels may be created that will not be readable by LDK versions /// prior to 0.0.116, causing [`ChannelManager`]'s read method to return a @@ -180,11 +177,12 @@ pub struct ChannelHandshakeConfig { /// counterparties that do not support the `anchors_zero_fee_htlc_tx` option; we will simply /// fall back to a `static_remote_key` channel. /// - /// Default value: `false` (This value is likely to change to `true` in the future.) + /// Default value: `true` /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel /// [`DecodeError::InvalidValue`]: crate::ln::msgs::DecodeError::InvalidValue + /// [`Event::OpenChannelRequest`]: crate::events::Event::OpenChannelRequest pub negotiate_anchors_zero_fee_htlc_tx: bool, /// If set, we attempt to negotiate the `zero_fee_commitments` option for all future channels. @@ -198,11 +196,9 @@ pub struct ChannelHandshakeConfig { /// funds readily available to bump transactions in the event of a channel force close to avoid /// the possibility of losing funds. /// - /// Note that if you wish accept inbound channels with anchor outputs, you must enable - /// [`UserConfig::manually_accept_inbound_channels`] and manually accept them with - /// [`ChannelManager::accept_inbound_channel`]. This is done to give you the chance to check - /// whether your reserve of onchain funds is enough to cover the fees for all existing and new - /// channels featuring anchor outputs in the event of a force close. 
+ /// Upon receiving an [`Event::OpenChannelRequest`] for a channel of this type, you must + /// check whether your reserve of onchain funds is enough to cover the fees for all existing + /// and new channels featuring anchor outputs in the event of a force close. /// /// If this option is set, channels may be created that will not be readable by LDK versions /// prior to 0.2, causing [`ChannelManager`]'s read method to return a @@ -224,6 +220,7 @@ pub struct ChannelHandshakeConfig { /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel /// [`DecodeError::InvalidValue`]: crate::ln::msgs::DecodeError::InvalidValue + /// [`Event::OpenChannelRequest`]: crate::events::Event::OpenChannelRequest pub negotiate_anchor_zero_fee_commitments: bool, /// The maximum number of HTLCs in-flight from our counterparty towards us at the same time. @@ -254,7 +251,7 @@ impl Default for ChannelHandshakeConfig { announce_for_forwarding: false, commit_upfront_shutdown_pubkey: true, their_channel_reserve_proportional_millionths: 10_000, - negotiate_anchors_zero_fee_htlc_tx: false, + negotiate_anchors_zero_fee_htlc_tx: true, negotiate_anchor_zero_fee_commitments: false, our_max_accepted_htlcs: 50, } @@ -302,11 +299,6 @@ pub struct ChannelHandshakeLimits { /// Default value: `1000` /// (Minimum of [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]) pub min_funding_satoshis: u64, - /// Maximum allowed satoshis when a channel is funded. This is supplied by the sender and so - /// only applies to inbound channels. - /// - /// Default value: `2^24 - 1` - pub max_funding_satoshis: u64, /// The remote node sets a limit on the minimum size of HTLCs we can send to them. This allows /// you to limit the maximum minimum-size they can require. 
/// @@ -376,7 +368,6 @@ impl Default for ChannelHandshakeLimits { fn default() -> Self { ChannelHandshakeLimits { min_funding_satoshis: 1000, - max_funding_satoshis: MAX_FUNDING_SATOSHIS_NO_WUMBO, max_htlc_minimum_msat: u64::MAX, min_max_htlc_value_in_flight_msat: 0, max_channel_reserve_satoshis: u64::MAX, @@ -397,7 +388,6 @@ impl Readable for ChannelHandshakeLimits { fn read(reader: &mut R) -> Result { Ok(Self { min_funding_satoshis: Readable::read(reader)?, - max_funding_satoshis: Readable::read(reader)?, max_htlc_minimum_msat: Readable::read(reader)?, min_max_htlc_value_in_flight_msat: Readable::read(reader)?, max_channel_reserve_satoshis: Readable::read(reader)?, @@ -855,6 +845,111 @@ impl crate::util::ser::Readable for LegacyChannelConfig { } } +/// Flags which can be set on [`UserConfig::htlc_interception_flags`]. Each flag selects some set +/// of HTLCs which are forwarded across this node to be intercepted instead, generating an +/// [`Event::HTLCIntercepted`] instead of automatically forwarding the HTLC and allowing it to be +/// forwarded or rejected manually. +/// +/// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum HTLCInterceptionFlags { + /// If this flag is set, LDK will intercept HTLCs that are attempting to be forwarded over fake + /// short channel ids generated via [`ChannelManager::get_intercept_scid`]. This allows you to + /// only intercept HTLCs which are specifically marked for interception by the invoice being + /// paid. + /// + /// Note that because LDK is not aware of which channel the HTLC will be forwarded over at the + /// time of interception, only basic checks to ensure the fee the HTLC intends to pay is not + /// negative and a minimum CLTV delta between the incoming and outgoing HTLC edge are performed + /// before the [`Event::HTLCIntercepted`] is generated. You must validate the fee and CLTV + /// delta meets your requirements before forwarding the HTLC. 
+ /// + /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToInterceptSCIDs = 1 << 0, + /// If this flag is set, any attempts to forward a payment to a private channel while the + /// channel counterparty is offline will instead generate an [`Event::HTLCIntercepted`] which + /// must be handled the same as any other intercepted HTLC. + /// + /// This is useful for LSPs that may need to wake the recipient node (e.g. via a mobile push + /// notification). Note that in this case you must ensure that you set a quick timeout to fail + /// the HTLC if the recipient node fails to come online (e.g. within 10 seconds). + /// + /// Before interception, the HTLC is validated against the forwarding config of the outbound + /// channel to ensure it pays sufficient fee and meets the + /// [`ChannelConfig::cltv_expiry_delta`]. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToOfflinePrivateChannels = 1 << 1, + /// If this flag is set, any attempts to forward a payment to a private channel while the + /// channel counterparty is online will instead generate an [`Event::HTLCIntercepted`] which + /// must be handled the same as any other intercepted HTLC. + /// + /// This is the complement to [`Self::ToOfflinePrivateChannels`] and, together, they allow + /// intercepting all HTLCs destined for private channels. This may be useful for LSPs that wish + /// to take an additional fee paid by the recipient on all forwards to clients. + /// + /// Before interception, the HTLC is validated against the forwarding config of the outbound + /// channel to ensure it pays sufficient fee and meets the + /// [`ChannelConfig::cltv_expiry_delta`]. 
+ /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToOnlinePrivateChannels = 1 << 2, + /// If this flag is set, any attempts to forward a payment to a publicly announced channel will + /// instead generate an [`Event::HTLCIntercepted`] which must be handled the same as any other + /// intercepted HTLC. + /// + /// Before interception, the HTLC is validated against the forwarding config of the outbound + /// channel to ensure it pays sufficient fee and meets the + /// [`ChannelConfig::cltv_expiry_delta`]. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToPublicChannels = 1 << 3, + /// If these flags are set, any attempts to forward a payment to a channel of ours or a fake + /// short channel id generated via [`ChannelManager::get_intercept_scid`] will instead generate + /// an [`Event::HTLCIntercepted`] which must be handled the same as any other intercepted HTLC. + /// + /// In the case of intercept SCIDs, only basic checks to ensure the fee the HTLC intends to pay + /// is not negative and a minimum CLTV delta between the incoming and outgoing HTLC edge are + /// performed before the [`Event::HTLCIntercepted`] is generated. You must validate the fee and + /// CLTV delta meets your requirements before forwarding the HTLC. + /// + /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToAllKnownSCIDs = Self::ToInterceptSCIDs as isize + | Self::ToOfflinePrivateChannels as isize + | Self::ToOnlinePrivateChannels as isize + | Self::ToPublicChannels as isize, + /// If this flag is set, any attempts to forward a payment to an unknown short channel id will + /// instead generate an [`Event::HTLCIntercepted`] which must be handled the same as any other + /// intercepted HTLC. 
+ /// + /// Note that because LDK is not aware of which channel the HTLC will be forwarded over at the + /// time of interception, only basic checks to ensure the fee the HTLC intends to pay is not + /// negative and a minimum CLTV delta between the incoming and outgoing HTLC edge are performed + /// before the [`Event::HTLCIntercepted`] is generated. You must validate the fee and CLTV + /// delta meets your requirements before forwarding the HTLC. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToUnknownSCIDs = 1 << 4, + /// If these flags are set, all HTLCs being forwarded over this node will instead generate an + /// [`Event::HTLCIntercepted`] which must be handled the same as any other intercepted HTLC. + /// + /// In the case of intercept or unknown SCIDs, only basic checks to ensure the fee the HTLC + /// intends to pay is not negative and a minimum CLTV delta between the incoming and outgoing + /// HTLC edge are performed before the [`Event::HTLCIntercepted`] is generated. You must + /// validate the fee and CLTV delta meets your requirements before forwarding the HTLC. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + AllValidHTLCs = Self::ToAllKnownSCIDs as isize | Self::ToUnknownSCIDs as isize, +} + +impl Into for HTLCInterceptionFlags { + fn into(self) -> u8 { + self as u8 + } +} + /// Top-level config which holds ChannelHandshakeLimits and ChannelConfig. /// /// `Default::default()` provides sane defaults for most configurations @@ -893,31 +988,21 @@ pub struct UserConfig { /// /// Default value: `true` pub accept_inbound_channels: bool, - /// If this is set to `true`, the user needs to manually accept inbound requests to open a new - /// channel. + /// Flags consisting of OR'd values from [`HTLCInterceptionFlags`] which describe HTLCs + /// forwarded over this node to intercept. 
Any HTLCs which are intercepted will generate an + /// [`Event::HTLCIntercepted`] event which must be handled to forward or fail the HTLC. /// - /// When set to `true`, [`Event::OpenChannelRequest`] will be triggered once a request to open a - /// new inbound channel is received through a [`msgs::OpenChannel`] message. In that case, a - /// [`msgs::AcceptChannel`] message will not be sent back to the counterparty node unless the - /// user explicitly chooses to accept the request. + /// Do NOT hold on to intercepted HTLCs for more than a few seconds, they must always be + /// forwarded or failed nearly immediately to avoid performing accidental denial of service + /// attacks against other lightning nodes and being punished appropriately by other nodes. /// - /// Default value: `false` - /// - /// [`Event::OpenChannelRequest`]: crate::events::Event::OpenChannelRequest - /// [`msgs::OpenChannel`]: crate::ln::msgs::OpenChannel - /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - pub manually_accept_inbound_channels: bool, - /// If this is set to `true`, LDK will intercept HTLCs that are attempting to be forwarded over - /// fake short channel ids generated via [`ChannelManager::get_intercept_scid`]. Upon HTLC - /// intercept, LDK will generate an [`Event::HTLCIntercepted`] which MUST be handled by the user. + /// To ensure efficiency and reliable HTLC latency you should ensure you only intercept types + /// of HTLCs which you need to manually forward or reject. /// - /// Setting this to `true` may break backwards compatibility with LDK versions < 0.0.113. + /// Default value: `0` (indicating no HTLCs will be intercepted). 
/// - /// Default value: `false` - /// - /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted - pub accept_intercept_htlcs: bool, + pub htlc_interception_flags: u8, /// If this is set to `true`, the user needs to manually pay [`Bolt12Invoice`]s when received. /// /// When set to `true`, [`Event::InvoiceReceived`] will be generated for each received @@ -983,8 +1068,7 @@ impl Default for UserConfig { channel_config: ChannelConfig::default(), accept_forwards_to_priv_channels: false, accept_inbound_channels: true, - manually_accept_inbound_channels: false, - accept_intercept_htlcs: false, + htlc_interception_flags: 0, manually_handle_bolt12_invoices: false, enable_dual_funded_channels: false, enable_htlc_hold: false, @@ -1006,8 +1090,7 @@ impl Readable for UserConfig { channel_config: Readable::read(reader)?, accept_forwards_to_priv_channels: Readable::read(reader)?, accept_inbound_channels: Readable::read(reader)?, - manually_accept_inbound_channels: Readable::read(reader)?, - accept_intercept_htlcs: Readable::read(reader)?, + htlc_interception_flags: Readable::read(reader)?, manually_handle_bolt12_invoices: Readable::read(reader)?, enable_dual_funded_channels: Readable::read(reader)?, hold_outbound_htlcs_at_next_hop: Readable::read(reader)?, diff --git a/lightning/src/util/errors.rs b/lightning/src/util/errors.rs index eaaf0130ca2..cd72d60327f 100644 --- a/lightning/src/util/errors.rs +++ b/lightning/src/util/errors.rs @@ -9,7 +9,10 @@ //! Error types live here. 
+use bitcoin::secp256k1::PublicKey; + use crate::ln::script::ShutdownScript; +use crate::ln::types::ChannelId; #[allow(unused_imports)] use crate::prelude::*; @@ -90,6 +93,28 @@ impl fmt::Debug for APIError { } } +impl APIError { + pub(crate) fn no_such_peer(counterparty_node_id: &PublicKey) -> Self { + Self::ChannelUnavailable { + err: format!( + "No such peer for the passed counterparty_node_id {}", + counterparty_node_id + ), + } + } + + pub(crate) fn no_such_channel_for_peer( + channel_id: &ChannelId, counterparty_node_id: &PublicKey, + ) -> Self { + Self::ChannelUnavailable { + err: format!( + "No such channel_id {} for the passed counterparty_node_id {}", + channel_id, counterparty_node_id + ), + } + } +} + impl_writeable_tlv_based_enum_upgradable!(APIError, (0, APIMisuseError) => { (0, err, required), }, (2, FeeRateTooHigh) => { diff --git a/lightning/src/util/hash_tables.rs b/lightning/src/util/hash_tables.rs index 00341d57b45..b6555975191 100644 --- a/lightning/src/util/hash_tables.rs +++ b/lightning/src/util/hash_tables.rs @@ -6,10 +6,75 @@ pub use hashbrown::hash_map; mod hashbrown_tables { - #[cfg(feature = "std")] + #[cfg(all(feature = "std", not(test)))] mod hasher { pub use std::collections::hash_map::RandomState; } + #[cfg(all(feature = "std", test))] + mod hasher { + #![allow(deprecated)] // hash::SipHasher was deprecated in favor of something only in std. + use core::hash::{BuildHasher, Hasher}; + + /// A [`BuildHasher`] for tests that supports deterministic behavior via environment variable. + /// + /// When `LDK_TEST_DETERMINISTIC_HASHES` is set, uses fixed keys for deterministic iteration. + /// Otherwise, delegates to std's RandomState for random hashing. 
+ #[derive(Clone)] + pub enum RandomState { + Std(std::collections::hash_map::RandomState), + Deterministic, + } + + impl RandomState { + pub fn new() -> RandomState { + if std::env::var("LDK_TEST_DETERMINISTIC_HASHES").map(|v| v == "1").unwrap_or(false) + { + RandomState::Deterministic + } else { + RandomState::Std(std::collections::hash_map::RandomState::new()) + } + } + } + + impl Default for RandomState { + fn default() -> RandomState { + RandomState::new() + } + } + + /// A hasher wrapper that delegates to either std's DefaultHasher or a deterministic SipHasher. + pub enum RandomStateHasher { + Std(std::collections::hash_map::DefaultHasher), + Deterministic(core::hash::SipHasher), + } + + impl Hasher for RandomStateHasher { + fn finish(&self) -> u64 { + match self { + RandomStateHasher::Std(h) => h.finish(), + RandomStateHasher::Deterministic(h) => h.finish(), + } + } + fn write(&mut self, bytes: &[u8]) { + match self { + RandomStateHasher::Std(h) => h.write(bytes), + RandomStateHasher::Deterministic(h) => h.write(bytes), + } + } + } + + impl BuildHasher for RandomState { + type Hasher = RandomStateHasher; + fn build_hasher(&self) -> RandomStateHasher { + match self { + RandomState::Std(s) => RandomStateHasher::Std(s.build_hasher()), + RandomState::Deterministic => { + RandomStateHasher::Deterministic(core::hash::SipHasher::new_with_keys(0, 0)) + }, + } + } + } + } #[cfg(not(feature = "std"))] mod hasher { #![allow(deprecated)] // hash::SipHasher was deprecated in favor of something only in std. 
diff --git a/lightning/src/util/logger.rs b/lightning/src/util/logger.rs index b49cd32c131..c8b6715ae7c 100644 --- a/lightning/src/util/logger.rs +++ b/lightning/src/util/logger.rs @@ -21,6 +21,7 @@ use core::fmt::Display; use core::fmt::Write; use core::ops::Deref; +use crate::ln::channelmanager::PaymentId; use crate::ln::types::ChannelId; #[cfg(c_bindings)] use crate::prelude::*; // Needed for String @@ -124,12 +125,18 @@ pub struct Record<$($args)?> { pub file: &'static str, /// The line containing the message. pub line: u32, - /// The payment hash. Since payment_hash is not repeated in the message body, include it in the log output so - /// entries remain clear. + /// The payment hash. /// - /// Note that this is only filled in for logs pertaining to a specific payment, and will be - /// `None` for logs which are not directly related to a payment. + /// Since payment_hash is generally not repeated in the message body, you should ensure you log + /// it so that entries remain clear. + /// + /// Note that payments don't always have a [`PaymentHash`] immediately - when paying BOLT 12 + /// offers the [`PaymentHash`] is only selected a ways into the payment process. Thus, when + /// searching your logs for specific payments you should also search for the relevant + /// [`Self::payment_id`]. pub payment_hash: Option, + /// The payment id if the log pertained to a payment with an ID. 
+ pub payment_id: Option, } impl<$($args)?> Record<$($args)?> { @@ -138,14 +145,13 @@ impl<$($args)?> Record<$($args)?> { /// This is not exported to bindings users as fmt can't be used in C #[inline] pub fn new<$($nonstruct_args)?>( - level: Level, peer_id: Option, channel_id: Option, - args: fmt::Arguments<'a>, module_path: &'static str, file: &'static str, line: u32, - payment_hash: Option + level: Level, args: fmt::Arguments<'a>, module_path: &'static str, file: &'static str, + line: u32, ) -> Record<$($args)?> { Record { level, - peer_id, - channel_id, + peer_id: None, + channel_id: None, #[cfg(not(c_bindings))] args, #[cfg(c_bindings)] @@ -153,7 +159,8 @@ impl<$($args)?> Record<$($args)?> { module_path, file, line, - payment_hash, + payment_hash: None, + payment_id: None, } } } @@ -287,52 +294,58 @@ pub trait Logger { fn log(&self, record: Record); } +impl> Logger for L { + fn log(&self, record: Record) { + self.deref().log(record) + } +} + /// Adds relevant context to a [`Record`] before passing it to the wrapped [`Logger`]. /// /// This is not exported to bindings users as lifetimes are problematic and there's little reason /// for this to be used downstream anyway. -pub struct WithContext<'a, L: Deref> -where - L::Target: Logger, -{ - /// The logger to delegate to after adding context to the record. +pub struct WithContext<'a, L: Logger> { logger: &'a L, - /// The node id of the peer pertaining to the logged record. peer_id: Option, - /// The channel id of the channel pertaining to the logged record. channel_id: Option, - /// The payment hash of the payment pertaining to the logged record. 
payment_hash: Option, + payment_id: Option, } -impl<'a, L: Deref> Logger for WithContext<'a, L> -where - L::Target: Logger, -{ +impl<'a, L: Logger> Logger for WithContext<'a, L> { fn log(&self, mut record: Record) { - if self.peer_id.is_some() { + if self.peer_id.is_some() && record.peer_id.is_none() { record.peer_id = self.peer_id }; - if self.channel_id.is_some() { + if self.channel_id.is_some() && record.channel_id.is_none() { record.channel_id = self.channel_id; } - if self.payment_hash.is_some() { + if self.payment_hash.is_some() && record.payment_hash.is_none() { record.payment_hash = self.payment_hash; } + if self.payment_id.is_some() && record.payment_id.is_none() { + record.payment_id = self.payment_id; + } self.logger.log(record) } } -impl<'a, L: Deref> WithContext<'a, L> -where - L::Target: Logger, -{ +impl<'a, L: Logger> WithContext<'a, L> { /// Wraps the given logger, providing additional context to any logged records. pub fn from( logger: &'a L, peer_id: Option, channel_id: Option, payment_hash: Option, ) -> Self { - WithContext { logger, peer_id, channel_id, payment_hash } + WithContext { logger, peer_id, channel_id, payment_hash, payment_id: None } + } + + /// Wraps the given logger, providing additional context to any logged records. + pub fn for_payment( + logger: &'a L, peer_id: Option, channel_id: Option, + payment_hash: Option, payment_id: PaymentId, + ) -> Self { + let payment_id = Some(payment_id); + WithContext { logger, peer_id, channel_id, payment_hash, payment_id } } } diff --git a/lightning/src/util/macro_logger.rs b/lightning/src/util/macro_logger.rs index ec9eb14ba38..12f4f67962e 100644 --- a/lightning/src/util/macro_logger.rs +++ b/lightning/src/util/macro_logger.rs @@ -175,7 +175,7 @@ macro_rules! log_spendable { #[macro_export] macro_rules! 
log_given_level { ($logger: expr, $lvl:expr, $($arg:tt)+) => ( - $logger.log($crate::util::logger::Record::new($lvl, None, None, format_args!($($arg)+), module_path!(), file!(), line!(), None)) + $logger.log($crate::util::logger::Record::new($lvl, format_args!($($arg)+), module_path!(), file!(), line!())) ); } diff --git a/lightning/src/util/native_async.rs b/lightning/src/util/native_async.rs index 886146e976d..0c380f2b1d1 100644 --- a/lightning/src/util/native_async.rs +++ b/lightning/src/util/native_async.rs @@ -8,23 +8,44 @@ //! environment. #[cfg(all(test, feature = "std"))] -use crate::sync::Mutex; +use crate::sync::{Arc, Mutex}; use crate::util::async_poll::{MaybeSend, MaybeSync}; +#[cfg(all(test, not(feature = "std")))] +use alloc::rc::Rc; + #[cfg(all(test, not(feature = "std")))] use core::cell::RefCell; +#[cfg(test)] +use core::convert::Infallible; use core::future::Future; #[cfg(test)] use core::pin::Pin; +#[cfg(test)] +use core::task::{Context, Poll}; -/// A generic trait which is able to spawn futures in the background. +/// A generic trait which is able to spawn futures to be polled in the background. +/// +/// When the spawned future completes, the returned [`Self::SpawnedFutureResult`] should resolve +/// with the output of the spawned future. +/// +/// Spawned futures must be polled independently in the background even if the returned +/// [`Self::SpawnedFutureResult`] is dropped without being polled. This matches the semantics of +/// `tokio::spawn`. /// /// This is not exported to bindings users as async is only supported in Rust. pub trait FutureSpawner: MaybeSend + MaybeSync + 'static { + /// The error type of [`Self::SpawnedFutureResult`]. This can be used to indicate that the + /// spawned future was cancelled or panicked. + type E; + /// The result of [`Self::spawn`], a future which completes when the spawned future completes. + type SpawnedFutureResult: Future> + Unpin; /// Spawns the given future as a background task. 
/// /// This method MUST NOT block on the given future immediately. - fn spawn + MaybeSend + 'static>(&self, future: T); + fn spawn + MaybeSend + 'static>( + &self, future: T, + ) -> Self::SpawnedFutureResult; } #[cfg(test)] @@ -39,6 +60,77 @@ pub(crate) struct FutureQueue(Mutex>>>); #[cfg(all(test, not(feature = "std")))] pub(crate) struct FutureQueue(RefCell>>>); +/// A simple future which can be completed later. Used to implement [`FutureQueue`]. +#[cfg(all(test, feature = "std"))] +pub struct FutureQueueCompletion(Arc>>); +#[cfg(all(test, not(feature = "std")))] +pub struct FutureQueueCompletion(Rc>>); + +#[cfg(all(test, feature = "std"))] +impl FutureQueueCompletion { + fn new() -> Self { + Self(Arc::new(Mutex::new(None))) + } + + fn complete(&self, o: O) { + *self.0.lock().unwrap() = Some(o); + } +} + +#[cfg(all(test, feature = "std"))] +impl Clone for FutureQueueCompletion { + fn clone(&self) -> Self { + #[cfg(all(test, feature = "std"))] + { + Self(Arc::clone(&self.0)) + } + #[cfg(all(test, not(feature = "std")))] + { + Self(Rc::clone(&self.0)) + } + } +} + +#[cfg(all(test, not(feature = "std")))] +impl FutureQueueCompletion { + fn new() -> Self { + Self(Rc::new(RefCell::new(None))) + } + + fn complete(&self, o: O) { + *self.0.borrow_mut() = Some(o); + } +} + +#[cfg(all(test, not(feature = "std")))] +impl Clone for FutureQueueCompletion { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +#[cfg(all(test, feature = "std"))] +impl Future for FutureQueueCompletion { + type Output = Result; + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + match Pin::into_inner(self).0.lock().unwrap().take() { + None => Poll::Pending, + Some(o) => Poll::Ready(Ok(o)), + } + } +} + +#[cfg(all(test, not(feature = "std")))] +impl Future for FutureQueueCompletion { + type Output = Result; + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + match Pin::into_inner(self).0.borrow_mut().take() { + None => Poll::Pending, + Some(o) => 
Poll::Ready(Ok(o)), + } + } +} + #[cfg(test)] impl FutureQueue { pub(crate) fn new() -> Self { @@ -74,7 +166,6 @@ impl FutureQueue { futures = self.0.borrow_mut(); } futures.retain_mut(|fut| { - use core::task::{Context, Poll}; let waker = crate::util::async_poll::dummy_waker(); match fut.as_mut().poll(&mut Context::from_waker(&waker)) { Poll::Ready(()) => false, @@ -86,7 +177,16 @@ impl FutureQueue { #[cfg(test)] impl FutureSpawner for FutureQueue { - fn spawn + MaybeSend + 'static>(&self, future: T) { + type E = Infallible; + type SpawnedFutureResult = FutureQueueCompletion; + fn spawn + MaybeSend + 'static>( + &self, f: F, + ) -> FutureQueueCompletion { + let completion = FutureQueueCompletion::new(); + let compl_ref = completion.clone(); + let future = async move { + compl_ref.complete(f.await); + }; #[cfg(feature = "std")] { self.0.lock().unwrap().push(Box::pin(future)); @@ -95,6 +195,7 @@ impl FutureSpawner for FutureQueue { { self.0.borrow_mut().push(Box::pin(future)); } + completion } } @@ -102,7 +203,16 @@ impl FutureSpawner for FutureQueue { impl + MaybeSend + MaybeSync + 'static> FutureSpawner for D { - fn spawn + MaybeSend + 'static>(&self, future: T) { + type E = Infallible; + type SpawnedFutureResult = FutureQueueCompletion; + fn spawn + MaybeSend + 'static>( + &self, f: F, + ) -> FutureQueueCompletion { + let completion = FutureQueueCompletion::new(); + let compl_ref = completion.clone(); + let future = async move { + compl_ref.complete(f.await); + }; #[cfg(feature = "std")] { self.0.lock().unwrap().push(Box::pin(future)); @@ -111,5 +221,6 @@ impl + MaybeSend + MaybeSync + 'static { self.0.borrow_mut().push(Box::pin(future)); } + completion } } diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index bcee29bcf2b..cb4bdeb6a51 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -16,10 +16,11 @@ use alloc::sync::Arc; use bitcoin::hashes::hex::FromHex; use bitcoin::{BlockHash, Txid}; +use 
core::convert::Infallible; use core::future::Future; use core::mem; use core::ops::Deref; -use core::pin::Pin; +use core::pin::{pin, Pin}; use core::str::FromStr; use core::task; @@ -34,7 +35,9 @@ use crate::chain::transaction::OutPoint; use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::sync::Mutex; -use crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{ + dummy_waker, MaybeSend, MaybeSync, MultiResultFuturePoller, ResultFuture, TwoFutureJoiner, +}; use crate::util::logger::Logger; use crate::util::native_async::FutureSpawner; use crate::util::ser::{Readable, ReadableArgs, Writeable}; @@ -199,16 +202,6 @@ pub struct KVStoreSyncWrapper(pub K) where K::Target: KVStoreSync; -impl Deref for KVStoreSyncWrapper -where - K::Target: KVStoreSync, -{ - type Target = Self; - fn deref(&self) -> &Self::Target { - self - } -} - /// This is not exported to bindings users as async is only supported in Rust. 
impl KVStore for KVStoreSyncWrapper where @@ -216,34 +209,34 @@ where { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.0.read(primary_namespace, secondary_namespace, key); - Box::pin(async move { res }) + async move { res } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.0.write(primary_namespace, secondary_namespace, key, buf); - Box::pin(async move { res }) + async move { res } } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.0.remove(primary_namespace, secondary_namespace, key, lazy); - Box::pin(async move { res }) + async move { res } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.0.list(primary_namespace, secondary_namespace); - Box::pin(async move { res }) + async move { res } } } @@ -283,16 +276,18 @@ pub trait KVStore { /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error>; + ) -> impl Future, io::Error>> + 'static + MaybeSend; /// Persists the given data under the given `key`. /// - /// The order of multiple writes to the same key needs to be retained while persisting - /// asynchronously. 
In other words, if two writes to the same key occur, the state (as seen by - /// [`Self::read`]) must either see the first write then the second, or only ever the second, - /// no matter when the futures complete (and must always contain the second write once the - /// second future completes). The state should never contain the first write after the second - /// write's future completes, nor should it contain the second write, then contain the first - /// write at any point thereafter (even if the second write's future hasn't yet completed). + /// Note that this is *not* an `async fn`. Rather, the order of multiple writes to the same key + /// (as defined by the order of the synchronous function calls) needs to be retained while + /// persisting asynchronously. In other words, if two writes to the same key occur, the state + /// (as seen by [`Self::read`]) must either see the first write then the second, or only ever + /// the second, no matter when the futures complete (and must always contain the second write + /// once the second future completes). The state should never contain the first write after the + /// second write's future completes, nor should it contain the second write, then contain the + /// first write at any point thereafter (even if the second write's future hasn't yet + /// completed). /// /// One way to ensure this requirement is met is by assigning a version number to each write /// before returning the future, and then during asynchronous execution, ensuring that the @@ -303,7 +298,7 @@ pub trait KVStore { /// Will create the given `primary_namespace` and `secondary_namespace` if not already present in the store. fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error>; + ) -> impl Future> + 'static + MaybeSend; /// Removes any data that had previously been persisted under the given `key`. 
/// /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily @@ -311,6 +306,10 @@ pub trait KVStore { /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to /// [`KVStoreSync::list`] might include the removed key until the changes are actually persisted. /// + /// Note that similar to [`Self::write`] this is *not* an `async fn`, but rather a sync fn + /// which defines the order of writes to a given key, but which may complete its operation + /// asynchronously. + /// /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could /// potentially get lost on crash after the method returns. Therefore, this flag should only be @@ -321,12 +320,13 @@ pub trait KVStore { /// to the same key which occur before a removal completes must cancel/overwrite the pending /// removal. /// + /// /// Returns successfully if no data will be stored for the given `primary_namespace`, /// `secondary_namespace`, and `key`, independently of whether it was present before its /// invokation or not. fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error>; + ) -> impl Future> + 'static + MaybeSend; /// Returns a list of keys that are stored under the given `secondary_namespace` in /// `primary_namespace`. /// @@ -334,7 +334,37 @@ pub trait KVStore { /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown. 
fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error>; + ) -> impl Future, io::Error>> + 'static + MaybeSend; +} + +impl KVStore for K +where + K: Deref, + K::Target: KVStore, +{ + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> impl Future, io::Error>> + 'static + MaybeSend { + self.deref().read(primary_namespace, secondary_namespace, key) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> impl Future> + 'static + MaybeSend { + self.deref().write(primary_namespace, secondary_namespace, key, buf) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> impl Future> + 'static + MaybeSend { + self.deref().remove(primary_namespace, secondary_namespace, key, lazy) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> impl Future, io::Error>> + 'static + MaybeSend { + self.deref().list(primary_namespace, secondary_namespace) + } } /// Provides additional interface methods that are required for [`KVStore`]-to-[`KVStore`] @@ -435,13 +465,11 @@ impl Persist( +pub fn read_channel_monitors( kv_store: K, entropy_source: ES, signer_provider: SP, -) -> Result::EcdsaSigner>)>, io::Error> +) -> Result)>, io::Error> where K::Target: KVStoreSync, - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, { let mut res = Vec::new(); @@ -449,13 +477,13 @@ where CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, )? 
{ - match ::EcdsaSigner>)>>::read( + match )>>::read( &mut io::Cursor::new(kv_store.read( CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key, )?), - (&*entropy_source, &*signer_provider), + (&entropy_source, &signer_provider), ) { Ok(Some((block_hash, channel_monitor))) => { let monitor_name = MonitorName::from_str(&stored_key)?; @@ -482,7 +510,11 @@ where struct PanicingSpawner; impl FutureSpawner for PanicingSpawner { - fn spawn + MaybeSend + 'static>(&self, _: T) { + type E = Infallible; + type SpawnedFutureResult = Box> + Unpin>; + fn spawn + MaybeSend + 'static>( + &self, _: T, + ) -> Self::SpawnedFutureResult { unreachable!(); } } @@ -490,8 +522,7 @@ impl FutureSpawner for PanicingSpawner { fn poll_sync_future(future: F) -> F::Output { let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - // TODO A future MSRV bump to 1.68 should allow for the pin macro - match Pin::new(&mut Box::pin(future)).poll(&mut ctx) { + match pin!(future).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { // In a sync context, we can't wait for the future to complete. @@ -563,15 +594,6 @@ fn poll_sync_future(future: F) -> F::Output { /// list channel monitors themselves and load channels individually using /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`]. /// -/// ## EXTREMELY IMPORTANT -/// -/// It is extremely important that your [`KVStoreSync::read`] implementation uses the -/// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in -/// that circumstance (not when there is really a permissions error, for example). This is because -/// neither channel monitor reading function lists updates. Instead, either reads the monitor, and -/// using its stored `update_id`, synthesizes update storage keys, and tries them in sequence until -/// one is not found. 
All _other_ errors will be bubbled up in the function's [`Result`]. -/// /// # Pruning stale channel updates /// /// Stale updates are pruned when the consolidation threshold is reached according to `maximum_pending_updates`. @@ -584,26 +606,27 @@ fn poll_sync_future(future: F) -> F::Output { /// If you have many stale updates stored (such as after a crash with pending lazy deletes), and /// would like to get rid of them, consider using the /// [`MonitorUpdatingPersister::cleanup_stale_updates`] function. -pub struct MonitorUpdatingPersister( - MonitorUpdatingPersisterAsync, PanicingSpawner, L, ES, SP, BI, FE>, -) +pub struct MonitorUpdatingPersister< + K: Deref, + L: Logger, + ES: EntropySource, + SP: SignerProvider, + BI: BroadcasterInterface, + FE: FeeEstimator, +>(MonitorUpdatingPersisterAsync, PanicingSpawner, L, ES, SP, BI, FE>) where - K::Target: KVStoreSync, - L::Target: Logger, - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator; - -impl - MonitorUpdatingPersister + K::Target: KVStoreSync; + +impl< + K: Deref, + L: Logger, + ES: EntropySource, + SP: SignerProvider, + BI: BroadcasterInterface, + FE: FeeEstimator, + > MonitorUpdatingPersister where K::Target: KVStoreSync, - L::Target: Logger, - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator, { /// Constructs a new [`MonitorUpdatingPersister`]. /// @@ -645,25 +668,14 @@ where } /// Reads all stored channel monitors, along with any stored updates for them. - /// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. 
pub fn read_all_channel_monitors_with_updates( &self, - ) -> Result< - Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, - io::Error, - > { + ) -> Result)>, io::Error> { poll_sync_future(self.0.read_all_channel_monitors_with_updates()) } /// Read a single channel monitor, along with any stored updates for it. /// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. - /// /// For `monitor_key`, channel storage keys can be the channel's funding [`OutPoint`], with an /// underscore `_` between txid and index for v1 channels. For example, given: /// @@ -679,8 +691,7 @@ where /// function to accomplish this. Take care to limit the number of parallel readers. pub fn read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> - { + ) -> Result<(BlockHash, ChannelMonitor), io::Error> { poll_sync_future(self.0.read_channel_monitor_with_updates(monitor_key)) } @@ -698,19 +709,14 @@ where impl< ChannelSigner: EcdsaChannelSigner, K: Deref, - L: Deref, - ES: Deref, - SP: Deref, - BI: Deref, - FE: Deref, + L: Logger, + ES: EntropySource, + SP: SignerProvider, + BI: BroadcasterInterface, + FE: FeeEstimator, > Persist for MonitorUpdatingPersister where K::Target: KVStoreSync, - L::Target: Logger, - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator, { /// Persists a new channel. This means writing the entire monitor to the /// parametrized [`KVStoreSync`]. 
@@ -782,38 +788,24 @@ where /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor /// [`ChainMonitor::new_async_beta`]: crate::chain::chainmonitor::ChainMonitor::new_async_beta pub struct MonitorUpdatingPersisterAsync< - K: Deref, + K: KVStore, S: FutureSpawner, - L: Deref, - ES: Deref, - SP: Deref, - BI: Deref, - FE: Deref, ->(Arc>) -where - K::Target: KVStore, - L::Target: Logger, - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator; + L: Logger, + ES: EntropySource, + SP: SignerProvider, + BI: BroadcasterInterface, + FE: FeeEstimator, +>(Arc>); struct MonitorUpdatingPersisterAsyncInner< - K: Deref, + K: KVStore, S: FutureSpawner, - L: Deref, - ES: Deref, - SP: Deref, - BI: Deref, - FE: Deref, -> where - K::Target: KVStore, - L::Target: Logger, - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator, -{ + L: Logger, + ES: EntropySource, + SP: SignerProvider, + BI: BroadcasterInterface, + FE: FeeEstimator, +> { kv_store: K, async_completed_updates: Mutex>, future_spawner: S, @@ -825,15 +817,15 @@ struct MonitorUpdatingPersisterAsyncInner< fee_estimator: FE, } -impl - MonitorUpdatingPersisterAsync -where - K::Target: KVStore, - L::Target: Logger, - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator, +impl< + K: KVStore, + S: FutureSpawner, + L: Logger, + ES: EntropySource, + SP: SignerProvider, + BI: BroadcasterInterface, + FE: FeeEstimator, + > MonitorUpdatingPersisterAsync { /// Constructs a new [`MonitorUpdatingPersisterAsync`]. /// @@ -857,34 +849,80 @@ where /// Reads all stored channel monitors, along with any stored updates for them. /// - /// It is extremely important that your [`KVStore::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. 
For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. + /// While the reads themselves are performed in parallel, deserializing the + /// [`ChannelMonitor`]s is not. For large [`ChannelMonitor`]s actively used for forwarding, + /// this may substantially limit the parallelism of this method. + /// + /// If you can move this object into an `Arc`, consider using + /// [`Self::read_all_channel_monitors_with_updates_parallel`] to parallelize the CPU-bound + /// deserialization as well. pub async fn read_all_channel_monitors_with_updates( &self, - ) -> Result< - Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, - io::Error, - > { + ) -> Result)>, io::Error> { let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; let monitor_list = self.0.kv_store.list(primary, secondary).await?; - let mut res = Vec::with_capacity(monitor_list.len()); + let mut futures = Vec::with_capacity(monitor_list.len()); for monitor_key in monitor_list { - let result = - self.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await?; - if let Some(read_res) = result { + futures.push(ResultFuture::Pending(Box::pin(async move { + self.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await + }))); + } + let future_results = MultiResultFuturePoller::new(futures).await; + let mut res = Vec::with_capacity(future_results.len()); + for result in future_results { + if let Some(read_res) = result? { res.push(read_res); } } Ok(res) } - /// Read a single channel monitor, along with any stored updates for it. + /// Reads all stored channel monitors, along with any stored updates for them, in parallel. /// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. 
+ /// Because deserializing large [`ChannelMonitor`]s from forwarding nodes is often CPU-bound, + /// this version of [`Self::read_all_channel_monitors_with_updates`] uses the [`FutureSpawner`] + /// to parallelize deserialization as well as the IO operations. + /// + /// Because [`FutureSpawner`] requires that the spawned future be `'static` (matching `tokio` + /// and other multi-threaded runtime requirements), this method requires that `self` be an + /// `Arc` that can live for `'static` and be sent and accessed across threads. + pub async fn read_all_channel_monitors_with_updates_parallel( + self: &Arc, + ) -> Result)>, io::Error> + where + K: MaybeSend + MaybeSync + 'static, + L: MaybeSend + MaybeSync + 'static, + ES: MaybeSend + MaybeSync + 'static, + SP: MaybeSend + MaybeSync + 'static, + BI: MaybeSend + MaybeSync + 'static, + FE: MaybeSend + MaybeSync + 'static, + SP::EcdsaSigner: MaybeSend, + { + let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; + let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; + let monitor_list = self.0.kv_store.list(primary, secondary).await?; + let mut futures = Vec::with_capacity(monitor_list.len()); + for monitor_key in monitor_list { + let us = Arc::clone(&self); + futures.push(ResultFuture::Pending(self.0.future_spawner.spawn(async move { + us.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await + }))); + } + let future_results = MultiResultFuturePoller::new(futures).await; + let mut res = Vec::with_capacity(future_results.len()); + for result in future_results { + match result { + Err(_) => return Err(io::Error::new(io::ErrorKind::Other, "Future was cancelled")), + Ok(Err(e)) => return Err(e), + Ok(Ok(Some(read_res))) => res.push(read_res), + Ok(Ok(None)) => {}, + } + } + Ok(res) + } + + /// Read a single channel monitor, along with any stored updates for it. 
/// /// For `monitor_key`, channel storage keys can be the channel's funding [`OutPoint`], with an /// underscore `_` between txid and index for v1 channels. For example, given: @@ -901,8 +939,7 @@ where /// function to accomplish this. Take care to limit the number of parallel readers. pub async fn read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> - { + ) -> Result<(BlockHash, ChannelMonitor), io::Error> { self.0.read_channel_monitor_with_updates(monitor_key).await } @@ -918,26 +955,19 @@ where } impl< - K: Deref + MaybeSend + MaybeSync + 'static, + K: KVStore + MaybeSend + MaybeSync + 'static, S: FutureSpawner, - L: Deref + MaybeSend + MaybeSync + 'static, - ES: Deref + MaybeSend + MaybeSync + 'static, - SP: Deref + MaybeSend + MaybeSync + 'static, - BI: Deref + MaybeSend + MaybeSync + 'static, - FE: Deref + MaybeSend + MaybeSync + 'static, + L: Logger + MaybeSend + MaybeSync + 'static, + ES: EntropySource + MaybeSend + MaybeSync + 'static, + SP: SignerProvider + MaybeSend + MaybeSync + 'static, + BI: BroadcasterInterface + MaybeSend + MaybeSync + 'static, + FE: FeeEstimator + MaybeSend + MaybeSync + 'static, > MonitorUpdatingPersisterAsync where - K::Target: KVStore + MaybeSync, - L::Target: Logger, - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator, - ::EcdsaSigner: MaybeSend + 'static, + SP::EcdsaSigner: MaybeSend + 'static, { pub(crate) fn spawn_async_persist_new_channel( - &self, monitor_name: MonitorName, - monitor: &ChannelMonitor<::EcdsaSigner>, + &self, monitor_name: MonitorName, monitor: &ChannelMonitor, notifier: Arc, ) { let inner = Arc::clone(&self.0); @@ -946,7 +976,7 @@ where let future = inner.persist_new_channel(monitor_name, monitor); let channel_id = monitor.channel_id(); let completion = (monitor.channel_id(), monitor.get_latest_update_id()); - self.0.future_spawner.spawn(async 
move { + let _runs_free = self.0.future_spawner.spawn(async move { match future.await { Ok(()) => { inner.async_completed_updates.lock().unwrap().push(completion); @@ -964,8 +994,7 @@ where pub(crate) fn spawn_async_update_channel( &self, monitor_name: MonitorName, update: Option<&ChannelMonitorUpdate>, - monitor: &ChannelMonitor<::EcdsaSigner>, - notifier: Arc, + monitor: &ChannelMonitor, notifier: Arc, ) { let inner = Arc::clone(&self.0); // Note that `update_persisted_channel` is a sync method which calls all the way through to @@ -978,7 +1007,7 @@ where None }; let inner = Arc::clone(&self.0); - self.0.future_spawner.spawn(async move { + let _runs_free = self.0.future_spawner.spawn(async move { match future.await { Ok(()) => if let Some(completion) = completion { inner.async_completed_updates.lock().unwrap().push(completion); @@ -996,7 +1025,7 @@ where pub(crate) fn spawn_async_archive_persisted_channel(&self, monitor_name: MonitorName) { let inner = Arc::clone(&self.0); - self.0.future_spawner.spawn(async move { + let _runs_free = self.0.future_spawner.spawn(async move { inner.archive_persisted_channel(monitor_name).await; }); } @@ -1006,20 +1035,22 @@ where } } -impl - MonitorUpdatingPersisterAsyncInner -where - K::Target: KVStore, - L::Target: Logger, - ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, - BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator, +trait MaybeSendableFuture: Future> + MaybeSend {} +impl> + MaybeSend> MaybeSendableFuture for F {} + +impl< + K: KVStore, + S: FutureSpawner, + L: Logger, + ES: EntropySource, + SP: SignerProvider, + BI: BroadcasterInterface, + FE: FeeEstimator, + > MonitorUpdatingPersisterAsyncInner { pub async fn read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> - { + ) -> Result<(BlockHash, ChannelMonitor), io::Error> { match self.maybe_read_channel_monitor_with_updates(monitor_key).await? 
{ Some(res) => Ok(res), None => Err(io::Error::new( @@ -1036,33 +1067,31 @@ where async fn maybe_read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result< - Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, - io::Error, - > { + ) -> Result)>, io::Error> { let monitor_name = MonitorName::from_str(monitor_key)?; - let read_res = self.maybe_read_monitor(&monitor_name, monitor_key).await?; - let (block_hash, monitor) = match read_res { + let read_future = pin!(self.maybe_read_monitor(&monitor_name, monitor_key)); + let list_future = pin!(self + .kv_store + .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_key)); + let (read_res, list_res) = TwoFutureJoiner::new(read_future, list_future).await; + let (block_hash, monitor) = match read_res? { Some(res) => res, None => return Ok(None), }; - let mut current_update_id = monitor.get_latest_update_id(); - // TODO: Parallelize this loop by speculatively reading a batch of updates - loop { - current_update_id = match current_update_id.checked_add(1) { - Some(next_update_id) => next_update_id, - None => break, - }; - let update_name = UpdateName::from(current_update_id); - let update = match self.read_monitor_update(monitor_key, &update_name).await { - Ok(update) => update, - Err(err) if err.kind() == io::ErrorKind::NotFound => { - // We can't find any more updates, so we are done. 
- break; - }, - Err(err) => return Err(err), - }; - + let current_update_id = monitor.get_latest_update_id(); + let updates: Result, _> = + list_res?.into_iter().map(|name| UpdateName::new(name)).collect(); + let mut updates = updates?; + updates.sort_unstable(); + let updates_to_load = updates.iter().filter(|update| update.0 > current_update_id); + let mut update_futures = Vec::with_capacity(updates_to_load.clone().count()); + for update_name in updates_to_load { + update_futures.push(ResultFuture::Pending(Box::pin(async move { + (update_name, self.read_monitor_update(monitor_key, update_name).await) + }))); + } + for (update_name, update_res) in MultiResultFuturePoller::new(update_futures).await { + let update = update_res?; monitor .update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger) .map_err(|e| { @@ -1082,10 +1111,7 @@ where /// Read a channel monitor. async fn maybe_read_monitor( &self, monitor_name: &MonitorName, monitor_key: &str, - ) -> Result< - Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, - io::Error, - > { + ) -> Result)>, io::Error> { let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; let monitor_bytes = self.kv_store.read(primary, secondary, monitor_key).await?; @@ -1094,9 +1120,9 @@ where if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) { monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64); } - match ::EcdsaSigner>)>>::read( + match )>>::read( &mut monitor_cursor, - (&*self.entropy_source, &*self.signer_provider), + (&self.entropy_source, &self.signer_provider), ) { Ok(None) => Ok(None), Ok(Some((blockhash, channel_monitor))) => { @@ -1179,9 +1205,9 @@ where Ok(()) } - fn persist_new_channel( - &self, monitor_name: MonitorName, monitor: &ChannelMonitor, - ) -> impl Future> { + fn persist_new_channel<'a, ChannelSigner: EcdsaChannelSigner>( + &'a self, monitor_name: 
MonitorName, monitor: &'a ChannelMonitor, + ) -> Pin> + 'static>> { // Determine the proper key for this monitor let monitor_key = monitor_name.to_string(); // Serialize and write the new monitor @@ -1200,7 +1226,10 @@ where // completion of the write. This ensures monitor persistence ordering is preserved. let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; - self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes) + // There's no real reason why this needs to be boxed, but dropping it rams into the "hidden + // type for impl... captures lifetime that does not appear in bounds" issue. This can + // trivially be dropped once we upgrade to edition 2024/MSRV 1.85. + Box::pin(self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes)) } fn update_persisted_channel<'a, ChannelSigner: EcdsaChannelSigner + 'a>( @@ -1226,12 +1255,10 @@ where // write method, allowing it to do its queueing immediately, and then return a // future for the completion of the write. This ensures monitor persistence // ordering is preserved. - res_a = Some(self.kv_store.write( - primary, - &monitor_key, - update_name.as_str(), - update.encode(), - )); + let encoded = update.encode(); + res_a = Some(async move { + self.kv_store.write(primary, &monitor_key, update_name.as_str(), encoded).await + }); } else { // We could write this update, but it meets criteria of our design that calls for a full monitor write. 
// Note that this is NOT an async function, but rather calls the *sync* KVStore @@ -1448,7 +1475,7 @@ impl core::fmt::Display for MonitorName { /// let monitor_name = "some_monitor_name"; /// let storage_key = format!("channel_monitor_updates/{}/{}", monitor_name, update_name.as_str()); /// ``` -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct UpdateName(pub u64, String); impl UpdateName { @@ -1506,13 +1533,13 @@ impl From for UpdateName { mod tests { use super::*; use crate::chain::ChannelMonitorUpdateStatus; + use crate::check_closed_broadcast; use crate::events::ClosureReason; use crate::ln::functional_test_utils::*; use crate::ln::msgs::BaseMessageHandler; use crate::sync::Arc; use crate::util::test_channel_signer::TestChannelSigner; use crate::util::test_utils::{self, TestStore}; - use crate::{check_added_monitors, check_closed_broadcast}; use bitcoin::hashes::hex::FromHex; use core::cmp; @@ -1630,7 +1657,9 @@ mod tests { ); node_cfgs[0].chain_monitor = chain_mon_0; node_cfgs[1].chain_monitor = chain_mon_1; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); // Check that the persisted channel data is empty before any channels are @@ -1728,7 +1757,7 @@ mod tests { ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_id_1], 100000); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(node_txn.len(), 1); @@ -1740,7 +1769,7 @@ mod tests { let reason = ClosureReason::CommitmentTxConfirmed; let node_id_0 = nodes[0].node.get_our_node_id(); check_closed_event(&nodes[1], 1, reason, &[node_id_0], 
100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Make sure everything is persisted as expected after close. // We always send at least two payments, and loop up to max_pending_updates_0 * 2. diff --git a/lightning/src/util/scid_utils.rs b/lightning/src/util/scid_utils.rs index b9dcc4688e8..d57c529a41a 100644 --- a/lightning/src/util/scid_utils.rs +++ b/lightning/src/util/scid_utils.rs @@ -80,8 +80,6 @@ pub(crate) mod fake_scid { use bitcoin::constants::ChainHash; use bitcoin::Network; - use core::ops::Deref; - const TEST_SEGWIT_ACTIVATION_HEIGHT: u32 = 1; const MAINNET_SEGWIT_ACTIVATION_HEIGHT: u32 = 481_824; const MAX_TX_INDEX: u32 = 2_500; @@ -110,13 +108,10 @@ pub(crate) mod fake_scid { /// between segwit activation and the current best known height, and the tx index and output /// index are also selected from a "reasonable" range. We add this logic because it makes it /// non-obvious at a glance that the scid is fake, e.g. if it appears in invoice route hints. - pub(crate) fn get_fake_scid( + pub(crate) fn get_fake_scid( &self, highest_seen_blockheight: u32, chain_hash: &ChainHash, fake_scid_rand_bytes: &[u8; 32], entropy_source: &ES, - ) -> u64 - where - ES::Target: EntropySource, - { + ) -> u64 { // Ensure we haven't created a namespace that doesn't fit into the 3 bits we've allocated for // namespaces. assert!((*self as u8) < MAX_NAMESPACES); diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index f821aa5afc0..6579c0353a3 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -979,13 +979,15 @@ where } } -// Vectors +/// Write number of items in a vec followed by each element, without writing a length-prefix for +/// each element. +#[macro_export] macro_rules! 
impl_writeable_for_vec { ($ty: ty $(, $name: ident)*) => { impl<$($name : Writeable),*> Writeable for Vec<$ty> { #[inline] fn write(&self, w: &mut W) -> Result<(), io::Error> { - CollectionLength(self.len() as u64).write(w)?; + $crate::util::ser::CollectionLength(self.len() as u64).write(w)?; for elem in self.iter() { elem.write(w)?; } @@ -994,15 +996,21 @@ macro_rules! impl_writeable_for_vec { } } } +/// Read the number of items in a vec followed by each element, without reading a length prefix for +/// each element. +/// +/// Each element is read with `MaybeReadable`, meaning if an element cannot be read then it is +/// skipped without returning `DecodeError::InvalidValue`. +#[macro_export] macro_rules! impl_readable_for_vec { ($ty: ty $(, $name: ident)*) => { impl<$($name : Readable),*> Readable for Vec<$ty> { #[inline] - fn read(r: &mut R) -> Result { - let len: CollectionLength = Readable::read(r)?; - let mut ret = Vec::with_capacity(cmp::min(len.0 as usize, MAX_BUF_SIZE / core::mem::size_of::<$ty>())); + fn read(r: &mut R) -> Result { + let len: $crate::util::ser::CollectionLength = Readable::read(r)?; + let mut ret = Vec::with_capacity(cmp::min(len.0 as usize, $crate::util::ser::MAX_BUF_SIZE / core::mem::size_of::<$ty>())); for _ in 0..len.0 { - if let Some(val) = MaybeReadable::read(r)? { + if let Some(val) = $crate::util::ser::MaybeReadable::read(r)? { ret.push(val); } } diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index 647e7c77a6c..cc95fe619e8 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -45,7 +45,7 @@ macro_rules! _encode_tlv { field.write($stream)?; } }; - ($stream: expr, $optional_type: expr, $optional_field: expr, (legacy, $fieldty: ty, $write: expr) $(, $self: ident)?) => { { + ($stream: expr, $optional_type: expr, $optional_field: expr, (legacy, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) 
=> { { let value: Option<_> = $write($($self)?); #[cfg(debug_assertions)] { @@ -63,6 +63,9 @@ macro_rules! _encode_tlv { } $crate::_encode_tlv!($stream, $optional_type, value, option); } }; + ($stream: expr, $optional_type: expr, $optional_field: expr, (custom, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) => { { + $crate::_encode_tlv!($stream, $optional_type, $optional_field, (legacy, $fieldty, $read, $write) $(, $self)?); + } }; ($stream: expr, $type: expr, $field: expr, optional_vec $(, $self: ident)?) => { if !$field.is_empty() { $crate::_encode_tlv!($stream, $type, $field, required_vec); @@ -229,9 +232,12 @@ macro_rules! _get_varint_length_prefixed_tlv_length { $len.0 += field_len; } }; - ($len: expr, $optional_type: expr, $optional_field: expr, (legacy, $fieldty: ty, $write: expr) $(, $self: ident)?) => { + ($len: expr, $optional_type: expr, $optional_field: expr, (legacy, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) => { $crate::_get_varint_length_prefixed_tlv_length!($len, $optional_type, $write($($self)?), option); }; + ($len: expr, $optional_type: expr, $optional_field: expr, (custom, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) => { + $crate::_get_varint_length_prefixed_tlv_length!($len, $optional_type, $optional_field, (legacy, $fieldty, $read, $write) $(, $self)?); + }; ($len: expr, $type: expr, $field: expr, optional_vec $(, $self: ident)?) => { if !$field.is_empty() { $crate::_get_varint_length_prefixed_tlv_length!($len, $type, $field, required_vec); @@ -314,9 +320,19 @@ macro_rules! 
_check_decoded_tlv_order { ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (option, explicit_type: $fieldty: ty)) => {{ // no-op }}; - ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (legacy, $fieldty: ty, $write: expr)) => {{ + ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (legacy, $fieldty: ty, $read: expr, $write: expr)) => {{ // no-op }}; + ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (custom, $fieldty: ty, $read: expr, $write: expr) $(, $self: ident)?) => {{ + // Note that $type may be 0 making the second comparison always false + #[allow(unused_comparisons)] + let invalid_order = + ($last_seen_type.is_none() || $last_seen_type.unwrap() < $type) && $typ.0 > $type; + if invalid_order { + let read_result: Result<_, DecodeError> = $read(None); + $field = read_result?.into(); + } + }}; ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, (required, explicit_type: $fieldty: ty)) => {{ _check_decoded_tlv_order!($last_seen_type, $typ, $type, $field, required); }}; @@ -382,8 +398,19 @@ macro_rules! 
_check_missing_tlv { ($last_seen_type: expr, $type: expr, $field: ident, (option, explicit_type: $fieldty: ty)) => {{ // no-op }}; - ($last_seen_type: expr, $type: expr, $field: ident, (legacy, $fieldty: ty, $write: expr)) => {{ - // no-op + ($last_seen_type: expr, $type: expr, $field: ident, (legacy, $fieldty: ty, $read: expr, $write: expr)) => {{ + use $crate::ln::msgs::DecodeError; + let read_result: Result<(), DecodeError> = $read($field.as_ref()); + read_result?; + }}; + ($last_seen_type: expr, $type: expr, $field: ident, (custom, $fieldty: ty, $read: expr, $write: expr)) => {{ + // Note that $type may be 0 making the second comparison always false + #[allow(unused_comparisons)] + let missing_req_type = $last_seen_type.is_none() || $last_seen_type.unwrap() < $type; + if missing_req_type { + let read_result: Result<_, DecodeError> = $read(None); + $field = read_result?.into(); + } }}; ($last_seen_type: expr, $type: expr, $field: ident, (required, explicit_type: $fieldty: ty)) => {{ _check_missing_tlv!($last_seen_type, $type, $field, required); @@ -438,9 +465,15 @@ macro_rules! 
_decode_tlv { let _field: &Option<$fieldty> = &$field; $crate::_decode_tlv!($outer_reader, $reader, $field, option); }}; - ($outer_reader: expr, $reader: expr, $field: ident, (legacy, $fieldty: ty, $write: expr)) => {{ + ($outer_reader: expr, $reader: expr, $field: ident, (legacy, $fieldty: ty, $read: expr, $write: expr)) => {{ $crate::_decode_tlv!($outer_reader, $reader, $field, (option, explicit_type: $fieldty)); }}; + ($outer_reader: expr, $reader: expr, $field: ident, (custom, $fieldty: ty, $read: expr, $write: expr)) => {{ + let read_field: $fieldty; + $crate::_decode_tlv!($outer_reader, $reader, read_field, required); + let read_result: Result<_, DecodeError> = $read(Some(read_field)); + $field = read_result?.into(); + }}; ($outer_reader: expr, $reader: expr, $field: ident, (required, explicit_type: $fieldty: ty)) => {{ let _field: &$fieldty = &$field; _decode_tlv!($outer_reader, $reader, $field, required); @@ -827,9 +860,12 @@ macro_rules! _init_tlv_based_struct_field { ($field: ident, option) => { $field }; - ($field: ident, (legacy, $fieldty: ty, $write: expr)) => { + ($field: ident, (legacy, $fieldty: ty, $read: expr, $write: expr)) => { $crate::_init_tlv_based_struct_field!($field, option) }; + ($field: ident, (custom, $fieldty: ty, $read: expr, $write: expr)) => { + $crate::_init_tlv_based_struct_field!($field, required) + }; ($field: ident, (option: $trait: ident $(, $read_arg: expr)?)) => { $crate::_init_tlv_based_struct_field!($field, option) }; @@ -852,6 +888,9 @@ macro_rules! _init_tlv_based_struct_field { ($field: ident, (required_vec, encoding: ($fieldty: ty, $encoding: ident))) => { $crate::_init_tlv_based_struct_field!($field, required) }; + ($field: ident, (option, encoding: ($fieldty: ty, $encoding: ident))) => { + $crate::_init_tlv_based_struct_field!($field, option) + }; ($field: ident, optional_vec) => { $field.unwrap() }; @@ -890,9 +929,12 @@ macro_rules! 
_init_tlv_field_var { ($field: ident, (option, explicit_type: $fieldty: ty)) => { let mut $field: Option<$fieldty> = None; }; - ($field: ident, (legacy, $fieldty: ty, $write: expr)) => { + ($field: ident, (legacy, $fieldty: ty, $read: expr, $write: expr)) => { $crate::_init_tlv_field_var!($field, (option, explicit_type: $fieldty)); }; + ($field: ident, (custom, $fieldty: ty, $read: expr, $write: expr)) => { + $crate::_init_tlv_field_var!($field, required); + }; ($field: ident, (required, explicit_type: $fieldty: ty)) => { let mut $field = $crate::util::ser::RequiredWrapper::<$fieldty>(None); }; @@ -972,10 +1014,18 @@ macro_rules! _decode_and_build { /// [`MaybeReadable`], requiring the TLV to be present. /// If `$fieldty` is `optional_vec`, then `$field` is a [`Vec`], which needs to have its individual elements serialized. /// Note that for `optional_vec` no bytes are written if the vec is empty -/// If `$fieldty` is `(legacy, $ty, $write)` then, when writing, the function $write will be +/// If `$fieldty` is `(legacy, $ty, $read, $write)` then, when writing, the function $write will be /// called with the object being serialized and a returned `Option` and is written as a TLV if -/// `Some`. When reading, an optional field of type `$ty` is read (which can be used in later -/// `default_value` or `static_value` fields by referring to the value by name). +/// `Some`. When reading, an optional field of type `$ty` is read, and after all TLV fields are +/// read, the `$read` closure is called with the `Option<&$ty>` value. The `$read` closure should +/// return a `Result<(), DecodeError>`. Legacy field values can be used in later +/// `default_value` or `static_value` fields by referring to the value by name. +/// If `$fieldty` is `(custom, $ty, $read, $write)` then, when writing, the same behavior as +/// `legacy`, above is used. When reading, if a TLV is present, it is read as `$ty` and the +/// `$read` method is called with `Some(decoded_$ty_object)`. 
If no TLV is present, the field +/// will be initialized by calling `$read(None)`. `$read` should return a +/// `Result` (note that the processed field type may differ from `$ty`; +/// `$ty` is the type as de/serialized, not necessarily the actual field type). /// /// For example, /// ``` @@ -993,7 +1043,7 @@ macro_rules! _decode_and_build { /// (1, tlv_default_integer, (default_value, 7)), /// (2, tlv_optional_integer, option), /// (3, tlv_vec_type_integer, optional_vec), -/// (4, unwritten_type, (legacy, u32, |us: &LightningMessage| Some(us.tlv_integer))), +/// (4, unwritten_type, (legacy, u32, |_| Ok(()), |us: &LightningMessage| Some(us.tlv_integer))), /// (_unused, tlv_upgraded_integer, (static_value, unwritten_type.unwrap_or(0) * 2)) /// }); /// ``` @@ -1885,7 +1935,7 @@ mod tests { new_field: (u8, u8), } impl_writeable_tlv_based!(ExpandedField, { - (0, old_field, (legacy, u8, |us: &ExpandedField| Some(us.new_field.0))), + (0, old_field, (legacy, u8, |_| Ok(()), |us: &ExpandedField| Some(us.new_field.0))), (1, new_field, (default_value, (old_field.ok_or(DecodeError::InvalidValue)?, 0))), }); @@ -1924,4 +1974,31 @@ mod tests { LengthReadable::read_from_fixed_length_buffer(&mut &encoded[..]).unwrap(); assert_eq!(decoded, instance); } + + #[test] + fn test_option_with_encoding() { + // Ensure that serializing an option with a specified encoding will survive a ser round + // trip for Some and None options. 
+ #[derive(PartialEq, Eq, Debug)] + struct MyCustomStruct { + tlv_field: Option, + } + + impl_writeable_msg!(MyCustomStruct, {}, { + (1, tlv_field, (option, encoding: (u64, HighZeroBytesDroppedBigSize))), + }); + + for tlv_field in [None, Some(0u64), Some(255u64)] { + let instance = MyCustomStruct { tlv_field }; + let encoded = instance.encode(); + let decoded: MyCustomStruct = + LengthReadable::read_from_fixed_length_buffer(&mut &encoded[..]).unwrap(); + assert_eq!( + decoded, + MyCustomStruct { tlv_field }, + "option custom encoding failed for: {:?}", + tlv_field + ); + } + } } diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 5a1ffad3e04..b70eb274085 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -8,7 +8,9 @@ //! [`SpendableOutputDescriptor`]s, i.e., persists them in a given [`KVStoreSync`] and regularly retries //! sweeping them. -use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; +use crate::chain::chaininterface::{ + BroadcasterInterface, ConfirmationTarget, FeeEstimator, TransactionType, +}; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS}; use crate::chain::{self, BestBlock, Confirm, Filter, Listen, WatchedOutput}; use crate::io; @@ -26,19 +28,20 @@ use crate::util::persist::{ OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, }; use crate::util::ser::{Readable, ReadableArgs, Writeable}; -use crate::{impl_writeable_tlv_based, log_debug, log_error}; +use crate::{log_debug, log_error}; use bitcoin::block::Header; use bitcoin::locktime::absolute::LockTime; -use bitcoin::secp256k1::Secp256k1; +use bitcoin::secp256k1::{PublicKey, Secp256k1}; use bitcoin::{BlockHash, ScriptBuf, Transaction, Txid}; use core::future::Future; use core::ops::Deref; +use core::pin::{pin, Pin}; use core::sync::atomic::{AtomicBool, Ordering}; use core::task; -use super::async_poll::{dummy_waker, AsyncResult}; +use 
super::async_poll::dummy_waker; /// The number of blocks we wait before we prune the tracked spendable outputs. pub const PRUNE_DELAY_BLOCKS: u32 = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY; @@ -52,6 +55,13 @@ pub struct TrackedSpendableOutput { /// /// Will be `None` if no `channel_id` was given to [`OutputSweeper::track_spendable_outputs`] pub channel_id: Option, + /// The `node_id` of the channel counterparty. + /// + /// Will be `None` if no `counterparty_node_id` was given to + /// [`OutputSweeper::track_spendable_outputs`]. + /// + /// This will be `None` for outputs tracked with LDK 0.2 and prior. + pub counterparty_node_id: Option, /// The current status of the output spend. pub status: OutputSpendStatus, } @@ -90,6 +100,7 @@ impl TrackedSpendableOutput { impl_writeable_tlv_based!(TrackedSpendableOutput, { (0, descriptor, required), (2, channel_id, option), + (3, counterparty_node_id, option), (4, status, required), }); @@ -336,15 +347,16 @@ impl_writeable_tlv_based_enum!(OutputSpendStatus, /// /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs // Note that updates to documentation on this struct should be copied to the synchronous version. 
-pub struct OutputSweeper -where - B::Target: BroadcasterInterface, +pub struct OutputSweeper< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Filter, + K: KVStore, + L: Logger, + O: OutputSpender, +> where D::Target: ChangeDestinationSource, - E::Target: FeeEstimator, - F::Target: Filter, - K::Target: KVStore, - L::Target: Logger, - O::Target: OutputSpender, { sweeper_state: Mutex, pending_sweep: AtomicBool, @@ -357,16 +369,17 @@ where logger: L, } -impl - OutputSweeper +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Filter, + K: KVStore, + L: Logger, + O: OutputSpender, + > OutputSweeper where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSource, - E::Target: FeeEstimator, - F::Target: Filter, - K::Target: KVStore, - L::Target: Logger, - O::Target: OutputSpender, { /// Constructs a new [`OutputSweeper`]. /// @@ -408,7 +421,8 @@ where /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs pub async fn track_spendable_outputs( &self, output_descriptors: Vec, channel_id: Option, - exclude_static_outputs: bool, delay_until_height: Option, + counterparty_node_id: Option, exclude_static_outputs: bool, + delay_until_height: Option, ) -> Result<(), ()> { let mut relevant_descriptors = output_descriptors .into_iter() @@ -427,6 +441,7 @@ where let output_info = TrackedSpendableOutput { descriptor, channel_id, + counterparty_node_id, status: OutputSpendStatus::PendingInitialBroadcast { delayed_until_height: delay_until_height, }, @@ -522,66 +537,83 @@ where self.change_destination_source.get_change_destination_script().await?; // Sweep the outputs. 
- let spending_tx = self - .update_state(|sweeper_state| -> Result<(Option, bool), ()> { - let cur_height = sweeper_state.best_block.height; - let cur_hash = sweeper_state.best_block.block_hash; - - let respend_descriptors_set: HashSet<&SpendableOutputDescriptor> = sweeper_state - .outputs - .iter() - .filter(|o| filter_fn(*o, cur_height)) - .map(|o| &o.descriptor) - .collect(); - - // we first collect into a set to avoid duplicates and to "randomize" the order - // in which outputs are spent. Then we collect into a vec as that is what - // `spend_outputs` requires. - let respend_descriptors: Vec<&SpendableOutputDescriptor> = - respend_descriptors_set.into_iter().collect(); - - // Generate the spending transaction and broadcast it. - if !respend_descriptors.is_empty() { - let spending_tx = self - .spend_outputs( - &sweeper_state, - &respend_descriptors, - change_destination_script, - ) - .map_err(|e| { - log_error!(self.logger, "Error spending outputs: {:?}", e); - })?; - - log_debug!( - self.logger, - "Generating and broadcasting sweeping transaction {}", - spending_tx.compute_txid() - ); - - // As we didn't modify the state so far, the same filter_fn yields the same elements as - // above. 
- let respend_outputs = - sweeper_state.outputs.iter_mut().filter(|o| filter_fn(&**o, cur_height)); - for output_info in respend_outputs { - if let Some(filter) = self.chain_data_source.as_ref() { - let watched_output = output_info.to_watched_output(cur_hash); - filter.register_output(watched_output); + let spending_tx_and_chan_id = self + .update_state( + |sweeper_state| -> Result<(Option<(Transaction, Vec<(PublicKey, ChannelId)>)>, bool), ()> { + let cur_height = sweeper_state.best_block.height; + let cur_hash = sweeper_state.best_block.block_hash; + + let respend_descriptors_set: HashSet<&SpendableOutputDescriptor> = + sweeper_state + .outputs + .iter() + .filter(|o| filter_fn(*o, cur_height)) + .map(|o| &o.descriptor) + .collect(); + + // we first collect into a set to avoid duplicates and to "randomize" the order + // in which outputs are spent. Then we collect into a vec as that is what + // `spend_outputs` requires. + let respend_descriptors: Vec<&SpendableOutputDescriptor> = + respend_descriptors_set.into_iter().collect(); + + // Generate the spending transaction and broadcast it. + if !respend_descriptors.is_empty() { + let spending_tx = self + .spend_outputs( + &sweeper_state, + &respend_descriptors, + change_destination_script, + ) + .map_err(|e| { + log_error!(self.logger, "Error spending outputs: {:?}", e); + })?; + + log_debug!( + self.logger, + "Generating and broadcasting sweeping transaction {}", + spending_tx.compute_txid() + ); + + // As we didn't modify the state so far, the same filter_fn yields the same elements as + // above. 
+ let respend_outputs = sweeper_state + .outputs + .iter_mut() + .filter(|o| filter_fn(&**o, cur_height)); + let mut channels = Vec::new(); + for output_info in respend_outputs { + if let Some(filter) = self.chain_data_source.as_ref() { + let watched_output = output_info.to_watched_output(cur_hash); + filter.register_output(watched_output); + } + + if let (Some(counterparty_node_id), Some(channel_id)) = + (output_info.counterparty_node_id, output_info.channel_id) + { + if !channels.iter().any(|(cp, ch)| { + *cp == counterparty_node_id && *ch == channel_id + }) { + channels.push((counterparty_node_id, channel_id)); + } + } + + output_info.status.broadcast(cur_hash, cur_height, spending_tx.clone()); + sweeper_state.dirty = true; } - output_info.status.broadcast(cur_hash, cur_height, spending_tx.clone()); - sweeper_state.dirty = true; + Ok((Some((spending_tx, channels)), false)) + } else { + Ok((None, false)) } - - Ok((Some(spending_tx), false)) - } else { - Ok((None, false)) - } - }) + }, + ) .await?; // Persistence completely successfully. If we have a spending transaction, we broadcast it. 
- if let Some(spending_tx) = spending_tx { - self.broadcaster.broadcast_transactions(&[&spending_tx]); + if let Some((spending_tx, channels)) = spending_tx_and_chan_id { + self.broadcaster + .broadcast_transactions(&[(&spending_tx, TransactionType::Sweep { channels })]); } Ok(()) @@ -609,15 +641,32 @@ where sweeper_state.dirty = true; } - fn persist_state<'a>(&self, sweeper_state: &SweeperState) -> AsyncResult<'a, (), io::Error> { + #[cfg(feature = "std")] + fn persist_state<'a>( + &'a self, sweeper_state: &SweeperState, + ) -> Pin> + Send + 'static>> { let encoded = sweeper_state.encode(); - self.kv_store.write( + Box::pin(self.kv_store.write( OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_KEY, encoded, - ) + )) + } + + #[cfg(not(feature = "std"))] + fn persist_state<'a>( + &'a self, sweeper_state: &SweeperState, + ) -> Pin> + 'static>> { + let encoded = sweeper_state.encode(); + + Box::pin(self.kv_store.write( + OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_KEY, + encoded, + )) } /// Updates the sweeper state by executing the given callback. 
Persists the state afterwards if it is marked dirty, @@ -692,16 +741,17 @@ where } } -impl Listen - for OutputSweeper +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Filter + Sync + Send, + K: KVStore, + L: Logger, + O: OutputSpender, + > Listen for OutputSweeper where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSource, - E::Target: FeeEstimator, - F::Target: Filter + Sync + Send, - K::Target: KVStore, - L::Target: Logger, - O::Target: OutputSpender, { fn filtered_block_connected( &self, header: &Header, txdata: &chain::transaction::TransactionData, height: u32, @@ -733,16 +783,17 @@ where } } -impl Confirm - for OutputSweeper +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Filter + Sync + Send, + K: KVStore, + L: Logger, + O: OutputSpender, + > Confirm for OutputSweeper where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSource, - E::Target: FeeEstimator, - F::Target: Filter + Sync + Send, - K::Target: KVStore, - L::Target: Logger, - O::Target: OutputSpender, { fn transactions_confirmed( &self, header: &Header, txdata: &chain::transaction::TransactionData, height: u32, @@ -830,16 +881,17 @@ pub enum SpendingDelay { }, } -impl - ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeper) +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Filter + Sync + Send, + K: KVStore, + L: Logger, + O: OutputSpender, + > ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeper) where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSource, - E::Target: FeeEstimator, - F::Target: Filter + Sync + Send, - K::Target: KVStore, - L::Target: Logger, - O::Target: OutputSpender, { #[inline] fn read( @@ -900,30 +952,34 @@ where /// /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs // Note that updates to documentation on this struct should be copied to the asynchronous version. 
-pub struct OutputSweeperSync -where - B::Target: BroadcasterInterface, +pub struct OutputSweeperSync< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Filter, + K: Deref, + L: Logger, + O: OutputSpender, +> where D::Target: ChangeDestinationSourceSync, - E::Target: FeeEstimator, - F::Target: Filter, K::Target: KVStoreSync, - L::Target: Logger, - O::Target: OutputSpender, { sweeper: OutputSweeper, E, F, KVStoreSyncWrapper, L, O>, } -impl - OutputSweeperSync +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Filter, + K: Deref, + L: Logger, + O: OutputSpender, + > OutputSweeperSync where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSourceSync, - E::Target: FeeEstimator, - F::Target: Filter, K::Target: KVStoreSync, - L::Target: Logger, - O::Target: OutputSpender, { /// Constructs a new [`OutputSweeperSync`] instance. /// @@ -968,11 +1024,13 @@ where /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs pub fn track_spendable_outputs( &self, output_descriptors: Vec, channel_id: Option, - exclude_static_outputs: bool, delay_until_height: Option, + counterparty_node_id: Option, exclude_static_outputs: bool, + delay_until_height: Option, ) -> Result<(), ()> { - let mut fut = Box::pin(self.sweeper.track_spendable_outputs( + let mut fut = pin!(self.sweeper.track_spendable_outputs( output_descriptors, channel_id, + counterparty_node_id, exclude_static_outputs, delay_until_height, )); @@ -1005,7 +1063,7 @@ where /// /// Wraps [`OutputSweeper::regenerate_and_broadcast_spend_if_necessary`]. 
pub fn regenerate_and_broadcast_spend_if_necessary(&self) -> Result<(), ()> { - let mut fut = Box::pin(self.sweeper.regenerate_and_broadcast_spend_if_necessary()); + let mut fut = pin!(self.sweeper.regenerate_and_broadcast_spend_if_necessary()); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); match fut.as_mut().poll(&mut ctx) { @@ -1034,16 +1092,18 @@ where } } -impl Listen - for OutputSweeperSync +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Filter + Sync + Send, + K: Deref, + L: Logger, + O: OutputSpender, + > Listen for OutputSweeperSync where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSourceSync, - E::Target: FeeEstimator, - F::Target: Filter + Sync + Send, K::Target: KVStoreSync, - L::Target: Logger, - O::Target: OutputSpender, { fn filtered_block_connected( &self, header: &Header, txdata: &chain::transaction::TransactionData, height: u32, @@ -1056,16 +1116,18 @@ where } } -impl Confirm - for OutputSweeperSync +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Filter + Sync + Send, + K: Deref, + L: Logger, + O: OutputSpender, + > Confirm for OutputSweeperSync where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSourceSync, - E::Target: FeeEstimator, - F::Target: Filter + Sync + Send, K::Target: KVStoreSync, - L::Target: Logger, - O::Target: OutputSpender, { fn transactions_confirmed( &self, header: &Header, txdata: &chain::transaction::TransactionData, height: u32, @@ -1086,16 +1148,19 @@ where } } -impl - ReadableArgs<(B, E, Option, O, D, K, L)> for (BestBlock, OutputSweeperSync) +impl< + B: BroadcasterInterface, + D: Deref, + E: FeeEstimator, + F: Filter + Sync + Send, + K: Deref, + L: Logger, + O: OutputSpender, + > ReadableArgs<(B, E, Option, O, D, K, L)> + for (BestBlock, OutputSweeperSync) where - B::Target: BroadcasterInterface, D::Target: ChangeDestinationSourceSync, - E::Target: FeeEstimator, - F::Target: Filter + Sync + Send, K::Target: 
KVStoreSync, - L::Target: Logger, - O::Target: OutputSpender, { #[inline] fn read( diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 34c6414b7e0..bcf39fde482 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -12,9 +12,9 @@ use crate::blinded_path::message::{BlindedMessagePath, MessageForwardNode}; use crate::blinded_path::payment::{BlindedPaymentPath, ReceiveTlvs}; use crate::chain; use crate::chain::chaininterface; -use crate::chain::chaininterface::ConfirmationTarget; #[cfg(any(test, feature = "_externalize_tests"))] use crate::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW; +use crate::chain::chaininterface::{ConfirmationTarget, TransactionType}; use crate::chain::chainmonitor::{ChainMonitor, Persist}; use crate::chain::channelmonitor::{ ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, MonitorEvent, @@ -50,7 +50,7 @@ use crate::sign::{self, ReceiveAuthKey}; use crate::sign::{ChannelSigner, PeerStorageKey}; use crate::sync::RwLock; use crate::types::features::{ChannelFeatures, InitFeatures, NodeFeatures}; -use crate::util::async_poll::AsyncResult; +use crate::util::async_poll::MaybeSend; use crate::util::config::UserConfig; use crate::util::dyn_signer::{ DynKeysInterface, DynKeysInterfaceTrait, DynPhantomKeysInterface, DynSigner, @@ -61,6 +61,7 @@ use crate::util::mut_global::MutGlobal; use crate::util::persist::{KVStore, KVStoreSync, MonitorName}; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use crate::util::test_channel_signer::{EnforcementState, TestChannelSigner}; +use crate::util::wakers::Notifier; use bitcoin::amount::Amount; use bitcoin::block::Block; @@ -580,6 +581,11 @@ impl<'a> TestChainMonitor<'a> { self.added_monitors.lock().unwrap().push((channel_id, monitor)); self.chain_monitor.load_existing_monitor(channel_id, new_monitor) } + + pub fn get_latest_mon_update_id(&self, channel_id: ChannelId) -> (u64, u64) { + let monitor_id_state = 
self.latest_monitor_update_id.lock().unwrap(); + monitor_id_state.get(&channel_id).unwrap().clone() + } } impl<'a> chain::Watch for TestChainMonitor<'a> { fn watch_channel( @@ -1042,13 +1048,13 @@ impl TestStore { impl KVStore for TestStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.read_internal(&primary_namespace, &secondary_namespace, &key); - Box::pin(async move { res }) + async move { res } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let path = format!("{primary_namespace}/{secondary_namespace}/{key}"); let future = Arc::new(Mutex::new((None, None))); @@ -1057,19 +1063,19 @@ impl KVStore for TestStore { let new_id = pending_writes.last().map(|(id, _, _)| id + 1).unwrap_or(0); pending_writes.push((new_id, Arc::clone(&future), buf)); - Box::pin(OneShotChannel(future)) + OneShotChannel(future) } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.remove_internal(&primary_namespace, &secondary_namespace, &key, lazy); - Box::pin(async move { res }) + async move { res } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.list_internal(primary_namespace, secondary_namespace); - Box::pin(async move { res }) + async move { res } } } @@ -1125,35 +1131,54 @@ unsafe impl Send for TestStore {} pub struct TestBroadcaster { pub txn_broadcasted: Mutex>, + pub txn_types: Mutex>, pub blocks: Arc>>, } impl TestBroadcaster { pub fn new(network: Network) -> Self { let txn_broadcasted = Mutex::new(Vec::new()); + let 
txn_types = Mutex::new(Vec::new()); let blocks = Arc::new(Mutex::new(vec![(genesis_block(network), 0)])); - Self { txn_broadcasted, blocks } + Self { txn_broadcasted, txn_types, blocks } } pub fn with_blocks(blocks: Arc>>) -> Self { let txn_broadcasted = Mutex::new(Vec::new()); - Self { txn_broadcasted, blocks } + let txn_types = Mutex::new(Vec::new()); + Self { txn_broadcasted, txn_types, blocks } } pub fn txn_broadcast(&self) -> Vec { + self.txn_types.lock().unwrap().clear(); self.txn_broadcasted.lock().unwrap().split_off(0) } pub fn unique_txn_broadcast(&self) -> Vec { let mut txn = self.txn_broadcasted.lock().unwrap().split_off(0); + self.txn_types.lock().unwrap().clear(); let mut seen = new_hash_set(); txn.retain(|tx| seen.insert(tx.compute_txid())); txn } + + /// Returns all broadcast transactions with their types, clearing both internal lists. + pub fn txn_broadcast_with_types(&self) -> Vec<(Transaction, TransactionType)> { + let txn = self.txn_broadcasted.lock().unwrap().split_off(0); + let types = self.txn_types.lock().unwrap().split_off(0); + assert_eq!(txn.len(), types.len(), "Transaction and type vectors out of sync"); + txn.into_iter().zip(types.into_iter()).collect() + } + + /// Clears both the transaction and type vectors. + pub fn clear(&self) { + self.txn_broadcasted.lock().unwrap().clear(); + self.txn_types.lock().unwrap().clear(); + } } impl chaininterface::BroadcasterInterface for TestBroadcaster { - fn broadcast_transactions(&self, txs: &[&Transaction]) { + fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]) { // Assert that any batch of transactions of length greater than 1 is sorted // topologically, and is a `child-with-parents` package as defined in // . @@ -1164,21 +1189,23 @@ impl chaininterface::BroadcasterInterface for TestBroadcaster { // Right now LDK only ever broadcasts packages of length 2. 
assert!(txs.len() <= 2); if txs.len() == 2 { - let parent_txid = txs[0].compute_txid(); + let parent_txid = txs[0].0.compute_txid(); assert!(txs[1] + .0 .input .iter() .map(|input| input.previous_output.txid) .any(|txid| txid == parent_txid)); - let child_txid = txs[1].compute_txid(); + let child_txid = txs[1].0.compute_txid(); assert!(txs[0] + .0 .input .iter() .map(|input| input.previous_output.txid) .all(|txid| txid != child_txid)); } - for tx in txs { + for (tx, _broadcast_type) in txs { let lock_time = tx.lock_time.to_consensus_u32(); assert!(lock_time < 1_500_000_000); if tx.lock_time.is_block_height() @@ -1194,8 +1221,11 @@ impl chaininterface::BroadcasterInterface for TestBroadcaster { } } } - let owned_txs: Vec = txs.iter().map(|tx| (*tx).clone()).collect(); + let owned_txs: Vec = txs.iter().map(|(tx, _)| (*tx).clone()).collect(); + let owned_types: Vec = + txs.iter().map(|(_, tx_type)| tx_type.clone()).collect(); self.txn_broadcasted.lock().unwrap().extend(owned_txs); + self.txn_types.lock().unwrap().extend(owned_types); } } @@ -2101,7 +2131,7 @@ impl TestChainSource { } impl UtxoLookup for TestChainSource { - fn get_utxo(&self, chain_hash: &ChainHash, _short_channel_id: u64) -> UtxoResult { + fn get_utxo(&self, chain_hash: &ChainHash, _scid: u64, _notifier: Arc) -> UtxoResult { self.get_utxo_call_count.fetch_add(1, Ordering::Relaxed); if self.chain_hash != *chain_hash { return UtxoResult::Sync(Err(UtxoLookupError::UnknownChain)); diff --git a/lightning/src/util/wakers.rs b/lightning/src/util/wakers.rs index a84d90960d8..17edadfd822 100644 --- a/lightning/src/util/wakers.rs +++ b/lightning/src/util/wakers.rs @@ -253,37 +253,13 @@ impl Sleeper { pub fn from_single_future(future: &Future) -> Self { Self { notifiers: vec![Arc::clone(&future.state)] } } - /// Constructs a new sleeper from two futures, allowing blocking on both at once. 
- pub fn from_two_futures(fut_a: &Future, fut_b: &Future) -> Self { - Self { notifiers: vec![Arc::clone(&fut_a.state), Arc::clone(&fut_b.state)] } - } - /// Constructs a new sleeper from three futures, allowing blocking on all three at once. - /// - // Note that this is the common case - a ChannelManager, a ChainMonitor, and an - // OnionMessenger. - pub fn from_three_futures(fut_a: &Future, fut_b: &Future, fut_c: &Future) -> Self { - let notifiers = - vec![Arc::clone(&fut_a.state), Arc::clone(&fut_b.state), Arc::clone(&fut_c.state)]; - Self { notifiers } - } - /// Constructs a new sleeper from four futures, allowing blocking on all four at once. - /// - // Note that this is another common case - a ChannelManager, a ChainMonitor, an - // OnionMessenger, and a LiquidityManager. - pub fn from_four_futures( - fut_a: &Future, fut_b: &Future, fut_c: &Future, fut_d: &Future, - ) -> Self { - let notifiers = vec![ - Arc::clone(&fut_a.state), - Arc::clone(&fut_b.state), - Arc::clone(&fut_c.state), - Arc::clone(&fut_d.state), - ]; - Self { notifiers } + /// Constructs an iterator of futures, allowing blocking on all at once. + pub fn from_futures>(futures: I) -> Self { + Self { notifiers: futures.into_iter().map(|f| Arc::clone(&f.state)).collect() } } /// Constructs a new sleeper on many futures, allowing blocking on all at once. pub fn new(futures: Vec) -> Self { - Self { notifiers: futures.into_iter().map(|f| Arc::clone(&f.state)).collect() } + Self::from_futures(futures) } /// Prepares to go into a wait loop body, creating a condition variable which we can block on /// and an `Arc>>` which gets set to the waking `Future`'s state prior to the @@ -506,15 +482,13 @@ mod tests { // Wait on the other thread to finish its sleep, note that the leak only happened if we // actually have to sleep here, not if we immediately return. 
- Sleeper::from_two_futures(&future_a, &future_b).wait(); + Sleeper::from_futures([future_a, future_b]).wait(); join_handle.join().unwrap(); // then drop the notifiers and make sure the future states are gone. mem::drop(notifier_a); mem::drop(notifier_b); - mem::drop(future_a); - mem::drop(future_b); assert!(future_state_a.upgrade().is_none() && future_state_b.upgrade().is_none()); } @@ -736,18 +710,18 @@ mod tests { // Set both notifiers as woken without sleeping yet. notifier_a.notify(); notifier_b.notify(); - Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait(); + Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait(); // One future has woken us up, but the other should still have a pending notification. - Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait(); + Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait(); // However once we've slept twice, we should no longer have any pending notifications - assert!(!Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()) + assert!(!Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]) .wait_timeout(Duration::from_millis(10))); // Test ordering somewhat more. notifier_a.notify(); - Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait(); + Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait(); } #[test] @@ -765,7 +739,7 @@ mod tests { // After sleeping one future (not guaranteed which one, however) will have its notification // bit cleared. 
- Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait(); + Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait(); // By registering a callback on the futures for both notifiers, one will complete // immediately, but one will remain tied to the notifier, and will complete once the @@ -788,8 +762,8 @@ mod tests { notifier_b.notify(); assert!(callback_a.load(Ordering::SeqCst) && callback_b.load(Ordering::SeqCst)); - Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait(); - assert!(!Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()) + Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait(); + assert!(!Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]) .wait_timeout(Duration::from_millis(10))); } diff --git a/pending_changelog/3137-accept-dual-funding-without-contributing.txt b/pending_changelog/3137-accept-dual-funding-without-contributing.txt index 9ea8de24e54..5e1d0de2d86 100644 --- a/pending_changelog/3137-accept-dual-funding-without-contributing.txt +++ b/pending_changelog/3137-accept-dual-funding-without-contributing.txt @@ -7,9 +7,8 @@ differentiate between an inbound request for a dual-funded (V2) or non-dual-funded (V1) channel to be opened, with value being either of the enum variants `InboundChannelFunds::DualFunded` and `InboundChannelFunds::PushMsat(u64)` corresponding to V2 and V1 channel open requests respectively. - * If `manually_accept_inbound_channels` is false, then V2 channels will be accepted automatically; the - same behaviour as V1 channels. Otherwise, `ChannelManager::accept_inbound_channel()` can also be used - to manually accept an inbound V2 channel. + * Similar to V1 channels, `ChannelManager::accept_inbound_channel()` can also be used + to accept an inbound V2 channel. * 0conf dual-funded channels are not supported. * RBF of dual-funded channel funding transactions is not supported. 
diff --git a/pending_changelog/4213.txt b/pending_changelog/4213.txt new file mode 100644 index 00000000000..791edd47804 --- /dev/null +++ b/pending_changelog/4213.txt @@ -0,0 +1,5 @@ +Backwards compat +================ + + * Outbound payments which are awaiting a response to a BOLT 12 invoice request + will not be able to complete after upgrading to 0.3 (#4213). diff --git a/pending_changelog/4336.txt b/pending_changelog/4336.txt new file mode 100644 index 00000000000..a41c71ca2b4 --- /dev/null +++ b/pending_changelog/4336.txt @@ -0,0 +1,5 @@ +Forward Compatibility +===================== + +* Downgrading from 0.3 will not be possible while a splice is pending when using async monitor + updates. diff --git a/pending_changelog/4337-manual-channel-accept-default-anchors.txt b/pending_changelog/4337-manual-channel-accept-default-anchors.txt new file mode 100644 index 00000000000..999b2490939 --- /dev/null +++ b/pending_changelog/4337-manual-channel-accept-default-anchors.txt @@ -0,0 +1,18 @@ +# API Updates + + * `ChannelHandshakeConfig::negotiate_anchors_zero_fee_htlc_tx` + now defaults to `true` (previously `false`). This means anchor output channels + will be negotiated by default for all new channels if the counterparty supports + it, requiring users to maintain an on-chain reserve for fee bumping in the + event of force-closes. + + * All inbound channels now require manual acceptance. + `UserConfig::manually_accept_inbound_channels` has been removed, and + `Event::OpenChannelRequest` will now always be generated for inbound channel + requests. Users must handle this event and call either + `ChannelManager::accept_inbound_channel` (or + `accept_inbound_channel_from_trusted_peer_0conf` for zero-conf channels) to + accept the channel, or `ChannelManager::force_close_broadcasting_latest_txn` + to reject it. This ensures users can verify they have sufficient on-chain + funds before accepting channels with anchor outputs. 
+ diff --git a/pending_changelog/matt-full-interception.txt b/pending_changelog/matt-full-interception.txt new file mode 100644 index 00000000000..2cc51a56305 --- /dev/null +++ b/pending_changelog/matt-full-interception.txt @@ -0,0 +1,4 @@ +# Backwards Compatibility + * HTLCs which were first received on an LDK version prior to LDK 0.2 will no + longer be intercepted. Instead, they will be handled as if they were not + intercepted and be forwarded/failed automatically.