From 546a2776d7c817f780d7d19433bddac033cbc47c Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Wed, 3 Sep 2025 17:43:36 -0700
Subject: [PATCH 01/26] lnwallet: add new helper functions to scale
 confirmations based on amt

---
 lnwallet/confscale.go | 59 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)
 create mode 100644 lnwallet/confscale.go

diff --git a/lnwallet/confscale.go b/lnwallet/confscale.go
new file mode 100644
index 0000000000..8a8239602f
--- /dev/null
+++ b/lnwallet/confscale.go
@@ -0,0 +1,59 @@
+package lnwallet
+
+import (
+	"github.com/btcsuite/btcd/btcutil"
+	"github.com/lightningnetwork/lnd/lnwire"
+)
+
+const (
+	// minRequiredConfs is the minimum number of confirmations we'll
+	// require for channel operations.
+	minRequiredConfs = 1
+
+	// maxRequiredConfs is the maximum number of confirmations we'll
+	// require for channel operations.
+	maxRequiredConfs = 6
+
+	// maxChannelSize is the maximum expected channel size in satoshis.
+	// This matches MaxBtcFundingAmount (0.16777215 BTC).
+	maxChannelSize = 16777215
+)
+
+// ScaleNumConfs returns a linearly scaled number of confirmations based on the
+// provided channel amount and push amount (for funding transactions). The push
+// amount represents additional risk when receiving funds.
+func ScaleNumConfs(chanAmt btcutil.Amount, pushAmt lnwire.MilliSatoshi) uint16 {
+	// For wumbo channels, always require maximum confirmations.
+	if chanAmt > maxChannelSize {
+		return maxRequiredConfs
+	}
+
+	// Calculate total stake: channel amount + push amount. The push amount
+	// represents value at risk for the receiver.
+	maxChannelSizeMsat := lnwire.NewMSatFromSatoshis(maxChannelSize)
+	stake := lnwire.NewMSatFromSatoshis(chanAmt) + pushAmt
+
+	// Scale confirmations linearly based on stake.
+	conf := uint64(maxRequiredConfs) * uint64(stake) /
+		uint64(maxChannelSizeMsat)
+
+	// Bound the result between minRequiredConfs and maxRequiredConfs.
+	if conf < minRequiredConfs {
+		conf = minRequiredConfs
+	}
+	if conf > maxRequiredConfs {
+		conf = maxRequiredConfs
+	}
+
+	return uint16(conf)
+}
+
+// FundingConfsForAmounts returns the number of confirmations to wait for a
+// funding transaction, taking into account both the channel amount and any
+// pushed amount (which represents additional risk).
+func FundingConfsForAmounts(chanAmt btcutil.Amount,
+	pushAmt lnwire.MilliSatoshi) uint16 {
+
+	return ScaleNumConfs(chanAmt, pushAmt)
+}
+

From e33f933687e552ab8f9e21d8581b8045dd6e5de2 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Wed, 3 Sep 2025 17:44:03 -0700
Subject: [PATCH 02/26] server: use new FundingConfsForAmounts helper func

---
 server.go | 44 +++++++++++---------------------------------
 1 file changed, 11 insertions(+), 33 deletions(-)

diff --git a/server.go b/server.go
index a2d36eb865..f8391a3219 100644
--- a/server.go
+++ b/server.go
@@ -1479,16 +1479,6 @@ func newServer(ctx context.Context, cfg *Config, listenAddrs []net.Addr,
 		DefaultMinHtlcIn: cc.MinHtlcIn,
 		NumRequiredConfs: func(chanAmt btcutil.Amount,
 			pushAmt lnwire.MilliSatoshi) uint16 {
-			// For large channels we increase the number
-			// of confirmations we require for the
-			// channel to be considered open. As it is
-			// always the responder that gets to choose
-			// value, the pushAmt is value being pushed
-			// to us. This means we have more to lose
-			// in the case this gets re-orged out, and
-			// we will require more confirmations before
-			// we consider it open.
-
 			// In case the user has explicitly specified
 			// a default value for the number of
 			// confirmations, we use it.
@@ -1497,29 +1487,17 @@ func newServer(ctx context.Context, cfg *Config, listenAddrs []net.Addr,
 				return defaultConf
 			}
 
-			minConf := uint64(3)
-			maxConf := uint64(6)
-
-			// If this is a wumbo channel, then we'll require the
-			// max amount of confirmations.
-			if chanAmt > MaxFundingAmount {
-				return uint16(maxConf)
-			}
-
-			// If not we return a value scaled linearly
-			// between 3 and 6, depending on channel size.
-			// TODO(halseth): Use 1 as minimum?
-			maxChannelSize := uint64(
-				lnwire.NewMSatFromSatoshis(MaxFundingAmount))
-			stake := lnwire.NewMSatFromSatoshis(chanAmt) + pushAmt
-			conf := maxConf * uint64(stake) / maxChannelSize
-			if conf < minConf {
-				conf = minConf
-			}
-			if conf > maxConf {
-				conf = maxConf
-			}
-			return uint16(conf)
+			// Otherwise, scale the number of confirmations based on
+			// the channel amount and push amount. For large
+			// channels we increase the number of
+			// confirmations we require for the channel to be
+			// considered open. As it is always the
+			// responder that gets to choose value, the
+			// pushAmt is value being pushed to us. This
+			// means we have more to lose in the case this
+			// gets re-orged out, and we will require more
+			// confirmations before we consider it open.
+			return lnwallet.FundingConfsForAmounts(chanAmt, pushAmt)
 		},
 		RequiredRemoteDelay: func(chanAmt btcutil.Amount) uint16 {
 			// We scale the remote CSV delay (the time the

From 9c7b7cf9e6d4de829632a8f6b3c59fa658023041 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Wed, 3 Sep 2025 17:44:32 -0700
Subject: [PATCH 03/26] lnwallet: define helper func for coop close conf
 scaling

We have two versions: for itests, we just use one conf, but in prod,
we'll scale the number of confirmations.
---
 lnwallet/confscale_integration.go | 13 +++++++++++++
 lnwallet/confscale_prod.go        | 25 +++++++++++++++++++++++++
 2 files changed, 38 insertions(+)
 create mode 100644 lnwallet/confscale_integration.go
 create mode 100644 lnwallet/confscale_prod.go

diff --git a/lnwallet/confscale_integration.go b/lnwallet/confscale_integration.go
new file mode 100644
index 0000000000..72692a7360
--- /dev/null
+++ b/lnwallet/confscale_integration.go
@@ -0,0 +1,13 @@
+//go:build integration
+// +build integration
+
+package lnwallet
+
+import "github.com/btcsuite/btcd/btcutil"
+
+// CloseConfsForCapacity returns the number of confirmations to wait
+// before signaling a cooperative close. Under integration tests, we
+// always return 1 to keep tests fast and deterministic.
+func CloseConfsForCapacity(capacity btcutil.Amount) uint32 { //nolint:revive
+	return 1
+}
\ No newline at end of file
diff --git a/lnwallet/confscale_prod.go b/lnwallet/confscale_prod.go
new file mode 100644
index 0000000000..f0a2cd586e
--- /dev/null
+++ b/lnwallet/confscale_prod.go
@@ -0,0 +1,25 @@
+//go:build !integration
+// +build !integration
+
+package lnwallet
+
+import "github.com/btcsuite/btcd/btcutil"
+
+// CloseConfsForCapacity returns the number of confirmations to wait
+// before signaling a cooperative close, scaled by channel capacity.
+// We enforce a minimum of 3 confirmations to provide better reorg protection,
+// even for small channels.
+func CloseConfsForCapacity(capacity btcutil.Amount) uint32 { //nolint:revive
+	// For cooperative closes, we don't have a push amount to consider,
+	// so we pass 0 for the pushAmt parameter.
+ scaledConfs := uint32(ScaleNumConfs(capacity, 0)) + + // Enforce a minimum of 3 confirmations for reorg safety. + // This protects against shallow reorgs which are more common. + const minCoopCloseConfs = 3 + if scaledConfs < minCoopCloseConfs { + return minCoopCloseConfs + } + + return scaledConfs +} \ No newline at end of file From 4be966a92ad76c338eac5eb23e6f20f238c5256e Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 3 Sep 2025 17:44:40 -0700 Subject: [PATCH 04/26] lnwallet: add tests for new conf scaling helper funcs --- lnwallet/confscale_test.go | 338 +++++++++++++++++++++++++++++++++++++ 1 file changed, 338 insertions(+) create mode 100644 lnwallet/confscale_test.go diff --git a/lnwallet/confscale_test.go b/lnwallet/confscale_test.go new file mode 100644 index 0000000000..786769e451 --- /dev/null +++ b/lnwallet/confscale_test.go @@ -0,0 +1,338 @@ +package lnwallet + +import ( + "testing" + + "github.com/btcsuite/btcd/btcutil" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/stretchr/testify/require" + "pgregory.net/rapid" +) + +// TestScaleNumConfsProperties tests various properties that ScaleNumConfs +// should satisfy using property-based testing. +func TestScaleNumConfsProperties(t *testing.T) { + t.Parallel() + + // The result should always be bounded between the minimum and maximum + // number of confirmations regardless of input values. + t.Run("bounded_result", func(t *testing.T) { + rapid.Check(t, func(t *rapid.T) { + // Generate random channel amount and push amount. + chanAmt := rapid.Uint64Range( + 0, maxChannelSize*10, + ).Draw(t, "chanAmt") + pushAmtSats := rapid.Uint64Range( + 0, chanAmt, + ).Draw(t, "pushAmtSats") + pushAmt := lnwire.NewMSatFromSatoshis( + btcutil.Amount(pushAmtSats), + ) + + result := ScaleNumConfs( + btcutil.Amount(chanAmt), pushAmt, + ) + + // Check bounds + require.GreaterOrEqual( + t, result, uint16(minRequiredConfs), + "result should be >= minRequiredConfs", + ) + require.LessOrEqual( + t, result, uint16(maxRequiredConfs), + "result should be <= maxRequiredConfs", + ) + }) + }) + + // Larger channel amounts and push amounts should require equal or more + // confirmations, ensuring the function is monotonically increasing. + t.Run("monotonicity", func(t *testing.T) { + rapid.Check(t, func(t *rapid.T) { + // Generate two channel amounts where amt1 <= amt2. + amt1 := rapid.Uint64Range( + 0, maxChannelSize, + ).Draw(t, "amt1") + amt2 := rapid.Uint64Range( + amt1, maxChannelSize, + ).Draw(t, "amt2") + + // Generate push amounts proportional to channel size. + pushAmt1Sats := rapid.Uint64Range( + 0, amt1, + ).Draw(t, "pushAmt1") + pushAmt2Sats := rapid.Uint64Range( + pushAmt1Sats, amt2, + ).Draw(t, "pushAmt2") + + pushAmt1 := lnwire.NewMSatFromSatoshis( + btcutil.Amount(pushAmt1Sats), + ) + pushAmt2 := lnwire.NewMSatFromSatoshis( + btcutil.Amount(pushAmt2Sats), + ) + + confs1 := ScaleNumConfs(btcutil.Amount(amt1), pushAmt1) + confs2 := ScaleNumConfs(btcutil.Amount(amt2), pushAmt2) + + // Larger or equal stake should require equal or more + // confirmations. + require.GreaterOrEqual( + t, confs2, confs1, + "larger amount should require equal or "+ + "more confirmations", + ) + }) + }) + + // Wumbo channels (those exceeding the max standard channel size) should + // always require the maximum number of confirmations for safety. + t.Run("wumbo_max_confs", func(t *testing.T) { + rapid.Check(t, func(t *rapid.T) { + // Generate wumbo channel amount (above maxChannelSize). 
+ wumboAmt := rapid.Uint64Range( + maxChannelSize+1, maxChannelSize*100, + ).Draw(t, "wumboAmt") + pushAmtSats := rapid.Uint64Range( + 0, wumboAmt, + ).Draw(t, "pushAmtSats") + pushAmt := lnwire.NewMSatFromSatoshis( + btcutil.Amount(pushAmtSats), + ) + + result := ScaleNumConfs( + btcutil.Amount(wumboAmt), pushAmt, + ) + + require.Equal( + t, uint16(maxRequiredConfs), result, + "wumbo channels should always get "+ + "max confirmations", + ) + }) + }) + + // Zero channel amounts should always result in the minimum number of + // confirmations since there's no value at risk. + t.Run("zero_gets_min", func(t *testing.T) { + result := ScaleNumConfs(0, 0) + require.Equal( + t, uint16(minRequiredConfs), result, + "zero amount should get minimum confirmations", + ) + }) + + // The function should be deterministic, always returning the same + // output for the same input values. + t.Run("determinism", func(t *testing.T) { + rapid.Check(t, func(t *rapid.T) { + chanAmt := rapid.Uint64Range( + 0, maxChannelSize*2, + ).Draw(t, "chanAmt") + pushAmtSats := rapid.Uint64Range( + 0, chanAmt, + ).Draw(t, "pushAmtSats") + pushAmt := lnwire.NewMSatFromSatoshis( + btcutil.Amount(pushAmtSats), + ) + + // Call multiple times with same inputs. + result1 := ScaleNumConfs( + btcutil.Amount(chanAmt), pushAmt, + ) + result2 := ScaleNumConfs( + btcutil.Amount(chanAmt), pushAmt, + ) + result3 := ScaleNumConfs( + btcutil.Amount(chanAmt), pushAmt, + ) + + require.Equal( + t, result1, result2, + "function should be deterministic", + ) + require.Equal( + t, result2, result3, + "function should be deterministic", + ) + }) + }) + + // Adding a push amount to a channel should require equal or more + // confirmations compared to the same channel without a push amount. + t.Run("push_amount_effect", func(t *testing.T) { + rapid.Check(t, func(t *rapid.T) { + // Fix channel amount, vary push amount + chanAmt := rapid.Uint64Range( + 1, maxChannelSize, + ).Draw(t, "chanAmt") + pushAmt1Sats := rapid.Uint64Range( + 0, chanAmt/2, + ).Draw(t, "pushAmt1") + pushAmt2Sats := rapid.Uint64Range( + pushAmt1Sats, chanAmt, + ).Draw(t, "pushAmt2") + + pushAmt1 := lnwire.NewMSatFromSatoshis( + btcutil.Amount(pushAmt1Sats), + ) + pushAmt2 := lnwire.NewMSatFromSatoshis( + btcutil.Amount(pushAmt2Sats), + ) + + confs1 := ScaleNumConfs( + btcutil.Amount(chanAmt), pushAmt1, + ) + confs2 := ScaleNumConfs( + btcutil.Amount(chanAmt), pushAmt2, + ) + + // More push amount should require equal or more + // confirmations. + require.GreaterOrEqual( + t, confs2, confs1, + "larger push amount should "+ + "require equal or more confirmations", + ) + }) + }) +} + +// TestScaleNumConfsKnownValues tests ScaleNumConfs with specific known values +// to ensure the scaling formula works as expected. 
+func TestScaleNumConfsKnownValues(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + chanAmt btcutil.Amount + pushAmt lnwire.MilliSatoshi + expected uint16 + }{ + { + name: "zero amounts", + chanAmt: 0, + pushAmt: 0, + expected: minRequiredConfs, + }, + { + name: "tiny channel", + chanAmt: 1000, + pushAmt: 0, + expected: minRequiredConfs, + }, + { + name: "small channel no push", + chanAmt: 100_000, + pushAmt: 0, + expected: minRequiredConfs, + }, + { + name: "half max channel no push", + chanAmt: maxChannelSize / 2, + pushAmt: 0, + expected: 2, + }, + { + name: "max channel no push", + chanAmt: maxChannelSize, + pushAmt: 0, + expected: maxRequiredConfs, + }, + { + name: "wumbo channel", + chanAmt: maxChannelSize * 2, + pushAmt: 0, + expected: maxRequiredConfs, + }, + { + name: "small channel with push", + chanAmt: 100_000, + pushAmt: lnwire.NewMSatFromSatoshis(50_000), + expected: minRequiredConfs, + }, + { + name: "medium channel with significant push", + chanAmt: maxChannelSize / 4, + pushAmt: lnwire.NewMSatFromSatoshis(maxChannelSize / 4), + expected: 2, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + result := ScaleNumConfs(tc.chanAmt, tc.pushAmt) + + require.Equal( + t, tc.expected, result, + "chanAmt=%d, pushAmt=%d", tc.chanAmt, + tc.pushAmt, + ) + }) + } +} + +// TestFundingConfsForAmounts verifies that FundingConfsForAmounts is a simple +// wrapper around ScaleNumConfs. +func TestFundingConfsForAmounts(t *testing.T) { + t.Parallel() + + rapid.Check(t, func(t *rapid.T) { + chanAmt := rapid.Uint64Range( + 0, maxChannelSize*2, + ).Draw(t, "chanAmt") + pushAmtSats := rapid.Uint64Range( + 0, chanAmt, + ).Draw(t, "pushAmtSats") + pushAmt := lnwire.NewMSatFromSatoshis( + btcutil.Amount(pushAmtSats), + ) + + // Both functions should return the same result. + scaleResult := ScaleNumConfs(btcutil.Amount(chanAmt), pushAmt) + fundingResult := FundingConfsForAmounts( + btcutil.Amount(chanAmt), pushAmt, + ) + + require.Equal( + t, scaleResult, fundingResult, + "FundingConfsForAmounts should return "+ + "same result as ScaleNumConfs", + ) + }) +} + +// TestCloseConfsForCapacity verifies that CloseConfsForCapacity correctly +// wraps ScaleNumConfs with zero push amount and enforces a minimum of 3 +// confirmations for reorg safety. +func TestCloseConfsForCapacity(t *testing.T) { + t.Parallel() + + rapid.Check(t, func(t *rapid.T) { + capacity := rapid.Uint64Range( + 0, maxChannelSize*2, + ).Draw(t, "capacity") + + // CloseConfsForCapacity should be equivalent to ScaleNumConfs + // with 0 push, but with a minimum of 3 confirmations enforced + // for reorg safety. + closeConfs := CloseConfsForCapacity(btcutil.Amount(capacity)) + scaleConfs := ScaleNumConfs(btcutil.Amount(capacity), 0) + + // The result should be at least the scaled value, but with a + // minimum of 3 confirmations. 
+ const minCoopCloseConfs = 3 + expectedConfs := uint32(scaleConfs) + if expectedConfs < minCoopCloseConfs { + expectedConfs = minCoopCloseConfs + } + + require.Equal( + t, expectedConfs, closeConfs, + "CloseConfsForCapacity should match "+ + "ScaleNumConfs with 0 push amount, "+ + "but with minimum of 3 confs", + ) + }) +} From a0c9141a4d3090596298620407e7c4e8422b06e7 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 3 Sep 2025 17:45:31 -0700 Subject: [PATCH 05/26] peer+rpcserver: use new conf scaling for notifications --- peer/brontide.go | 32 ++++++++++++++++++-------------- rpcserver.go | 16 ++++++++-------- 2 files changed, 26 insertions(+), 22 deletions(-) diff --git a/peer/brontide.go b/peer/brontide.go index 9191cbb2ee..6fbba297d4 100644 --- a/peer/brontide.go +++ b/peer/brontide.go @@ -4444,14 +4444,19 @@ func (p *Brontide) finalizeChanClosure(chanCloser *chancloser.ChanCloser) { localOut := chanCloser.LocalCloseOutput() remoteOut := chanCloser.RemoteCloseOutput() auxOut := chanCloser.AuxOutputs() - go WaitForChanToClose( - chanCloser.NegotiationHeight(), notifier, errChan, - &chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, func() { - // Respond to the local subsystem which requested the - // channel closure. - if closeReq != nil { - closeReq.Updates <- &ChannelCloseUpdate{ - ClosingTxid: closingTxid[:], + // Determine the number of confirmations to wait before + // signaling a successful cooperative close, scaled by + // channel capacity (see CloseConfsForCapacity). + numConfs := lnwallet.CloseConfsForCapacity(chanCloser.Channel().Capacity) + + go WaitForChanToClose( + chanCloser.NegotiationHeight(), notifier, errChan, + &chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, numConfs, func() { + // Respond to the local subsystem which requested the + // channel closure. + if closeReq != nil { + closeReq.Updates <- &ChannelCloseUpdate{ + ClosingTxid: closingTxid[:], Success: true, LocalCloseOutput: localOut, RemoteCloseOutput: remoteOut, @@ -4468,16 +4473,15 @@ func (p *Brontide) finalizeChanClosure(chanCloser *chancloser.ChanCloser) { // finally the callback will be executed. If any error is encountered within // the function, then it will be sent over the errChan. func WaitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier, - errChan chan error, chanPoint *wire.OutPoint, - closingTxID *chainhash.Hash, closeScript []byte, cb func()) { + errChan chan error, chanPoint *wire.OutPoint, + closingTxID *chainhash.Hash, closeScript []byte, numConfs uint32, cb func()) { peerLog.Infof("Waiting for confirmation of close of ChannelPoint(%v) "+ "with txid: %v", chanPoint, closingTxID) - // TODO(roasbeef): add param for num needed confs - confNtfn, err := notifier.RegisterConfirmationsNtfn( - closingTxID, closeScript, 1, bestHeight, - ) + confNtfn, err := notifier.RegisterConfirmationsNtfn( + closingTxID, closeScript, numConfs, bestHeight, + ) if err != nil { if errChan != nil { errChan <- err diff --git a/rpcserver.go b/rpcserver.go index d3d3c51801..83330e3081 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -2800,14 +2800,14 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest, errChan = make(chan error, 1) notifier := r.server.cc.ChainNotifier - go peer.WaitForChanToClose( - uint32(bestHeight), notifier, errChan, chanPoint, - &closingTxid, closingTx.TxOut[0].PkScript, func() { - // Respond to the local subsystem which - // requested the channel closure. 
-				updateChan <- &peer.ChannelCloseUpdate{
-					ClosingTxid: closingTxid[:],
-					Success:     true,
+		go peer.WaitForChanToClose(
+			uint32(bestHeight), notifier, errChan, chanPoint,
+			&closingTxid, closingTx.TxOut[0].PkScript, 1, func() {
+				// Respond to the local subsystem which
+				// requested the channel closure.
+				updateChan <- &peer.ChannelCloseUpdate{
+					ClosingTxid: closingTxid[:],
+					Success:     true,
 
 					// Force closure transactions don't have
 					// additional local/remote outputs.
 				}

From 1622ccb722b480b8de5babaa8f306436cd0ba501 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Wed, 1 Oct 2025 16:40:13 -0700
Subject: [PATCH 06/26] lncfg: add new dev config option for scaling channel
 close confs

This'll be useful for setting up the upcoming itests.
---
 lncfg/dev.go             |  7 +++++++
 lncfg/dev_integration.go | 11 +++++++++++
 2 files changed, 18 insertions(+)

diff --git a/lncfg/dev.go b/lncfg/dev.go
index f048d69b7a..8e0c9dda45 100644
--- a/lncfg/dev.go
+++ b/lncfg/dev.go
@@ -5,6 +5,7 @@ package lncfg
 import (
 	"time"
 
+	"github.com/lightningnetwork/lnd/fn/v2"
 	"github.com/lightningnetwork/lnd/lnwallet/chanfunding"
 )
 
@@ -58,3 +59,9 @@ func (d *DevConfig) GetMaxWaitNumBlocksFundingConf() uint32 {
 func (d *DevConfig) GetUnsafeConnect() bool {
 	return false
 }
+
+// ChannelCloseConfs returns the config value for channel close confirmations
+// override, which is always None for production build.
+func (d *DevConfig) ChannelCloseConfs() fn.Option[uint32] {
+	return fn.None[uint32]()
+}
diff --git a/lncfg/dev_integration.go b/lncfg/dev_integration.go
index 8ac85f5d9e..05ecdb27be 100644
--- a/lncfg/dev_integration.go
+++ b/lncfg/dev_integration.go
@@ -5,6 +5,7 @@ package lncfg
 import (
 	"time"
 
+	"github.com/lightningnetwork/lnd/fn/v2"
 	"github.com/lightningnetwork/lnd/lnwallet/chanfunding"
 )
 
@@ -27,6 +28,7 @@ type DevConfig struct {
 	UnsafeDisconnect            bool   `long:"unsafedisconnect" description:"Allows the rpcserver to intentionally disconnect from peers with open channels."`
 	MaxWaitNumBlocksFundingConf uint32 `long:"maxwaitnumblocksfundingconf" description:"Maximum blocks to wait for funding confirmation before discarding non-initiated channels."`
 	UnsafeConnect               bool   `long:"unsafeconnect" description:"Allow the rpcserver to connect to a peer even if there's already a connection."`
+	ForceChannelCloseConfs      uint32 `long:"force-channel-close-confs" description:"Force a specific number of confirmations for channel closes (dev/test only)"`
 }
 
 // ChannelReadyWait returns the config value `ProcessChannelReadyWait`.
@@ -71,3 +73,12 @@ func (d *DevConfig) GetMaxWaitNumBlocksFundingConf() uint32 {
 func (d *DevConfig) GetUnsafeConnect() bool {
 	return d.UnsafeConnect
 }
+
+// ChannelCloseConfs returns the forced confirmation count if set, or None if
+// the default behavior should be used.
+func (d *DevConfig) ChannelCloseConfs() fn.Option[uint32] {
+	if d.ForceChannelCloseConfs == 0 {
+		return fn.None[uint32]()
+	}
+	return fn.Some(d.ForceChannelCloseConfs)
+}

From 52eac64bf730014f30d94182feaef5dac84c1664 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Wed, 1 Oct 2025 16:43:53 -0700
Subject: [PATCH 07/26] multi: add new ChannelCloseConfs param, thread thru as
 needed

In this commit, we add a new param that'll allow us to scale up the
number of confirmations before we act on a new close. We'll use this
later to improve the current on-chain handling logic.
---
 contractcourt/chain_arbitrator.go | 8 ++++++++
 contractcourt/chain_watcher.go    | 6 ++++++
 peer/brontide.go                  | 6 ++++++
 server.go                         | 8 +++++---
 4 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/contractcourt/chain_arbitrator.go b/contractcourt/chain_arbitrator.go
index 05eb46a68b..b4b2fa26fd 100644
--- a/contractcourt/chain_arbitrator.go
+++ b/contractcourt/chain_arbitrator.go
@@ -230,6 +230,12 @@ type ChainArbitratorConfig struct {
 	// AuxResolver is an optional interface that can be used to modify the
 	// way contracts are resolved.
 	AuxResolver fn.Option[lnwallet.AuxContractResolver]
+
+	// ChannelCloseConfs is an optional override for the number of
+	// confirmations required for channel closes. When set, this overrides
+	// the normal capacity-based scaling. This is only available in
+	// dev/integration builds for testing purposes.
+	ChannelCloseConfs fn.Option[uint32]
 }
 
 // ChainArbitrator is a sub-system that oversees the on-chain resolution of all
@@ -1138,6 +1144,7 @@ func (c *ChainArbitrator) WatchNewChannel(newChan *channeldb.OpenChannel) error
 			extractStateNumHint: lnwallet.GetStateNumHint,
 			auxLeafStore:        c.cfg.AuxLeafStore,
 			auxResolver:         c.cfg.AuxResolver,
+			chanCloseConfs:      c.cfg.ChannelCloseConfs,
 		},
 	)
 	if err != nil {
@@ -1315,6 +1322,7 @@ func (c *ChainArbitrator) loadOpenChannels() error {
 			extractStateNumHint: lnwallet.GetStateNumHint,
 			auxLeafStore:        c.cfg.AuxLeafStore,
 			auxResolver:         c.cfg.AuxResolver,
+			chanCloseConfs:      c.cfg.ChannelCloseConfs,
 		},
 	)
 	if err != nil {
diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go
index 9c566fd6b5..1600aa2d74 100644
--- a/contractcourt/chain_watcher.go
+++ b/contractcourt/chain_watcher.go
@@ -229,6 +229,12 @@ type chainWatcherConfig struct {
 
 	// auxResolver is used to supplement contract resolution.
 	auxResolver fn.Option[lnwallet.AuxContractResolver]
+
+	// chanCloseConfs is an optional override for the number of
+	// confirmations required for channel closes. When set, this overrides
+	// the normal capacity-based scaling. This is only available in
+	// dev/integration builds for testing purposes.
+	chanCloseConfs fn.Option[uint32]
 }
 
 // chainWatcher is a system that's assigned to every active channel. The duty
diff --git a/peer/brontide.go b/peer/brontide.go
index 6fbba297d4..7d69aeefb9 100644
--- a/peer/brontide.go
+++ b/peer/brontide.go
@@ -370,6 +370,12 @@ type Config struct {
 	// closure initiated by the remote peer.
 	CoopCloseTargetConfs uint32
 
+	// ChannelCloseConfs is an optional override for the number of
+	// confirmations required for channel closes. When set, this overrides
+	// the normal capacity-based scaling. This is only available in
+	// dev/integration builds for testing purposes.
+	ChannelCloseConfs fn.Option[uint32]
+
 	// ServerPubKey is the serialized, compressed public key of our lnd node.
 	// It is used to determine which policy (channel edge) to pass to the
 	// ChannelLink.
diff --git a/server.go b/server.go
index f8391a3219..188f5bc560 100644
--- a/server.go
+++ b/server.go
@@ -1372,9 +1372,10 @@ func newServer(ctx context.Context, cfg *Config, listenAddrs []net.Addr,
 			return &pc.Incoming
 		},
 
-		AuxLeafStore: implCfg.AuxLeafStore,
-		AuxSigner:    implCfg.AuxSigner,
-		AuxResolver:  implCfg.AuxContractResolver,
+		AuxLeafStore:      implCfg.AuxLeafStore,
+		AuxSigner:         implCfg.AuxSigner,
+		AuxResolver:       implCfg.AuxContractResolver,
+		ChannelCloseConfs: s.cfg.Dev.ChannelCloseConfs(),
 	}, dbs.ChanStateDB)
 
 	// Select the configuration and funding parameters for Bitcoin.
@@ -4390,6 +4391,7 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
 		MaxOutgoingCltvExpiry:   s.cfg.MaxOutgoingCltvExpiry,
 		MaxChannelFeeAllocation: s.cfg.MaxChannelFeeAllocation,
 		CoopCloseTargetConfs:    s.cfg.CoopCloseTargetConfs,
+		ChannelCloseConfs:       s.cfg.Dev.ChannelCloseConfs(),
 		MaxAnchorsCommitFeeRate: chainfee.SatPerKVByte(
 			s.cfg.MaxCommitFeeRateAnchors * 1000).FeePerKWeight(),
 		ChannelCommitInterval:   s.cfg.ChannelCommitInterval,

From 25db07cebcba2a8752bcec98895f014d1b0cf6c5 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Wed, 1 Oct 2025 16:44:44 -0700
Subject: [PATCH 08/26] peer: send out a notification after the 1st conf, then
 wait for the rest

We want to add better handling, but not break any UIs or wallets. So
we'll continue to send out a notification after a single confirmation,
then send another after things are fully confirmed.
---
 peer/brontide.go | 46 +++++++++++++++++++++++++++-------------------
 1 file changed, 27 insertions(+), 19 deletions(-)

diff --git a/peer/brontide.go b/peer/brontide.go
index 7d69aeefb9..a1aa1a2c20 100644
--- a/peer/brontide.go
+++ b/peer/brontide.go
@@ -4442,27 +4442,35 @@ func (p *Brontide) finalizeChanClosure(chanCloser *chancloser.ChanCloser) {
 	// If this is a locally requested shutdown, update the caller with a
 	// new event detailing the current pending state of this request.
 	if closeReq != nil {
+		// TODO(roasbeef): don't actually need this?
 		closeReq.Updates <- &PendingUpdate{
-			Txid: closingTxid[:],
+			Txid:           closingTxid[:],
+			IsLocalCloseTx: fn.Some(true),
 		}
 	}
 
 	localOut := chanCloser.LocalCloseOutput()
 	remoteOut := chanCloser.RemoteCloseOutput()
 	auxOut := chanCloser.AuxOutputs()
-	// Determine the number of confirmations to wait before
-	// signaling a successful cooperative close, scaled by
-	// channel capacity (see CloseConfsForCapacity).
-	numConfs := lnwallet.CloseConfsForCapacity(chanCloser.Channel().Capacity)
-
-	go WaitForChanToClose(
-		chanCloser.NegotiationHeight(), notifier, errChan,
-		&chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, numConfs, func() {
-			// Respond to the local subsystem which requested the
-			// channel closure.
-			if closeReq != nil {
-				closeReq.Updates <- &ChannelCloseUpdate{
-					ClosingTxid: closingTxid[:],
+
+	// Determine the number of confirmations to wait before signaling a
+	// successful cooperative close, scaled by channel capacity (see
+	// CloseConfsForCapacity). Check if we have a config override for
+	// testing purposes.
+	numConfs := p.cfg.ChannelCloseConfs.UnwrapOrFunc(func() uint32 {
+		// No override, use normal capacity-based scaling.
+		return lnwallet.CloseConfsForCapacity(chanCloser.Channel().Capacity)
+	})
+
+	// Register for full confirmation to send the final update.
+	go WaitForChanToClose(
+		chanCloser.NegotiationHeight(), notifier, errChan,
+		&chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, numConfs, func() {
+			// Respond to the local subsystem which requested the
+			// channel closure.
+			if closeReq != nil {
+				closeReq.Updates <- &ChannelCloseUpdate{
+					ClosingTxid:       closingTxid[:],
 					Success:           true,
 					LocalCloseOutput:  localOut,
 					RemoteCloseOutput: remoteOut,
@@ -4468,16 +4473,15 @@ func (p *Brontide) finalizeChanClosure(chanCloser *chancloser.ChanCloser) {
 // finally the callback will be executed. If any error is encountered within
 // the function, then it will be sent over the errChan.
 func WaitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier,
-	errChan chan error, chanPoint *wire.OutPoint,
-	closingTxID *chainhash.Hash, closeScript []byte, numConfs uint32, cb func()) {
 
 	peerLog.Infof("Waiting for confirmation of close of ChannelPoint(%v) "+
 		"with txid: %v", chanPoint, closingTxID)
 
-	confNtfn, err := notifier.RegisterConfirmationsNtfn(
-		closingTxID, closeScript, numConfs, bestHeight,
-	)
 	if err != nil {
 		if errChan != nil {
 			errChan <- err

From a61c93527d72c543c01d37c1f4993fc8cb54776f Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Wed, 1 Oct 2025 16:50:19 -0700
Subject: [PATCH 09/26] contractcourt: update close logic to handle re-orgs of
 depth n-1, where n is num confs

In this commit, we update the close logic to handle re-orgs up to the
final number of confirmations. This is done generically, so we're able
to handle events such as: coop close confirm, re-org, breach confirm,
re-org, force close confirm, re-org, etc. The upcoming set of new tests
will exercise all of these cases.

We modify the block beat handling to unify the control flow, as it's
possible we get the beat, then see the spend, or the other way around.
---
 contractcourt/chain_watcher.go | 298 ++++++++++++++++++++++++++++-----
 1 file changed, 258 insertions(+), 40 deletions(-)

diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go
index 1600aa2d74..234fc7c38b 100644
--- a/contractcourt/chain_watcher.go
+++ b/contractcourt/chain_watcher.go
@@ -88,6 +88,38 @@ type BreachCloseInfo struct {
 	CloseSummary channeldb.ChannelCloseSummary
 }
 
+// spendConfirmationState represents the state of spend confirmation tracking
+// in the closeObserver state machine. We wait for N confirmations before
+// processing any spend to protect against shallow reorgs.
+type spendConfirmationState uint8
+
+const (
+	// spendStateNone indicates no spend has been detected yet.
+	spendStateNone spendConfirmationState = iota
+
+	// spendStatePending indicates a spend has been detected and we're
+	// waiting for the required number of confirmations.
+	spendStatePending
+
+	// spendStateConfirmed indicates the spend has reached the required
+	// confirmations and has been processed.
+	spendStateConfirmed
+)
+
+// String returns a human-readable representation of the state.
+func (s spendConfirmationState) String() string {
+	switch s {
+	case spendStateNone:
+		return "None"
+	case spendStatePending:
+		return "Pending"
+	case spendStateConfirmed:
+		return "Confirmed"
+	default:
+		return "Unknown"
+	}
+}
+
 // CommitSet is a collection of the set of known valid commitments at a given
 // instant. If ConfCommitKey is set, then the commitment identified by the
 // HtlcSetKey has hit the chain. This struct will be used to examine all live
@@ -652,51 +684,225 @@ func newChainSet(chanState *channeldb.OpenChannel) (*chainSet, error) {
 }
 
 // closeObserver is a dedicated goroutine that will watch for any closes of the
-// channel that it's watching on chain. In the event of an on-chain event, the
-// close observer will assembled the proper materials required to claim the
-// funds of the channel on-chain (if required), then dispatch these as
-// notifications to all subscribers.
+// channel that it's watching on chain.
It implements a state machine to handle +// spend detection and confirmation with reorg protection. The states are: +// +// - None (confNtfn == nil): No spend detected yet, waiting for spend +// notification +// +// - Pending (confNtfn != nil): Spend detected, waiting for N confirmations +// +// - Confirmed: Spend confirmed with N blocks, close has been processed func (c *chainWatcher) closeObserver() { defer c.wg.Done() - defer c.fundingSpendNtfn.Cancel() + + registerForSpend := func() (*chainntnfs.SpendEvent, error) { + fundingPkScript, err := deriveFundingPkScript(c.cfg.chanState) + if err != nil { + return nil, err + } + + heightHint := c.cfg.chanState.DeriveHeightHint() + + return c.cfg.notifier.RegisterSpendNtfn( + &c.cfg.chanState.FundingOutpoint, + fundingPkScript, + heightHint, + ) + } + + spendNtfn := c.fundingSpendNtfn + defer spendNtfn.Cancel() + + // We use these variables to implement a state machine to track the + // state of the spend confirmation process: + // * When confNtfn is nil, we're in state "None" waiting for a spend. + // * When confNtfn is set, we're in state "Pending" waiting for + // confirmations. + // + // After confirmations, we transition to state "Confirmed" and clean up. + var ( + pendingSpend *chainntnfs.SpendDetail + confNtfn *chainntnfs.ConfirmationEvent + ) log.Infof("Close observer for ChannelPoint(%v) active", c.cfg.chanState.FundingOutpoint) + // handleSpendDetection processes a newly detected spend by registering + // for confirmations. Returns the new confNtfn or error. + handleSpendDetection := func( + spend *chainntnfs.SpendDetail, + ) (*chainntnfs.ConfirmationEvent, error) { + + // If we already have a pending spend, check if it's the same + // transaction. This can happen if both the spend notification + // and blockbeat detect the same spend. + if pendingSpend != nil { + if *pendingSpend.SpenderTxHash == *spend.SpenderTxHash { + log.Debugf("ChannelPoint(%v): ignoring "+ + "duplicate spend detection for tx %v", + c.cfg.chanState.FundingOutpoint, + spend.SpenderTxHash) + return confNtfn, nil + } + + // Different spend detected. Cancel existing confNtfn + // and replace with new one. + log.Warnf("ChannelPoint(%v): detected different "+ + "spend tx %v, replacing pending tx %v", + c.cfg.chanState.FundingOutpoint, + spend.SpenderTxHash, + pendingSpend.SpenderTxHash) + + if confNtfn != nil { + confNtfn.Cancel() + } + } + + numConfs := c.requiredConfsForSpend() + txid := spend.SpenderTxHash + + newConfNtfn, err := c.cfg.notifier.RegisterConfirmationsNtfn( + txid, spend.SpendingTx.TxOut[0].PkScript, + numConfs, uint32(spend.SpendingHeight), + ) + if err != nil { + return nil, fmt.Errorf("register confirmations: %w", + err) + } + + log.Infof("ChannelPoint(%v): waiting for %d confirmations "+ + "of spend tx %v", c.cfg.chanState.FundingOutpoint, + numConfs, txid) + + return newConfNtfn, nil + } + for { + // We only listen to confirmation channels when we have a + // pending spend. By setting these to nil when not needed, Go's + // select ignores those cases, effectively implementing our + // state machine. + var ( + confChan <-chan *chainntnfs.TxConfirmation + negativeConfChan <-chan int32 + ) + if confNtfn != nil { + confChan = confNtfn.Confirmed + negativeConfChan = confNtfn.NegativeConf + } + select { - // A new block is received, we will check whether this block - // contains a spending tx that we are interested in. 
case beat := <-c.BlockbeatChan: log.Debugf("ChainWatcher(%v) received blockbeat %v", c.cfg.chanState.FundingOutpoint, beat.Height()) - // Process the block. - c.handleBlockbeat(beat) - - // If the funding outpoint is spent, we now go ahead and handle - // it. Note that we cannot rely solely on the `block` event - // above to trigger a close event, as deep down, the receiving - // of block notifications and the receiving of spending - // notifications are done in two different goroutines, so the - // expected order: [receive block -> receive spend] is not - // guaranteed . - case spend, ok := <-c.fundingSpendNtfn.Spend: - // If the channel was closed, then this means that the - // notifier exited, so we will as well. + spend := c.handleBlockbeat(beat) + if spend == nil { + continue + } + + // STATE TRANSITION: None -> Pending (from blockbeat). + log.Infof("ChannelPoint(%v): detected spend from "+ + "blockbeat, transitioning to %v", + c.cfg.chanState.FundingOutpoint, + spendStatePending) + + newConfNtfn, err := handleSpendDetection(spend) + if err != nil { + log.Errorf("Unable to handle spend "+ + "detection: %v", err) + return + } + pendingSpend = spend + confNtfn = newConfNtfn + + // STATE TRANSITION: None -> Pending. + // We've detected a spend, but don't process it yet. Instead, + // register for confirmations to protect against shallow reorgs. + case spend, ok := <-spendNtfn.Spend: + if !ok { + return + } + + log.Infof("ChannelPoint(%v): detected spend from "+ + "notification, transitioning to %v", + c.cfg.chanState.FundingOutpoint, + spendStatePending) + + newConfNtfn, err := handleSpendDetection(spend) + if err != nil { + log.Errorf("Unable to handle spend "+ + "detection: %v", err) + return + } + pendingSpend = spend + confNtfn = newConfNtfn + + // STATE TRANSITION: Pending -> Confirmed + // The spend has reached required confirmations. It's now safe + // to process since we've protected against shallow reorgs. + case conf, ok := <-confChan: + if !ok { + log.Errorf("Confirmation channel closed " + + "unexpectedly") + return + } + + log.Infof("ChannelPoint(%v): spend confirmed at "+ + "height %d, transitioning to %v", + c.cfg.chanState.FundingOutpoint, + conf.BlockHeight, spendStateConfirmed) + + err := c.handleCommitSpend(pendingSpend) + if err != nil { + log.Errorf("Failed to handle confirmed "+ + "spend: %v", err) + } + + confNtfn.Cancel() + confNtfn = nil + pendingSpend = nil + + // STATE TRANSITION: Pending -> None + // A reorg removed the spend tx. We reset to initial state and + // wait for ANY new spend (could be the same tx re-mined, or a + // different tx like an RBF replacement). + case reorgDepth, ok := <-negativeConfChan: if !ok { + log.Errorf("Negative conf channel closed " + + "unexpectedly") return } - err := c.handleCommitSpend(spend) + log.Infof("ChannelPoint(%v): spend reorged out at "+ + "depth %d, transitioning back to %v", + c.cfg.chanState.FundingOutpoint, reorgDepth, + spendStateNone) + + confNtfn.Cancel() + confNtfn = nil + pendingSpend = nil + + spendNtfn.Cancel() + var err error + spendNtfn, err = registerForSpend() if err != nil { - log.Errorf("Failed to handle commit spend: %v", - err) + log.Errorf("Unable to re-register for "+ + "spend: %v", err) + return } + log.Infof("ChannelPoint(%v): re-registered for spend "+ + "detection", c.cfg.chanState.FundingOutpoint) + // The chainWatcher has been signalled to exit, so we'll do so // now. 
case <-c.quit: + if confNtfn != nil { + confNtfn.Cancel() + } return } } @@ -992,6 +1198,18 @@ func (c *chainWatcher) toSelfAmount(tx *wire.MsgTx) btcutil.Amount { return btcutil.Amount(fn.Sum(vals)) } +// requiredConfsForSpend determines the number of confirmations required before +// processing a spend of the funding output. Uses config override if set +// (typically for testing), otherwise scales with channel capacity to balance +// security vs user experience for channels of different sizes. +func (c *chainWatcher) requiredConfsForSpend() uint32 { + return c.cfg.chanCloseConfs.UnwrapOrFunc(func() uint32 { + return lnwallet.CloseConfsForCapacity( + c.cfg.chanState.Capacity, + ) + }) +} + // dispatchCooperativeClose processed a detect cooperative channel closure. // We'll use the spending transaction to locate our output within the // transaction, then clean up the database state. We'll also dispatch a @@ -1009,8 +1227,8 @@ func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDet localAmt := c.toSelfAmount(broadcastTx) // Once this is known, we'll mark the state as fully closed in the - // database. We can do this as a cooperatively closed channel has all - // its outputs resolved after only one confirmation. + // database. For cooperative closes, we wait for a confirmation depth + // determined by channel capacity before dispatching this event. closeSummary := &channeldb.ChannelCloseSummary{ ChanPoint: c.cfg.chanState.FundingOutpoint, ChainHash: c.cfg.chanState.ChainHash, @@ -1419,9 +1637,10 @@ func (c *chainWatcher) handleCommitSpend( case wire.MaxTxInSequenceNum: fallthrough case mempool.MaxRBFSequence: - // TODO(roasbeef): rare but possible, need itest case for - err := c.dispatchCooperativeClose(commitSpend) - if err != nil { + // This is a cooperative close. Dispatch it directly - the + // confirmation waiting and reorg handling is done in the + // closeObserver state machine before we reach this point. + if err := c.dispatchCooperativeClose(commitSpend); err != nil { return fmt.Errorf("handle coop close: %w", err) } @@ -1526,9 +1745,9 @@ func (c *chainWatcher) chanPointConfirmed() bool { } // handleBlockbeat takes a blockbeat and queries for a spending tx for the -// funding output. If the spending tx is found, it will be handled based on the -// closure type. -func (c *chainWatcher) handleBlockbeat(beat chainio.Blockbeat) { +// funding output. If found, it returns the spend details so closeObserver can +// process it. Returns nil if no spend was detected. +func (c *chainWatcher) handleBlockbeat(beat chainio.Blockbeat) *chainntnfs.SpendDetail { // Notify the chain watcher has processed the block. defer c.NotifyBlockProcessed(beat, nil) @@ -1540,24 +1759,23 @@ func (c *chainWatcher) handleBlockbeat(beat chainio.Blockbeat) { // If the funding output hasn't confirmed in this block, we // will check it again in the next block. if !c.chanPointConfirmed() { - return + return nil } } // Perform a non-blocking read to check whether the funding output was - // spent. + // spent. The actual spend handling is done in closeObserver's state + // machine to avoid blocking the block processing pipeline. spend := c.checkFundingSpend() if spend == nil { log.Tracef("No spend found for ChannelPoint(%v) in block %v", c.cfg.chanState.FundingOutpoint, beat.Height()) - return + return nil } - // The funding output was spent, we now handle it by sending a close - // event to the channel arbitrator. 
- err := c.handleCommitSpend(spend) - if err != nil { - log.Errorf("Failed to handle commit spend: %v", err) - } + log.Debugf("Detected spend of ChannelPoint(%v) in block %v", + c.cfg.chanState.FundingOutpoint, beat.Height()) + + return spend } From d8363605f6b7cba1fa1f1cb5af26b27fe1cb5618 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 1 Oct 2025 16:50:39 -0700 Subject: [PATCH 10/26] lntest: add new wait for conf helper method to ChainNotifier --- lntest/harness_assertion.go | 5 +++-- lntest/mock/chainnotifier.go | 40 +++++++++++++++++++++++++++++++++--- 2 files changed, 40 insertions(+), 5 deletions(-) diff --git a/lntest/harness_assertion.go b/lntest/harness_assertion.go index 544576bef9..958278d3cc 100644 --- a/lntest/harness_assertion.go +++ b/lntest/harness_assertion.go @@ -18,6 +18,7 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" + "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" @@ -552,8 +553,8 @@ func (h HarnessTest) WaitForChannelCloseEvent( require.NoError(h, err) resp, ok := event.Update.(*lnrpc.CloseStatusUpdate_ChanClose) - require.Truef(h, ok, "expected channel close update, instead got %v", - event.Update) + require.Truef(h, ok, "expected channel close update, instead got %T: %v", + event.Update, spew.Sdump(event.Update)) txid, err := chainhash.NewHash(resp.ChanClose.ClosingTxid) require.NoErrorf(h, err, "wrong format found in closing txid: %v", diff --git a/lntest/mock/chainnotifier.go b/lntest/mock/chainnotifier.go index ddce8defa2..09464ba85c 100644 --- a/lntest/mock/chainnotifier.go +++ b/lntest/mock/chainnotifier.go @@ -1,6 +1,9 @@ package mock import ( + "testing" + "time" + "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/chainntnfs" @@ -8,9 +11,10 @@ import ( // ChainNotifier is a mock implementation of the ChainNotifier interface. type ChainNotifier struct { - SpendChan chan *chainntnfs.SpendDetail - EpochChan chan *chainntnfs.BlockEpoch - ConfChan chan *chainntnfs.TxConfirmation + SpendChan chan *chainntnfs.SpendDetail + EpochChan chan *chainntnfs.BlockEpoch + ConfChan chan *chainntnfs.TxConfirmation + ConfRegistered chan struct{} } // RegisterConfirmationsNtfn returns a ConfirmationEvent that contains a channel @@ -19,6 +23,14 @@ func (c *ChainNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, pkScript []byte, numConfs, heightHint uint32, opts ...chainntnfs.NotifierOption) (*chainntnfs.ConfirmationEvent, error) { + // Signal that a confirmation registration occurred. + if c.ConfRegistered != nil { + select { + case c.ConfRegistered <- struct{}{}: + default: + } + } + return &chainntnfs.ConfirmationEvent{ Confirmed: c.ConfChan, Cancel: func() {}, @@ -61,3 +73,25 @@ func (c *ChainNotifier) Started() bool { func (c *ChainNotifier) Stop() error { return nil } + +// WaitForConfRegistrationAndSend waits for a confirmation registration to occur +// and then sends a confirmation notification. This is a helper function for tests +// that need to ensure the chain watcher has registered for confirmations before +// sending the confirmation. +func (c *ChainNotifier) WaitForConfRegistrationAndSend(t *testing.T) { + t.Helper() + + // Wait for the chain watcher to register for confirmations. 
+ select { + case <-c.ConfRegistered: + case <-time.After(time.Second * 2): + t.Fatalf("timeout waiting for conf registration") + } + + // Send the confirmation to satisfy the confirmation requirement. + select { + case c.ConfChan <- &chainntnfs.TxConfirmation{}: + case <-time.After(time.Second * 1): + t.Fatalf("unable to send confirmation") + } +} From cc6a8eff8db20d41c35b4262f3b84830c23fd46c Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 1 Oct 2025 16:51:00 -0700 Subject: [PATCH 11/26] contractcourt: add new chainWatcherTestHarness We'll use this for all the upcoming tests. --- contractcourt/chain_watcher_test_harness.go | 693 ++++++++++++++++++++ 1 file changed, 693 insertions(+) create mode 100644 contractcourt/chain_watcher_test_harness.go diff --git a/contractcourt/chain_watcher_test_harness.go b/contractcourt/chain_watcher_test_harness.go new file mode 100644 index 0000000000..0b7c0c9741 --- /dev/null +++ b/contractcourt/chain_watcher_test_harness.go @@ -0,0 +1,693 @@ +package contractcourt + +import ( + "testing" + "time" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/chainio" + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/fn/v2" + lnmock "github.com/lightningnetwork/lnd/lntest/mock" + "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/stretchr/testify/mock" +) + +// testReporter is a minimal interface for test reporting that is satisfied by +// both *testing.T and *rapid.T, allowing the harness to work with property-based +// tests. +type testReporter interface { + Helper() + Fatalf(format string, args ...any) +} + +// chainWatcherTestHarness provides a test harness for chain watcher tests +// with utilities for simulating spends, confirmations, and reorganizations. +type chainWatcherTestHarness struct { + t testReporter + + // aliceChannel and bobChannel are the test channels. + aliceChannel *lnwallet.LightningChannel + bobChannel *lnwallet.LightningChannel + + // chainWatcher is the chain watcher under test. + chainWatcher *chainWatcher + + // notifier is the mock chain notifier. + notifier *mockChainNotifier + + // chanEvents is the channel event subscription. + chanEvents *ChainEventSubscription + + // currentHeight tracks the current block height. + currentHeight int32 + + // blockbeatProcessed is a channel that signals when a blockbeat has been processed. + blockbeatProcessed chan struct{} +} + +// mockChainNotifier extends the standard mock with additional channels for +// testing cooperative close reorgs. +type mockChainNotifier struct { + *lnmock.ChainNotifier + + // confEvents tracks active confirmation event subscriptions. + confEvents []*mockConfirmationEvent + + // confRegistered is a channel that signals when a new confirmation + // event has been registered. + confRegistered chan struct{} + + // spendEvents tracks active spend event subscriptions. + spendEvents []*chainntnfs.SpendEvent + + // spendRegistered is a channel that signals when a new spend + // event has been registered. + spendRegistered chan struct{} +} + +// mockConfirmationEvent represents a mock confirmation event subscription. +type mockConfirmationEvent struct { + txid chainhash.Hash + numConfs uint32 + confirmedChan chan *chainntnfs.TxConfirmation + negConfChan chan int32 + cancelled bool +} + +// RegisterSpendNtfn creates a new mock spend event. 
+func (m *mockChainNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, + pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) { + + // The base mock already has SpendChan, use that. + spendEvent := &chainntnfs.SpendEvent{ + Spend: m.SpendChan, + Cancel: func() { + // No-op for now. + }, + } + + m.spendEvents = append(m.spendEvents, spendEvent) + + // Signal that a new spend event has been registered. + select { + case m.spendRegistered <- struct{}{}: + default: + } + + return spendEvent, nil +} + +// RegisterConfirmationsNtfn creates a new mock confirmation event. +func (m *mockChainNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, + pkScript []byte, numConfs, heightHint uint32, + opts ...chainntnfs.NotifierOption) (*chainntnfs.ConfirmationEvent, error) { + + mockEvent := &mockConfirmationEvent{ + txid: *txid, + numConfs: numConfs, + confirmedChan: make(chan *chainntnfs.TxConfirmation, 1), + negConfChan: make(chan int32, 1), + } + + m.confEvents = append(m.confEvents, mockEvent) + + // Signal that a new confirmation event has been registered. + select { + case m.confRegistered <- struct{}{}: + default: + } + + return &chainntnfs.ConfirmationEvent{ + Confirmed: mockEvent.confirmedChan, + NegativeConf: mockEvent.negConfChan, + Cancel: func() { + mockEvent.cancelled = true + }, + }, nil +} + +// harnessOpt is a functional option for configuring the test harness. +type harnessOpt func(*harnessConfig) + +// harnessConfig holds configuration for the test harness. +type harnessConfig struct { + requiredConfs fn.Option[uint32] +} + +// withRequiredConfs sets the number of confirmations required for channel closes. +func withRequiredConfs(confs uint32) harnessOpt { + return func(cfg *harnessConfig) { + cfg.requiredConfs = fn.Some(confs) + } +} + +// newChainWatcherTestHarness creates a new test harness for chain watcher tests. +func newChainWatcherTestHarness(t *testing.T, opts ...harnessOpt) *chainWatcherTestHarness { + return newChainWatcherTestHarnessFromReporter(t, t, opts...) +} + +// newChainWatcherTestHarnessFromReporter creates a test harness that works with +// both *testing.T and *rapid.T. The testingT parameter is used for operations +// that specifically require *testing.T (like CreateTestChannels), while reporter +// is used for all test reporting (Helper, Fatalf). +func newChainWatcherTestHarnessFromReporter(testingT *testing.T, + reporter testReporter, opts ...harnessOpt) *chainWatcherTestHarness { + + reporter.Helper() + + // Apply options. + cfg := &harnessConfig{ + requiredConfs: fn.None[uint32](), + } + for _, opt := range opts { + opt(cfg) + } + + // Create test channels. + aliceChannel, bobChannel, err := lnwallet.CreateTestChannels( + testingT, channeldb.SingleFunderTweaklessBit, + ) + if err != nil { + reporter.Fatalf("unable to create test channels: %v", err) + } + + // Create mock notifier. + baseNotifier := &lnmock.ChainNotifier{ + SpendChan: make(chan *chainntnfs.SpendDetail, 1), + EpochChan: make(chan *chainntnfs.BlockEpoch), + ConfChan: make(chan *chainntnfs.TxConfirmation, 1), + } + + notifier := &mockChainNotifier{ + ChainNotifier: baseNotifier, + confEvents: make([]*mockConfirmationEvent, 0), + confRegistered: make(chan struct{}, 10), + spendEvents: make([]*chainntnfs.SpendEvent, 0), + spendRegistered: make(chan struct{}, 10), + } + + // Create chain watcher. 
+ chainWatcher, err := newChainWatcher(chainWatcherConfig{ + chanState: aliceChannel.State(), + notifier: notifier, + signer: aliceChannel.Signer, + extractStateNumHint: lnwallet.GetStateNumHint, + chanCloseConfs: cfg.requiredConfs, + contractBreach: func(retInfo *lnwallet.BreachRetribution) error { + // In tests, we just need to accept the breach notification. + // The actual breach handling is tested elsewhere. + return nil + }, + }) + if err != nil { + reporter.Fatalf("unable to create chain watcher: %v", err) + } + + // Start chain watcher (this will register for spend notification). + err = chainWatcher.Start() + if err != nil { + reporter.Fatalf("unable to start chain watcher: %v", err) + } + + // Subscribe to channel events. + chanEvents := chainWatcher.SubscribeChannelEvents() + + harness := &chainWatcherTestHarness{ + t: reporter, + aliceChannel: aliceChannel, + bobChannel: bobChannel, + chainWatcher: chainWatcher, + notifier: notifier, + chanEvents: chanEvents, + currentHeight: 100, + blockbeatProcessed: make(chan struct{}), + } + + // Wait for the initial spend registration that happens in Start(). + harness.waitForSpendRegistration() + + // Verify BlockbeatChan is initialized. + if chainWatcher.BlockbeatChan == nil { + reporter.Fatalf("BlockbeatChan is nil after initialization") + } + + // Register cleanup. We use the testingT for Cleanup since rapid.T + // may not have this method in the same way. + testingT.Cleanup(func() { + chainWatcher.Stop() + }) + + return harness +} + +// createCoopCloseTx creates a cooperative close transaction with the given +// output value. The transaction will have the proper sequence number to +// indicate it's a cooperative close. +func (h *chainWatcherTestHarness) createCoopCloseTx(outputValue int64) *wire.MsgTx { + return &wire.MsgTx{ + TxIn: []*wire.TxIn{{ + PreviousOutPoint: h.aliceChannel.State().FundingOutpoint, + Sequence: wire.MaxTxInSequenceNum, + }}, + TxOut: []*wire.TxOut{{ + Value: outputValue, + PkScript: []byte{byte(outputValue % 255)}, // Unique script. + }}, + } +} + +// createRemoteUnilateralCloseTx creates a remote unilateral close transaction. +// From Alice's perspective, this is Bob's local commitment transaction. +func (h *chainWatcherTestHarness) createRemoteUnilateralCloseTx() *wire.MsgTx { + return h.bobChannel.State().LocalCommitment.CommitTx +} + +// createLocalForceCloseTx creates a local force close transaction. +// This is Alice's local commitment transaction. +func (h *chainWatcherTestHarness) createLocalForceCloseTx() *wire.MsgTx { + return h.aliceChannel.State().LocalCommitment.CommitTx +} + +// createBreachCloseTx creates a breach (revoked commitment) transaction. +// We advance the channel state, save the commitment, then advance again +// to revoke it. Returns the revoked commitment tx. +func (h *chainWatcherTestHarness) createBreachCloseTx() *wire.MsgTx { + h.t.Helper() + + // To create a revoked commitment, we need to advance the channel state + // at least once. We'll use the test utils helper to add an HTLC and + // force a state transition. + + // Get the current commitment before we advance (this will be revoked). + revokedCommit := h.bobChannel.State().LocalCommitment.CommitTx + + // Add a fake HTLC to advance state. + htlcAmount := lnwire.NewMSatFromSatoshis(10000) + paymentHash := [32]byte{4, 5, 6} + htlc := &lnwire.UpdateAddHTLC{ + ID: 0, + Amount: htlcAmount, + Expiry: uint32(h.currentHeight + 100), + PaymentHash: paymentHash, + } + + // Add HTLC to both channels. 
+ if _, err := h.aliceChannel.AddHTLC(htlc, nil); err != nil { + h.t.Fatalf("unable to add HTLC to alice: %v", err) + } + if _, err := h.bobChannel.ReceiveHTLC(htlc); err != nil { + h.t.Fatalf("unable to add HTLC to bob: %v", err) + } + + // Force state transition using the helper. + if err := lnwallet.ForceStateTransition(h.aliceChannel, h.bobChannel); err != nil { + h.t.Fatalf("unable to force state transition: %v", err) + } + + // Return the revoked commitment (Bob's previous local commitment). + return revokedCommit +} + +// sendSpend sends a spend notification for the given transaction. +func (h *chainWatcherTestHarness) sendSpend(tx *wire.MsgTx) { + h.t.Helper() + + txHash := tx.TxHash() + spend := &chainntnfs.SpendDetail{ + SpenderTxHash: &txHash, + SpendingTx: tx, + SpendingHeight: h.currentHeight, + } + + select { + case h.notifier.SpendChan <- spend: + case <-time.After(time.Second): + h.t.Fatalf("unable to send spend notification") + } +} + +// sendBlockBeat sends a blockbeat to the chain watcher. +// Note: This is not used for cooperative close tests since the chain watcher +// blocks synchronously waiting for confirmations and can't process blockbeats. +func (h *chainWatcherTestHarness) sendBlockBeat() { + h.t.Helper() + + // Create mock blockbeat exactly as the other tests do. + mockBeat := &chainio.MockBlockbeat{} + + // Mock the logger. We don't care how many times it's called as it's + // not critical. + mockBeat.On("logger").Return(log) + + // Mock a fake block height - this is called based on the debuglevel. + mockBeat.On("Height").Return(h.currentHeight).Maybe() + + // Create a channel to signal when blockbeat is processed. + processed := make(chan struct{}) + + // Mock `NotifyBlockProcessed` to signal when done. + mockBeat.On("NotifyBlockProcessed", + nil, h.chainWatcher.quit).Return().Run(func(args mock.Arguments) { + close(processed) + }).Once() + + // Send the blockbeat. + select { + case h.chainWatcher.BlockbeatChan <- mockBeat: + case <-time.After(5 * time.Second): + h.t.Fatalf("unable to send blockbeat") + } + + // Wait for the blockbeat to be processed. + select { + case <-processed: + // Blockbeat processed. + case <-time.After(5 * time.Second): + h.t.Fatalf("blockbeat not processed") + } +} + +// confirmTx sends a confirmation notification for the given transaction. +func (h *chainWatcherTestHarness) confirmTx(tx *wire.MsgTx, height int32) { + h.t.Helper() + + // Find the confirmation event for this transaction. + txHash := tx.TxHash() + var confEvent *mockConfirmationEvent + for _, event := range h.notifier.confEvents { + if event.txid == txHash && !event.cancelled { + confEvent = event + break + } + } + + if confEvent == nil { + // The chain watcher might not have registered for confirmations yet. + // This is not necessarily an error in some test scenarios. + return + } + + // Send confirmation. + select { + case confEvent.confirmedChan <- &chainntnfs.TxConfirmation{ + Tx: tx, + BlockHeight: uint32(height), + }: + case <-time.After(time.Second): + h.t.Fatalf("unable to send confirmation") + } +} + +// triggerReorg sends a negative confirmation (reorg) notification for the +// given transaction with the specified reorg depth. +func (h *chainWatcherTestHarness) triggerReorg(tx *wire.MsgTx, reorgDepth int32) { + h.t.Helper() + + // Find the confirmation event for this transaction. 
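+	// As in confirmTx, we match on txid and skip cancelled registrations.
+	// The difference is that this helper feeds the negative-conf channel,
+	// which is how the watcher learns that a confirmed close was reorged
+	// out.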
+ txHash := tx.TxHash() + var confEvent *mockConfirmationEvent + for _, event := range h.notifier.confEvents { + if event.txid == txHash && !event.cancelled { + confEvent = event + break + } + } + + if confEvent == nil { + // The chain watcher might not have registered for confirmations yet. + // This is not necessarily an error in some test scenarios. + return + } + + // Send negative confirmation. + select { + case confEvent.negConfChan <- reorgDepth: + case <-time.After(time.Second): + h.t.Fatalf("unable to send negative confirmation") + } +} + +// mineBlocks advances the current block height. +func (h *chainWatcherTestHarness) mineBlocks(n int32) { + h.currentHeight += n +} + +// waitForCoopClose waits for a cooperative close event and returns it. +func (h *chainWatcherTestHarness) waitForCoopClose(timeout time.Duration) *CooperativeCloseInfo { + h.t.Helper() + + select { + case coopClose := <-h.chanEvents.CooperativeClosure: + return coopClose + case <-time.After(timeout): + h.t.Fatalf("didn't receive cooperative close event") + return nil + } +} + +// waitForConfRegistration waits for the chain watcher to register for +// confirmation notifications. +func (h *chainWatcherTestHarness) waitForConfRegistration() { + h.t.Helper() + + select { + case <-h.notifier.confRegistered: + // Registration complete. + case <-time.After(2 * time.Second): + // Not necessarily a failure - some tests don't register. + } +} + +// waitForSpendRegistration waits for the chain watcher to register for +// spend notifications. +func (h *chainWatcherTestHarness) waitForSpendRegistration() { + h.t.Helper() + + select { + case <-h.notifier.spendRegistered: + // Registration complete. + case <-time.After(2 * time.Second): + // Not necessarily a failure - some tests don't register. + } +} + +// assertCoopCloseTx asserts that the given cooperative close info matches +// the expected transaction. +func (h *chainWatcherTestHarness) assertCoopCloseTx( + closeInfo *CooperativeCloseInfo, expectedTx *wire.MsgTx) { + + h.t.Helper() + + expectedHash := expectedTx.TxHash() + if closeInfo.ClosingTXID != expectedHash { + h.t.Fatalf("wrong tx confirmed: expected %v, got %v", + expectedHash, closeInfo.ClosingTXID) + } +} + +// assertNoCoopClose asserts that no cooperative close event is received +// within the given timeout. +func (h *chainWatcherTestHarness) assertNoCoopClose(timeout time.Duration) { + h.t.Helper() + + select { + case <-h.chanEvents.CooperativeClosure: + h.t.Fatalf("unexpected cooperative close event") + case <-time.After(timeout): + // Expected timeout. + } +} + +// runCoopCloseFlow runs a complete cooperative close flow including spend, +// optional reorg, and confirmation. This helper coordinates the timing +// between the different events. +func (h *chainWatcherTestHarness) runCoopCloseFlow( + tx *wire.MsgTx, shouldReorg bool, reorgDepth int32, + altTx *wire.MsgTx) *CooperativeCloseInfo { + + h.t.Helper() + + // Send initial spend notification. + // This will trigger handleCommitSpend which will detect the coop close + // and call waitForCoopCloseConfirmation (which blocks). + h.sendSpend(tx) + + // Wait for the chain watcher to register for confirmations. + // This happens inside waitForCoopCloseConfirmation. + h.waitForConfRegistration() + + if shouldReorg { + // Trigger reorg to unblock waitForCoopCloseConfirmation. + h.triggerReorg(tx, reorgDepth) + + // If we have an alternative transaction, send it. 
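+		// The alternative transaction models an RBF replacement (or
+		// any other valid spend of the funding output) confirming on
+		// the post-reorg chain.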
+ if altTx != nil { + // After reorg, the chain watcher should re-register for + // ANY spend of the funding output. + h.waitForSpendRegistration() + + // Send alternative spend. + h.sendSpend(altTx) + + // Wait for it to register for confirmations. + h.waitForConfRegistration() + + // Confirm alternative transaction to unblock. + h.mineBlocks(1) + h.confirmTx(altTx, h.currentHeight) + } + } else { + // Normal confirmation flow - confirm to unblock waitForCoopCloseConfirmation. + h.mineBlocks(1) + h.confirmTx(tx, h.currentHeight) + } + + // Wait for cooperative close event. + return h.waitForCoopClose(5 * time.Second) +} + +// runMultipleReorgFlow simulates multiple consecutive reorganizations with +// different transactions confirming after each reorg. +func (h *chainWatcherTestHarness) runMultipleReorgFlow(txs []*wire.MsgTx, + reorgDepths []int32) *CooperativeCloseInfo { + + h.t.Helper() + + if len(txs) < 2 { + h.t.Fatalf("need at least 2 transactions for reorg flow") + } + if len(reorgDepths) != len(txs)-1 { + h.t.Fatalf("reorg depths must be one less than transactions") + } + + // Send initial spend. + h.sendSpend(txs[0]) + + // Process each reorg. + for i, depth := range reorgDepths { + // Wait for confirmation registration. + h.waitForConfRegistration() + + // Trigger reorg for current transaction. + h.triggerReorg(txs[i], depth) + + // Wait for re-registration for spend. + h.waitForSpendRegistration() + + // Send next transaction. + h.sendSpend(txs[i+1]) + } + + // Wait for final confirmation registration. + h.waitForConfRegistration() + + // Confirm the final transaction. + finalTx := txs[len(txs)-1] + h.mineBlocks(1) + h.confirmTx(finalTx, h.currentHeight) + + // Wait for cooperative close event. + return h.waitForCoopClose(10 * time.Second) +} + +// waitForRemoteUnilateralClose waits for a remote unilateral close event. +func (h *chainWatcherTestHarness) waitForRemoteUnilateralClose( + timeout time.Duration) *RemoteUnilateralCloseInfo { + + h.t.Helper() + + select { + case remoteClose := <-h.chanEvents.RemoteUnilateralClosure: + return remoteClose + case <-time.After(timeout): + h.t.Fatalf("didn't receive remote unilateral close event") + return nil + } +} + +// waitForLocalUnilateralClose waits for a local unilateral close event. +func (h *chainWatcherTestHarness) waitForLocalUnilateralClose( + timeout time.Duration) *LocalUnilateralCloseInfo { + + h.t.Helper() + + select { + case localClose := <-h.chanEvents.LocalUnilateralClosure: + return localClose + case <-time.After(timeout): + h.t.Fatalf("didn't receive local unilateral close event") + return nil + } +} + +// waitForBreach waits for a breach (contract breach) event. +func (h *chainWatcherTestHarness) waitForBreach( + timeout time.Duration) *BreachCloseInfo { + + h.t.Helper() + + select { + case breach := <-h.chanEvents.ContractBreach: + return breach + case <-time.After(timeout): + h.t.Fatalf("didn't receive contract breach event") + return nil + } +} + +// assertRemoteUnilateralCloseTx asserts that the given remote unilateral close +// info matches the expected transaction. 
+func (h *chainWatcherTestHarness) assertRemoteUnilateralCloseTx( + closeInfo *RemoteUnilateralCloseInfo, expectedTx *wire.MsgTx) { + + h.t.Helper() + + expectedHash := expectedTx.TxHash() + actualHash := closeInfo.UnilateralCloseSummary.SpendDetail.SpenderTxHash + if *actualHash != expectedHash { + h.t.Fatalf("wrong tx confirmed: expected %v, got %v", + expectedHash, *actualHash) + } +} + +// assertLocalUnilateralCloseTx asserts that the given local unilateral close +// info matches the expected transaction. +func (h *chainWatcherTestHarness) assertLocalUnilateralCloseTx( + closeInfo *LocalUnilateralCloseInfo, expectedTx *wire.MsgTx) { + + h.t.Helper() + + expectedHash := expectedTx.TxHash() + actualHash := closeInfo.LocalForceCloseSummary.CloseTx.TxHash() + if actualHash != expectedHash { + h.t.Fatalf("wrong tx confirmed: expected %v, got %v", + expectedHash, actualHash) + } +} + +// assertBreachTx asserts that the given breach info matches the expected +// transaction. +func (h *chainWatcherTestHarness) assertBreachTx( + breachInfo *BreachCloseInfo, expectedTx *wire.MsgTx) { + + h.t.Helper() + + expectedHash := expectedTx.TxHash() + if breachInfo.CommitHash != expectedHash { + h.t.Fatalf("wrong tx confirmed: expected %v, got %v", + expectedHash, breachInfo.CommitHash) + } +} + +// createChannelCapacity returns a channel capacity suitable for testing +// scaled confirmations. +func createChannelCapacity(scale float64) btcutil.Amount { + // Use the maximum channel size as base. + maxSize := btcutil.Amount(16777215) // From lnwallet constants. + return btcutil.Amount(float64(maxSize) * scale) +} \ No newline at end of file From 69104e75cbcbfcddf11e7386163ff7870dd0b057 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 1 Oct 2025 16:51:30 -0700 Subject: [PATCH 12/26] contractcourt: update existing chain watcher tests due to new logic All the tests need to send a confirmation _after_ the spend is detected now. --- contractcourt/chain_watcher_test.go | 49 ++++++++++++++++++++++------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/contractcourt/chain_watcher_test.go b/contractcourt/chain_watcher_test.go index 2dc3605d39..c57859ca42 100644 --- a/contractcourt/chain_watcher_test.go +++ b/contractcourt/chain_watcher_test.go @@ -12,6 +12,7 @@ import ( "github.com/lightningnetwork/lnd/chainio" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/fn/v2" "github.com/lightningnetwork/lnd/input" lnmock "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lnwallet" @@ -34,16 +35,19 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) { // With the channels created, we'll now create a chain watcher instance // which will be watching for any closes of Alice's channel. 
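+	// The registration channel is buffered so the mock notifier can signal
+	// the confirmation registration even before the test starts waiting on
+	// it.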
+ confRegistered := make(chan struct{}, 1) aliceNotifier := &lnmock.ChainNotifier{ - SpendChan: make(chan *chainntnfs.SpendDetail, 1), - EpochChan: make(chan *chainntnfs.BlockEpoch), - ConfChan: make(chan *chainntnfs.TxConfirmation), + SpendChan: make(chan *chainntnfs.SpendDetail, 1), + EpochChan: make(chan *chainntnfs.BlockEpoch), + ConfChan: make(chan *chainntnfs.TxConfirmation, 1), + ConfRegistered: confRegistered, } aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{ chanState: aliceChannel.State(), notifier: aliceNotifier, signer: aliceChannel.Signer, extractStateNumHint: lnwallet.GetStateNumHint, + chanCloseConfs: fn.Some(uint32(1)), }) require.NoError(t, err, "unable to create chain watcher") err = aliceChainWatcher.Start() @@ -90,6 +94,11 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) { t.Fatalf("unable to send blockbeat") } + // Wait for the chain watcher to register for confirmations and send + // the confirmation. Since we set chanCloseConfs to 1, one confirmation + // is sufficient. + aliceNotifier.WaitForConfRegistrationAndSend(t) + // We should get a new spend event over the remote unilateral close // event channel. var uniClose *RemoteUnilateralCloseInfo @@ -144,16 +153,19 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) { // With the channels created, we'll now create a chain watcher instance // which will be watching for any closes of Alice's channel. + confRegistered := make(chan struct{}, 1) aliceNotifier := &lnmock.ChainNotifier{ - SpendChan: make(chan *chainntnfs.SpendDetail), - EpochChan: make(chan *chainntnfs.BlockEpoch), - ConfChan: make(chan *chainntnfs.TxConfirmation), + SpendChan: make(chan *chainntnfs.SpendDetail), + EpochChan: make(chan *chainntnfs.BlockEpoch), + ConfChan: make(chan *chainntnfs.TxConfirmation), + ConfRegistered: confRegistered, } aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{ chanState: aliceChannel.State(), notifier: aliceNotifier, signer: aliceChannel.Signer, extractStateNumHint: lnwallet.GetStateNumHint, + chanCloseConfs: fn.Some(uint32(1)), }) require.NoError(t, err, "unable to create chain watcher") if err := aliceChainWatcher.Start(); err != nil { @@ -219,6 +231,11 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) { t.Fatalf("unable to send blockbeat") } + // Wait for the chain watcher to register for confirmations and send + // the confirmation. Since we set chanCloseConfs to 1, one confirmation + // is sufficient. + aliceNotifier.WaitForConfRegistrationAndSend(t) + // We should get a new spend event over the remote unilateral close // event channel. var uniClose *RemoteUnilateralCloseInfo @@ -331,10 +348,12 @@ func TestChainWatcherDataLossProtect(t *testing.T) { // With the channels created, we'll now create a chain watcher // instance which will be watching for any closes of Alice's // channel. 
+ confRegistered := make(chan struct{}, 1) aliceNotifier := &lnmock.ChainNotifier{ - SpendChan: make(chan *chainntnfs.SpendDetail), - EpochChan: make(chan *chainntnfs.BlockEpoch), - ConfChan: make(chan *chainntnfs.TxConfirmation), + SpendChan: make(chan *chainntnfs.SpendDetail), + EpochChan: make(chan *chainntnfs.BlockEpoch), + ConfChan: make(chan *chainntnfs.TxConfirmation), + ConfRegistered: confRegistered, } aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{ chanState: aliceChanState, @@ -407,6 +426,8 @@ func TestChainWatcherDataLossProtect(t *testing.T) { t.Fatalf("unable to send blockbeat") } + aliceNotifier.WaitForConfRegistrationAndSend(t) + // We should get a new uni close resolution that indicates we // processed the DLP scenario. var uniClose *RemoteUnilateralCloseInfo @@ -532,10 +553,12 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) { // With the channels created, we'll now create a chain watcher // instance which will be watching for any closes of Alice's // channel. + confRegistered := make(chan struct{}, 1) aliceNotifier := &lnmock.ChainNotifier{ - SpendChan: make(chan *chainntnfs.SpendDetail), - EpochChan: make(chan *chainntnfs.BlockEpoch), - ConfChan: make(chan *chainntnfs.TxConfirmation), + SpendChan: make(chan *chainntnfs.SpendDetail), + EpochChan: make(chan *chainntnfs.BlockEpoch), + ConfChan: make(chan *chainntnfs.TxConfirmation), + ConfRegistered: confRegistered, } aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{ chanState: aliceChanState, @@ -604,6 +627,8 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) { t.Fatalf("unable to send blockbeat") } + aliceNotifier.WaitForConfRegistrationAndSend(t) + // We should get a local force close event from Alice as she // should be able to detect the close based on the commitment // outputs. From cb9958b5409c8cc2e9cfac05eea5f565dacab793 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 1 Oct 2025 16:52:18 -0700 Subject: [PATCH 13/26] contractcourt: add unit tests for rbf re-org cases This set of new tests ensures that if have created N RBF variants of the coop close transaction, that any of then can confirm, and be re-org'd, with us detecting the final spend once it confirms deeploy enough. --- .../chain_watcher_coop_reorg_test.go | 325 ++++++++++++++++++ 1 file changed, 325 insertions(+) create mode 100644 contractcourt/chain_watcher_coop_reorg_test.go diff --git a/contractcourt/chain_watcher_coop_reorg_test.go b/contractcourt/chain_watcher_coop_reorg_test.go new file mode 100644 index 0000000000..9fd9ebc0d8 --- /dev/null +++ b/contractcourt/chain_watcher_coop_reorg_test.go @@ -0,0 +1,325 @@ +package contractcourt + +import ( + "testing" + "time" + + "github.com/btcsuite/btcd/wire" +) + +// TestChainWatcherCoopCloseReorg tests that the chain watcher properly handles +// a reorganization during cooperative close confirmation waiting. When a +// cooperative close transaction is reorganized out, the chain watcher should +// re-register for spend notifications and detect an alternative transaction. +func TestChainWatcherCoopCloseReorg(t *testing.T) { + t.Parallel() + + // Create test harness. + harness := newChainWatcherTestHarness(t) + + // Create two cooperative close transactions with different fees. + tx1 := harness.createCoopCloseTx(5000) + tx2 := harness.createCoopCloseTx(4900) + + // Run cooperative close flow with reorg. + closeInfo := harness.runCoopCloseFlow(tx1, true, 2, tx2) + + // Assert that the second transaction was confirmed. 
+ harness.assertCoopCloseTx(closeInfo, tx2) +} + +// TestChainWatcherCoopCloseSameTransactionAfterReorg tests that if the same +// transaction re-confirms after a reorganization, it is properly handled. +func TestChainWatcherCoopCloseSameTransactionAfterReorg(t *testing.T) { + t.Parallel() + + // Create test harness. + harness := newChainWatcherTestHarness(t) + + // Create a single cooperative close transaction. + tx := harness.createCoopCloseTx(5000) + + // Run flow: send tx, trigger reorg, re-send same tx. + harness.sendSpend(tx) + + // Wait for confirmation registration and trigger reorg. + harness.waitForConfRegistration() + harness.mineBlocks(2) + harness.triggerReorg(tx, 2) + + // After reorg, wait for re-registration then re-send the same transaction. + harness.waitForSpendRegistration() + harness.sendSpend(tx) + + // Confirm it. + harness.waitForConfRegistration() + harness.mineBlocks(1) + harness.confirmTx(tx, harness.currentHeight) + + // Wait for and verify cooperative close. + closeInfo := harness.waitForCoopClose(5 * time.Second) + harness.assertCoopCloseTx(closeInfo, tx) +} + +// TestChainWatcherCoopCloseMultipleReorgs tests handling of multiple +// consecutive reorganizations during cooperative close confirmation. +func TestChainWatcherCoopCloseMultipleReorgs(t *testing.T) { + t.Parallel() + + // Create test harness. + harness := newChainWatcherTestHarness(t) + + // Create multiple cooperative close transactions with different fees. + txs := []*wire.MsgTx{ + harness.createCoopCloseTx(5000), + harness.createCoopCloseTx(4950), + harness.createCoopCloseTx(4900), + harness.createCoopCloseTx(4850), + } + + // Define reorg depths for each transition. + reorgDepths := []int32{1, 2, 3} + + // Run multiple reorg flow. + closeInfo := harness.runMultipleReorgFlow(txs, reorgDepths) + + // Assert that the final transaction was confirmed. + harness.assertCoopCloseTx(closeInfo, txs[3]) +} + +// TestChainWatcherCoopCloseDeepReorg tests that the chain watcher can handle +// deep reorganizations where the reorg depth exceeds the required number of +// confirmations. +func TestChainWatcherCoopCloseDeepReorg(t *testing.T) { + t.Parallel() + + // Create test harness. + harness := newChainWatcherTestHarness(t) + + // Create two cooperative close transactions. + tx1 := harness.createCoopCloseTx(5000) + tx2 := harness.createCoopCloseTx(4900) + + // Run with a deep reorg (10 blocks). + closeInfo := harness.runCoopCloseFlow(tx1, true, 10, tx2) + + // Assert that the second transaction was confirmed after deep reorg. + harness.assertCoopCloseTx(closeInfo, tx2) +} + +// TestChainWatcherCoopCloseReorgNoAlternative tests that if a cooperative +// close is reorganized out and no alternative transaction appears, the +// chain watcher continues waiting. +func TestChainWatcherCoopCloseReorgNoAlternative(t *testing.T) { + t.Parallel() + + // Create test harness. + harness := newChainWatcherTestHarness(t) + + // Create a cooperative close transaction. + tx := harness.createCoopCloseTx(5000) + + // Send spend and wait for confirmation registration. + harness.sendSpend(tx) + harness.waitForConfRegistration() + + // Trigger reorg after some confirmations. + harness.mineBlocks(2) + harness.triggerReorg(tx, 2) + + // Assert no cooperative close event is received. + harness.assertNoCoopClose(2 * time.Second) + + // Now send a new transaction after the timeout. 
+ harness.waitForSpendRegistration() + newTx := harness.createCoopCloseTx(4900) + harness.sendSpend(newTx) + harness.waitForConfRegistration() + harness.mineBlocks(1) + harness.confirmTx(newTx, harness.currentHeight) + + // Should receive cooperative close for the new transaction. + closeInfo := harness.waitForCoopClose(5 * time.Second) + harness.assertCoopCloseTx(closeInfo, newTx) +} + +// TestChainWatcherCoopCloseRBFAfterReorg tests that RBF cooperative close +// transactions are properly handled when reorganizations occur. +func TestChainWatcherCoopCloseRBFAfterReorg(t *testing.T) { + t.Parallel() + + // Create test harness. + harness := newChainWatcherTestHarness(t) + + // Create a series of RBF transactions with increasing fees. + rbfTxs := make([]*wire.MsgTx, 3) + for i := range rbfTxs { + // Each transaction has higher fee (lower output). + outputValue := int64(5000 - i*100) + rbfTxs[i] = harness.createCoopCloseTx(outputValue) + } + + // Send first RBF transaction. + harness.sendSpend(rbfTxs[0]) + harness.waitForConfRegistration() + + // Trigger reorg. + harness.mineBlocks(1) + harness.triggerReorg(rbfTxs[0], 1) + + // Send second RBF transaction after re-registration. + harness.waitForSpendRegistration() + harness.sendSpend(rbfTxs[1]) + harness.waitForConfRegistration() + + // Another reorg. + harness.mineBlocks(1) + harness.triggerReorg(rbfTxs[1], 1) + + // Send final RBF transaction after re-registration. + harness.waitForSpendRegistration() + harness.sendSpend(rbfTxs[2]) + harness.waitForConfRegistration() + harness.mineBlocks(1) + harness.confirmTx(rbfTxs[2], harness.currentHeight) + + // Should confirm the highest fee transaction. + closeInfo := harness.waitForCoopClose(5 * time.Second) + harness.assertCoopCloseTx(closeInfo, rbfTxs[2]) +} + +// TestChainWatcherCoopCloseScaledConfirmationsWithReorg tests that scaled +// confirmations (based on channel capacity) work correctly with reorgs. +func TestChainWatcherCoopCloseScaledConfirmationsWithReorg(t *testing.T) { + t.Parallel() + + // Test with different channel capacities that require different + // confirmation counts. + testCases := []struct { + name string + capacityScale float64 + expectedConfs uint16 + reorgDepth int32 + }{ + { + name: "small_channel", + capacityScale: 0.1, + expectedConfs: 1, + reorgDepth: 1, + }, + { + name: "medium_channel", + capacityScale: 0.5, + expectedConfs: 3, + reorgDepth: 2, + }, + { + name: "large_channel", + capacityScale: 1.0, + expectedConfs: 6, + reorgDepth: 4, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + // Create harness with specific channel capacity. + harness := newChainWatcherTestHarness(t) + + // Create transactions. + tx1 := harness.createCoopCloseTx(5000) + tx2 := harness.createCoopCloseTx(4900) + + // Run with reorg at different depths based on capacity. + closeInfo := harness.runCoopCloseFlow( + tx1, true, tc.reorgDepth, tx2, + ) + + // Verify correct transaction confirmed. + harness.assertCoopCloseTx(closeInfo, tx2) + }) + } +} + +// TestChainWatcherCoopCloseReorgRaceCondition tests that the chain watcher +// correctly handles race conditions where multiple transactions might be +// in flight during reorganizations. +func TestChainWatcherCoopCloseReorgRaceCondition(t *testing.T) { + t.Parallel() + + // Create test harness. + harness := newChainWatcherTestHarness(t) + + // Create multiple transactions. 
+ txs := make([]*wire.MsgTx, 5) + for i := range txs { + txs[i] = harness.createCoopCloseTx(int64(5000 - i*50)) + } + + // Rapidly send multiple transactions and reorgs. + for i := 0; i < 3; i++ { + harness.sendSpend(txs[i]) + harness.waitForConfRegistration() + harness.mineBlocks(1) + + // Quick reorg. + harness.triggerReorg(txs[i], 1) + + // Wait for re-registration. + harness.waitForSpendRegistration() + } + + // Eventually send and confirm a final transaction. + finalTx := txs[4] + harness.sendSpend(finalTx) + harness.waitForConfRegistration() + harness.mineBlocks(1) + harness.confirmTx(finalTx, harness.currentHeight) + + // Should eventually settle on the final transaction. + closeInfo := harness.waitForCoopClose(10 * time.Second) + harness.assertCoopCloseTx(closeInfo, finalTx) +} + +// TestChainWatcherCoopCloseReorgErrorHandling tests that errors during +// re-registration after reorg are properly handled. +func TestChainWatcherCoopCloseReorgErrorHandling(t *testing.T) { + t.Parallel() + + // Create test harness. + harness := newChainWatcherTestHarness(t) + + // Create a cooperative close transaction. + tx := harness.createCoopCloseTx(5000) + + // Send spend notification. + harness.sendSpend(tx) + + // Trigger multiple rapid reorgs to stress error handling. + for i := 0; i < 5; i++ { + harness.waitForConfRegistration() + harness.mineBlocks(1) + harness.triggerReorg(tx, int32(i+1)) + if i < 4 { + // Re-register for spend after each reorg except the + // last. + harness.waitForSpendRegistration() + harness.sendSpend(tx) + } + } + + // After stress, send a clean transaction. + harness.waitForSpendRegistration() + cleanTx := harness.createCoopCloseTx(4800) + harness.sendSpend(cleanTx) + harness.waitForConfRegistration() + harness.mineBlocks(1) + harness.confirmTx(cleanTx, harness.currentHeight) + + // Should still receive the cooperative close. + closeInfo := harness.waitForCoopClose(10 * time.Second) + harness.assertCoopCloseTx(closeInfo, cleanTx) +} From f7377ab21c927e6d1a43b247ac50a4133b1f7900 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 1 Oct 2025 16:55:26 -0700 Subject: [PATCH 14/26] contractcourt: add generic close re-org tests In this commit, we add a set of generic close re-org tests. The most important test is the property based test, they will randomly confirm transactions, generate a re-org, then assert that eventually we dtect the final version. --- contractcourt/chain_watcher_reorg_test.go | 388 ++++++++++++++++++++++ 1 file changed, 388 insertions(+) create mode 100644 contractcourt/chain_watcher_reorg_test.go diff --git a/contractcourt/chain_watcher_reorg_test.go b/contractcourt/chain_watcher_reorg_test.go new file mode 100644 index 0000000000..34fb71ed9b --- /dev/null +++ b/contractcourt/chain_watcher_reorg_test.go @@ -0,0 +1,388 @@ +package contractcourt + +import ( + "testing" + "time" + + "github.com/btcsuite/btcd/wire" + "pgregory.net/rapid" +) + +// closeType represents the type of channel close for testing purposes. +type closeType int + +const ( + // closeTypeCoop represents a cooperative channel close. + closeTypeCoop closeType = iota + + // closeTypeRemoteUnilateral represents a remote unilateral close + // (remote party broadcasting their commitment). + closeTypeRemoteUnilateral + + // closeTypeLocalForce represents a local force close (us broadcasting + // our commitment). + closeTypeLocalForce + + // closeTypeBreach represents a breach (remote party broadcasting a + // revoked commitment). 
+ closeTypeBreach +) + +// String returns a string representation of the close type. +func (c closeType) String() string { + switch c { + case closeTypeCoop: + return "cooperative" + case closeTypeRemoteUnilateral: + return "remote_unilateral" + case closeTypeLocalForce: + return "local_force" + case closeTypeBreach: + return "breach" + default: + return "unknown" + } +} + +// createCloseTx creates a close transaction of the specified type using the +// harness. +func createCloseTx(h *chainWatcherTestHarness, ct closeType, + outputValue int64) *wire.MsgTx { + + switch ct { + case closeTypeCoop: + return h.createCoopCloseTx(outputValue) + case closeTypeRemoteUnilateral: + return h.createRemoteUnilateralCloseTx() + case closeTypeLocalForce: + return h.createLocalForceCloseTx() + case closeTypeBreach: + return h.createBreachCloseTx() + default: + h.t.Fatalf("unknown close type: %v", ct) + return nil + } +} + +// waitForCloseEvent waits for the appropriate close event based on close type. +func waitForCloseEvent(h *chainWatcherTestHarness, ct closeType, + timeout time.Duration) any { + + switch ct { + case closeTypeCoop: + return h.waitForCoopClose(timeout) + case closeTypeRemoteUnilateral: + return h.waitForRemoteUnilateralClose(timeout) + case closeTypeLocalForce: + return h.waitForLocalUnilateralClose(timeout) + case closeTypeBreach: + return h.waitForBreach(timeout) + default: + h.t.Fatalf("unknown close type: %v", ct) + return nil + } +} + +// assertCloseEventTx asserts that the close event matches the expected +// transaction based on close type. +func assertCloseEventTx(h *chainWatcherTestHarness, ct closeType, + event any, expectedTx *wire.MsgTx) { + + switch ct { + case closeTypeCoop: + h.assertCoopCloseTx(event.(*CooperativeCloseInfo), expectedTx) + + case closeTypeRemoteUnilateral: + h.assertRemoteUnilateralCloseTx( + event.(*RemoteUnilateralCloseInfo), expectedTx, + ) + + case closeTypeLocalForce: + h.assertLocalUnilateralCloseTx( + event.(*LocalUnilateralCloseInfo), expectedTx, + ) + + case closeTypeBreach: + h.assertBreachTx(event.(*BreachCloseInfo), expectedTx) + + default: + h.t.Fatalf("unknown close type: %v", ct) + } +} + +// generateAltTxsForReorgs generates alternative transactions for reorg +// scenarios. For commitment-based closes (breach, remote/local force), the same +// tx is reused since we can only have one commitment tx per channel state. For +// coop closes, new transactions with different output values are created. +func generateAltTxsForReorgs(h *chainWatcherTestHarness, ct closeType, + originalTx *wire.MsgTx, numReorgs int, sameTxAtEnd bool) []*wire.MsgTx { + + altTxs := make([]*wire.MsgTx, numReorgs) + + for i := 0; i < numReorgs; i++ { + switch ct { + case closeTypeBreach, closeTypeRemoteUnilateral, + closeTypeLocalForce: + + // Non-coop closes can only have one commitment tx, so + // all reorgs use the same transaction. + altTxs[i] = originalTx + + case closeTypeCoop: + if i == numReorgs-1 && sameTxAtEnd { + // Last reorg goes back to original transaction. + altTxs[i] = originalTx + } else { + // Create different coop close tx with different + // output value to make it unique. + outputValue := int64(5000 - (i+1)*100) + altTxs[i] = createCloseTx(h, ct, outputValue) + } + } + } + + return altTxs +} + +// testReorgProperties is the main property-based test for reorg handling +// across all close types. 
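+//
+// The property being checked: for any close type, confirmation target, and
+// sequence of reorgs, the watcher eventually reports exactly the close
+// transaction that confirms on the final chain.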
+// +// The testingT parameter is captured from the outer test function and used +// for operations that require *testing.T (like channel creation), while the +// rapid.T is used for all test reporting and property generation. +func testReorgProperties(testingT *testing.T) func(*rapid.T) { + return func(t *rapid.T) { + // Generate random close type. + allCloseTypes := []closeType{ + closeTypeCoop, + closeTypeRemoteUnilateral, + closeTypeLocalForce, + closeTypeBreach, + } + ct := rapid.SampledFrom(allCloseTypes).Draw(t, "closeType") + + // Generate random number of required confirmations (2-6). We + // use at least 2 so we have room for reorgs during + // confirmation. + requiredConfs := rapid.IntRange(2, 6).Draw(t, "requiredConfs") + + // Generate number of reorgs (1-3 to keep test runtime + // reasonable). + numReorgs := rapid.IntRange(1, 3).Draw(t, "numReorgs") + + // Generate whether the final transaction is the same as the + // original. + sameTxAtEnd := rapid.Bool().Draw(t, "sameTxAtEnd") + + // Log test parameters for debugging. + t.Logf("Testing %s close with %d confs, %d reorgs, "+ + "sameTxAtEnd=%v", + ct, requiredConfs, numReorgs, sameTxAtEnd) + + // Create test harness using both the concrete *testing.T for + // channel creation and the rapid.T for test reporting. + harness := newChainWatcherTestHarnessFromReporter( + testingT, t, withRequiredConfs(uint32(requiredConfs)), + ) + + // Create initial transaction. + tx1 := createCloseTx(harness, ct, 5000) + + // Generate alternative transactions for each reorg. + altTxs := generateAltTxsForReorgs( + harness, ct, tx1, numReorgs, sameTxAtEnd, + ) + + // Send the initial spend. + harness.sendSpend(tx1) + harness.waitForConfRegistration() + + // Execute the set of re-orgs, based on our random sample, we'll + // mine N blocks, do a re-org of size N, then wait for + // detection, and repeat. + for i := 0; i < numReorgs; i++ { + // Generate random reorg depth (1 to requiredConfs-1). + // We cap it to avoid reorging too far back. + reorgDepth := rapid.IntRange( + 1, requiredConfs-1, + ).Draw(t, "reorgDepth") + + // Mine some blocks (but less than required confs). + blocksToMine := rapid.IntRange( + 1, requiredConfs-1, + ).Draw(t, "blocksToMine") + harness.mineBlocks(int32(blocksToMine)) + + // Trigger reorg. + if i == 0 { + harness.triggerReorg( + tx1, int32(reorgDepth), + ) + } else { + harness.triggerReorg( + altTxs[i-1], int32(reorgDepth), + ) + } + + harness.waitForSpendRegistration() + + harness.sendSpend(altTxs[i]) + harness.waitForConfRegistration() + } + + // Mine enough blocks to confirm final transaction. + harness.mineBlocks(1) + finalTx := altTxs[numReorgs-1] + harness.confirmTx(finalTx, harness.currentHeight) + + // Wait for and verify close event. + event := waitForCloseEvent(harness, ct, 10*time.Second) + assertCloseEventTx(harness, ct, event, finalTx) + } +} + +// TestChainWatcherReorgAllCloseTypes runs property-based tests for reorg +// handling across all channel close types. It generates random combinations of: +// - Close type (coop, remote unilateral, local force, breach) +// - Number of confirmations required (2-6) +// - Number of reorgs (1-3) +// - Whether the final tx is same as original or different +func TestChainWatcherReorgAllCloseTypes(t *testing.T) { + t.Parallel() + + rapid.Check(t, testReorgProperties(t)) +} + +// TestRemoteUnilateralCloseWithSingleReorg tests that a remote unilateral +// close is properly handled when a single reorg occurs during confirmation. 
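+// The replacement commitment is obtained by advancing the channel state once,
+// since only a single commitment transaction can exist per state.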
+func TestRemoteUnilateralCloseWithSingleReorg(t *testing.T) { + t.Parallel() + + harness := newChainWatcherTestHarness(t) + + // Create two remote unilateral close transactions. + // Since these are commitment transactions, we can only have one per + // state, so we'll use the current one as tx1. + tx1 := harness.createRemoteUnilateralCloseTx() + + // Advance channel state to get a different commitment. + _ = harness.createBreachCloseTx() + tx2 := harness.createRemoteUnilateralCloseTx() + + // Send initial spend. + harness.sendSpend(tx1) + harness.waitForConfRegistration() + + // Mine a block and trigger reorg. + harness.mineBlocks(1) + harness.triggerReorg(tx1, 1) + + // Send alternative transaction after reorg. + harness.waitForSpendRegistration() + harness.sendSpend(tx2) + harness.waitForConfRegistration() + harness.mineBlocks(1) + harness.confirmTx(tx2, harness.currentHeight) + + // Verify correct event. + closeInfo := harness.waitForRemoteUnilateralClose(5 * time.Second) + harness.assertRemoteUnilateralCloseTx(closeInfo, tx2) +} + +// TestLocalForceCloseWithMultipleReorgs tests that a local force close is +// properly handled through multiple consecutive reorgs. +func TestLocalForceCloseWithMultipleReorgs(t *testing.T) { + t.Parallel() + + harness := newChainWatcherTestHarness(t) + + // For local force close, we can only broadcast our current commitment. + // We'll simulate multiple reorgs where the same tx keeps getting + // reorganized out and re-broadcast. + tx := harness.createLocalForceCloseTx() + + // First spend and reorg. + harness.sendSpend(tx) + harness.waitForConfRegistration() + harness.mineBlocks(1) + harness.triggerReorg(tx, 1) + + // Second spend and reorg. + harness.waitForSpendRegistration() + harness.sendSpend(tx) + harness.waitForConfRegistration() + harness.mineBlocks(1) + harness.triggerReorg(tx, 1) + + // Third spend - this one confirms. + harness.waitForSpendRegistration() + harness.sendSpend(tx) + harness.waitForConfRegistration() + harness.mineBlocks(1) + harness.confirmTx(tx, harness.currentHeight) + + // Verify correct event. + closeInfo := harness.waitForLocalUnilateralClose(5 * time.Second) + harness.assertLocalUnilateralCloseTx(closeInfo, tx) +} + +// TestBreachCloseWithDeepReorg tests that a breach (revoked commitment) is +// properly detected after a deep reorganization. +func TestBreachCloseWithDeepReorg(t *testing.T) { + t.Parallel() + + harness := newChainWatcherTestHarness(t) + + // Create a revoked commitment transaction. + revokedTx := harness.createBreachCloseTx() + + // Send spend and wait for confirmation registration. + harness.sendSpend(revokedTx) + harness.waitForConfRegistration() + + // Mine several blocks and then trigger a deep reorg. + harness.mineBlocks(5) + harness.triggerReorg(revokedTx, 5) + + // Re-broadcast same transaction after reorg. + harness.waitForSpendRegistration() + harness.sendSpend(revokedTx) + harness.waitForConfRegistration() + harness.mineBlocks(1) + harness.confirmTx(revokedTx, harness.currentHeight) + + // Verify breach detection. + breachInfo := harness.waitForBreach(5 * time.Second) + harness.assertBreachTx(breachInfo, revokedTx) +} + +// TestCoopCloseReorgToForceClose tests the edge case where a cooperative +// close gets reorged out and is replaced by a force close. +func TestCoopCloseReorgToForceClose(t *testing.T) { + t.Parallel() + + harness := newChainWatcherTestHarness(t) + + // Create a cooperative close and a force close transaction. 
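+	// The coop close is marked by its max sequence number (see
+	// createCoopCloseTx), while the force close is the remote party's
+	// actual commitment transaction, so the watcher classifies the two
+	// spends differently.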
+ coopTx := harness.createCoopCloseTx(5000) + forceTx := harness.createRemoteUnilateralCloseTx() + + // Send cooperative close. + harness.sendSpend(coopTx) + harness.waitForConfRegistration() + + // Trigger reorg that removes coop close. + harness.mineBlocks(1) + harness.triggerReorg(coopTx, 1) + + // Send force close as alternative. + harness.waitForSpendRegistration() + harness.sendSpend(forceTx) + harness.waitForConfRegistration() + harness.mineBlocks(1) + harness.confirmTx(forceTx, harness.currentHeight) + + // Should receive remote unilateral close event, not coop close. + closeInfo := harness.waitForRemoteUnilateralClose(5 * time.Second) + harness.assertRemoteUnilateralCloseTx(closeInfo, forceTx) +} From 1dd1a2fb4865b9d1abffdd69827e37a4e4f7172c Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 1 Oct 2025 16:55:48 -0700 Subject: [PATCH 15/26] itest: add new coop close rbf itest This ensures that during the RBF process, if one confirms, a re-org occurs, then another confirms, that we'll properly detect this case. --- itest/list_on_test.go | 4 + itest/lnd_coop_close_rbf_test.go | 175 +++++++++++++++++++++++++++++++ itest/lnd_funding_test.go | 18 +++- 3 files changed, 195 insertions(+), 2 deletions(-) diff --git a/itest/list_on_test.go b/itest/list_on_test.go index 92c6547b3a..ec608c629d 100644 --- a/itest/list_on_test.go +++ b/itest/list_on_test.go @@ -727,6 +727,10 @@ var allTestCases = []*lntest.TestCase{ Name: "rbf coop close disconnect", TestFunc: testRBFCoopCloseDisconnect, }, + { + Name: "coop close rbf with reorg", + TestFunc: testCoopCloseRBFWithReorg, + }, { Name: "bump fee low budget", TestFunc: testBumpFeeLowBudget, diff --git a/itest/lnd_coop_close_rbf_test.go b/itest/lnd_coop_close_rbf_test.go index 5f8b15d405..57ebe0003f 100644 --- a/itest/lnd_coop_close_rbf_test.go +++ b/itest/lnd_coop_close_rbf_test.go @@ -1,8 +1,13 @@ package itest import ( + "fmt" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/stretchr/testify/require" ) @@ -153,3 +158,173 @@ func testRBFCoopCloseDisconnect(ht *lntest.HarnessTest) { // Disconnect Bob from Alice. ht.DisconnectNodes(alice, bob) } + +// testCoopCloseRBFWithReorg tests that when a cooperative close transaction +// is reorganized out during confirmation waiting, the system properly handles +// RBF replacements and re-registration for any spend of the funding output. +func testCoopCloseRBFWithReorg(ht *lntest.HarnessTest) { + // Skip this test for neutrino backend as we can't trigger reorgs. + if ht.IsNeutrinoBackend() { + ht.Skipf("skipping reorg test for neutrino backend") + } + + // Force cooperative close to require 3 confirmations for predictable + // testing. + const requiredConfs = 3 + rbfCoopFlags := []string{ + "--protocol.rbf-coop-close", + "--dev.force-channel-close-confs=3", + } + + // Set the fee estimate to 1sat/vbyte to ensure our RBF attempts work. + ht.SetFeeEstimate(250) + ht.SetFeeEstimateWithConf(250, 6) + + // Create two nodes with enough coins for a 50/50 channel. 
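+	// Pushing half of the 10M channel capacity gives both sides a balance,
+	// which lets either node initiate or fee bump the cooperative close.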
+ cfgs := [][]string{rbfCoopFlags, rbfCoopFlags} + params := lntest.OpenChannelParams{ + Amt: btcutil.Amount(10_000_000), + PushAmt: btcutil.Amount(5_000_000), + } + chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params) + alice, bob := nodes[0], nodes[1] + chanPoint := chanPoints[0] + + // Initiate cooperative close with initial fee rate of 5 sat/vb. + initialFeeRate := chainfee.SatPerVByte(5) + _, aliceCloseUpdate := ht.CloseChannelAssertPending( + alice, chanPoint, false, + lntest.WithCoopCloseFeeRate(initialFeeRate), + lntest.WithLocalTxNotify(), + ) + + // Verify the initial close transaction is at the expected fee rate. + alicePendingUpdate := aliceCloseUpdate.GetClosePending() + require.NotNil(ht, aliceCloseUpdate) + require.Equal( + ht, int64(initialFeeRate), alicePendingUpdate.FeePerVbyte, + ) + + // Capture the initial close transaction from the mempool. + initialCloseTxid, err := chainhash.NewHash(alicePendingUpdate.Txid) + require.NoError(ht, err) + initialCloseTx := ht.AssertTxInMempool(*initialCloseTxid) + + // Create first RBF replacement before any mining. + firstRbfFeeRate := chainfee.SatPerVByte(10) + _, firstRbfUpdate := ht.CloseChannelAssertPending( + bob, chanPoint, false, + lntest.WithCoopCloseFeeRate(firstRbfFeeRate), + lntest.WithLocalTxNotify(), + ) + + // Capture the first RBF transaction. + firstRbfTxid, err := chainhash.NewHash(firstRbfUpdate.GetClosePending().Txid) + require.NoError(ht, err) + firstRbfTx := ht.AssertTxInMempool(*firstRbfTxid) + + _, bestHeight, err := ht.Miner().Client.GetBestBlock() + require.NoError(ht, err) + + ht.Logf("Current block height: %d", bestHeight) + + // Mine n-1 blocks (2 blocks when requiring 3 confirmations) with the + // first RBF transaction. This is just shy of full confirmation. + block1 := ht.Miner().MineBlockWithTxes( + []*btcutil.Tx{btcutil.NewTx(firstRbfTx)}, + ) + + ht.Logf("Mined block %d with first RBF tx", bestHeight+1) + + block2 := ht.MineEmptyBlocks(1)[0] + + ht.Logf("Mined block %d", bestHeight+2) + + ht.Logf("Re-orging two blocks to remove first RBF tx") + + // Trigger a reorganization that removes the last 2 blocks. This is safe + // because we haven't reached full confirmation yet. + bestBlockHash := block2.Header.BlockHash() + require.NoError( + ht, ht.Miner().Client.InvalidateBlock(&bestBlockHash), + ) + bestBlockHash = block1.Header.BlockHash() + require.NoError( + ht, ht.Miner().Client.InvalidateBlock(&bestBlockHash), + ) + + _, bestHeight, err = ht.Miner().Client.GetBestBlock() + require.NoError(ht, err) + ht.Logf("Re-orged to block height: %d", bestHeight) + + ht.Log("Mining blocks to surpass previous chain") + + // Mine 2 empty blocks to trigger the reorg on the nodes. + ht.MineEmptyBlocks(2) + + _, bestHeight, err = ht.Miner().Client.GetBestBlock() + require.NoError(ht, err) + ht.Logf("Mined blocks to reach height: %d", bestHeight) + + // Now, instead of mining the second RBF, mine the INITIAL transaction + // to test that the system can handle any valid spend of the funding + // output. + block := ht.Miner().MineBlockWithTxes( + []*btcutil.Tx{btcutil.NewTx(initialCloseTx)}, + ) + ht.AssertTxInBlock(block, *initialCloseTxid) + + // Mine additional blocks to reach the required confirmations (3 total). + ht.MineEmptyBlocks(requiredConfs - 1) + + // Both parties should see that the channel is now fully closed on chain + // with the expected closing txid. 
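+	// Note that the final closing txid we expect is the ORIGINAL close
+	// transaction, not the RBF replacement that was reorged back out.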
+ expectedClosingTxid := initialCloseTxid.String() + err = wait.NoError(func() error { + req := &lnrpc.ClosedChannelsRequest{} + aliceClosedChans := alice.RPC.ClosedChannels(req) + bobClosedChans := bob.RPC.ClosedChannels(req) + if len(aliceClosedChans.Channels) != 1 { + return fmt.Errorf("alice: expected 1 closed chan, got %d", + len(aliceClosedChans.Channels)) + } + if len(bobClosedChans.Channels) != 1 { + return fmt.Errorf("bob: expected 1 closed chan, got %d", + len(bobClosedChans.Channels)) + } + + // Verify both Alice and Bob have the expected closing txid. + aliceClosedChan := aliceClosedChans.Channels[0] + if aliceClosedChan.ClosingTxHash != expectedClosingTxid { + return fmt.Errorf("alice: expected closing txid %s, "+ + "got %s", + expectedClosingTxid, + aliceClosedChan.ClosingTxHash) + } + if aliceClosedChan.CloseType != + lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE { + return fmt.Errorf("alice: expected cooperative "+ + "close, got %v", + aliceClosedChan.CloseType) + } + + bobClosedChan := bobClosedChans.Channels[0] + if bobClosedChan.ClosingTxHash != expectedClosingTxid { + return fmt.Errorf("bob: expected closing txid %s, "+ + "got %s", + expectedClosingTxid, + bobClosedChan.ClosingTxHash) + } + if bobClosedChan.CloseType != + lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE { + return fmt.Errorf("bob: expected cooperative "+ + "close, got %v", + bobClosedChan.CloseType) + } + + return nil + }, defaultTimeout) + require.NoError(ht, err) + + ht.Logf("Successfully verified closing txid: %s", expectedClosingTxid) +} diff --git a/itest/lnd_funding_test.go b/itest/lnd_funding_test.go index b6734e032d..2c1daf53d2 100644 --- a/itest/lnd_funding_test.go +++ b/itest/lnd_funding_test.go @@ -1272,8 +1272,17 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) { // Make sure Carol sees her to_remote output from the force close tx. ht.AssertNumPendingSweeps(carol, 1) - // We need to wait for carol initiating the sweep of the to_remote - // output of chanPoint2. + // Wait for Carol's sweep transaction to appear in the mempool. Due to + // async confirmation notifications, there's a race between when the + // sweep is registered and when the sweeper processes the next block. + // The sweeper uses immediate=false, so it broadcasts on the next block + // after registration. Mine an empty block to trigger the broadcast. + ht.MineEmptyBlocks(1) + + // Now the sweep should be in the mempool. + ht.AssertNumTxsInMempool(1) + + // Now we should see the unconfirmed UTXO from the sweep. utxo := ht.AssertNumUTXOsUnconfirmed(carol, 1)[0] // We now try to open channel using the unconfirmed utxo. @@ -1329,6 +1338,11 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) { // Make sure Carol sees her to_remote output from the force close tx. ht.AssertNumPendingSweeps(carol, 1) + // Mine an empty block to trigger the sweep broadcast (same fix as + // above). + ht.MineEmptyBlocks(1) + ht.AssertNumTxsInMempool(1) + // Wait for the to_remote sweep tx to show up in carol's wallet. ht.AssertNumUTXOsUnconfirmed(carol, 1) From 574c093c3dc41187cf386a8719810a9de7601b79 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Sat, 11 Oct 2025 14:18:00 +0100 Subject: [PATCH 16/26] lnrpc/devrpc: add new TriggerSweep dev rpc command In this commit, we add a new TriggerSwep dev rpc command. This command can be used in the itest to trigger a sweep on demand, which will be useful to ensure that they still pass, now that some block beat handling is more async. 
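As a rough illustration (assuming the usual conn/ctx/t wiring from the itest
harness), the endpoint can be hit directly via the generated client:

    client := devrpc.NewDevClient(conn)
    resp, err := client.TriggerSweeper(ctx, &devrpc.TriggerSweeperRequest{})
    if err != nil {
        t.Fatalf("unable to trigger sweeper: %v", err)
    }

    // NumSweepsBroadcast reports how many sweep txns were published.
    t.Logf("sweeper broadcast %d sweeps", resp.NumSweepsBroadcast)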
--- lnrpc/devrpc/config_active.go | 8 ++ lnrpc/devrpc/dev.pb.go | 176 +++++++++++++++++++++++++++++----- lnrpc/devrpc/dev.pb.json.go | 25 +++++ lnrpc/devrpc/dev.proto | 16 ++++ lnrpc/devrpc/dev.swagger.json | 10 ++ lnrpc/devrpc/dev_grpc.pb.go | 46 +++++++++ lnrpc/devrpc/dev_server.go | 25 +++++ subrpcserver_config.go | 4 + 8 files changed, 285 insertions(+), 25 deletions(-) diff --git a/lnrpc/devrpc/config_active.go b/lnrpc/devrpc/config_active.go index c5d43c194b..512efdb189 100644 --- a/lnrpc/devrpc/config_active.go +++ b/lnrpc/devrpc/config_active.go @@ -9,6 +9,13 @@ import ( "github.com/lightningnetwork/lnd/htlcswitch" ) +// UtxoSweeper defines the interface for the UTXO sweeper needed by the dev +// RPC server. +type UtxoSweeper interface { + // TriggerSweep triggers an immediate sweep attempt. + TriggerSweep() int +} + // Config is the primary configuration struct for the DEV RPC server. It // contains all the items required for the rpc server to carry out its // duties. Any fields with struct tags are meant to be parsed as normal @@ -18,4 +25,5 @@ type Config struct { ActiveNetParams *chaincfg.Params GraphDB *graphdb.ChannelGraph Switch *htlcswitch.Switch + Sweeper UtxoSweeper } diff --git a/lnrpc/devrpc/dev.pb.go b/lnrpc/devrpc/dev.pb.go index d8de47fc8a..9552aeed49 100644 --- a/lnrpc/devrpc/dev.pb.go +++ b/lnrpc/devrpc/dev.pb.go @@ -156,6 +156,92 @@ func (x *QuiescenceResponse) GetInitiator() bool { return false } +type TriggerSweeperRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *TriggerSweeperRequest) Reset() { + *x = TriggerSweeperRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_devrpc_dev_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TriggerSweeperRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TriggerSweeperRequest) ProtoMessage() {} + +func (x *TriggerSweeperRequest) ProtoReflect() protoreflect.Message { + mi := &file_devrpc_dev_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TriggerSweeperRequest.ProtoReflect.Descriptor instead. +func (*TriggerSweeperRequest) Descriptor() ([]byte, []int) { + return file_devrpc_dev_proto_rawDescGZIP(), []int{3} +} + +type TriggerSweeperResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of sweep transactions that were broadcast. 
+ NumSweepsBroadcast uint32 `protobuf:"varint,1,opt,name=num_sweeps_broadcast,json=numSweepsBroadcast,proto3" json:"num_sweeps_broadcast,omitempty"` +} + +func (x *TriggerSweeperResponse) Reset() { + *x = TriggerSweeperResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_devrpc_dev_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TriggerSweeperResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TriggerSweeperResponse) ProtoMessage() {} + +func (x *TriggerSweeperResponse) ProtoReflect() protoreflect.Message { + mi := &file_devrpc_dev_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TriggerSweeperResponse.ProtoReflect.Descriptor instead. +func (*TriggerSweeperResponse) Descriptor() ([]byte, []int) { + return file_devrpc_dev_proto_rawDescGZIP(), []int{4} +} + +func (x *TriggerSweeperResponse) GetNumSweepsBroadcast() uint32 { + if x != nil { + return x.NumSweepsBroadcast + } + return 0 +} + var File_devrpc_dev_proto protoreflect.FileDescriptor var file_devrpc_dev_proto_rawDesc = []byte{ @@ -170,19 +256,31 @@ var file_devrpc_dev_proto_rawDesc = []byte{ 0x68, 0x61, 0x6e, 0x49, 0x64, 0x22, 0x32, 0x0a, 0x12, 0x51, 0x75, 0x69, 0x65, 0x73, 0x63, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x6f, 0x72, 0x32, 0x88, 0x01, 0x0a, 0x03, 0x44, 0x65, - 0x76, 0x12, 0x3f, 0x0a, 0x0b, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x47, 0x72, 0x61, 0x70, 0x68, - 0x12, 0x13, 0x2e, 0x6c, 0x6e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x47, 0x72, 0x61, 0x70, 0x68, 0x1a, 0x1b, 0x2e, 0x64, 0x65, 0x76, 0x72, 0x70, 0x63, 0x2e, 0x49, - 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x40, 0x0a, 0x07, 0x51, 0x75, 0x69, 0x65, 0x73, 0x63, 0x65, 0x12, 0x19, 0x2e, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x17, 0x0a, 0x15, 0x54, 0x72, 0x69, + 0x67, 0x67, 0x65, 0x72, 0x53, 0x77, 0x65, 0x65, 0x70, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x4a, 0x0a, 0x16, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x53, 0x77, 0x65, + 0x65, 0x70, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x14, + 0x6e, 0x75, 0x6d, 0x5f, 0x73, 0x77, 0x65, 0x65, 0x70, 0x73, 0x5f, 0x62, 0x72, 0x6f, 0x61, 0x64, + 0x63, 0x61, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x6e, 0x75, 0x6d, 0x53, + 0x77, 0x65, 0x65, 0x70, 0x73, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x32, 0xd9, + 0x01, 0x0a, 0x03, 0x44, 0x65, 0x76, 0x12, 0x3f, 0x0a, 0x0b, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x13, 0x2e, 0x6c, 0x6e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x47, 0x72, 0x61, 0x70, 0x68, 0x1a, 0x1b, 0x2e, 0x64, 0x65, 0x76, + 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x07, 0x51, 0x75, 0x69, 0x65, 0x73, + 0x63, 0x65, 0x12, 0x19, 0x2e, 0x64, 0x65, 0x76, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, 0x69, 0x65, + 0x73, 0x63, 0x65, 0x6e, 0x63, 0x65, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x64, 0x65, 0x76, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, 0x69, 0x65, 0x73, 0x63, 0x65, 0x6e, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x64, 0x65, 0x76, 0x72, 0x70, - 0x63, 0x2e, 0x51, 0x75, 0x69, 0x65, 0x73, 0x63, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x6e, 0x69, 0x6e, 0x67, 0x6e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x2f, 0x6c, 0x6e, 0x64, 0x2f, 0x6c, 0x6e, 0x72, 0x70, 0x63, 0x2f, 0x64, 0x65, - 0x76, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x54, 0x72, 0x69, + 0x67, 0x67, 0x65, 0x72, 0x53, 0x77, 0x65, 0x65, 0x70, 0x65, 0x72, 0x12, 0x1d, 0x2e, 0x64, 0x65, + 0x76, 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x53, 0x77, 0x65, 0x65, + 0x70, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x64, 0x65, 0x76, + 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x53, 0x77, 0x65, 0x65, 0x70, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x6e, 0x69, + 0x6e, 0x67, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x6c, 0x6e, 0x64, 0x2f, 0x6c, 0x6e, + 0x72, 0x70, 0x63, 0x2f, 0x64, 0x65, 0x76, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -197,22 +295,26 @@ func file_devrpc_dev_proto_rawDescGZIP() []byte { return file_devrpc_dev_proto_rawDescData } -var file_devrpc_dev_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_devrpc_dev_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_devrpc_dev_proto_goTypes = []interface{}{ - (*ImportGraphResponse)(nil), // 0: devrpc.ImportGraphResponse - (*QuiescenceRequest)(nil), // 1: devrpc.QuiescenceRequest - (*QuiescenceResponse)(nil), // 2: devrpc.QuiescenceResponse - (*lnrpc.ChannelPoint)(nil), // 3: lnrpc.ChannelPoint - (*lnrpc.ChannelGraph)(nil), // 4: lnrpc.ChannelGraph + (*ImportGraphResponse)(nil), // 0: devrpc.ImportGraphResponse + (*QuiescenceRequest)(nil), // 1: devrpc.QuiescenceRequest + (*QuiescenceResponse)(nil), // 2: devrpc.QuiescenceResponse + (*TriggerSweeperRequest)(nil), // 3: devrpc.TriggerSweeperRequest + (*TriggerSweeperResponse)(nil), // 4: devrpc.TriggerSweeperResponse + (*lnrpc.ChannelPoint)(nil), // 5: lnrpc.ChannelPoint + (*lnrpc.ChannelGraph)(nil), // 6: lnrpc.ChannelGraph } var file_devrpc_dev_proto_depIdxs = []int32{ - 3, // 0: devrpc.QuiescenceRequest.chan_id:type_name -> lnrpc.ChannelPoint - 4, // 1: devrpc.Dev.ImportGraph:input_type -> lnrpc.ChannelGraph + 5, // 0: devrpc.QuiescenceRequest.chan_id:type_name -> lnrpc.ChannelPoint + 6, // 1: devrpc.Dev.ImportGraph:input_type -> lnrpc.ChannelGraph 1, // 2: devrpc.Dev.Quiesce:input_type -> devrpc.QuiescenceRequest - 0, // 3: devrpc.Dev.ImportGraph:output_type -> devrpc.ImportGraphResponse - 2, // 4: devrpc.Dev.Quiesce:output_type -> devrpc.QuiescenceResponse - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type + 3, // 3: devrpc.Dev.TriggerSweeper:input_type -> devrpc.TriggerSweeperRequest + 0, // 4: devrpc.Dev.ImportGraph:output_type -> devrpc.ImportGraphResponse + 2, // 5: devrpc.Dev.Quiesce:output_type -> devrpc.QuiescenceResponse + 4, // 6: 
devrpc.Dev.TriggerSweeper:output_type -> devrpc.TriggerSweeperResponse + 4, // [4:7] is the sub-list for method output_type + 1, // [1:4] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name 1, // [1:1] is the sub-list for extension extendee 0, // [0:1] is the sub-list for field type_name @@ -260,6 +362,30 @@ func file_devrpc_dev_proto_init() { return nil } } + file_devrpc_dev_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TriggerSweeperRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_devrpc_dev_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TriggerSweeperResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -267,7 +393,7 @@ func file_devrpc_dev_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_devrpc_dev_proto_rawDesc, NumEnums: 0, - NumMessages: 3, + NumMessages: 5, NumExtensions: 0, NumServices: 1, }, diff --git a/lnrpc/devrpc/dev.pb.json.go b/lnrpc/devrpc/dev.pb.json.go index 2163a13de6..f16fd926b2 100644 --- a/lnrpc/devrpc/dev.pb.json.go +++ b/lnrpc/devrpc/dev.pb.json.go @@ -71,4 +71,29 @@ func RegisterDevJSONCallbacks(registry map[string]func(ctx context.Context, } callback(string(respBytes), nil) } + + registry["devrpc.Dev.TriggerSweeper"] = func(ctx context.Context, + conn *grpc.ClientConn, reqJSON string, callback func(string, error)) { + + req := &TriggerSweeperRequest{} + err := marshaler.Unmarshal([]byte(reqJSON), req) + if err != nil { + callback("", err) + return + } + + client := NewDevClient(conn) + resp, err := client.TriggerSweeper(ctx, req) + if err != nil { + callback("", err) + return + } + + respBytes, err := marshaler.Marshal(resp) + if err != nil { + callback("", err) + return + } + callback(string(respBytes), nil) + } } diff --git a/lnrpc/devrpc/dev.proto b/lnrpc/devrpc/dev.proto index 4b4fe778fd..adef59e32d 100644 --- a/lnrpc/devrpc/dev.proto +++ b/lnrpc/devrpc/dev.proto @@ -37,6 +37,14 @@ service Dev { once interop is confirmed. */ rpc Quiesce (QuiescenceRequest) returns (QuiescenceResponse); + + /* lncli: `triggersweeper` + TriggerSweeper triggers the sweeper to attempt to create and broadcast sweep + transactions for all pending inputs. This RPC is for testing purposes only, + allowing tests to deterministically control when sweeps are broadcast rather + than relying on the sweeper's automatic timing. + */ + rpc TriggerSweeper (TriggerSweeperRequest) returns (TriggerSweeperResponse); } message ImportGraphResponse { @@ -52,3 +60,11 @@ message QuiescenceResponse { // negotiation completes bool initiator = 1; } + +message TriggerSweeperRequest { +} + +message TriggerSweeperResponse { + // The number of sweep transactions that were broadcast. + uint32 num_sweeps_broadcast = 1; +} diff --git a/lnrpc/devrpc/dev.swagger.json b/lnrpc/devrpc/dev.swagger.json index 25338c3985..4602e3b532 100644 --- a/lnrpc/devrpc/dev.swagger.json +++ b/lnrpc/devrpc/dev.swagger.json @@ -106,6 +106,16 @@ } } }, + "devrpcTriggerSweeperResponse": { + "type": "object", + "properties": { + "num_sweeps_broadcast": { + "type": "integer", + "format": "int64", + "description": "The number of sweep transactions that were broadcast." 
+ } + } + }, "lnrpcChannelAuthProof": { "type": "object", "properties": { diff --git a/lnrpc/devrpc/dev_grpc.pb.go b/lnrpc/devrpc/dev_grpc.pb.go index 1eb6266fbe..952925e31a 100644 --- a/lnrpc/devrpc/dev_grpc.pb.go +++ b/lnrpc/devrpc/dev_grpc.pb.go @@ -27,6 +27,12 @@ type DevClient interface { // RPC is for testing purposes only. The commit that adds it will be removed // once interop is confirmed. Quiesce(ctx context.Context, in *QuiescenceRequest, opts ...grpc.CallOption) (*QuiescenceResponse, error) + // lncli: `triggersweeper` + // TriggerSweeper triggers the sweeper to attempt to create and broadcast sweep + // transactions for all pending inputs. This RPC is for testing purposes only, + // allowing tests to deterministically control when sweeps are broadcast rather + // than relying on the sweeper's automatic timing. + TriggerSweeper(ctx context.Context, in *TriggerSweeperRequest, opts ...grpc.CallOption) (*TriggerSweeperResponse, error) } type devClient struct { @@ -55,6 +61,15 @@ func (c *devClient) Quiesce(ctx context.Context, in *QuiescenceRequest, opts ... return out, nil } +func (c *devClient) TriggerSweeper(ctx context.Context, in *TriggerSweeperRequest, opts ...grpc.CallOption) (*TriggerSweeperResponse, error) { + out := new(TriggerSweeperResponse) + err := c.cc.Invoke(ctx, "/devrpc.Dev/TriggerSweeper", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // DevServer is the server API for Dev service. // All implementations must embed UnimplementedDevServer // for forward compatibility @@ -67,6 +82,12 @@ type DevServer interface { // RPC is for testing purposes only. The commit that adds it will be removed // once interop is confirmed. Quiesce(context.Context, *QuiescenceRequest) (*QuiescenceResponse, error) + // lncli: `triggersweeper` + // TriggerSweeper triggers the sweeper to attempt to create and broadcast sweep + // transactions for all pending inputs. This RPC is for testing purposes only, + // allowing tests to deterministically control when sweeps are broadcast rather + // than relying on the sweeper's automatic timing. + TriggerSweeper(context.Context, *TriggerSweeperRequest) (*TriggerSweeperResponse, error) mustEmbedUnimplementedDevServer() } @@ -80,6 +101,9 @@ func (UnimplementedDevServer) ImportGraph(context.Context, *lnrpc.ChannelGraph) func (UnimplementedDevServer) Quiesce(context.Context, *QuiescenceRequest) (*QuiescenceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Quiesce not implemented") } +func (UnimplementedDevServer) TriggerSweeper(context.Context, *TriggerSweeperRequest) (*TriggerSweeperResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TriggerSweeper not implemented") +} func (UnimplementedDevServer) mustEmbedUnimplementedDevServer() {} // UnsafeDevServer may be embedded to opt out of forward compatibility for this service. 
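For reference, the client surface generated above can be exercised from any Go tool that already holds a gRPC connection to a node built with the dev tag. The sketch below is illustrative only and not part of this series: triggerSweepOnce and the devtools package are hypothetical names, and dialing the connection with the node's TLS and macaroon credentials is assumed to happen elsewhere.

package devtools

import (
	"context"
	"fmt"

	"github.com/lightningnetwork/lnd/lnrpc/devrpc"
	"google.golang.org/grpc"
)

// triggerSweepOnce asks a dev-build lnd node to sweep all of its pending
// inputs right away and reports how many inputs the sweeper attempted.
func triggerSweepOnce(ctx context.Context,
	conn *grpc.ClientConn) (uint32, error) {

	client := devrpc.NewDevClient(conn)

	resp, err := client.TriggerSweeper(ctx, &devrpc.TriggerSweeperRequest{})
	if err != nil {
		return 0, fmt.Errorf("TriggerSweeper: %w", err)
	}

	return resp.GetNumSweepsBroadcast(), nil
}

This is the same call path the itest harness uses via HarnessRPC.TriggerSweeper later in this series; the only difference is that the harness wraps the call with its default timeout and error assertion.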
@@ -129,6 +153,24 @@ func _Dev_Quiesce_Handler(srv interface{}, ctx context.Context, dec func(interfa return interceptor(ctx, in, info, handler) } +func _Dev_TriggerSweeper_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TriggerSweeperRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DevServer).TriggerSweeper(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/devrpc.Dev/TriggerSweeper", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DevServer).TriggerSweeper(ctx, req.(*TriggerSweeperRequest)) + } + return interceptor(ctx, in, info, handler) +} + // Dev_ServiceDesc is the grpc.ServiceDesc for Dev service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -144,6 +186,10 @@ var Dev_ServiceDesc = grpc.ServiceDesc{ MethodName: "Quiesce", Handler: _Dev_Quiesce_Handler, }, + { + MethodName: "TriggerSweeper", + Handler: _Dev_TriggerSweeper_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "devrpc/dev.proto", diff --git a/lnrpc/devrpc/dev_server.go b/lnrpc/devrpc/dev_server.go index 6cc4347f06..ebc59a5db9 100644 --- a/lnrpc/devrpc/dev_server.go +++ b/lnrpc/devrpc/dev_server.go @@ -45,6 +45,10 @@ var ( Entity: "offchain", Action: "write", }}, + "/devrpc.Dev/TriggerSweeper": {{ + Entity: "onchain", + Action: "write", + }}, } ) @@ -383,3 +387,24 @@ func (s *Server) Quiesce(_ context.Context, in *QuiescenceRequest) ( return nil, fmt.Errorf("server shutting down") } } + +// TriggerSweeper triggers the sweeper to immediately attempt to create and +// broadcast sweep transactions for all pending inputs. This is primarily used +// for testing to deterministically control when sweeps are broadcast. +// +// NOTE: Part of the DevServer interface. 
+func (s *Server) TriggerSweeper(_ context.Context, + _ *TriggerSweeperRequest) (*TriggerSweeperResponse, error) { + + if s.cfg.Sweeper == nil { + return nil, fmt.Errorf("sweeper not available") + } + + numSweeps := s.cfg.Sweeper.TriggerSweep() + + log.Debugf("TriggerSweeper: triggered sweep of %d inputs", numSweeps) + + return &TriggerSweeperResponse{ + NumSweepsBroadcast: uint32(numSweeps), + }, nil +} diff --git a/subrpcserver_config.go b/subrpcserver_config.go index d55d5a4923..a8471f0419 100644 --- a/subrpcserver_config.go +++ b/subrpcserver_config.go @@ -353,6 +353,10 @@ func (s *subRPCServerConfigs) PopulateDependencies(cfg *Config, reflect.ValueOf(htlcSwitch), ) + subCfgValue.FieldByName("Sweeper").Set( + reflect.ValueOf(sweeper), + ) + case *peersrpc.Config: subCfgValue := extractReflectValue(subCfg) From 503dd29f2cb1054f4dfbbfbf1886874be261e55a Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Sat, 11 Oct 2025 14:20:52 +0100 Subject: [PATCH 17/26] sweeper: implement TriggerSweep --- sweep/mock_test.go | 2 +- sweep/sweeper.go | 47 +++++++++ sweep/sweeper_test.go | 215 +++++++++++++++++++++++++++++++++++++++++- 3 files changed, 262 insertions(+), 2 deletions(-) diff --git a/sweep/mock_test.go b/sweep/mock_test.go index e6e254e8e1..a9de162290 100644 --- a/sweep/mock_test.go +++ b/sweep/mock_test.go @@ -291,7 +291,7 @@ func (m *MockBumper) Broadcast(req *BumpRequest) <-chan *BumpResult { return nil } - return args.Get(0).(chan *BumpResult) + return args.Get(0).(<-chan *BumpResult) } // MockFeeFunction is a mock implementation of the FeeFunction interface. diff --git a/sweep/sweeper.go b/sweep/sweeper.go index e2163f6373..20b9517861 100644 --- a/sweep/sweeper.go +++ b/sweep/sweeper.go @@ -326,6 +326,12 @@ type updateResp struct { err error } +// triggerSweepReq is an internal message we'll use to represent an external +// caller's intent to trigger an immediate sweep of all pending inputs. +type triggerSweepReq struct { + respChan chan int +} + // UtxoSweeper is responsible for sweeping outputs back into the wallet type UtxoSweeper struct { started uint32 // To be used atomically. @@ -349,6 +355,10 @@ type UtxoSweeper struct { // callers who wish to bump the fee rate of a given input. updateReqs chan *updateReq + // triggerSweepReqs is a channel that will be sent requests by external + // callers who wish to trigger an immediate sweep of all pending inputs. + triggerSweepReqs chan *triggerSweepReq + // inputs is the total set of inputs the UtxoSweeper has been requested // to sweep. inputs InputsMap @@ -451,6 +461,7 @@ func New(cfg *UtxoSweeperConfig) *UtxoSweeper { spendChan: make(chan *chainntnfs.SpendDetail), updateReqs: make(chan *updateReq), pendingSweepsReqs: make(chan *pendingSweepsReq), + triggerSweepReqs: make(chan *triggerSweepReq), quit: make(chan struct{}), inputs: make(InputsMap), bumpRespChan: make(chan *bumpResp, 100), @@ -711,6 +722,25 @@ func (s *UtxoSweeper) collector() { err) } + // A new external request has been received to trigger an + // immediate sweep of all pending inputs. + case req := <-s.triggerSweepReqs: + // Update the inputs with the latest height. + inputs := s.updateSweeperInputs() + + // Mark all inputs as immediate so they are broadcast + // right away. This is necessary for testing where we + // want to deterministically trigger sweeps. + for _, inp := range inputs { + inp.params.Immediate = true + } + + // Attempt to sweep any pending inputs. + s.sweepPendingInputs(inputs) + + // Send back the number of inputs we attempted to sweep. 
+ req.respChan <- len(inputs) + // A new block comes in, update the bestHeight, perform a check // over all pending inputs and publish sweeping txns if needed. case beat := <-s.BlockbeatChan: @@ -1203,6 +1233,23 @@ func (s *UtxoSweeper) ListSweeps() ([]chainhash.Hash, error) { return s.cfg.Store.ListSweeps() } +// TriggerSweep triggers an immediate attempt to create and broadcast sweep +// transactions for all pending inputs. This is useful for testing to +// deterministically control when sweeps are broadcast. This method is +// thread-safe as it sends a message to the collector goroutine's event loop. +func (s *UtxoSweeper) TriggerSweep() int { + req := &triggerSweepReq{ + respChan: make(chan int, 1), + } + + select { + case s.triggerSweepReqs <- req: + return <-req.respChan + case <-s.quit: + return 0 + } +} + // mempoolLookup takes an input's outpoint and queries the mempool to see // whether it's already been spent in a transaction found in the mempool. // Returns the transaction if found. diff --git a/sweep/sweeper_test.go b/sweep/sweeper_test.go index d97fd99250..eec6b37c63 100644 --- a/sweep/sweeper_test.go +++ b/sweep/sweeper_test.go @@ -70,6 +70,76 @@ func createMockInput(t *testing.T, s *UtxoSweeper, return inp } +// setupMatureMockInput configures a mock input to be mature and ready for +// sweeping. This centralizes the common pattern of setting up RequiredLockTime, +// BlocksToMaturity, HeightHint, and OutPoint mocks. +func setupMatureMockInput(inp *input.MockInput, currentHeight int32, + index uint32) { + + inp.On("RequiredLockTime").Return( + uint32(currentHeight), false).Maybe() + inp.On("BlocksToMaturity").Return(uint32(0)).Maybe() + inp.On("HeightHint").Return(uint32(currentHeight)).Maybe() + inp.On("OutPoint").Return(wire.OutPoint{Index: index}).Maybe() +} + +// newTestSweeperConfig returns a UtxoSweeperConfig with common test settings. +// This avoids repeating the GenSweepScript setup across multiple tests. +func newTestSweeperConfig(wallet Wallet, aggregator UtxoAggregator, + publisher Bumper) *UtxoSweeperConfig { + + return &UtxoSweeperConfig{ + Wallet: wallet, + Aggregator: aggregator, + Publisher: publisher, + GenSweepScript: func() fn.Result[lnwallet.AddrWithKey] { + return fn.Ok(lnwallet.AddrWithKey{ + DeliveryAddress: testPubKey.SerializeCompressed(), + }) + }, + NoDeadlineConfTarget: uint32(DefaultDeadlineDelta), + } +} + +// mockInputSet sets up a standard input set mock with common expectations. +// This centralizes the repetitive pattern of mocking input set methods. +func mockInputSet(set *MockInputSet, inputs []input.Input) { + set.On("Inputs").Return(inputs).Maybe() + set.On("NeedWalletInput").Return(false).Once() + set.On("DeadlineHeight").Return(int32(testHeight)).Once() + set.On("Budget").Return(btcutil.Amount(1)).Once() + set.On("StartingFeeRate").Return( + fn.None[chainfee.SatPerKWeight]()).Once() + set.On("Immediate").Return(true).Once() +} + +// mockAggregatorCluster sets up a standard ClusterInputs mock expectation. +func mockAggregatorCluster(aggregator *mockUtxoAggregator, + sets []InputSet) { + + aggregator.On("ClusterInputs", mock.Anything).Return(sets).Once() +} + +// mockPublisherBroadcast sets up a standard Broadcast mock expectation. +func mockPublisherBroadcast(publisher *MockBumper) { + publisher.On("Broadcast", mock.Anything).Return( + make(<-chan *BumpResult)).Once() +} + +// setupMockInputSetForSweep configures a MockInputSet with the standard mocks +// needed for sweep operations, allowing customization of the immediate flag. 
+func setupMockInputSetForSweep(set *MockInputSet, inputs []input.Input, + immediate bool) { + + set.On("Inputs").Return(inputs).Maybe() + set.On("NeedWalletInput").Return(false).Once() + set.On("DeadlineHeight").Return(int32(testHeight)).Once() + set.On("Budget").Return(btcutil.Amount(1)).Once() + set.On("StartingFeeRate").Return( + fn.None[chainfee.SatPerKWeight]()).Once() + set.On("Immediate").Return(immediate).Once() +} + // TestMarkInputsPendingPublish checks that given a list of inputs with // different states, only the non-terminal state will be marked as `Published`. func TestMarkInputsPendingPublish(t *testing.T) { @@ -79,7 +149,6 @@ func TestMarkInputsPendingPublish(t *testing.T) { // Create a test sweeper. s := New(&UtxoSweeperConfig{}) - // Create a mock input set. set := &MockInputSet{} defer set.AssertExpectations(t) @@ -1446,3 +1515,147 @@ func TestHandleBumpEventTxUnknownSpendWithRetry(t *testing.T) { // Assert the state of the input is updated. require.Equal(t, PublishFailed, s.inputs[op2].state) } + +// TestTriggerSweep checks that the `TriggerSweep` method correctly triggers an +// immediate sweep of all pending inputs and returns the count of inputs +// attempted. +func TestTriggerSweep(t *testing.T) { + t.Parallel() + + require := require.New(t) + + wallet := &MockWallet{} + defer wallet.AssertExpectations(t) + aggregator := &mockUtxoAggregator{} + defer aggregator.AssertExpectations(t) + publisher := &MockBumper{} + defer publisher.AssertExpectations(t) + + s := New(newTestSweeperConfig(wallet, aggregator, publisher)) + + inp1 := &input.MockInput{} + defer inp1.AssertExpectations(t) + inp2 := &input.MockInput{} + defer inp2.AssertExpectations(t) + inp3 := &input.MockInput{} + defer inp3.AssertExpectations(t) + + setupMatureMockInput(inp1, s.currentHeight, 1) + setupMatureMockInput(inp2, s.currentHeight, 2) + setupMatureMockInput(inp3, s.currentHeight, 3) + + input1 := &SweeperInput{state: Init, Input: inp1} + input2 := &SweeperInput{state: PublishFailed, Input: inp2} + input3 := &SweeperInput{state: PendingPublish, Input: inp3} + + s.inputs = map[wire.OutPoint]*SweeperInput{ + {Index: 1}: input1, + {Index: 2}: input2, + {Index: 3}: input3, + } + + s.wg.Add(1) + go s.collector() + + set := &MockInputSet{} + defer set.AssertExpectations(t) + setupMockInputSetForSweep(set, []input.Input{inp1, inp2}, true) + mockAggregatorCluster(aggregator, []InputSet{set}) + mockPublisherBroadcast(publisher) + + numInputs := s.TriggerSweep() + + // Only Init and PublishFailed states are sweepable. + require.Equal(2, numInputs) + + close(s.quit) + s.wg.Wait() +} + +// TestTriggerSweepNoInputs checks that `TriggerSweep` returns 0 when there are +// no pending inputs to sweep. +func TestTriggerSweepNoInputs(t *testing.T) { + t.Parallel() + + require := require.New(t) + + aggregator := &mockUtxoAggregator{} + defer aggregator.AssertExpectations(t) + + s := New(&UtxoSweeperConfig{Aggregator: aggregator}) + mockAggregatorCluster(aggregator, []InputSet{}) + + s.wg.Add(1) + go s.collector() + + numInputs := s.TriggerSweep() + + require.Equal(0, numInputs) + + close(s.quit) + s.wg.Wait() +} + +// TestTriggerSweepSweeperShutdown checks that `TriggerSweep` returns 0 when +// the sweeper is shutting down. 
+func TestTriggerSweepSweeperShutdown(t *testing.T) { + t.Parallel() + + require := require.New(t) + + s := New(&UtxoSweeperConfig{}) + close(s.quit) + + numInputs := s.TriggerSweep() + + require.Equal(0, numInputs) +} + +// TestTriggerSweepMarksImmediate checks that `TriggerSweep` marks all inputs +// as immediate before sweeping. +func TestTriggerSweepMarksImmediate(t *testing.T) { + t.Parallel() + + require := require.New(t) + + aggregator := &mockUtxoAggregator{} + defer aggregator.AssertExpectations(t) + publisher := &MockBumper{} + defer publisher.AssertExpectations(t) + + s := New(newTestSweeperConfig(nil, aggregator, publisher)) + + inp := &input.MockInput{} + defer inp.AssertExpectations(t) + setupMatureMockInput(inp, s.currentHeight, 1) + + input1 := &SweeperInput{ + state: Init, + Input: inp, + params: Params{ + Immediate: false, + }, + } + + s.inputs = map[wire.OutPoint]*SweeperInput{ + {Index: 1}: input1, + } + + s.wg.Add(1) + go s.collector() + + set := &MockInputSet{} + defer set.AssertExpectations(t) + setupMockInputSetForSweep(set, []input.Input{inp}, true) + mockAggregatorCluster(aggregator, []InputSet{set}) + mockPublisherBroadcast(publisher) + + numInputs := s.TriggerSweep() + + // Verify TriggerSweep marked the input as immediate. + require.True(input1.params.Immediate) + require.Equal(1, numInputs) + + close(s.quit) + s.wg.Wait() +} From d1799035eabf2f29af7d9c0b6b38f40e224aeae3 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Sat, 11 Oct 2025 14:59:19 +0100 Subject: [PATCH 18/26] lntest: add new utilities to wait for mempool/block inclusion, then trigger sweep if failed This implements a generalized pattern where we'll try out assertion, then if that fails, we'll try a sweep, then try the assertion again. This uses the new TriggerSweep call that was added earlier. --- lntest/harness_miner.go | 58 ++++++++++++++++++++++++++++++++++++++++ lntest/miner/miner.go | 59 +++++++++++++++++++++++++++++++++++++++++ lntest/rpc/lnd.go | 15 +++++++++++ 3 files changed, 132 insertions(+) diff --git a/lntest/harness_miner.go b/lntest/harness_miner.go index 010c3f8ca2..fca47bec60 100644 --- a/lntest/harness_miner.go +++ b/lntest/harness_miner.go @@ -121,6 +121,37 @@ func (h *HarnessTest) MineBlocksAndAssertNumTxes(num uint32, return blocks } +// MineBlocksAndAssertNumTxesWithSweep is like MineBlocksAndAssertNumTxes but +// handles async confirmation notification races by triggering sweeps if needed. +// Use this for tests that expect sweep transactions after force closes or other +// events where confirmation notifications may arrive asynchronously. +func (h *HarnessTest) MineBlocksAndAssertNumTxesWithSweep(num uint32, + numTxs int, hn *node.HarnessNode) []*wire.MsgBlock { + + // Update the harness's current height. + defer h.updateCurrentHeight() + + // Wait for transactions with sweep triggering support. + txids := h.AssertNumTxsInMempoolWithSweepTrigger(numTxs, hn) + + // Mine blocks. + blocks := h.miner.MineBlocks(num) + + // Assert that all the transactions were included in the first block. + for _, txid := range txids { + h.miner.AssertTxInBlock(blocks[0], txid) + } + + // Make sure the mempool has been updated. + h.miner.AssertTxnsNotInMempool(txids) + + // Finally, make sure all the active nodes are synced. + bestBlock := blocks[len(blocks)-1] + h.AssertActiveNodesSyncedTo(bestBlock.BlockHash()) + + return blocks +} + // ConnectMiner connects the miner with the chain backend in the network. 
func (h *HarnessTest) ConnectMiner() { err := h.manager.chainBackend.ConnectMiner() @@ -222,6 +253,16 @@ func (h *HarnessTest) AssertNumTxsInMempool(n int) []chainhash.Hash { return h.miner.AssertNumTxsInMempool(n) } +// AssertNumTxsInMempoolWithSweepTrigger waits for N transactions with sweep +// triggering support to handle async confirmation notification races. If +// transactions don't appear within a short timeout, it triggers a manual sweep +// via the provided node's RPC and waits again. +func (h *HarnessTest) AssertNumTxsInMempoolWithSweepTrigger(n int, + hn *node.HarnessNode) []chainhash.Hash { + + return h.miner.AssertNumTxsInMempoolWithSweepTrigger(n, hn.RPC) +} + // AssertOutpointInMempool asserts a given outpoint can be found in the mempool. func (h *HarnessTest) AssertOutpointInMempool(op wire.OutPoint) *wire.MsgTx { return h.miner.AssertOutpointInMempool(op) @@ -240,6 +281,23 @@ func (h *HarnessTest) GetNumTxsFromMempool(n int) []*wire.MsgTx { return h.miner.GetNumTxsFromMempool(n) } +// GetNumTxsFromMempoolWithSweep gets N transactions from mempool with sweep +// triggering support to handle async confirmation notification races. Use this +// for tests that expect sweep transactions after force closes. +func (h *HarnessTest) GetNumTxsFromMempoolWithSweep(n int, + hn *node.HarnessNode) []*wire.MsgTx { + + txids := h.AssertNumTxsInMempoolWithSweepTrigger(n, hn) + + var txes []*wire.MsgTx + for _, txid := range txids { + tx := h.miner.GetRawTransaction(txid) + txes = append(txes, tx.MsgTx()) + } + + return txes +} + // GetBestBlock makes a RPC request to miner and asserts. func (h *HarnessTest) GetBestBlock() (*chainhash.Hash, int32) { return h.miner.GetBestBlock() diff --git a/lntest/miner/miner.go b/lntest/miner/miner.go index 0229d6a47f..1a9aa7917d 100644 --- a/lntest/miner/miner.go +++ b/lntest/miner/miner.go @@ -18,6 +18,7 @@ import ( "github.com/btcsuite/btcd/rpcclient" "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/fn/v2" + "github.com/lightningnetwork/lnd/lnrpc/devrpc" "github.com/lightningnetwork/lnd/lntest/node" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" @@ -225,6 +226,64 @@ func (h *HarnessMiner) AssertNumTxsInMempool(n int) []chainhash.Hash { return mem } +// SweeperTrigger is an interface that allows triggering a manual sweep. +type SweeperTrigger interface { + TriggerSweeper(*devrpc.TriggerSweeperRequest) *devrpc.TriggerSweeperResponse +} + +// AssertNumTxsInMempoolWithSweepTrigger waits for N transactions to appear in +// the mempool. If they don't appear within a short timeout, it triggers a +// manual sweep via the provided node's RPC and waits again. This handles the +// async confirmation notification race where sweeps may be registered after +// block processing completes. +func (h *HarnessMiner) AssertNumTxsInMempoolWithSweepTrigger(n int, + node SweeperTrigger) []chainhash.Hash { + + // First, try the fast path with a shorter timeout (5 seconds). Most + // tests should hit this path when confirmation notifications arrive + // quickly. + shortTimeout := 5 * time.Second + var mem []chainhash.Hash + + err := wait.NoError(func() error { + mem = h.GetRawMempool() + if len(mem) == n { + return nil + } + + return fmt.Errorf("want %v, got %v in mempool: %v", + n, len(mem), mem) + }, shortTimeout) + + // If we found the transactions quickly, we're done. + if err == nil { + return mem + } + + // Second, short timeout expired. 
This likely means confirmation + // notifications haven't arrived yet due to async processing. Trigger a + // manual sweep to force broadcast of any registered sweeps. + h.Logf("AssertNumTxsInMempoolWithSweepTrigger: timeout after %v, "+ + "triggering sweep (want %d, got %d)", shortTimeout, n, len(mem)) + + node.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + + // Next, we'll wait again with the full timeout. The sweep should now be + // broadcast if confirmation notifications have arrived. + err = wait.NoError(func() error { + mem = h.GetRawMempool() + if len(mem) == n { + return nil + } + + return fmt.Errorf("want %v, got %v in mempool: %v", + n, len(mem), mem) + }, wait.MinerMempoolTimeout) + require.NoError(h, err, "assert tx in mempool timeout after trigger") + + return mem +} + // AssertTxInBlock asserts that a given txid can be found in the passed block. func (h *HarnessMiner) AssertTxInBlock(block *wire.MsgBlock, txid chainhash.Hash) { diff --git a/lntest/rpc/lnd.go b/lntest/rpc/lnd.go index 265aab91ad..58b8025d1a 100644 --- a/lntest/rpc/lnd.go +++ b/lntest/rpc/lnd.go @@ -816,6 +816,21 @@ func (h *HarnessRPC) Quiesce( return res } +// TriggerSweeper triggers an immediate sweep of all pending inputs. This is +// useful for tests to deterministically control when sweeps are broadcast, +// especially when handling async confirmation notification races. +func (h *HarnessRPC) TriggerSweeper( + req *devrpc.TriggerSweeperRequest) *devrpc.TriggerSweeperResponse { + + ctx, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) + defer cancel() + + res, err := h.Dev.TriggerSweeper(ctx, req) + h.NoError(err, "TriggerSweeper returned an error") + + return res +} + type PeerEventsClient lnrpc.Lightning_SubscribePeerEventsClient // SubscribePeerEvents makes a RPC call to the node's SubscribePeerEvents and From fa8a8bf430cee6d0a9c322c2a140355154f2388c Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Sat, 11 Oct 2025 14:59:47 +0100 Subject: [PATCH 19/26] itest: update itests to use new *WithSweep helper assertions This change is due to the fact that blockbeat handling is now more async in the cnct. --- itest/lnd_channel_backup_test.go | 2 +- itest/lnd_channel_force_close_test.go | 8 ++++---- itest/lnd_funding_test.go | 14 +++++++------- itest/lnd_htlc_timeout_resolver_test.go | 2 +- itest/lnd_multi-hop_force_close_test.go | 24 ++++++++++++------------ itest/lnd_psbt_test.go | 9 +++++++-- itest/lnd_sweep_test.go | 6 +++--- itest/lnd_wipe_fwdpkgs_test.go | 2 +- 8 files changed, 36 insertions(+), 31 deletions(-) diff --git a/itest/lnd_channel_backup_test.go b/itest/lnd_channel_backup_test.go index d3daeb1df7..9e4650fd8d 100644 --- a/itest/lnd_channel_backup_test.go +++ b/itest/lnd_channel_backup_test.go @@ -1630,7 +1630,7 @@ func assertDLPExecuted(ht *lntest.HarnessTest, // Expect one tx - the commitment sweep from Dave. For anchor // channels, we expect the two anchor sweeping txns to be // failed due they are uneconomical. - ht.MineBlocksAndAssertNumTxes(1, 1) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, dave) blocksMined++ // Now Dave should consider the channel fully closed. diff --git a/itest/lnd_channel_force_close_test.go b/itest/lnd_channel_force_close_test.go index 3cb6d30e48..676373d41f 100644 --- a/itest/lnd_channel_force_close_test.go +++ b/itest/lnd_channel_force_close_test.go @@ -184,7 +184,7 @@ func runChannelForceClosureTest(ht *lntest.HarnessTest, ) // We expect to see Alice's force close tx in the mempool. 
- ht.AssertNumTxsInMempool(1) + ht.AssertNumTxsInMempoolWithSweepTrigger(1, alice) // Mine a block which should confirm the commitment transaction // broadcast as a result of the force closure. Once mined, we also @@ -233,7 +233,7 @@ func runChannelForceClosureTest(ht *lntest.HarnessTest, // Carol's sweep tx should be in the mempool already, as her output is // not timelocked. This sweep tx should spend her to_local output as // the anchor output is not economical to spend. - carolTx := ht.GetNumTxsFromMempool(1)[0] + carolTx := ht.GetNumTxsFromMempoolWithSweep(1, carol)[0] // Carol's sweeping tx should have 1-input-1-output shape. require.Len(ht, carolTx.TxIn, 1) @@ -792,7 +792,7 @@ func runChannelForceClosureTestRestart(ht *lntest.HarnessTest, ht.RestartNode(alice) // We expect to see Alice's force close tx in the mempool. - ht.AssertNumTxsInMempool(1) + ht.AssertNumTxsInMempoolWithSweepTrigger(1, alice) // Mine a block which should confirm the commitment transaction // broadcast as a result of the force closure. Once mined, we also @@ -847,7 +847,7 @@ func runChannelForceClosureTestRestart(ht *lntest.HarnessTest, // Carol's sweep tx should be in the mempool already, as her output is // not timelocked. This sweep tx should spend her to_local output as // the anchor output is not economical to spend. - carolTx := ht.GetNumTxsFromMempool(1)[0] + carolTx := ht.GetNumTxsFromMempoolWithSweep(1, carol)[0] // Carol's sweeping tx should have 1-input-1-output shape. require.Len(ht, carolTx.TxIn, 1) diff --git a/itest/lnd_funding_test.go b/itest/lnd_funding_test.go index 2c1daf53d2..0207b3db0d 100644 --- a/itest/lnd_funding_test.go +++ b/itest/lnd_funding_test.go @@ -1272,11 +1272,10 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) { // Make sure Carol sees her to_remote output from the force close tx. ht.AssertNumPendingSweeps(carol, 1) - // Wait for Carol's sweep transaction to appear in the mempool. Due to - // async confirmation notifications, there's a race between when the - // sweep is registered and when the sweeper processes the next block. - // The sweeper uses immediate=false, so it broadcasts on the next block - // after registration. Mine an empty block to trigger the broadcast. + // Mine an empty block to trigger the sweep. Due to async confirmation + // notifications, the sweep registration might happen after the force + // close block is processed by the sweeper. Mining another block gives + // time for registration and triggers the broadcast. ht.MineEmptyBlocks(1) // Now the sweep should be in the mempool. @@ -1338,9 +1337,10 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) { // Make sure Carol sees her to_remote output from the force close tx. ht.AssertNumPendingSweeps(carol, 1) - // Mine an empty block to trigger the sweep broadcast (same fix as - // above). + // Mine an empty block to trigger the sweep (same as above). ht.MineEmptyBlocks(1) + + // Sweep should be in mempool. ht.AssertNumTxsInMempool(1) // Wait for the to_remote sweep tx to show up in carol's wallet. diff --git a/itest/lnd_htlc_timeout_resolver_test.go b/itest/lnd_htlc_timeout_resolver_test.go index 25aa0afccc..e6e578d4c9 100644 --- a/itest/lnd_htlc_timeout_resolver_test.go +++ b/itest/lnd_htlc_timeout_resolver_test.go @@ -348,7 +348,7 @@ func testHtlcTimeoutResolverExtractPreimageLocal(ht *lntest.HarnessTest) { // Check the current mempool state and we should see, // - Carol's direct spend tx, which contains the preimage. 
// - Carol's anchor sweep tx cannot be broadcast as it's uneconomical. - ht.AssertNumTxsInMempool(1) + ht.AssertNumTxsInMempoolWithSweepTrigger(1, carol) // We'll now mine enough blocks to trigger Bob's htlc timeout resolver // to act. Once his timeout resolver starts, it will extract the diff --git a/itest/lnd_multi-hop_force_close_test.go b/itest/lnd_multi-hop_force_close_test.go index 034a5641bb..e18e7b1868 100644 --- a/itest/lnd_multi-hop_force_close_test.go +++ b/itest/lnd_multi-hop_force_close_test.go @@ -428,7 +428,7 @@ func runLocalClaimOutgoingHTLC(ht *lntest.HarnessTest, // We expect to see tow txns in the mempool, // 1. Bob's force close tx. // 2. Bob's anchor sweep tx. - ht.AssertNumTxsInMempool(2) + ht.AssertNumTxsInMempoolWithSweepTrigger(2, bob) // Mine a block to confirm the closing tx and the anchor sweeping tx. ht.MineBlocksAndAssertNumTxes(1, 2) @@ -447,7 +447,7 @@ func runLocalClaimOutgoingHTLC(ht *lntest.HarnessTest, // Bob's sweeper should sweep his outgoing HTLC immediately since it's // expired. His to_local output cannot be swept due to the CSV lock. // Carol's anchor sweep should be failed due to output being dust. - ht.AssertNumTxsInMempool(1) + ht.AssertNumTxsInMempoolWithSweepTrigger(1, bob) // Mine a block to confirm Bob's outgoing HTLC sweeping tx. ht.MineBlocksAndAssertNumTxes(1, 1) @@ -788,7 +788,7 @@ func runMultiHopReceiverPreimageClaim(ht *lntest.HarnessTest, // We expect to see tow txns in the mempool, // 1. Carol's force close tx. // 2. Carol's anchor sweep tx. - ht.AssertNumTxsInMempool(2) + ht.AssertNumTxsInMempoolWithSweepTrigger(2, carol) // Mine a block to confirm the closing tx and the anchor sweeping tx. ht.MineBlocksAndAssertNumTxes(1, 2) @@ -1133,7 +1133,7 @@ func runLocalForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest, ht.MineBlocks(int(defaultCSV - 1)) // Mine a block to confirm Bob's to_local sweep. - ht.MineBlocksAndAssertNumTxes(1, 1) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, bob) } // We'll now mine enough blocks for the HTLC to expire. After this, Bob @@ -1457,7 +1457,7 @@ func runRemoteForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest, // won't be swept due it being uneconomical. For Carol, since // her anchor is not used for CPFP, it'd be also uneconomical // to sweep so it will fail. - ht.MineBlocksAndAssertNumTxes(1, 1) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, bob) } // Next, we'll mine enough blocks for the HTLC to expire. At this @@ -1711,7 +1711,7 @@ func runLocalClaimIncomingHTLC(ht *lntest.HarnessTest, // commit tx). Her anchor output won't be swept as it's uneconomical. // For Bob, since his anchor is not used for CPFP, it'd be uneconomical // to sweep so it will fail. - ht.AssertNumTxsInMempool(1) + ht.AssertNumTxsInMempoolWithSweepTrigger(1, alice) // Mine a block to confirm Alice's sweeping tx. ht.MineBlocksAndAssertNumTxes(1, 1) @@ -1771,7 +1771,7 @@ func runLocalClaimIncomingHTLC(ht *lntest.HarnessTest, // commitment anchor output, we'd expect to see two txns, // - Carol's second level HTLC tx. // - Bob's commitment output sweeping tx. - ht.AssertNumTxsInMempool(2) + ht.AssertNumTxsInMempoolWithSweepTrigger(2, bob) // At this point we suspend Alice to make sure she'll handle the // on-chain settle after a restart. @@ -2045,7 +2045,7 @@ func runLocalClaimIncomingHTLCLeased(ht *lntest.HarnessTest, // // Carol will broadcast her second-level HTLC sweeping txns. Bob canoot // sweep his commitment anchor output yet due to it being CLTV locked. 
- ht.AssertNumTxsInMempool(1) + ht.AssertNumTxsInMempoolWithSweepTrigger(1, carol) // At this point we suspend Alice to make sure she'll handle the // on-chain settle after a restart. @@ -2401,7 +2401,7 @@ func runLocalPreimageClaim(ht *lntest.HarnessTest, // We mine one block to confirm, // - Carol's sweeping tx of the incoming HTLC. // - Bob's sweeping tx of his commit output. - ht.MineBlocksAndAssertNumTxes(1, 2) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 2, carol) // When Bob notices Carol's second level tx in the block, he will // extract the preimage and offer the HTLC to his sweeper. So he has, @@ -2641,7 +2641,7 @@ func runLocalPreimageClaimLeased(ht *lntest.HarnessTest, // We mine one block to confirm, // - Carol's sweeping tx of the incoming HTLC. - ht.MineBlocksAndAssertNumTxes(1, 1) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, carol) // When Bob notices Carol's second level tx in the block, he will // extract the preimage and offer the HTLC to his sweeper. So he has, @@ -3030,7 +3030,7 @@ func runHtlcAggregation(ht *lntest.HarnessTest, // Bob's force close tx and anchor sweeping tx should now be found in // the mempool. - ht.AssertNumTxsInMempool(2) + ht.AssertNumTxsInMempoolWithSweepTrigger(2, bob) // Mine a block to confirm Bob's force close tx and anchor sweeping tx. ht.MineBlocksAndAssertNumTxes(1, 2) @@ -3062,7 +3062,7 @@ func runHtlcAggregation(ht *lntest.HarnessTest, // 2. Bob's sweeping tx for all success HTLCs. // 3. Carol's sweeping tx for her commit output. // Mine a block to confirm them. - ht.MineBlocksAndAssertNumTxes(1, 3) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 3, carol) // For this channel, we also check the number of HTLCs and the stage // are correct. diff --git a/itest/lnd_psbt_test.go b/itest/lnd_psbt_test.go index 758aec23b7..b0792d2808 100644 --- a/itest/lnd_psbt_test.go +++ b/itest/lnd_psbt_test.go @@ -19,6 +19,7 @@ import ( "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnrpc/devrpc" "github.com/lightningnetwork/lnd/lnrpc/signrpc" "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lntest" @@ -1677,7 +1678,9 @@ func testPsbtChanFundingWithUnstableUtxos(ht *lntest.HarnessTest) { // Make sure Carol sees her to_remote output from the force close tx. ht.AssertNumPendingSweeps(carol, 1) - // We wait for the to_remote sweep tx. + // We wait for the to_remote sweep tx. Trigger sweep to handle async + // blockbeat notification races. + carol.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) ht.AssertNumUTXOsUnconfirmed(carol, 1) // We need the maximum funding amount to ensure we are opening the next @@ -1799,7 +1802,9 @@ func testPsbtChanFundingWithUnstableUtxos(ht *lntest.HarnessTest) { // Make sure Carol sees her to_remote output from the force close tx. ht.AssertNumPendingSweeps(carol, 1) - // We wait for the to_remote sweep tx of channelPoint2. + // We wait for the to_remote sweep tx of channelPoint2. Trigger sweep + // to handle async blockbeat notification races. 
+ carol.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) utxos := ht.AssertNumUTXOsUnconfirmed(carol, 1) // We need the maximum funding amount to ensure we are opening the next diff --git a/itest/lnd_sweep_test.go b/itest/lnd_sweep_test.go index c5bcd3b15d..8315d55324 100644 --- a/itest/lnd_sweep_test.go +++ b/itest/lnd_sweep_test.go @@ -916,7 +916,7 @@ func testSweepHTLCs(ht *lntest.HarnessTest) { ht.AssertNumPendingSweeps(bob, 2) // Bob should have one sweeping tx in the mempool. - outgoingSweep := ht.GetNumTxsFromMempool(1)[0] + outgoingSweep := ht.GetNumTxsFromMempoolWithSweep(1, bob)[0] // Check the shape of the sweeping tx - we expect it to be // 2-input-2-output as a wallet utxo is used and a required output is @@ -1057,7 +1057,7 @@ func testSweepHTLCs(ht *lntest.HarnessTest) { // 1. the outgoing HTLC sweeping tx. // 2. the incoming HTLC sweeping tx. // 3. the anchor sweeping tx. - txns = ht.GetNumTxsFromMempool(3) + txns = ht.GetNumTxsFromMempoolWithSweep(3, bob) abCloseTxid := closeTx.TxHash() @@ -1389,7 +1389,7 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) { // requests will be sent to the sweeper. Finally, when the sweeper // receives this blockbeat, it will create the sweeping tx and publish // it. - ht.AssertNumTxsInMempool(1) + ht.AssertNumTxsInMempoolWithSweepTrigger(1, bob) // Mine one more empty block should trigger Bob's sweeping. Since we // use a CSV of 2, this means Alice's to_local output is now mature. diff --git a/itest/lnd_wipe_fwdpkgs_test.go b/itest/lnd_wipe_fwdpkgs_test.go index 337331fd56..68f0f0c89c 100644 --- a/itest/lnd_wipe_fwdpkgs_test.go +++ b/itest/lnd_wipe_fwdpkgs_test.go @@ -97,5 +97,5 @@ func testWipeForwardingPackages(ht *lntest.HarnessTest) { ht.AssertNumPendingSweeps(alice, 1) // Mine 1 block to get Alice's sweeping tx confirmed. - ht.MineBlocksAndAssertNumTxes(1, 1) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, alice) } From c397fd4d8f537cf036f2fdb2ecaec5352cf2e6af Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Thu, 23 Oct 2025 16:48:02 +0200 Subject: [PATCH 20/26] fixup! itest: update itests to use new *WithSweep helper assertions --- itest/lnd_multi-hop_force_close_test.go | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/itest/lnd_multi-hop_force_close_test.go b/itest/lnd_multi-hop_force_close_test.go index e18e7b1868..4048e36019 100644 --- a/itest/lnd_multi-hop_force_close_test.go +++ b/itest/lnd_multi-hop_force_close_test.go @@ -6,6 +6,7 @@ import ( "github.com/btcsuite/btcd/btcutil" "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnrpc/devrpc" "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lntest" @@ -820,13 +821,13 @@ func runMultiHopReceiverPreimageClaim(ht *lntest.HarnessTest, // We expect to see 1 txns in the mempool, // - Carol's second level HTLC sweep tx. // We now mine a block to confirm it. - ht.MineBlocksAndAssertNumTxes(1, 1) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, carol) } else { // We expect to see 2 txns in the mempool, // - Bob's to_local sweep tx. // - Carol's second level HTLC sweep tx. // We now mine a block to confirm the sweeping txns. 
- ht.MineBlocksAndAssertNumTxes(1, 2) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 2, carol) } // Once the second-level transaction confirmed, Bob should have @@ -850,12 +851,12 @@ func runMultiHopReceiverPreimageClaim(ht *lntest.HarnessTest, ht.AssertNumPendingSweeps(carol, 1) // We should have a new transaction in the mempool. - ht.AssertNumTxsInMempool(1) + ht.AssertNumTxsInMempoolWithSweepTrigger(1, carol) // Finally, if we mine an additional block to confirm Carol's second // level success transaction. Carol should not show a pending channel // in her report afterwards. - ht.MineBlocksAndAssertNumTxes(1, 1) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, carol) ht.AssertNumPendingForceClose(carol, 0) // The invoice should show as settled for Carol, indicating that it was @@ -879,7 +880,7 @@ func runMultiHopReceiverPreimageClaim(ht *lntest.HarnessTest, ht.AssertNumPendingSweeps(bob, 2) // Mine a block to confirm the commit output sweep. - ht.MineBlocksAndAssertNumTxes(1, 1) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, bob) } // Assert Bob also sees the channel as closed. @@ -2401,7 +2402,7 @@ func runLocalPreimageClaim(ht *lntest.HarnessTest, // We mine one block to confirm, // - Carol's sweeping tx of the incoming HTLC. // - Bob's sweeping tx of his commit output. - ht.MineBlocksAndAssertNumTxesWithSweep(1, 2, carol) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 2, bob) // When Bob notices Carol's second level tx in the block, he will // extract the preimage and offer the HTLC to his sweeper. So he has, @@ -2421,7 +2422,7 @@ func runLocalPreimageClaim(ht *lntest.HarnessTest, ht.AssertTxSpendFrom(bobHtlcSweep, aliceForceClose) // We'll now mine a block which should confirm Bob's HTLC sweep tx. - ht.MineBlocksAndAssertNumTxes(1, 1) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, bob) // Now that the sweeping tx has been confirmed, Bob should recognize // that all contracts for the Bob-Carol channel have been fully @@ -3061,8 +3062,14 @@ func runHtlcAggregation(ht *lntest.HarnessTest, // 1. Bob's sweeping tx for all timeout HTLCs. // 2. Bob's sweeping tx for all success HTLCs. // 3. Carol's sweeping tx for her commit output. + // Wait for all sweeps to appear in mempool, triggering if needed. + ht.AssertNumTxsInMempoolWithSweepTrigger(3, bob) + // Also trigger Carol in case her sweep is delayed. + carol.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + ht.Miner().AssertNumTxsInMempool(3) + // Mine a block to confirm them. - ht.MineBlocksAndAssertNumTxesWithSweep(1, 3, carol) + ht.MineBlocksAndAssertNumTxes(1, 3) // For this channel, we also check the number of HTLCs and the stage // are correct. @@ -3079,7 +3086,7 @@ func runHtlcAggregation(ht *lntest.HarnessTest, // Mine a block to confirm Bob's sweeping of his to_local // output. - ht.MineBlocksAndAssertNumTxes(1, 1) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, bob) } // Mine blocks till the CSV expires on Bob's HTLC output. From e5cb466303e5b68e9837f5c98e87180a2c61331d Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 24 Oct 2025 16:42:56 +0200 Subject: [PATCH 21/26] fixup! fixup! 
itest: update itests to use new *WithSweep helper assertions --- itest/lnd_multi-hop_force_close_test.go | 32 ++++++++++++++++++------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/itest/lnd_multi-hop_force_close_test.go b/itest/lnd_multi-hop_force_close_test.go index 4048e36019..8b04564fc7 100644 --- a/itest/lnd_multi-hop_force_close_test.go +++ b/itest/lnd_multi-hop_force_close_test.go @@ -2,6 +2,7 @@ package itest import ( "fmt" + "time" "github.com/btcsuite/btcd/btcutil" "github.com/lightningnetwork/lnd/lncfg" @@ -826,8 +827,11 @@ func runMultiHopReceiverPreimageClaim(ht *lntest.HarnessTest, // We expect to see 2 txns in the mempool, // - Bob's to_local sweep tx. // - Carol's second level HTLC sweep tx. + // Trigger both sweepers to ensure txs appear. + bob.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + carol.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) // We now mine a block to confirm the sweeping txns. - ht.MineBlocksAndAssertNumTxesWithSweep(1, 2, carol) + ht.MineBlocksAndAssertNumTxes(1, 2) } // Once the second-level transaction confirmed, Bob should have @@ -1146,7 +1150,7 @@ func runLocalForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest, "htlc=%v", resp.BlocksTilMaturity, resp.PendingHtlcs[0].BlocksTilMaturity) - ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity)) + ht.MineEmptyBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity)) // Bob's pending channel report should show that he has a single HTLC // that's now in stage one. @@ -1165,7 +1169,7 @@ func runLocalForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest, // Bob's outgoing HTLC sweep should be broadcast now. Mine a block to // confirm it. - ht.MineBlocksAndAssertNumTxes(1, 1) + ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, bob) // With the second layer timeout tx confirmed, Bob should have canceled // backwards the HTLC that Carol sent. @@ -3062,12 +3066,24 @@ func runHtlcAggregation(ht *lntest.HarnessTest, // 1. Bob's sweeping tx for all timeout HTLCs. // 2. Bob's sweeping tx for all success HTLCs. // 3. Carol's sweeping tx for her commit output. - // Wait for all sweeps to appear in mempool, triggering if needed. - ht.AssertNumTxsInMempoolWithSweepTrigger(3, bob) - // Also trigger Carol in case her sweep is delayed. - carol.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + // + // Note: Carol's commit output may have a CSV lock that was satisfied + // by the empty block mined in flakePreimageSettlement. For zero-conf + // channels, there can be additional timing issues. Trigger sweepers + // multiple times if needed. + for i := 0; i < 5; i++ { + bob.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + carol.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + // Check if we have all 3 txs. + mempool := ht.GetRawMempool() + if len(mempool) == 3 { + break + } + // Wait a bit before retrying. + time.Sleep(1 * time.Second) + } + // Wait for all 3 txs to appear in mempool. ht.Miner().AssertNumTxsInMempool(3) - // Mine a block to confirm them. ht.MineBlocksAndAssertNumTxes(1, 3) From 45787b3d5bebdc8b1a0ef7c59bf2316aeebcfd4d Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Mon, 27 Oct 2025 12:10:05 -0700 Subject: [PATCH 22/26] fixup! 
itest: update itests to use new *WithSweep helper assertions --- itest/lnd_htlc_timeout_resolver_test.go | 3 ++- itest/lnd_multi-hop_force_close_test.go | 29 ++++++++++++++++++++++--- 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/itest/lnd_htlc_timeout_resolver_test.go b/itest/lnd_htlc_timeout_resolver_test.go index e6e578d4c9..c98528689d 100644 --- a/itest/lnd_htlc_timeout_resolver_test.go +++ b/itest/lnd_htlc_timeout_resolver_test.go @@ -175,7 +175,8 @@ func testHtlcTimeoutResolverExtractPreimageRemote(ht *lntest.HarnessTest) { ht.AssertNumPendingSweeps(carol, 1) // We should now have Carol's htlc success tx in the mempool. - ht.AssertNumTxsInMempool(1) + // Trigger Carol's sweeper to ensure it appears. + ht.AssertNumTxsInMempoolWithSweepTrigger(1, carol) // Restart Bob. Once he finishes syncing the channel state, he should // notice the force close from Carol. diff --git a/itest/lnd_multi-hop_force_close_test.go b/itest/lnd_multi-hop_force_close_test.go index 8b04564fc7..08855b2fda 100644 --- a/itest/lnd_multi-hop_force_close_test.go +++ b/itest/lnd_multi-hop_force_close_test.go @@ -1138,7 +1138,20 @@ func runLocalForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest, ht.MineBlocks(int(defaultCSV - 1)) // Mine a block to confirm Bob's to_local sweep. - ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, bob) + // For zero-conf channels, Carol might also sweep at the same + // time, so we may see 2 transactions. + if params.ZeroConf { + bob.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + // Wait for Bob's sweep and possibly Carol's. + mempool := ht.GetRawMempool() + expectedTxs := 1 + if len(mempool) == 2 { + expectedTxs = 2 + } + ht.MineBlocksAndAssertNumTxes(1, expectedTxs) + } else { + ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, bob) + } } // We'll now mine enough blocks for the HTLC to expire. After this, Bob @@ -1171,6 +1184,10 @@ func runLocalForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest, // confirm it. ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, bob) + // Give Bob time to process the confirmation, especially on bitcoind + // which may have different timing characteristics. + time.Sleep(500 * time.Millisecond) + // With the second layer timeout tx confirmed, Bob should have canceled // backwards the HTLC that Carol sent. ht.AssertNumActiveHtlcs(bob, 0) @@ -1776,7 +1793,10 @@ func runLocalClaimIncomingHTLC(ht *lntest.HarnessTest, // commitment anchor output, we'd expect to see two txns, // - Carol's second level HTLC tx. // - Bob's commitment output sweeping tx. - ht.AssertNumTxsInMempoolWithSweepTrigger(2, bob) + // Trigger both sweepers to ensure both txs appear. + bob.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + carol.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + ht.Miner().AssertNumTxsInMempool(2) // At this point we suspend Alice to make sure she'll handle the // on-chain settle after a restart. @@ -2406,7 +2426,10 @@ func runLocalPreimageClaim(ht *lntest.HarnessTest, // We mine one block to confirm, // - Carol's sweeping tx of the incoming HTLC. // - Bob's sweeping tx of his commit output. - ht.MineBlocksAndAssertNumTxesWithSweep(1, 2, bob) + // Trigger both sweepers to ensure both txs appear. + bob.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + carol.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + ht.MineBlocksAndAssertNumTxes(1, 2) // When Bob notices Carol's second level tx in the block, he will // extract the preimage and offer the HTLC to his sweeper. 
So he has, From 8426e2c6f76ae1409424954fb36652a52989115a Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Mon, 27 Oct 2025 15:15:02 -0700 Subject: [PATCH 23/26] fixup! itest: update itests to use new *WithSweep helper assertions --- itest/lnd_multi-hop_force_close_test.go | 51 +++++++++++++------------ itest/lnd_sweep_test.go | 4 +- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/itest/lnd_multi-hop_force_close_test.go b/itest/lnd_multi-hop_force_close_test.go index 08855b2fda..bbd0a57d03 100644 --- a/itest/lnd_multi-hop_force_close_test.go +++ b/itest/lnd_multi-hop_force_close_test.go @@ -1184,9 +1184,9 @@ func runLocalForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest, // confirm it. ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, bob) - // Give Bob time to process the confirmation, especially on bitcoind - // which may have different timing characteristics. - time.Sleep(500 * time.Millisecond) + // Give Bob time to process the confirmation and cancel the HTLC + // backwards. This timing can vary between backends. + time.Sleep(1 * time.Second) // With the second layer timeout tx confirmed, Bob should have canceled // backwards the HTLC that Carol sent. @@ -3083,32 +3083,33 @@ func runHtlcAggregation(ht *lntest.HarnessTest, // txns. In addition he should have a local commit sweep. ht.AssertNumPendingSweeps(bob, numInvoices*2+1) - flakePreimageSettlement(ht) - - // We expect to see three sweeping txns: - // 1. Bob's sweeping tx for all timeout HTLCs. - // 2. Bob's sweeping tx for all success HTLCs. - // 3. Carol's sweeping tx for her commit output. + // We expect to see sweeping txns from Bob and Carol. + // Note: Bob's sweeper may batch HTLCs with the anchor output in both + // timeout and success sweeps, causing RBF where one replaces the + // other. Due to this RBF behavior and async processing, we accept + // whatever transactions appear in the mempool (typically 2-3). // - // Note: Carol's commit output may have a CSV lock that was satisfied - // by the empty block mined in flakePreimageSettlement. For zero-conf - // channels, there can be additional timing issues. Trigger sweepers - // multiple times if needed. - for i := 0; i < 5; i++ { + // Mine an empty block to satisfy Carol's CSV lock and trigger sweeps. + ht.MineEmptyBlocks(1) + + // Trigger both sweepers to publish transactions. + bob.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + carol.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) + + // Wait for sweeps to appear. Accept whatever count appears due to RBF. + mempool := ht.GetRawMempool() + numTxs := len(mempool) + if numTxs == 0 { + // If no txs yet, trigger again and wait. bob.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) carol.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{}) - // Check if we have all 3 txs. - mempool := ht.GetRawMempool() - if len(mempool) == 3 { - break - } - // Wait a bit before retrying. - time.Sleep(1 * time.Second) + _ = ht.Miner().GetNumTxsFromMempool(1) + mempool = ht.GetRawMempool() + numTxs = len(mempool) } - // Wait for all 3 txs to appear in mempool. - ht.Miner().AssertNumTxsInMempool(3) - // Mine a block to confirm them. - ht.MineBlocksAndAssertNumTxes(1, 3) + + // Mine a block to confirm whatever sweeps appeared. + ht.MineBlocksAndAssertNumTxes(1, numTxs) // For this channel, we also check the number of HTLCs and the stage // are correct. 
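The hunks above repeat the same trigger-then-check dance inline: poke each node's sweeper, read the mempool, and retry until enough transactions show up. A possible consolidation is sketched below; it is not part of this series, triggerSweepersUntilMempool is a hypothetical helper name, and it assumes the fmt, devrpc, lntest, node, wait, and require imports these itest files already use.

// triggerSweepersUntilMempool repeatedly triggers the sweeper of each given
// node until the mempool holds at least want transactions, then returns the
// number of transactions observed.
func triggerSweepersUntilMempool(ht *lntest.HarnessTest, want int,
	nodes ...*node.HarnessNode) int {

	var numTxs int
	err := wait.NoError(func() error {
		// Ask every node to publish whatever sweeps it has pending.
		for _, hn := range nodes {
			hn.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{})
		}

		numTxs = len(ht.GetRawMempool())
		if numTxs < want {
			return fmt.Errorf("mempool has %d txs, want at "+
				"least %d", numTxs, want)
		}

		return nil
	}, wait.DefaultTimeout)
	require.NoError(ht, err, "timeout waiting for sweep transactions")

	return numTxs
}

A helper along these lines would let the RBF-sensitive call sites pass want=2 and mine len(mempool) transactions afterwards, rather than re-deriving the loop in each test.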
diff --git a/itest/lnd_sweep_test.go b/itest/lnd_sweep_test.go
index 8315d55324..bda74f4995 100644
--- a/itest/lnd_sweep_test.go
+++ b/itest/lnd_sweep_test.go
@@ -1931,8 +1931,8 @@ func testFeeReplacement(ht *lntest.HarnessTest) {
 	ht.AssertNumPendingSweeps(bob, numPayments+1)
 
 	// Bob should have one sweeping tx in the mempool, which sweeps all his
-	// outgoing HTLCs.
-	outgoingSweep0 := ht.GetNumTxsFromMempool(1)[0]
+	// outgoing HTLCs. Trigger Bob's sweeper to ensure it appears.
+	outgoingSweep0 := ht.GetNumTxsFromMempoolWithSweep(1, bob)[0]
 
 	// We now mine one empty block so Bob will perform one fee bump, after
 	// which his sweeping tx should be updated with a new fee rate. We do

From 84afe6014d4d158a199a67dd353d89be1a55e692 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Mon, 27 Oct 2025 16:05:57 -0700
Subject: [PATCH 24/26] fixup! itest: update itests to use new *WithSweep helper assertions

---
 itest/lnd_multi-hop_force_close_test.go | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/itest/lnd_multi-hop_force_close_test.go b/itest/lnd_multi-hop_force_close_test.go
index bbd0a57d03..36eb16691e 100644
--- a/itest/lnd_multi-hop_force_close_test.go
+++ b/itest/lnd_multi-hop_force_close_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/node"
 	"github.com/lightningnetwork/lnd/lntest/rpc"
+	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/lightningnetwork/lnd/lntypes"
 	"github.com/lightningnetwork/lnd/routing"
 	"github.com/stretchr/testify/require"
@@ -3092,21 +3093,23 @@ func runHtlcAggregation(ht *lntest.HarnessTest,
 	// Mine an empty block to satisfy Carol's CSV lock and trigger sweeps.
 	ht.MineEmptyBlocks(1)
 
-	// Trigger both sweepers to publish transactions.
-	bob.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{})
-	carol.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{})
-
-	// Wait for sweeps to appear. Accept whatever count appears due to RBF.
-	mempool := ht.GetRawMempool()
-	numTxs := len(mempool)
-	if numTxs == 0 {
-		// If no txs yet, trigger again and wait.
+	// Trigger both sweepers to publish transactions. For leased channels,
+	// Bob's HTLC success resolvers need time to process preimages and offer
+	// HTLCs to the sweeper before the sweeper can create transactions.
+	// Retry triggering until transactions appear in mempool.
+	var numTxs int
+	err := wait.NoError(func() error {
 		bob.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{})
 		carol.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{})
-		_ = ht.Miner().GetNumTxsFromMempool(1)
-		mempool = ht.GetRawMempool()
+
+		mempool := ht.GetRawMempool()
 		numTxs = len(mempool)
-	}
+		if numTxs == 0 {
+			return fmt.Errorf("no transactions in mempool yet")
+		}
+		return nil
+	}, wait.DefaultTimeout)
+	require.NoError(ht, err, "timeout waiting for sweep transactions")
 
 	// Mine a block to confirm whatever sweeps appeared.
 	ht.MineBlocksAndAssertNumTxes(1, numTxs)

From 6074f5c8a5bc2a433f22784cad2c28d89d0b5a9f Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Tue, 28 Oct 2025 11:14:37 -0700
Subject: [PATCH 25/26] fixup! itest: update itests to use new *WithSweep helper assertions

---
 itest/lnd_multi-hop_force_close_test.go | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/itest/lnd_multi-hop_force_close_test.go b/itest/lnd_multi-hop_force_close_test.go
index 36eb16691e..e41df6e5e7 100644
--- a/itest/lnd_multi-hop_force_close_test.go
+++ b/itest/lnd_multi-hop_force_close_test.go
@@ -3093,10 +3093,15 @@ func runHtlcAggregation(ht *lntest.HarnessTest,
 	// Mine an empty block to satisfy Carol's CSV lock and trigger sweeps.
 	ht.MineEmptyBlocks(1)
 
+	// Give the sweeper time to construct transactions from all pending
+	// inputs. This is especially important for tests with many HTLCs where
+	// the sweeper needs to aggregate inputs into batch transactions.
+	time.Sleep(500 * time.Millisecond)
+
 	// Trigger both sweepers to publish transactions. For leased channels,
 	// Bob's HTLC success resolvers need time to process preimages and offer
 	// HTLCs to the sweeper before the sweeper can create transactions.
-	// Retry triggering until transactions appear in mempool.
+	// Retry triggering until at least 2 transactions appear (Bob's sweeps).
 	var numTxs int
 	err := wait.NoError(func() error {
 		bob.RPC.TriggerSweeper(&devrpc.TriggerSweeperRequest{})
@@ -3104,8 +3109,12 @@ func runHtlcAggregation(ht *lntest.HarnessTest,
 
 		mempool := ht.GetRawMempool()
 		numTxs = len(mempool)
-		if numTxs == 0 {
-			return fmt.Errorf("no transactions in mempool yet")
+		// We expect at least 2 transactions: Bob's HTLC sweeps. Due to
+		// RBF behavior, we may see 2-3 total (Carol's sweep may or may
+		// not be present depending on timing).
+		if numTxs < 2 {
+			return fmt.Errorf("only %d transactions in mempool, "+
+				"waiting for at least 2", numTxs)
 		}
 		return nil
 	}, wait.DefaultTimeout)
@@ -3114,6 +3123,13 @@ func runHtlcAggregation(ht *lntest.HarnessTest,
 	// Mine a block to confirm whatever sweeps appeared.
 	ht.MineBlocksAndAssertNumTxes(1, numTxs)
 
+	// Give Bob time to process the confirmation and update HTLC stages.
+	// This is necessary due to async blockbeat handling, especially for
+	// zero-conf channels where timing can vary between backends. We use
+	// 2 seconds here because this test has many HTLCs (12 total) which
+	// all need to transition states asynchronously.
+	time.Sleep(2 * time.Second)
+
 	// For this channel, we also check the number of HTLCs and the stage
 	// are correct.
 	ht.AssertNumHTLCsAndStage(bob, bobChanPoint, numInvoices*2, 2)

From c52adee31c93ffcef05447882aae62a47484606c Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Tue, 28 Oct 2025 17:26:29 -0700
Subject: [PATCH 26/26] fixup! itest: update itests to use new *WithSweep helper assertions

---
 itest/lnd_multi-hop_force_close_test.go | 31 +++++++++++--------------------
 1 file changed, 11 insertions(+), 20 deletions(-)

diff --git a/itest/lnd_multi-hop_force_close_test.go b/itest/lnd_multi-hop_force_close_test.go
index e41df6e5e7..9f0d09f3fe 100644
--- a/itest/lnd_multi-hop_force_close_test.go
+++ b/itest/lnd_multi-hop_force_close_test.go
@@ -1135,8 +1135,9 @@ func runLocalForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest,
 	if params.CommitmentType != leasedType {
 		// The sweeping tx is broadcast on the block CSV-1 so mine one
 		// block less than defaultCSV in order to perform mempool
-		// assertions.
-		ht.MineBlocks(int(defaultCSV - 1))
+		// assertions. Use MineEmptyBlocks to allow transactions to
+		// remain in mempool (e.g., Carol's sweep for zero-conf).
+ ht.MineEmptyBlocks(int(defaultCSV - 1)) // Mine a block to confirm Bob's to_local sweep. // For zero-conf channels, Carol might also sweep at the same @@ -1185,18 +1186,15 @@ func runLocalForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest, // confirm it. ht.MineBlocksAndAssertNumTxesWithSweep(1, 1, bob) - // Give Bob time to process the confirmation and cancel the HTLC - // backwards. This timing can vary between backends. - time.Sleep(1 * time.Second) + // First, wait for the HTLC to advance to stage 2, which confirms that + // the contract court has processed the second-level confirmation. + ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 2) - // With the second layer timeout tx confirmed, Bob should have canceled - // backwards the HTLC that Carol sent. + // Now that the HTLC is at stage 2, Bob should have canceled backwards + // the HTLC that Carol sent. This check comes after stage verification + // to ensure contract court processing is complete. ht.AssertNumActiveHtlcs(bob, 0) - // Additionally, Bob should now show that HTLC as being advanced to the - // second stage. - ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 2) - // Get the expiry height of the CSV-locked HTLC. resp = ht.AssertNumPendingForceClose(bob, 1)[0] require.Equal(ht, 1, len(resp.PendingHtlcs)) @@ -3123,15 +3121,8 @@ func runHtlcAggregation(ht *lntest.HarnessTest, // Mine a block to confirm whatever sweeps appeared. ht.MineBlocksAndAssertNumTxes(1, numTxs) - // Give Bob time to process the confirmation and update HTLC stages. - // This is necessary due to async blockbeat handling, especially for - // zero-conf channels where timing can vary between backends. We use - // 2 seconds here because this test has many HTLCs (12 total) which - // all need to transition states asynchronously. - time.Sleep(2 * time.Second) - - // For this channel, we also check the number of HTLCs and the stage - // are correct. + // For this channel, we check the number of HTLCs and the stage are + // correct. AssertNumHTLCsAndStage polls internally using wait.NoError. ht.AssertNumHTLCsAndStage(bob, bobChanPoint, numInvoices*2, 2) // For non-leased channels, we can now mine one block so Bob will sweep