From 1bea55f323dbc4e86d54d43c49d13d597ecfc504 Mon Sep 17 00:00:00 2001 From: Chris Waterson Date: Sat, 7 Oct 2023 08:54:07 -0700 Subject: [PATCH 1/2] Support normal channel operation with async signing This is a do-over of #2653, wherein we support asynchronous signing for 'normal' channel operation. This involves allowing the following `ChannelSigner` methods to return an `Err` result, indicating that the requested value is not available: - get_per_commitment_point - release_commitment_secret - sign_counterparty_commitment When the value does become available, channel operation can be resumed by invoking `signer_unblocked`. Note that this adds the current and next per-commitment point to the state that is persisted by the channel monitor. --- fuzz/src/chanmon_consistency.rs | 190 +++- lightning/src/chain/channelmonitor.rs | 4 +- lightning/src/chain/onchaintx.rs | 3 + lightning/src/ln/async_signer_tests.rs | 1019 ++++++++++++++++++--- lightning/src/ln/channel.rs | 595 ++++++++++-- lightning/src/ln/channelmanager.rs | 55 +- lightning/src/ln/functional_test_utils.rs | 46 +- lightning/src/ln/functional_tests.rs | 14 +- lightning/src/sign/mod.rs | 15 +- lightning/src/util/test_channel_signer.rs | 103 ++- 10 files changed, 1777 insertions(+), 267 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 89ff07fe698..dc062e43e2b 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -49,7 +49,7 @@ use lightning::ln::functional_test_utils::*; use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice}; use lightning::offers::invoice_request::UnsignedInvoiceRequest; use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath}; -use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState}; +use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState, ops}; use lightning::util::errors::APIError; use lightning::util::logger::Logger; use lightning::util::config::UserConfig; @@ -72,6 +72,8 @@ use std::sync::atomic; use std::io::Cursor; use bitcoin::bech32::u5; +#[allow(unused)] +const ASYNC_OPS: u32 = ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT; const MAX_FEE: u32 = 10_000; struct FuzzEstimator { ret_val: atomic::AtomicU32, @@ -297,7 +299,6 @@ impl SignerProvider for KeyProvider { inner, state, disable_revocation_policy_check: false, - available: Arc::new(Mutex::new(true)), }) } @@ -829,7 +830,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == node_id { for update_add in update_add_htlcs.iter() { - out.locked_write(format!("Delivering update_add_htlc to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering update_add_htlc to node {} from node {}.\n", idx, $node).as_bytes()); if !$corrupt_forward { dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add); } else { @@ -844,19 +845,19 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } } for update_fulfill in update_fulfill_htlcs.iter() { - out.locked_write(format!("Delivering update_fulfill_htlc to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering update_fulfill_htlc to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), update_fulfill); } for update_fail in update_fail_htlcs.iter() { - out.locked_write(format!("Delivering 
update_fail_htlc to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering update_fail_htlc to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), update_fail); } for update_fail_malformed in update_fail_malformed_htlcs.iter() { - out.locked_write(format!("Delivering update_fail_malformed_htlc to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering update_fail_malformed_htlc to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed); } if let Some(msg) = update_fee { - out.locked_write(format!("Delivering update_fee to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering update_fee to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg); } let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() || @@ -873,7 +874,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }); break; } - out.locked_write(format!("Delivering commitment_signed to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering commitment_signed to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed); break; } @@ -882,7 +883,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering revoke_and_ack to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering revoke_and_ack to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg); } } @@ -890,7 +891,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { - out.locked_write(format!("Delivering channel_reestablish to node {}.\n", idx).as_bytes()); + out.locked_write(format!("Delivering channel_reestablish to node {} from node {}.\n", idx, $node).as_bytes()); dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg); } } @@ -913,7 +914,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { _ => if out.may_fail.load(atomic::Ordering::Acquire) { return; } else { - panic!("Unhandled message event {:?}", event) + panic!("Unhandled message event on node {}, {:?}", $node, event) }, } if $limit_events != ProcessMessages::AllMessages { @@ -1289,6 +1290,118 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0x89 => { fee_est_c.ret_val.store(253, atomic::Ordering::Release); nodes[2].maybe_update_chan_fees(); }, + #[cfg(async_signing)] + 0xa0 => { + let states = keys_manager_a.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_unavailable(ASYNC_OPS); + } + #[cfg(async_signing)] + 0xa1 => { + let states = keys_manager_a.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::GET_PER_COMMITMENT_POINT); + nodes[0].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xa2 => { + let states = 
keys_manager_a.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::RELEASE_COMMITMENT_SECRET); + nodes[0].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xa3 => { + let states = keys_manager_a.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT); + nodes[0].signer_unblocked(None); + } + + #[cfg(async_signing)] + 0xa4 => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().next().unwrap().lock().unwrap().set_signer_unavailable(ASYNC_OPS); + } + #[cfg(async_signing)] + 0xa5 => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::GET_PER_COMMITMENT_POINT); + nodes[1].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xa6 => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::RELEASE_COMMITMENT_SECRET); + nodes[1].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xa7 => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT); + nodes[1].signer_unblocked(None); + } + + #[cfg(async_signing)] + 0xa8 => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().last().unwrap().lock().unwrap().set_signer_unavailable(ASYNC_OPS); + } + #[cfg(async_signing)] + 0xa9 => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().last().unwrap().lock().unwrap().set_signer_available(ops::GET_PER_COMMITMENT_POINT); + nodes[1].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xaa => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().last().unwrap().lock().unwrap().set_signer_available(ops::RELEASE_COMMITMENT_SECRET); + nodes[1].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xab => { + let states = keys_manager_b.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 2); + states.values().last().unwrap().lock().unwrap().set_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT); + nodes[1].signer_unblocked(None); + } + + #[cfg(async_signing)] + 0xac => { + let states = keys_manager_c.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_unavailable(ASYNC_OPS); + } + #[cfg(async_signing)] + 0xad => { + let states = keys_manager_c.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::GET_PER_COMMITMENT_POINT); + nodes[2].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xae => { + let states = keys_manager_c.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + states.values().next().unwrap().lock().unwrap().set_signer_available(ops::RELEASE_COMMITMENT_SECRET); + nodes[2].signer_unblocked(None); + } + #[cfg(async_signing)] + 0xaf => { + let states = keys_manager_c.enforcement_states.lock().unwrap(); + assert_eq!(states.len(), 1); + 
states.values().next().unwrap().lock().unwrap().set_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT); + nodes[2].signer_unblocked(None); + } + 0xf0 => { let pending_updates = monitor_a.chain_monitor.list_pending_monitor_updates().remove(&chan_1_funding).unwrap(); if let Some(id) = pending_updates.get(0) { @@ -1382,10 +1495,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { // after we resolve all pending events. // First make sure there are no pending monitor updates, resetting the error state // and calling force_channel_monitor_updated for each monitor. - *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed; - *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed; - *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed; - + out.locked_write(b"Restoring monitors...\n"); if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) { monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id); nodes[0].process_monitor_events(); @@ -1404,7 +1514,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } // Next, make sure peers are all connected to each other + out.locked_write(b"Reconnecting peers...\n"); + if chan_a_disconnected { + out.locked_write(b"Reconnecting node 0 and node 1...\n"); nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: nodes[1].init_features(), networks: None, remote_network_address: None }, true).unwrap(); @@ -1414,6 +1527,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { chan_a_disconnected = false; } if chan_b_disconnected { + out.locked_write(b"Reconnecting node 1 and node 2...\n"); nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: nodes[2].init_features(), networks: None, remote_network_address: None }, true).unwrap(); @@ -1423,8 +1537,33 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { chan_b_disconnected = false; } + out.locked_write(b"Restoring signers...\n"); + + *monitor_a.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed; + *monitor_b.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed; + *monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed; + + #[cfg(async_signing)] + { + for state in keys_manager_a.enforcement_states.lock().unwrap().values() { + state.lock().unwrap().set_signer_available(!0); + } + for state in keys_manager_b.enforcement_states.lock().unwrap().values() { + state.lock().unwrap().set_signer_available(!0); + } + for state in keys_manager_c.enforcement_states.lock().unwrap().values() { + state.lock().unwrap().set_signer_available(!0); + } + nodes[0].signer_unblocked(None); + nodes[1].signer_unblocked(None); + nodes[2].signer_unblocked(None); + } + + out.locked_write(b"Running event queues to quiescence...\n"); + for i in 0..std::usize::MAX { if i == 100 { panic!("It may take may iterations to settle the state, but it should not take forever"); } + // Then, make sure any current forwards make their way to their destination if process_msg_events!(0, false, ProcessMessages::AllMessages) { continue; } if process_msg_events!(1, false, ProcessMessages::AllMessages) { continue; } @@ -1437,13 +1576,34 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { break; } + out.locked_write(b"All channels restored to normal operation.\n"); + // Finally, make sure that at least one end of each channel 
can make a substantial payment assert!( send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut payment_id, &mut payment_idx) || send_payment(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut payment_id, &mut payment_idx)); + out.locked_write(b"Successfully sent a payment between node 0 and node 1.\n"); + assert!( send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id, &mut payment_idx) || send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id, &mut payment_idx)); + out.locked_write(b"Successfully sent a payment between node 1 and node 2.\n"); + + out.locked_write(b"Flushing pending messages.\n"); + for i in 0..std::usize::MAX { + if i == 100 { panic!("It may take many iterations to settle the state, but it should not take forever"); } + + // Then, make sure any current forwards make their way to their destination + if process_msg_events!(0, false, ProcessMessages::AllMessages) { continue; } + if process_msg_events!(1, false, ProcessMessages::AllMessages) { continue; } + if process_msg_events!(2, false, ProcessMessages::AllMessages) { continue; } + // ...making sure any pending PendingHTLCsForwardable events are handled and + // payments claimed. + if process_events!(0, false) { continue; } + if process_events!(1, false) { continue; } + if process_events!(2, false) { continue; } + break; + } last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire); last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire); diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index c81a48b78ac..a7829d1258d 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -2944,9 +2944,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> { }, commitment_txid: htlc.commitment_txid, per_commitment_number: htlc.per_commitment_number, - per_commitment_point: self.onchain_tx_handler.signer.get_per_commitment_point( - htlc.per_commitment_number, &self.onchain_tx_handler.secp_ctx, - ), + per_commitment_point: htlc.per_commitment_point, feerate_per_kw: 0, htlc: htlc.htlc, preimage: htlc.preimage, diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 59c98f05ebc..2933b5f6e27 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -178,6 +178,7 @@ pub(crate) struct ExternalHTLCClaim { pub(crate) htlc: HTLCOutputInCommitment, pub(crate) preimage: Option<PaymentPreimage>, pub(crate) counterparty_sig: Signature, + pub(crate) per_commitment_point: bitcoin::secp256k1::PublicKey, } // Represents the different types of claims for which events are yielded externally to satisfy said @@ -1177,9 +1178,11 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner> }) .map(|(htlc_idx, htlc)| { let counterparty_htlc_sig = holder_commitment.counterparty_htlc_sigs[htlc_idx]; + ExternalHTLCClaim { commitment_txid: trusted_tx.txid(), per_commitment_number: trusted_tx.commitment_number(), + per_commitment_point: trusted_tx.per_commitment_point(), htlc: htlc.clone(), preimage: *preimage, counterparty_sig: counterparty_htlc_sig, diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 0f51bea0d36..91d9c0e4165 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -10,13 +10,47 @@ //! Tests for asynchronous signing. These tests verify that the channel state machine behaves //! properly with a signer implementation that asynchronously derives signatures.
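+//!
+//! As a rough, hypothetical sketch of the shape these tests assume (the real plumbing lives in
+//! [`TestChannelSigner`] in `util::test_channel_signer`), an asynchronous signer signals that a
+//! value is not yet available by returning `Err(())`, and the channel resumes once
+//! `ChannelManager::signer_unblocked` is invoked:
+//!
+//! ```ignore
+//! // Hypothetical async-capable signer method: return a previously-delivered
+//! // per-commitment point, or Err(()) to pause channel progress until the point
+//! // becomes available and `signer_unblocked` is called.
+//! fn get_per_commitment_point(
+//!     &self, idx: u64, _secp_ctx: &Secp256k1<secp256k1::All>,
+//! ) -> Result<PublicKey, ()> {
+//!     self.cached_points.lock().unwrap().get(&idx).copied().ok_or(())
+//! }
+//! ```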
+use bitcoin::secp256k1::PublicKey; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider}; +use crate::ln::ChannelId; use crate::ln::functional_test_utils::*; +use crate::ln::msgs; use crate::ln::msgs::ChannelMessageHandler; -use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; +use crate::util::ser::Writeable; +use crate::util::test_channel_signer::ops; +use crate::util::test_utils; -#[test] -fn test_async_commitment_signature_for_funding_created() { +/// Helper to run operations with a simulated asynchronous signer. +/// +/// Disables the signer ops in `masks` for the specified channel, runs `do_fn`, and then re-enables +/// the ops one at a time in the order given, calling `signer_unblocked` after each. +#[cfg(test)] +pub fn with_async_signer<'a, DoFn, T>(node: &Node, peer_id: &PublicKey, channel_id: &ChannelId, masks: &Vec<u32>, do_fn: &'a DoFn) -> T + where DoFn: Fn() -> T +{ + let mask = masks.iter().fold(0, |acc, m| (acc | m)); + eprintln!("disabling {}", ops::string_from(mask)); + node.set_channel_signer_ops_available(peer_id, channel_id, mask, false); + let res = do_fn(); + + // Recompute the channel ID just in case the original ID was temporary. + let new_channel_id = { + let channels = node.node.list_channels(); + assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len()); + channels[0].channel_id + }; + + for mask in masks { + eprintln!("enabling {} and calling signer_unblocked", ops::string_from(*mask)); + node.set_channel_signer_ops_available(peer_id, &new_channel_id, *mask, true); + node.node.signer_unblocked(Some((*peer_id, new_channel_id))); + } + res +} + +#[cfg(test)] +fn do_test_funding_created(masks: &Vec<u32>) { // Simulate acquiring the signature for `funding_created` asynchronously. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -37,22 +71,11 @@ fn test_async_commitment_signature_for_funding_created() { // But! Let's make node[0]'s signer be unavailable: we should *not* broadcast a funding_created // message... let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); - nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &temporary_channel_id, false); - nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); - check_added_monitors(&nodes[0], 0); - - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - - // Now re-enable the signer and simulate a retry. The temporary_channel_id won't work anymore so - // we have to dig out the real channel ID.
- let chan_id = { - let channels = nodes[0].node.list_channels(); - assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len()); - channels[0].channel_id - }; - - nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true); - nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id))); + with_async_signer(&nodes[0], &nodes[1].node.get_our_node_id(), &temporary_channel_id, masks, &|| { + nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); + check_added_monitors(&nodes[0], 0); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + }); let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); @@ -67,7 +90,38 @@ fn test_async_commitment_signature_for_funding_created() { } #[test] -fn test_async_commitment_signature_for_funding_signed() { +fn test_funding_created_grs() { + do_test_funding_created(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]); +} + +#[test] +fn test_funding_created_gsr() { + do_test_funding_created(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]); +} + +#[test] +fn test_funding_created_rsg() { + do_test_funding_created(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]); +} + +#[test] +fn test_funding_created_rgs() { + do_test_funding_created(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]); +} + +#[test] +fn test_funding_created_srg() { + do_test_funding_created(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]); +} + +#[test] +fn test_funding_created_sgr() { + do_test_funding_created(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]); +} + + +#[cfg(test)] +fn do_test_funding_signed(masks: &Vec) { // Simulate acquiring the signature for `funding_signed` asynchronously. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); @@ -92,21 +146,11 @@ fn test_async_commitment_signature_for_funding_signed() { // Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should // *not* broadcast a `funding_signed`... - nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, false); - nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); - check_added_monitors(&nodes[1], 1); - - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - // Now re-enable the signer and simulate a retry. The temporary_channel_id won't work anymore so - // we have to dig out the real channel ID. 
- let chan_id = { - let channels = nodes[0].node.list_channels(); - assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len()); - channels[0].channel_id - }; - nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &chan_id, true); - nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id))); + with_async_signer(&nodes[1], &nodes[0].node.get_our_node_id(), &temporary_channel_id, masks, &|| { + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); + check_added_monitors(&nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }); expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -118,7 +162,38 @@ fn test_async_commitment_signature_for_funding_signed() { } #[test] -fn test_async_commitment_signature_for_commitment_signed() { +fn test_funding_signed_grs() { + do_test_funding_signed(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]); +} + +#[test] +fn test_funding_signed_gsr() { + do_test_funding_signed(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]); +} + +#[test] +fn test_funding_signed_rsg() { + do_test_funding_signed(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]); +} + +#[test] +fn test_funding_signed_rgs() { + do_test_funding_signed(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]); +} + +#[test] +fn test_funding_signed_srg() { + do_test_funding_signed(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]); +} + +#[test] +fn test_funding_signed_sgr() { + do_test_funding_signed(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]); +} + + +#[cfg(test)] +fn do_test_commitment_signed(masks: &Vec) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -144,29 +219,50 @@ fn test_async_commitment_signature_for_commitment_signed() { dst.node.handle_update_add_htlc(&src.node.get_our_node_id(), &payment_event.msgs[0]); - // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a - // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`. - dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, false); - dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg); - check_added_monitors(dst, 1); + // Mark dst's signer as unavailable and handle src's commitment_signed. If dst's signer is + // offline, it oughtn't yet respond with any updates. + with_async_signer(dst, &src.node.get_our_node_id(), &chan_id, masks, &|| { + dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg); + check_added_monitors(dst, 1); + assert!(dst.node.get_and_clear_pending_msg_events().is_empty()); + }); - get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id()); + get_revoke_commit_msgs(&dst, &src.node.get_our_node_id()); +} - // Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`. 
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, true); - dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); +#[test] +fn test_commitment_signed_grs() { + do_test_commitment_signed(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]); +} - let events = dst.node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1, "expected one message, got {}", events.len()); - if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = events[0] { - assert_eq!(node_id, &src.node.get_our_node_id()); - } else { - panic!("expected UpdateHTLCs message, not {:?}", events[0]); - }; +#[test] +fn test_commitment_signed_gsr() { + do_test_commitment_signed(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]); +} + +#[test] +fn test_commitment_signed_rsg() { + do_test_commitment_signed(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]); +} + +#[test] +fn test_commitment_signed_rgs() { + do_test_commitment_signed(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]); +} + +#[test] +fn test_commitment_signed_srg() { + do_test_commitment_signed(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]); } #[test] -fn test_async_commitment_signature_for_funding_signed_0conf() { +fn test_commitment_signed_sgr() { + do_test_commitment_signed(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]); +} + + +#[cfg(test)] +fn do_test_funding_signed_0conf(masks: &Vec) { // Simulate acquiring the signature for `funding_signed` asynchronously for a zero-conf channel. let mut manually_accept_config = test_default_channel_config(); manually_accept_config.manually_accept_inbound_channels = true; @@ -184,7 +280,6 @@ fn test_async_commitment_signature_for_funding_signed_0conf() { { let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1, "Expected one event, got {}", events.len()); match &events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf( @@ -193,6 +288,7 @@ fn test_async_commitment_signature_for_funding_signed_0conf() { }, ev => panic!("Expected OpenChannelRequest, not {:?}", ev) } + assert_eq!(events.len(), 1, "Expected one event, got {}", events.len()); } // nodes[0] <-- accept_channel --- nodes[1] @@ -209,27 +305,16 @@ fn test_async_commitment_signature_for_funding_signed_0conf() { // Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should // *not* broadcast a `funding_signed`... - nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, false); - nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); - check_added_monitors(&nodes[1], 1); - - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - // Now re-enable the signer and simulate a retry. The temporary_channel_id won't work anymore so - // we have to dig out the real channel ID. 
- let chan_id = { - let channels = nodes[0].node.list_channels(); - assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len()); - channels[0].channel_id - }; + with_async_signer(&nodes[1], &nodes[0].node.get_our_node_id(), &temporary_channel_id, masks, &|| { + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); + check_added_monitors(&nodes[1], 1); + let msgs = nodes[1].node.get_and_clear_pending_msg_events(); + assert!(msgs.is_empty(), "Expected no messages, got {:?}", msgs); + }); // At this point, we basically expect the channel to open like a normal zero-conf channel. - nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &chan_id, true); - nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id))); - let (funding_signed, channel_ready_1) = { let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); let funding_signed = match &events[0] { MessageSendEvent::SendFundingSigned { msg, .. } => msg.clone(), ev => panic!("Expected SendFundingSigned, not {:?}", ev) @@ -265,7 +350,229 @@ fn test_async_commitment_signature_for_funding_signed_0conf() { } #[test] -fn test_async_commitment_signature_for_peer_disconnect() { +fn test_funding_signed_0conf_grs() { + do_test_funding_signed_0conf(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]); +} + +#[test] +fn test_funding_signed_0conf_gsr() { + do_test_funding_signed_0conf(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]); +} + +#[test] +fn test_funding_signed_0conf_rsg() { + do_test_funding_signed_0conf(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]); +} + +#[test] +fn test_funding_signed_0conf_rgs() { + do_test_funding_signed_0conf(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]); +} + +#[test] +fn test_funding_signed_0conf_srg() { + do_test_funding_signed_0conf(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]); +} + +#[test] +fn test_funding_signed_0conf_sgr() { + do_test_funding_signed_0conf(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]); +} + + +#[cfg(test)] +fn do_test_payment(masks: &Vec) { + // This runs through a one-hop payment from start to finish, simulating an asynchronous signer at + // each step. 
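+ //
+ // Each `with_async_signer` call below disables every signer op in `masks` before running the
+ // closure, then re-enables the ops one at a time in the listed order, calling `signer_unblocked`
+ // after each. The six `test_payment_*` permutations below exercise every order in which the
+ // three ops can come back online.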
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let (_up1, _up2, channel_id, _tx) = create_announced_chan_between_nodes(&nodes, 0, 1); + + let alice = &nodes[0]; + let bob = &nodes[1]; + + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(alice, bob, 8_000_000); + + with_async_signer(&alice, &bob.node.get_our_node_id(), &channel_id, masks, &|| { + alice.node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors!(alice, 1); + let events = alice.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 0, "expected 0 events, got {}", events.len()); + }); + + let payment_event = { + let mut events = alice.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + assert_eq!(payment_event.node_id, bob.node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + + // alice --[update_add_htlc]--> bob + // alice --[commitment_signed]--> bob + with_async_signer(&bob, &alice.node.get_our_node_id(), &channel_id, masks, &|| { + bob.node.handle_update_add_htlc(&alice.node.get_our_node_id(), &payment_event.msgs[0]); + bob.node.handle_commitment_signed(&alice.node.get_our_node_id(), &payment_event.commitment_msg); + check_added_monitors(bob, 1); + }); + + // alice <--[revoke_and_ack]-- bob + // alice <--[commitment_signed]-- bob + { + let (raa, cu) = { + let events = bob.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2, "expected 2 messages, got {}", events.len()); + match (&events[0], &events[1]) { + (MessageSendEvent::SendRevokeAndACK { msg: raa, .. }, MessageSendEvent::UpdateHTLCs { updates: cu, .. }) => { + assert_eq!(cu.update_add_htlcs.len(), 0, "expected 0 update_add_htlcs, got {}", cu.update_add_htlcs.len()); + (raa.clone(), cu.clone()) + } + (a, b) => panic!("expected SendRevokeAndAck and UpdateHTLCs, not {:?} and {:?}", a, b) + } + }; + + // TODO: run this with_async_signer once validate_counterparty_revocation supports it. + alice.node.handle_revoke_and_ack(&bob.node.get_our_node_id(), &raa); + check_added_monitors(alice, 1); + + with_async_signer(&alice, &bob.node.get_our_node_id(), &channel_id, masks, &|| { + alice.node.handle_commitment_signed(&bob.node.get_our_node_id(), &cu.commitment_signed); + check_added_monitors(alice, 1); + }); + } + + // alice --[revoke_and_ack]--> bob + // TODO: run this with_async_signer once validate_counterparty_revocation supports it. + let raa = get_event_msg!(alice, MessageSendEvent::SendRevokeAndACK, bob.node.get_our_node_id()); + bob.node.handle_revoke_and_ack(&alice.node.get_our_node_id(), &raa); + check_added_monitors(bob, 1); + + expect_pending_htlcs_forwardable!(bob); + + // Bob generates a PaymentClaimable to user code. + { + let events = bob.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len()); + match &events[0] { + Event::PaymentClaimable { .. } => { + bob.node.claim_funds(payment_preimage); + } + ev => panic!("Expected PaymentClaimable, got {:?}", ev) + } + check_added_monitors(bob, 1); + } + + // Bob generates a PaymentClaimed event to user code. 
+ { + let events = bob.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len()); + match &events[0] { + Event::PaymentClaimed { .. } => (), + ev => panic!("Expected PaymentClaimed, got {:?}", ev), + } + } + + // alice <--[update_fulfill_htlcs]-- bob + // alice <--[commitment_signed]-- bob + { + let cu = { + let events = bob.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len()); + match &events[0] { + MessageSendEvent::UpdateHTLCs { updates, .. } => { + assert_eq!(updates.update_fulfill_htlcs.len(), 1, "expected 1 update_fulfill_htlcs, got {}", updates.update_fulfill_htlcs.len()); + updates.clone() + } + ev => panic!("Expected UpdateHTLCs, got {:?}", ev) + } + }; + + with_async_signer(&alice, &bob.node.get_our_node_id(), &channel_id, masks, &|| { + alice.node.handle_update_fulfill_htlc(&bob.node.get_our_node_id(), &cu.update_fulfill_htlcs[0]); + alice.node.handle_commitment_signed(&bob.node.get_our_node_id(), &cu.commitment_signed); + check_added_monitors(alice, 1); + }); + } + + // alice --[revoke_and_ack]--> bob + // alice --[commitment_signed]--> bob + { + let (raa, cu) = { + let events = alice.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2, "expected 2 messages, got {}", events.len()); + match (&events[0], &events[1]) { + (MessageSendEvent::SendRevokeAndACK { msg: raa, .. }, MessageSendEvent::UpdateHTLCs { updates: cu, .. }) => { + assert_eq!(cu.update_fulfill_htlcs.len(), 0, "expected 0 update_fulfill_htlcs, got {}", cu.update_fulfill_htlcs.len()); + (raa.clone(), cu.clone()) + } + (a, b) => panic!("expected SendRevokeAndAck and UpdateHTLCs, not {:?} and {:?}", a, b) + } + }; + + // TODO: run with async once validate_counterparty_revocation supports it. + bob.node.handle_revoke_and_ack(&alice.node.get_our_node_id(), &raa); + check_added_monitors(bob, 1); + + with_async_signer(&bob, &alice.node.get_our_node_id(), &channel_id, masks, &|| { + bob.node.handle_commitment_signed(&alice.node.get_our_node_id(), &cu.commitment_signed); + check_added_monitors(bob, 1); + }); + } + + // alice <--[revoke_and_ack]-- bob + // TODO: run with async once validate_counterparty_revocation supports it. + let raa = get_event_msg!(bob, MessageSendEvent::SendRevokeAndACK, alice.node.get_our_node_id()); + alice.node.handle_revoke_and_ack(&bob.node.get_our_node_id(), &raa); + check_added_monitors(alice, 0); + + // Alice generates PaymentSent and PaymentPathSuccessful events to user code. + { + let events = alice.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2, "expected 2 events, got {}", events.len()); + match (&events[0], &events[1]) { + (Event::PaymentSent { .. }, Event::PaymentPathSuccessful { .. }) => (), + (a, b) => panic!("Expected PaymentSent and PaymentPathSuccessful, got {:?} and {:?}", a, b) + } + + check_added_monitors(alice, 1); // TODO: why is a monitor update added here? We'd have expected it when handling the RAA above.
+ } +} + +#[test] +fn test_payment_grs() { + do_test_payment(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]); +} + +#[test] +fn test_payment_gsr() { + do_test_payment(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]); +} + +#[test] +fn test_payment_rsg() { + do_test_payment(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]); +} + +#[test] +fn test_payment_rgs() { + do_test_payment(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]); +} + +#[test] +fn test_payment_srg() { + do_test_payment(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]); +} + +#[test] +fn test_payment_sgr() { + do_test_payment(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]); +} + +#[cfg(test)] +fn do_test_peer_reconnect(masks: &Vec<u32>) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -273,51 +580,553 @@ fn test_async_commitment_signature_for_peer_disconnect() { let (_, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); // Send a payment. - let src = &nodes[0]; - let dst = &nodes[1]; - let (route, our_payment_hash, _our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(src, dst, 8000000); - src.node.send_payment_with_route(&route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(src, 1); + let alice = &nodes[0]; + let bob = &nodes[1]; + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(alice, bob, 8_000_000); + + with_async_signer(&alice, &bob.node.get_our_node_id(), &chan_id, masks, &|| { + alice.node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors!(alice, 1); + let events = alice.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 0, "expected 0 events, got {}", events.len()); + + alice.node.peer_disconnected(&bob.node.get_our_node_id()); + bob.node.peer_disconnected(&alice.node.get_our_node_id()); + }); + + with_async_signer(&alice, &bob.node.get_our_node_id(), &chan_id, masks, &|| { + let mut reconnect_args = ReconnectArgs::new(alice, bob); + reconnect_args.send_channel_ready = (true, true); // ...since this will be state 1. + reconnect_nodes(reconnect_args); + }); - // Pass the payment along the route.
let payment_event = { - let mut events = src.node.get_and_clear_pending_msg_events(); + let mut events = alice.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - assert_eq!(payment_event.node_id, dst.node.get_our_node_id()); + assert_eq!(payment_event.node_id, bob.node.get_our_node_id()); assert_eq!(payment_event.msgs.len(), 1); - dst.node.handle_update_add_htlc(&src.node.get_our_node_id(), &payment_event.msgs[0]); + // alice --[update_add_htlc]--> bob + // alice --[commitment_signed]--> bob + with_async_signer(&bob, &alice.node.get_our_node_id(), &chan_id, masks, &|| { + bob.node.handle_update_add_htlc(&alice.node.get_our_node_id(), &payment_event.msgs[0]); + bob.node.handle_commitment_signed(&alice.node.get_our_node_id(), &payment_event.commitment_msg); + check_added_monitors(bob, 1); + + alice.node.peer_disconnected(&bob.node.get_our_node_id()); + bob.node.peer_disconnected(&alice.node.get_our_node_id()); + }); + + let (alice_reestablish, bob_reestablish) = with_async_signer(&alice, &bob.node.get_our_node_id(), &chan_id, masks, &|| { + alice.node.peer_connected(&bob.node.get_our_node_id(), &msgs::Init { + features: bob.node.init_features(), networks: None, remote_network_address: None + }, true).expect("peer_connected failed for alice"); + let alice_msgs = get_chan_reestablish_msgs!(alice, bob); + assert_eq!(alice_msgs.len(), 1, "expected 1 message, got {}", alice_msgs.len()); + bob.node.peer_connected(&alice.node.get_our_node_id(), &msgs::Init { + features: alice.node.init_features(), networks: None, remote_network_address: None + }, false).expect("peer_connected failed for bob"); + let bob_msgs = get_chan_reestablish_msgs!(bob, alice); + assert_eq!(bob_msgs.len(), 1, "expected 1 message, got {}", bob_msgs.len()); + (alice_msgs[0].clone(), bob_msgs[0].clone()) + }); + + with_async_signer(&bob, &alice.node.get_our_node_id(), &chan_id, masks, &|| { + bob.node.handle_channel_reestablish(&alice.node.get_our_node_id(), &alice_reestablish); + }); + + let (raa, cu) = match handle_chan_reestablish_msgs!(bob, alice) { + (None, Some(raa), Some(cu), RAACommitmentOrder::RevokeAndACKFirst) => (raa, cu), + (channel_ready, raa, cu, order) => { + panic!("bob: channel_ready={:?} raa={:?} cu={:?} order={:?}", channel_ready, raa, cu, order); + } + }; + + with_async_signer(&alice, &bob.node.get_our_node_id(), &chan_id, masks, &|| { + alice.node.handle_channel_reestablish(&bob.node.get_our_node_id(), &bob_reestablish); + }); + + match handle_chan_reestablish_msgs!(alice, bob) { + (None, None, None, _) => (), + (channel_ready, raa, cu, order) => { + panic!("alice: channel_ready={:?} raa={:?} cu={:?} order={:?}", channel_ready, raa, cu, order); + } + }; + + with_async_signer(&alice, &bob.node.get_our_node_id(), &chan_id, masks, &|| { + alice.node.handle_revoke_and_ack(&bob.node.get_our_node_id(), &raa); + check_added_monitors(alice, 1); + }); - // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a - // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`. - dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, false); - dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg); - check_added_monitors(dst, 1); + // Disconnect? 
- get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id()); + with_async_signer(&alice, &bob.node.get_our_node_id(), &chan_id, masks, &|| { + alice.node.handle_commitment_signed(&bob.node.get_our_node_id(), &cu.commitment_signed); + check_added_monitors(alice, 1); + }); - // Now disconnect and reconnect the peers. - src.node.peer_disconnected(&dst.node.get_our_node_id()); - dst.node.peer_disconnected(&src.node.get_our_node_id()); - let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); - reconnect_args.send_channel_ready = (false, false); - reconnect_args.pending_raa = (true, false); - reconnect_nodes(reconnect_args); + // Disconnect? - // Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`. - dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, true); - dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); + let raa = get_event_msg!(alice, MessageSendEvent::SendRevokeAndACK, bob.node.get_our_node_id()); + with_async_signer(&bob, &alice.node.get_our_node_id(), &chan_id, masks, &|| { + bob.node.handle_revoke_and_ack(&alice.node.get_our_node_id(), &raa); + check_added_monitors(bob, 1); + }); + + expect_pending_htlcs_forwardable!(bob); { - let events = dst.node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1, "expected one message, got {}", events.len()); - if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = events[0] { - assert_eq!(node_id, &src.node.get_our_node_id()); - } else { - panic!("expected UpdateHTLCs message, not {:?}", events[0]); - }; + let events = bob.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len()); + match &events[0] { + Event::PaymentClaimable { .. } => (), + ev => panic!("Expected PaymentClaimable, got {:?}", ev), + } + } + + with_async_signer(&bob, &alice.node.get_our_node_id(), &chan_id, masks, &|| { + bob.node.claim_funds(payment_preimage); + check_added_monitors(bob, 1); + }); + + let _cu = { + let events = bob.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1, "expected 1 message, got {}", events.len()); + match &events[0] { + MessageSendEvent::UpdateHTLCs { ref updates, .. } => updates.clone(), + ev => panic!("expected UpdateHTLCs, got {:?}", ev), + } + }; + + { + let events = bob.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len()); + match &events[0] { + Event::PaymentClaimed { .. } => (), + ev => panic!("Expected PaymentClaimed, got {:?}", ev), + } + } + + // TODO: deliver the updates in `_cu` to Alice and complete the claim flow, interleaving + // disconnect/reconnect cycles through the exchange as above.
+} + +#[test] +fn test_peer_reconnect_grs() { + do_test_peer_reconnect(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]); +} + +#[test] +fn test_peer_reconnect_gsr() { + do_test_peer_reconnect(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]); +} + +#[test] +fn test_peer_reconnect_rsg() { + do_test_peer_reconnect(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]); +} + +#[test] +fn test_peer_reconnect_rgs() { + do_test_peer_reconnect(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]); +} + +#[test] +fn test_peer_reconnect_srg() { + do_test_peer_reconnect(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]); +} + +#[test] +fn test_peer_reconnect_sgr() { + do_test_peer_reconnect(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]); +} + +#[test] +fn channel_update_fee_test() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let (_, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + let alice = &nodes[0]; + let bob = &nodes[1]; + + // Balance the channel. + send_payment(alice, &vec!(bob)[..], 8_000_000); + + // Send a payment from Bob to Alice: this requires Alice to acquire a new commitment point from + // the signer. Make the signer unavailable, then trigger a situation that requires Alice to send + // a fee update. Alice should be able to generate the fee update without crashing: she needs the + // statistics about the new transaction, but not the signed transaction itself. + let (route, payment_hash, _payment_preimage, payment_secret) = get_route_and_payment_hash!(bob, alice, 2_000_000); + bob.node.send_payment_with_route( + &route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors!(bob, 1); + + let payment_event = { + let mut events = bob.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + + alice.set_channel_signer_ops_available( + &bob.node.get_our_node_id(), &chan_id, ops::GET_PER_COMMITMENT_POINT, false); + + alice.node.handle_update_add_htlc(&bob.node.get_our_node_id(), &payment_event.msgs[0]); + alice.node.handle_commitment_signed(&bob.node.get_our_node_id(), &payment_event.commitment_msg); + check_added_monitors!(alice, 1); + + // Force alice to generate an update_fee + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + + alice.node.timer_tick_occurred(); + + assert!(alice.node.get_and_clear_pending_msg_events().is_empty()); + + alice.set_channel_signer_ops_available( + &bob.node.get_our_node_id(), &chan_id, ops::GET_PER_COMMITMENT_POINT, true); + alice.node.signer_unblocked(None); + + let events = alice.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match (&events[0], &events[1]) { + (MessageSendEvent::SendRevokeAndACK { .. }, MessageSendEvent::UpdateHTLCs { .. 
}) => { + // TODO(waterson) we'd kind of expect to see an update_fee here, but we actually don't because + // signer_maybe_unblocked doesn't create that. It probably should. + } + (a, b) => { + panic!("Expected SendRevokeAndACK and UpdateHTLCs, got {:?} and {:?}", a, b); + } + } +} + +#[test] +fn monitor_honors_commitment_raa_order() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let (_, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + let alice = &nodes[0]; + let bob = &nodes[1]; + + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(alice, bob, 8_000_000); + + alice.node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors!(alice, 1); + + let payment_event = { + let mut events = alice.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + + // Make the commitment secret be unavailable. The expectation here is that Bob should send the + // revoke-and-ack first. So even though he can generate a commitment update, he should hold onto + // that until he's ready to revoke. + bob.set_channel_signer_ops_available(&alice.node.get_our_node_id(), &chan_id, ops::RELEASE_COMMITMENT_SECRET, false); + + bob.node.handle_update_add_htlc(&alice.node.get_our_node_id(), &payment_event.msgs[0]); + bob.node.handle_commitment_signed(&alice.node.get_our_node_id(), &payment_event.commitment_msg); + check_added_monitors(bob, 1); + + assert!(bob.node.get_and_clear_pending_msg_events().is_empty()); + + // Now make the commitment secret available and restart the channel. + bob.set_channel_signer_ops_available(&alice.node.get_our_node_id(), &chan_id, ops::RELEASE_COMMITMENT_SECRET, true); + bob.node.signer_unblocked(None); + + get_revoke_commit_msgs(bob, &alice.node.get_our_node_id()); +} + +#[test] +fn peer_restart_with_blocked_signer_and_pending_payment() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let persister; + let new_chain_monitor; + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let alice_deserialized; + + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + let (route, payment_hash, _payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + + check_added_monitors!(nodes[0], 1); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + + // Deliver the update_add_htlc and commitment_signed to Bob. 
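+ // Bob would normally answer with a revoke_and_ack and his own commitment_signed; the test
+ // deliberately leaves them unfetched, so the disconnect below drops them and they must be
+ // regenerated during the reestablish dance, which cannot complete while Bob's signer is blocked.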
+ { + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.remove(0)); + assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + } + + // Disconnect Bob and restart Alice + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + + { + let alice_serialized = nodes[0].node.encode(); + let alice_monitor_serialized = get_monitor!(nodes[0], channel_id).encode(); + reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &alice_serialized, &[&alice_monitor_serialized], persister, new_chain_monitor, alice_deserialized); } + + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + // Turn off Bob's signer. + nodes[1].set_channel_signer_ops_available( + &nodes[0].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + false); + + // Reconnect Alice and Bob. + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + // Alice should have sent Bob a channel_reestablish and vice versa. + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + { + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); + match handle_chan_reestablish_msgs!(nodes[1], nodes[0]) { + (None, None, None, _) => (), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + } + } + + { + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); + match handle_chan_reestablish_msgs!(nodes[0], nodes[1]) { + (None, None, None, _) => (), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + } + }; + + // Re-enable and unblock Bob's signer. + nodes[1].set_channel_signer_ops_available( + &nodes[0].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + true); + + nodes[1].node.signer_unblocked(None); + + // At this point we should provide Alice with the revoke_and_ack and commitment_signed. 
+ get_revoke_commit_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + check_added_monitors!(nodes[1], 1); +} + +#[test] +fn peer_restart_with_blocked_signer_before_pending_payment() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let persister; + let new_chain_monitor; + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let alice_deserialized; + + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + let (route, payment_hash, _payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + + check_added_monitors!(nodes[0], 1); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + + // Turn off Bob's signer. + nodes[1].set_channel_signer_ops_available( + &nodes[0].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + false); + + // Deliver the update_add_htlc and commitment_signed to Bob. + { + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.remove(0)); + assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + } + + // Disconnect Bob and restart Alice + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + + { + let alice_serialized = nodes[0].node.encode(); + let alice_monitor_serialized = get_monitor!(nodes[0], channel_id).encode(); + reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &alice_serialized, &[&alice_monitor_serialized], persister, new_chain_monitor, alice_deserialized); + } + + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); + + // Reconnect Alice and Bob. + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + // Re-enable and unblock Bob's signer. + nodes[1].set_channel_signer_ops_available( + &nodes[0].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + true); + + nodes[1].node.signer_unblocked(None); + + // Alice should have sent Bob a channel_reestablish and vice versa. We explicitly do _not_ expect + // to see a RevokeAndACK and CommitmentUpdate yet! 
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + let (raa, cu) = { + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); + match handle_chan_reestablish_msgs!(nodes[1], nodes[0]) { + (None, Some(raa), Some(cu), RAACommitmentOrder::RevokeAndACKFirst) => (raa, cu), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + } + }; + + { + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); + match handle_chan_reestablish_msgs!(nodes[0], nodes[1]) { + (None, None, None, _) => (), + (channel_ready, revoke_and_ack, commitment_update, order) => { + panic!("got channel_ready={:?} revoke_and_ack={:?} commitment_update={:?} order={:?}", + channel_ready, revoke_and_ack, commitment_update, order); + } + } + }; + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cu.commitment_signed); + check_added_monitors!(nodes[0], 2); + + // At this point Alice should provide Bob with the revoke_and_ack. + let raa = { + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1, "Expected 1 event, got {}: {:?}", events.len(), events); + match &events[0] { + MessageSendEvent::SendRevokeAndACK { msg: raa, .. } => raa.clone(), + ev => panic!("Expected SendRevokeAndACK, got {:?}", ev) + } + }; + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa); + check_added_monitors!(nodes[1], 2); + + expect_pending_htlcs_forwardable!(nodes[1]); + expect_payment_claimable!(nodes[1], payment_hash, payment_secret, 1_000_000); +} + +#[test] +fn no_stray_channel_reestablish() { + // Original fuzz trace. + // a0 Disable A’s signer. + // 2c Disconnect A and B, then restart A. + // 0e Reconnect A and B. + // 2d Disconnect A and B (and C), then restart B. + // a1 Unblock A’s signer get_per_commitment_point + // ff Reset. + + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let alice_persister; + let bob_persister; + let alice_new_chain_monitor; + let bob_new_chain_monitor; + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let alice_deserialized; + let bob_deserialized; + + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + // Turn off Alice's signer. 
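+ // The `ops` constants (from util::test_channel_signer) form a bitmask selecting which signer
+ // methods to make unavailable. Presumably they look something like the following, though the
+ // exact bit values are a guess:
+ //
+ //     pub mod ops {
+ //         pub const GET_PER_COMMITMENT_POINT: u32 = 1 << 0;
+ //         pub const RELEASE_COMMITMENT_SECRET: u32 = 1 << 1;
+ //         pub const SIGN_COUNTERPARTY_COMMITMENT: u32 = 1 << 2;
+ //     }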
+ eprintln!("disabling alice's signer"); + nodes[0].set_channel_signer_ops_available( + &nodes[1].node.get_our_node_id(), &channel_id, + ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT, + false); + + // Disconnect Bob and restart Alice + eprintln!("disconnecting bob"); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + + eprintln!("restarting alice"); + { + let alice_serialized = nodes[0].node.encode(); + let alice_monitor_serialized = get_monitor!(nodes[0], channel_id).encode(); + reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &alice_serialized, &[&alice_monitor_serialized], alice_persister, alice_new_chain_monitor, alice_deserialized); + } + + // Reconnect Alice and Bob. + eprintln!("reconnecting alice and bob"); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + + // Disconnect Alice and restart Bob + eprintln!("disconnecting alice"); + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); + + eprintln!("restarting bob"); + { + let bob_serialized = nodes[1].node.encode(); + let bob_monitor_serialized = get_monitor!(nodes[1], channel_id).encode(); + reload_node!(nodes[1], *nodes[1].node.get_current_default_configuration(), &bob_serialized, &[&bob_monitor_serialized], bob_persister, bob_new_chain_monitor, bob_deserialized); + } + + eprintln!("unblocking alice's signer for get_per_commitment_point"); + nodes[0].set_channel_signer_ops_available(&nodes[1].node.get_our_node_id(), &channel_id, ops::GET_PER_COMMITMENT_POINT, true); + nodes[0].node.signer_unblocked(None); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert!(events.is_empty(), "Expected no events from Alice, got {:?}", events); } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index ec4f26664a7..f0779cd567d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -793,14 +793,44 @@ pub(super) struct MonitorRestoreUpdates { pub announcement_sigs: Option, } -/// The return value of `signer_maybe_unblocked` +/// The return value of `signer_maybe_unblocked`. +/// +/// When the signer becomes unblocked, any non-`None` event accumulated here should be sent to the +/// peer by the caller. #[allow(unused)] pub(super) struct SignerResumeUpdates { + /// A `commitment_signed` message, possibly with additional HTLC-related messages (e.g., + /// `update_add_htlc`) that should be placed in the commitment. + /// + /// When both this and `raa` contain values, they should be sent to the peer using an ordering + /// consistent with `order`. pub commitment_update: Option, + /// A `revoke_and_ack` message that should be sent to the peer. + /// + /// When both this and `raa` contain values, they should be sent to the peer using an ordering + /// consistent with `order`. + pub raa: Option, + /// The order in which the `commitment_signed` and `revoke_and_ack` messages should be provided to + /// the peer. Only meaningful if both of these messages are present. + pub order: RAACommitmentOrder, + /// A `funding_signed` message that should be sent to the peer. pub funding_signed: Option, + /// A `channel_ready` message that should be sent to the peer. 
 pub channel_ready: Option<msgs::ChannelReady>,
 }
 
+impl Default for SignerResumeUpdates {
+ fn default() -> Self {
+ Self {
+ commitment_update: None,
+ raa: None,
+ order: RAACommitmentOrder::CommitmentFirst,
+ funding_signed: None,
+ channel_ready: None,
+ }
+ }
+}
+
 /// The return value of `channel_reestablish`
 pub(super) struct ReestablishResponses {
 pub channel_ready: Option<msgs::ChannelReady>,
@@ -997,6 +1027,17 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 // cost of others, but should really just be changed.
 
 cur_holder_commitment_transaction_number: u64,
+
+ // The commitment point corresponding to `cur_holder_commitment_transaction_number`, which is the
+ // next state.
+ cur_holder_commitment_point: PublicKey,
+ // The commitment point corresponding to `cur_holder_commitment_transaction_number - 1`, which is
+ // the state after the next state. This may be `None` if we are pending retrieval of the
+ // commitment point from the signer.
+ next_holder_commitment_point: Option<PublicKey>,
+ // The commitment secret corresponding to `cur_holder_commitment_transaction_number + 2`, which is
+ // the previous state.
+ prev_holder_commitment_secret: Option<[u8; 32]>,
 cur_counterparty_commitment_transaction_number: u64,
 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
@@ -1031,10 +1072,22 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
 signer_pending_commitment_update: bool,
+ /// Similar to [`Self::signer_pending_commitment_update`]: indicates that we've deferred sending a
+ /// `revoke_and_ack`, and should do so once the signer has become unblocked.
+ signer_pending_revoke_and_ack: bool,
 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
 /// outbound or inbound.
 signer_pending_funding: bool,
+ /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send a
+ /// [`msgs::ChannelReady`].
+ signer_pending_channel_ready: bool,
+ /// If we attempted to retrieve the per-commitment point for the next transaction but the signer
+ /// wasn't ready, then this will be set to `true`.
+ signer_pending_commitment_point: bool,
+ /// If we attempted to release the per-commitment secret for the previous transaction but the
+ /// signer wasn't ready, then this will be set to `true`.
+ signer_pending_released_secret: bool,
 
 // pending_update_fee is filled when sending and receiving update_fee.
 //
@@ -1527,6 +1580,91 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 self.channel_ready_event_emitted = true;
 }
 
+ /// Caches the next commitment point; i.e., the point at
+ /// `cur_holder_commitment_transaction_number-1`.
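+ ///
+ /// Commitment numbers count down from `INITIAL_COMMITMENT_NUMBER` ((1 << 48) - 1), so a
+ /// rough sketch of the indexing used here, assuming that convention:
+ ///
+ /// ```ignore
+ /// let cur = INITIAL_COMMITMENT_NUMBER - 5; // current holder state (the "next" commitment)
+ /// let next = cur - 1;                      // the point this method caches ahead of time
+ /// let prev = cur + 2;                      // state whose secret goes in our revoke_and_ack
+ /// ```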
+ pub fn request_next_holder_per_commitment_point(&mut self, logger: &L) where L::Target: Logger + { + if self.next_holder_commitment_point.is_some() { + return; + } + + let transaction_number = self.cur_holder_commitment_transaction_number - 1; + let signer = self.holder_signer.as_ref(); + + log_trace!(logger, "Retrieving commitment point for {} transaction number {}", self.channel_id(), transaction_number); + self.next_holder_commitment_point = match signer.get_per_commitment_point(transaction_number, &self.secp_ctx) { + Ok(point) => { + if self.signer_pending_commitment_point { + log_trace!(logger, "Commitment point for {} transaction number {} retrieved; clearing signer_pending_commitment_point", + self.channel_id(), transaction_number); + self.signer_pending_commitment_point = false; + } + Some(point) + } + + Err(_) => { + if !self.signer_pending_commitment_point { + log_trace!(logger, "Commitment point for {} transaction number {} is not available; setting signer_pending_commitment_point", + self.channel_id(), transaction_number); + self.signer_pending_commitment_point = true; + } + None + } + }; + } + + /// Advance the current commitment point by moving the cached `next_holder_commitment_point` into + /// `cur_holder_commitment_point`. + /// + /// It is necessary that `next_holder_commitment_point` have a value (i.e., not be `None`): + /// failure to honor this constraint likely indicates that we've provided a revoke-and-ack to the + /// counterparty before being able to retrieve the next commitment point from an asynchronous + /// signer. + fn advance_holder_per_commitment_point(&mut self, logger: &L) where L::Target: Logger + { + self.cur_holder_commitment_transaction_number -= 1; + log_trace!(logger, "Advancing commitment point for {} transaction number {}", self.channel_id(), self.cur_holder_commitment_transaction_number); + self.cur_holder_commitment_point = self.next_holder_commitment_point.take() + .expect("There is no next holder_commitment point"); + self.request_next_holder_per_commitment_point(logger) + } + + pub fn update_holder_commitment_secret(&mut self, logger: &L) where L::Target: Logger + { + let transaction_number = self.cur_holder_commitment_transaction_number; + let signer = self.holder_signer.as_ref(); + + let releasing_transaction_number = transaction_number + 2; + if releasing_transaction_number <= INITIAL_COMMITMENT_NUMBER { + log_trace!(logger, "Retrieving commitment secret for {} transaction number {}", self.channel_id(), releasing_transaction_number); + self.prev_holder_commitment_secret = match signer.release_commitment_secret(releasing_transaction_number) { + Ok(secret) => { + if self.signer_pending_released_secret { + log_trace!(logger, "Commitment secret for {} transaction number {} retrieved; clearing signer_pending_released_secret", + self.channel_id(), releasing_transaction_number); + self.signer_pending_released_secret = false; + } + Some(secret) + } + + Err(_) => { + if !self.signer_pending_released_secret { + log_trace!(logger, "Commitment secret for {} transaction number {} is not available; setting signer_pending_released_secret", + self.channel_id(), releasing_transaction_number); + self.signer_pending_released_secret = true; + } + None + } + } + }; + } + + #[cfg(test)] + pub fn forget_signer_material(&mut self) { + self.next_holder_commitment_point = None; + self.prev_holder_commitment_secret = None; + } + /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. 
Once /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will /// no longer be considered when forwarding HTLCs. @@ -1824,8 +1962,8 @@ impl ChannelContext where SP::Target: SignerProvider { /// our counterparty!) /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction) /// TODO Some magic rust shit to compile-time check this? - fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys { - let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx); + fn build_holder_transaction_keys(&self) -> TxCreationKeys { + let per_commitment_point = self.cur_holder_commitment_point; let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint; let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; let counterparty_pubkeys = self.get_counterparty_pubkeys(); @@ -3354,7 +3492,7 @@ impl Channel where let funding_script = self.context.get_funding_redeemscript(); - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let keys = self.context.build_holder_transaction_keys(); let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger); let commitment_txid = { @@ -3518,10 +3656,12 @@ impl Channel where }] }; - self.context.cur_holder_commitment_transaction_number -= 1; self.context.expecting_peer_commitment_signed = false; + self.context.advance_holder_per_commitment_point(logger); + // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call // build_commitment_no_status_check() next which will reset this to RAAFirst. + log_debug!(logger, "setting resend_order to CommitmentFirst"); self.context.resend_order = RAACommitmentOrder::CommitmentFirst; if self.context.channel_state.is_monitor_update_in_progress() { @@ -4016,7 +4156,7 @@ impl Channel where // Before proposing a feerate update, check that we can actually afford the new fee. let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let keys = self.context.build_holder_transaction_keys(); let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000; let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; @@ -4069,6 +4209,22 @@ impl Channel where return Err(()) } + // If we have requests pending to the signer, then clear out any of them that will be recomputed + // on `channel_reestablish`. This ensures we don't send them twice if the signer unblocks before + // we receive the counterparty's reestablish message. 
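+ // (All three -- the channel_ready, the revoke_and_ack, and the commitment update -- are
+ // rebuilt from channel state when we process the counterparty's `channel_reestablish`, so
+ // dropping the pending flags here loses nothing.)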
+ if self.context.signer_pending_channel_ready { + log_trace!(logger, "Clearing signer_pending_channel_ready"); + self.context.signer_pending_channel_ready = false; + } + if self.context.signer_pending_revoke_and_ack { + log_trace!(logger, "Clearing signer_pending_revoke_and_ack"); + self.context.signer_pending_revoke_and_ack = false; + } + if self.context.signer_pending_commitment_update { + log_trace!(logger, "Clearing signer_pending_commitment_update"); + self.context.signer_pending_commitment_update = false; + } + if self.context.channel_state.is_peer_disconnected() { // While the below code should be idempotent, it's simpler to just return early, as // redundant disconnect events can fire, though they should be rare. @@ -4201,12 +4357,7 @@ impl Channel where assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0), "Funding transaction broadcast by the local client before it should have - LDK didn't do it!"); self.context.monitor_pending_channel_ready = false; - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - Some(msgs::ChannelReady { - channel_id: self.context.channel_id(), - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }) + Some(self.get_channel_ready()) } else { None }; let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger); @@ -4228,7 +4379,12 @@ impl Channel where } let raa = if self.context.monitor_pending_revoke_and_ack { - Some(self.get_last_revoke_and_ack()) + self.context.update_holder_commitment_secret(logger); + self.get_last_revoke_and_ack(logger).or_else(|| { + log_trace!(logger, "Monitor was pending RAA, but RAA is not available; setting signer_pending_revoke_and_ack"); + self.context.signer_pending_revoke_and_ack = true; + None + }) } else { None }; let commitment_update = if self.context.monitor_pending_commitment_signed { self.get_last_commitment_update_for_send(logger).ok() @@ -4237,13 +4393,61 @@ impl Channel where self.mark_awaiting_response(); } + if self.context.monitor_pending_commitment_signed && commitment_update.is_none() { + log_trace!(logger, "Monitor was pending_commitment_signed with no commitment update available; setting signer_pending_commitment_update"); + self.context.signer_pending_commitment_update = true; + } else { + // If the signer was pending a commitment update, but we happened to get one just now because + // the monitor retrieved it, then we can mark the signer as "not pending anymore". + if self.context.signer_pending_commitment_update && commitment_update.is_some() { + log_trace!(logger, "Signer was pending commitment update, monitor retrieved it: clearing signer_pending_commitment_update"); + self.context.signer_pending_commitment_update = false; + } + } + if self.context.monitor_pending_revoke_and_ack && raa.is_none() { + log_trace!(logger, "Monitor was pending_revoke_and_ack with no RAA available; setting signer_pending_revoke_and_ack"); + self.context.signer_pending_revoke_and_ack = true; + } else { + // If the signer was pending a RAA, but we happened to get one just now because the monitor + // retrieved it, then we can mark the signer as "not pending anymore". 
+ if self.context.signer_pending_revoke_and_ack && raa.is_some() {
+ log_trace!(logger, "Signer was pending RAA, monitor retrieved it: clearing signer_pending_revoke_and_ack");
+ self.context.signer_pending_revoke_and_ack = false;
+ }
+ }
+
 self.context.monitor_pending_revoke_and_ack = false;
 self.context.monitor_pending_commitment_signed = false;
+
+ // Enforce the ordering between commitment update and revoke-and-ack: with an asynchronous
+ // signer, they may have become available in an order that violates the ordering that our
+ // counterparty expects. If that happens, suppress the out-of-order message and mark it as
+ // pending. When the signer becomes unblocked we can provide the messages in the proper order.
 let order = self.context.resend_order.clone();
- log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
+ let (commitment_update, raa) = match order {
+ RAACommitmentOrder::CommitmentFirst => {
+ if self.context.signer_pending_commitment_update && raa.is_some() {
+ log_trace!(logger, "RAA is available, but we can't deliver it because ordering requires CommitmentFirst; setting signer_pending_revoke_and_ack = true");
+ self.context.signer_pending_revoke_and_ack = true;
+ }
+ (commitment_update, if !self.context.signer_pending_commitment_update { raa } else { None })
+ }
+
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ if self.context.signer_pending_revoke_and_ack && commitment_update.is_some() {
+ log_trace!(logger, "commitment_update is available, but we can't deliver it because ordering requires RevokeAndACKFirst; setting signer_pending_commitment_update = true");
+ self.context.signer_pending_commitment_update = true;
+ }
+ (if !self.context.signer_pending_revoke_and_ack { commitment_update } else { None }, raa)
+ }
+ };
+
+ log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA{}",
 &self.context.channel_id(),
 if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
 if commitment_update.is_some() { "a" } else { "no" },
 if raa.is_some() { "an" } else { "no" },
- match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
+ if commitment_update.is_some() && raa.is_some() {
+ match order { RAACommitmentOrder::CommitmentFirst => ", with commitment first", RAACommitmentOrder::RevokeAndACKFirst => ", with RAA first"}
+ } else { "" });
 
 MonitorRestoreUpdates {
 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs,
 funding_broadcastable, channel_ready, announcement_sigs
 }
@@ -4285,37 +4489,138 @@ impl<SP: Deref> Channel<SP> where
 /// blocked.
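+ ///
+ /// Returns a [`SignerResumeUpdates`] describing which of the deferred messages, if any, can
+ /// now be generated and sent to the peer.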
#[cfg(async_signing)] pub fn signer_maybe_unblocked(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger { - let commitment_update = if self.context.signer_pending_commitment_update { - self.get_last_commitment_update_for_send(logger).ok() - } else { None }; + log_trace!(logger, "Signing unblocked in channel {} at sequence {}", + &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number); + + if self.context.signer_pending_commitment_point { + log_trace!(logger, "Attempting to update holder per-commitment point..."); + self.context.request_next_holder_per_commitment_point(logger); + } + + if self.context.signer_pending_released_secret { + log_trace!(logger, "Attempting to update holder commitment secret..."); + self.context.update_holder_commitment_secret(logger); + } + let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() { + log_trace!(logger, "Attempting to generate pending funding signed..."); self.context.get_funding_signed_msg(logger).1 } else { None }; - let channel_ready = if funding_signed.is_some() { - self.check_get_channel_ready(0) + + // Provide a `channel_ready` message if we need to, but only if we're _not_ still pending + // funding. + let channel_ready = if self.context.signer_pending_channel_ready && !self.context.signer_pending_funding { + log_trace!(logger, "Generating pending channel_ready..."); + self.context.signer_pending_channel_ready = false; + Some(self.get_channel_ready()) } else { None }; - log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready", + // If we're pending a channel_ready to the counterparty, then we can't be sending commitment + // updates. If we're not, then make sure that we honor any ordering requirements between the + // commitment update and revoke-and-ack. 
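+ // (Consumer-side sketch, for illustration: whoever receives the resulting
+ // `SignerResumeUpdates` is expected to deliver both messages consistent with `order`,
+ // roughly:
+ //
+ //     match updates.order {
+ //         RAACommitmentOrder::RevokeAndACKFirst => { /* send raa, then commitment_update */ }
+ //         RAACommitmentOrder::CommitmentFirst => { /* send commitment_update, then raa */ }
+ //     }
+ //
+ // mirroring the docs on `SignerResumeUpdates`.)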
+ let (commitment_update, raa) = match &self.context.resend_order { + RAACommitmentOrder::CommitmentFirst => { + let cu = if self.context.signer_pending_commitment_update { + log_trace!(logger, "Attempting to generate pending commitment update..."); + self.get_last_commitment_update_for_send(logger).map(|cu| { + log_trace!(logger, "Generated commitment update; clearing signer_pending_commitment_update"); + self.context.signer_pending_commitment_update = false; + cu + }).ok() + } else { None }; + + let raa = if self.context.signer_pending_revoke_and_ack && !self.context.signer_pending_commitment_update { + log_trace!(logger, "Attempting to generate pending RAA..."); + self.get_last_revoke_and_ack(logger).map(|raa| { + log_trace!(logger, "Generated RAA; clearing signer_pending_revoke_and_ack"); + self.context.signer_pending_revoke_and_ack = false; + raa + }) + } else { None }; + + (cu, raa) + } + + RAACommitmentOrder::RevokeAndACKFirst => { + let raa = if self.context.signer_pending_revoke_and_ack { + log_trace!(logger, "Attempting to generate pending RAA..."); + self.get_last_revoke_and_ack(logger).map(|raa| { + log_trace!(logger, "Generated RAA; clearing signer_pending_revoke_and_ack"); + self.context.signer_pending_revoke_and_ack = false; + raa + }) + } else { None }; + + let cu = if self.context.signer_pending_commitment_update && !self.context.signer_pending_revoke_and_ack { + log_trace!(logger, "Attempting to generate pending commitment update..."); + self.get_last_commitment_update_for_send(logger).map(|cu| { + log_trace!(logger, "Generated commitment update; clearing signer_pending_commitment_update"); + self.context.signer_pending_commitment_update = false; + cu + }).ok() + } else { None }; + + (cu, raa) + } + }; + + let order = self.context.resend_order.clone(); + + log_debug!(logger, "Signing unblocked in channel {} at sequence {} resulted in {} commitment update, {} RAA{}, {} funding_signed, {} channel_ready", + &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number, if commitment_update.is_some() { "a" } else { "no" }, + if raa.is_some() { "an" } else { "no" }, + if commitment_update.is_some() && raa.is_some() { + if order == RAACommitmentOrder::CommitmentFirst { " (commitment first)" } else { " (RAA first)" } + } else { "" }, if funding_signed.is_some() { "a" } else { "no" }, if channel_ready.is_some() { "a" } else { "no" }); SignerResumeUpdates { commitment_update, + raa, + order, funding_signed, channel_ready, } } - fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK { - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2); - msgs::RevokeAndACK { - channel_id: self.context.channel_id, - per_commitment_secret, - next_per_commitment_point, - #[cfg(taproot)] - next_local_nonce: None, + fn get_last_revoke_and_ack(&self, logger: &L) -> Option where L::Target: Logger { + assert!(self.context.cur_holder_commitment_transaction_number <= INITIAL_COMMITMENT_NUMBER + 2); + match (self.context.next_holder_commitment_point, self.context.prev_holder_commitment_secret) { + (Some(_), Some(per_commitment_secret)) => { + log_debug!(logger, "Regenerated last revoke-and-ack in channel {} for next per-commitment point sequence number {}, releasing secret for {}", + &self.context.channel_id(), 
self.context.cur_holder_commitment_transaction_number, + self.context.cur_holder_commitment_transaction_number + 2); + + Some(msgs::RevokeAndACK { + channel_id: self.context.channel_id, + per_commitment_secret, + next_per_commitment_point: self.context.cur_holder_commitment_point, + #[cfg(taproot)] + next_local_nonce: None, + }) + }, + + (Some(_), None) => { + log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the secret for {} is not available", + &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number, + self.context.cur_holder_commitment_transaction_number + 2); + None + }, + + (None, Some(_)) => { + log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available", + &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number); + None + }, + + (None, None) => { + log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because neither the next per-commitment point nor the secret for {} is available", + &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number, + self.context.cur_holder_commitment_transaction_number + 2); + None + }, } } @@ -4398,6 +4703,9 @@ impl Channel where return Err(()); } }; + log_debug!(logger, "Regenerated latest commitment update in channel {} at {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", + &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number, if update_fee.is_some() { " update_fee," } else { "" }, + update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); Ok(msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed, @@ -4444,11 +4752,19 @@ impl Channel where let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1; if msg.next_remote_commitment_number > 0 { - let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx); - let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret) - .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; - if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) { - return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned())); + // TODO(waterson): figure out how to do this verification when an async signer is provided + // with a (more or less) arbitrary state index. Should we require that an async signer cache + // old points? Or should we make it so that we can restart the re-establish after the signer + // becomes unblocked? Or something else? 
+ if false { + let state_index = INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1; + let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(state_index, &self.context.secp_ctx) + .map_err(|_| ChannelError::Close(format!("Unable to retrieve per-commitment point for state {}", state_index)))?; + let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret) + .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; + if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) { + return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned())); + } } if msg.next_remote_commitment_number > our_commitment_transaction { macro_rules! log_and_panic { @@ -4504,29 +4820,40 @@ impl Channel where } // We have OurChannelReady set! - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); return Ok(ReestablishResponses { - channel_ready: Some(msgs::ChannelReady { - channel_id: self.context.channel_id(), - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }), + channel_ready: Some(self.get_channel_ready()), raa: None, commitment_update: None, order: RAACommitmentOrder::CommitmentFirst, shutdown_msg, announcement_sigs, }); } - let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction { + // If they think we're behind by one state, then we owe them an RAA. We may or may not have that + // RAA handy depending on the status of the remote signer and the monitor. + let steps_behind = (INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number) - (msg.next_remote_commitment_number + 1); + let raa = if steps_behind == 0 { // Remote isn't waiting on any RevokeAndACK from us! // Note that if we need to repeat our ChannelReady we'll do that in the next if block. 
None - } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction { + } else if steps_behind == 1 { // msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number { if self.context.channel_state.is_monitor_update_in_progress() { self.context.monitor_pending_revoke_and_ack = true; None } else { - Some(self.get_last_revoke_and_ack()) + self.context.update_holder_commitment_secret(logger); + self.get_last_revoke_and_ack(logger).map(|raa| { + if self.context.signer_pending_revoke_and_ack { + log_trace!(logger, "Generated RAA for channel_reestablish; clearing signer_pending_revoke_and_ack"); + self.context.signer_pending_revoke_and_ack = false; + } + raa + }).or_else(|| { + if !self.context.signer_pending_revoke_and_ack { + log_trace!(logger, "Unable to generate RAA for channel_reestablish; setting signer_pending_revoke_and_ack"); + self.context.signer_pending_revoke_and_ack = true; + } + None + }) } } else { debug_assert!(false, "All values should have been handled in the four cases above"); @@ -4549,29 +4876,24 @@ impl Channel where let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 { // We should never have to worry about MonitorUpdateInProgress resending ChannelReady - let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - Some(msgs::ChannelReady { - channel_id: self.context.channel_id(), - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }) + log_debug!(logger, "Reconnecting channel at state 1, re-sending channel_ready"); + Some(self.get_channel_ready()) } else { None }; if msg.next_local_commitment_number == next_counterparty_commitment_number { - if required_revoke.is_some() { + if raa.is_some() || self.context.signer_pending_revoke_and_ack { log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); } else { log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); } Ok(ReestablishResponses { - channel_ready, shutdown_msg, announcement_sigs, - raa: required_revoke, + channel_ready, shutdown_msg, announcement_sigs, raa, commitment_update: None, order: self.context.resend_order.clone(), }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { - if required_revoke.is_some() { + if raa.is_some() || self.context.signer_pending_revoke_and_ack { log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id()); } else { log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id()); @@ -4585,10 +4907,16 @@ impl Channel where order: self.context.resend_order.clone(), }) } else { + // Get the commitment update. This may fail if we're pending the signer. 
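+ // If it does fail, `signer_pending_commitment_update` is set below so that
+ // `signer_maybe_unblocked` can retry and deliver the update once the signer is ready.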
+ let commitment_update = if raa.is_some() || steps_behind == 0 { + self.get_last_commitment_update_for_send(logger).ok() + } else { None }; + self.context.signer_pending_commitment_update = commitment_update.is_none(); + Ok(ReestablishResponses { channel_ready, shutdown_msg, announcement_sigs, - raa: required_revoke, - commitment_update: self.get_last_commitment_update_for_send(logger).ok(), + raa, + commitment_update, order: self.context.resend_order.clone(), }) } @@ -5252,11 +5580,14 @@ impl Channel where self.context.channel_update_status = status; } - fn check_get_channel_ready(&mut self, height: u32) -> Option { + fn check_get_channel_ready(&mut self, height: u32, logger: &L) -> Option + where L::Target: Logger + { // Called: // * always when a new block/transactions are confirmed with the new height // * when funding is signed with a height of 0 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) { + log_debug!(logger, "Can't produce channel_ready: no confirmation height yet on a channel with minimum_depth = {:?}", self.context.minimum_depth); return None; } @@ -5266,17 +5597,14 @@ impl Channel where } if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 { - return None; - } - - // If we're still pending the signature on a funding transaction, then we're not ready to send a - // channel_ready yet. - if self.context.signer_pending_funding { + log_debug!(logger, "Can't produce channel_ready: {} confirmations yet on a channel with minimum_depth = {:?}", funding_tx_confirmations, self.context.minimum_depth); return None; } // Note that we don't include ChannelState::WaitingForBatch as we don't want to send // channel_ready until the entire batch is ready. + log_debug!(logger, "self.context.channel_state = {:?}", self.context.channel_state); + log_debug!(logger, "!FundedStateFlags::ALL = {:?}", !FundedStateFlags::ALL); let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) { self.context.channel_state.set_our_channel_ready(); true @@ -5304,22 +5632,39 @@ impl Channel where false }; - if need_commitment_update { - if !self.context.channel_state.is_monitor_update_in_progress() { - if !self.context.channel_state.is_peer_disconnected() { - let next_per_commitment_point = - self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx); - return Some(msgs::ChannelReady { - channel_id: self.context.channel_id, - next_per_commitment_point, - short_channel_id_alias: Some(self.context.outbound_scid_alias), - }); - } - } else { - self.context.monitor_pending_channel_ready = true; - } + if !need_commitment_update { + log_debug!(logger, "Not producing channel_ready: we do not need a commitment update"); + return None; + } + + // If we're still pending the signature on a funding transaction, then we're not ready to send a + // channel_ready yet. + if self.context.signer_pending_funding { + log_debug!(logger, "Can't produce channel_ready: the signer is pending funding. Setting signer_pending_channel_ready."); + self.context.signer_pending_channel_ready = true; + return None; + } + + if self.context.channel_state.is_monitor_update_in_progress() { + log_debug!(logger, "Not producing channel_ready: a monitor update is in progress. 
Setting monitor_pending_channel_ready."); + self.context.monitor_pending_channel_ready = true; + return None; + } + + if self.context.channel_state.is_peer_disconnected() { + log_debug!(logger, "Not producing channel_ready: the peer is disconnected."); + return None; + } + + Some(self.get_channel_ready()) + } + + fn get_channel_ready(&self) -> msgs::ChannelReady { + msgs::ChannelReady { + channel_id: self.context.channel_id(), + next_per_commitment_point: self.context.cur_holder_commitment_point, + short_channel_id_alias: Some(self.context.outbound_scid_alias), } - None } /// When a transaction is confirmed, we check whether it is or spends the funding transaction @@ -5386,7 +5731,7 @@ impl Channel where // If we allow 1-conf funding, we may need to check for channel_ready here and // send it immediately instead of waiting for a best_block_updated call (which // may have already happened for this block). - if let Some(channel_ready) = self.check_get_channel_ready(height) { + if let Some(channel_ready) = self.check_get_channel_ready(height, logger) { log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger); msgs = (Some(channel_ready), announcement_sigs); @@ -5452,7 +5797,7 @@ impl Channel where self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time); - if let Some(channel_ready) = self.check_get_channel_ready(height) { + if let Some(channel_ready) = self.check_get_channel_ready(height, logger) { let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer { self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger) } else { None }; @@ -5941,6 +6286,7 @@ impl Channel where self.context.pending_update_fee = None; } } + log_debug!(logger, "setting resend_order to RevokeAndACKFirst"); self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst; let (mut htlcs_ref, counterparty_commitment_tx) = @@ -6280,6 +6626,16 @@ impl OutboundV1Channel where SP::Target: SignerProvider { let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source)); + // TODO(waterson): this assumes that the signer is available and will respond synchronously, two + // constraints we'd ideally like to remove. + let cur_holder_commitment_point = holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx) + .map_err(|_| APIError::ChannelUnavailable { err: "Failed to get initial per-commitment point".to_owned() })?; + + let next_holder_commitment_point = Some( + holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx) + .map_err(|_| APIError::ChannelUnavailable { err: "Failed to get second per-commitment point".to_owned() })? 
+ ); + Ok(Self { context: ChannelContext { user_id, @@ -6308,6 +6664,9 @@ impl OutboundV1Channel where SP::Target: SignerProvider { destination_script, cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + cur_holder_commitment_point, + next_holder_commitment_point, + prev_holder_commitment_secret: None, cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, value_to_self_msat, @@ -6330,7 +6689,11 @@ impl OutboundV1Channel where SP::Target: SignerProvider { monitor_pending_finalized_fulfills: Vec::new(), signer_pending_commitment_update: false, + signer_pending_revoke_and_ack: false, signer_pending_funding: false, + signer_pending_channel_ready: false, + signer_pending_commitment_point: false, + signer_pending_released_secret: false, #[cfg(debug_assertions)] holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), @@ -6582,7 +6945,6 @@ impl OutboundV1Channel where SP::Target: SignerProvider { panic!("Tried to send an open_channel for a channel that has already advanced"); } - let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); let keys = self.context.get_holder_pubkeys(); msgs::OpenChannel { @@ -6602,7 +6964,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { payment_point: keys.payment_point, delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(), htlc_basepoint: keys.htlc_basepoint.to_public_key(), - first_per_commitment_point, + first_per_commitment_point: self.context.cur_holder_commitment_point, channel_flags: if self.context.config.announced_channel {1} else {0}, shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { Some(script) => script.clone().into_inner(), @@ -6775,7 +7137,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); - let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let holder_signer = self.context.build_holder_transaction_keys(); let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; { let trusted_tx = initial_commitment_tx.trust(); @@ -6829,14 +7191,15 @@ impl OutboundV1Channel where SP::Target: SignerProvider { } else { self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); } - self.context.cur_holder_commitment_transaction_number -= 1; + self.context.advance_holder_per_commitment_point(logger); self.context.cur_counterparty_commitment_transaction_number -= 1; log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); let mut channel = Channel { context: self.context }; - let need_channel_ready = channel.check_get_channel_ready(0).is_some(); + let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some(); + log_trace!(logger, "funding_signed {} channel_ready", if need_channel_ready { "needs" } else { "does not need" }); channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); Ok((channel, channel_monitor)) } @@ -7076,6 +7439,16 @@ impl InboundV1Channel where SP::Target: 
SignerProvider { Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)) }; + // TODO(waterson): this assumes that the signer is available and will respond synchronously, two + // constraints we'd ideally like to remove. + let cur_holder_commitment_point = holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx) + .map_err(|_| ChannelError::Close("Failed to get initial per-commitment point".to_owned()))?; + + let next_holder_commitment_point = Some( + holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx) + .map_err(|_| ChannelError::Close("Failed to get second per-commitment point".to_owned()))? + ); + let chan = Self { context: ChannelContext { user_id, @@ -7105,6 +7478,9 @@ impl InboundV1Channel where SP::Target: SignerProvider { destination_script, cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + cur_holder_commitment_point, + next_holder_commitment_point, + prev_holder_commitment_secret: None, cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, value_to_self_msat: msg.push_msat, @@ -7127,7 +7503,11 @@ impl InboundV1Channel where SP::Target: SignerProvider { monitor_pending_finalized_fulfills: Vec::new(), signer_pending_commitment_update: false, + signer_pending_revoke_and_ack: false, signer_pending_funding: false, + signer_pending_channel_ready: false, + signer_pending_commitment_point: false, + signer_pending_released_secret: false, #[cfg(debug_assertions)] holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)), @@ -7243,9 +7623,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel fn generate_accept_channel_message(&self) -> msgs::AcceptChannel { - let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); let keys = self.context.get_holder_pubkeys(); - msgs::AcceptChannel { temporary_channel_id: self.context.channel_id, dust_limit_satoshis: self.context.holder_dust_limit_satoshis, @@ -7260,7 +7638,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { payment_point: keys.payment_point, delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(), htlc_basepoint: keys.htlc_basepoint.to_public_key(), - first_per_commitment_point, + first_per_commitment_point: self.context.cur_holder_commitment_point, shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { Some(script) => script.clone().into_inner(), None => Builder::new().into_script(), @@ -7283,7 +7661,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { fn check_funding_created_signature(&mut self, sig: &Signature, logger: &L) -> Result where L::Target: Logger { let funding_script = self.context.get_funding_redeemscript(); - let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let keys = self.context.build_holder_transaction_keys(); let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; let trusted_tx = initial_commitment_tx.trust(); let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); @@ -7358,7 +7736,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); self.context.channel_id 
= funding_txo.to_channel_id(); self.context.cur_counterparty_commitment_transaction_number -= 1; - self.context.cur_holder_commitment_transaction_number -= 1; + self.context.advance_holder_per_commitment_point(logger); let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger); @@ -7390,7 +7768,9 @@ impl InboundV1Channel where SP::Target: SignerProvider { let mut channel = Channel { context: self.context, }; - let need_channel_ready = channel.check_get_channel_ready(0).is_some(); + + let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some(); + log_trace!(logger, "funding_created {} channel_ready", if need_channel_ready { "needs" } else { "does not need" }); channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); Ok((channel, funding_signed, channel_monitor)) @@ -7780,6 +8160,8 @@ impl Writeable for Channel where SP::Target: SignerProvider { (39, pending_outbound_blinding_points, optional_vec), (41, holding_cell_blinding_points, optional_vec), (43, malformed_htlcs, optional_vec), // Added in 0.0.119 + (45, self.context.cur_holder_commitment_point, required), + (47, self.context.next_holder_commitment_point, option), }); Ok(()) @@ -8071,6 +8453,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let mut holding_cell_blinding_points_opt: Option>> = None; let mut malformed_htlcs: Option> = None; + let mut cur_holder_commitment_point_opt: Option = None; + let mut next_holder_commitment_point: Option = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -8101,8 +8485,13 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch (39, pending_outbound_blinding_points_opt, optional_vec), (41, holding_cell_blinding_points_opt, optional_vec), (43, malformed_htlcs, optional_vec), // Added in 0.0.119 + (45, cur_holder_commitment_point_opt, option), + (47, next_holder_commitment_point, option), }); + let mut secp_ctx = Secp256k1::new(); + secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); + let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id { let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id); // If we've gotten to the funding stage of the channel, populate the signer with its @@ -8118,6 +8507,17 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch (holder_signer.channel_keys_id(), holder_signer) }; + // If we're restoring this channel for the first time after an upgrade, then we require that the + // signer be available so that we can immediately populate the current commitment point. Channel + // restoration will fail if this is not possible. + let cur_holder_commitment_point = match cur_holder_commitment_point_opt { + Some(point) => point, + None => { + holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx) + .map_err(|_| DecodeError::Io(io::ErrorKind::NotFound))? + } + }; + if let Some(preimages) = preimages_opt { let mut iter = preimages.into_iter(); for htlc in pending_outbound_htlcs.iter_mut() { @@ -8148,9 +8548,6 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch // To account for that, we're proactively setting/overriding the field here. 
channel_parameters.channel_type_features = chan_features.clone(); - let mut secp_ctx = Secp256k1::new(); - secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); - // `user_id` used to be a single u64 value. In order to remain backwards // compatible with versions prior to 0.0.113, the u128 is serialized as two // separate u64 values. @@ -8237,6 +8634,9 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch destination_script, cur_holder_commitment_transaction_number, + cur_holder_commitment_point, + next_holder_commitment_point, + prev_holder_commitment_secret: None, cur_counterparty_commitment_transaction_number, value_to_self_msat, @@ -8255,7 +8655,11 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(), signer_pending_commitment_update: false, + signer_pending_revoke_and_ack: false, signer_pending_funding: false, + signer_pending_channel_ready: false, + signer_pending_commitment_point: true, + signer_pending_released_secret: true, pending_update_fee, holding_cell_update_fee, @@ -9070,6 +9474,7 @@ mod tests { let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint; let per_commitment_secret = SecretKey::from_slice(&>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap(); let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret); + chan.context.cur_holder_commitment_point = per_commitment_point; let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint; let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint); @@ -10076,6 +10481,6 @@ mod tests { // Clear the ChannelState::WaitingForBatch only when called by ChannelManager. node_a_chan.set_batch_ready(); assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)); - assert!(node_a_chan.check_get_channel_ready(0).is_some()); + assert!(node_a_chan.check_get_channel_ready(0, &&logger).is_some()); } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ec3e8ef604b..994341cc02f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -646,7 +646,7 @@ pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100; /// be sent in the order they appear in the return value, however sometimes the order needs to be /// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order /// they were originally sent). In those cases, this enum is also returned. -#[derive(Clone, PartialEq)] +#[derive(Clone, Debug, PartialEq)] pub(super) enum RAACommitmentOrder { /// Send the CommitmentUpdate messages first CommitmentFirst, @@ -5912,6 +5912,11 @@ where emit_channel_ready_event!(pending_events, channel); } + log_debug!(self.logger, "Outgoing message queue is{}", if pending_msg_events.is_empty() { " empty" } else { "..." 
}); + for msg in pending_msg_events.iter() { + log_debug!(self.logger, " {:?}", msg); + } + htlc_forwards } @@ -6310,6 +6315,12 @@ where let logger = WithChannelContext::from(&self.logger, &inbound_chan.context); match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) { Ok(res) => res, + Err((inbound_chan, ChannelError::Ignore(_))) => { + // If we get an `Ignore` error then something transient went wrong. Put the channel + // back into the table and bail. + peer_state.channel_by_id.insert(msg.temporary_channel_id, ChannelPhase::UnfundedInboundV1(inbound_chan)); + return Ok(()); + }, Err((inbound_chan, err)) => { // We've already removed this inbound channel from the map in `PeerState` // above so at this point we just need to clean up any lingering entries @@ -6464,6 +6475,7 @@ where hash_map::Entry::Occupied(mut chan_phase_entry) => { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context); + log_debug!(logger, "<== channel_ready"); let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer, self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry); if let Some(announcement_sigs) = announcement_sigs_opt { @@ -6699,6 +6711,7 @@ where } }; let logger = WithChannelContext::from(&self.logger, &chan.context); + log_debug!(logger, "<== update_add_htlc: htlc_id={} amount_msat={}", msg.htlc_id, msg.amount_msat); try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry); } else { return try_chan_phase_entry!(self, Err(ChannelError::Close( @@ -6724,6 +6737,7 @@ where match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + log_debug!(self.logger, "<== update_fulfill_htlc: htlc_id={}", msg.htlc_id); let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry); if let HTLCSource::PreviousHopData(prev_hop) = &res.0 { let logger = WithChannelContext::from(&self.logger, &chan.context); @@ -6822,6 +6836,7 @@ where if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context); let funding_txo = chan.context.get_funding_txo(); + log_debug!(logger, "<== commitment_signed: {} htlcs", msg.htlc_signatures.len()); let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry); if let Some(monitor_update) = monitor_update_opt { handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock, @@ -6996,6 +7011,7 @@ where &peer_state.actions_blocking_raa_monitor_updates, funding_txo, *counterparty_node_id) } else { false }; + log_debug!(self.logger, "<== revoke_and_ack"); let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self, chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_phase_entry); if let Some(monitor_update) = monitor_update_opt { @@ -7146,6 +7162,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_phase_entry) => { + log_debug!(logger, "<== channel_reestablish"); if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { // Currently, we expect all holding cell update_adds to be dropped on peer // 
disconnect, so Channel's reestablish will never hand us any holding cell @@ -7376,14 +7393,12 @@ where let node_id = phase.context().get_counterparty_node_id(); match phase { ChannelPhase::Funded(chan) => { - let msgs = chan.signer_maybe_unblocked(&self.logger); - if let Some(updates) = msgs.commitment_update { - pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id, - updates, - }); - } + let logger = WithChannelContext::from(&self.logger, &chan.context); + let msgs = chan.signer_maybe_unblocked(&&logger); + + // Note that the order in which we enqueue the messages is significant! if let Some(msg) = msgs.funding_signed { + log_trace!(logger, "Queuing funding_signed to {}", node_id); pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { node_id, msg, @@ -7392,6 +7407,29 @@ where if let Some(msg) = msgs.channel_ready { send_channel_ready!(self, pending_msg_events, chan, msg); } + match (msgs.commitment_update, msgs.raa) { + (Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => { + log_trace!(logger, "Queuing update_htlcs to {}", node_id); + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id, updates: cu }); + log_trace!(logger, "Queuing revoke_and_ack to {}", node_id); + pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id, msg: raa }); + }, + (Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => { + log_trace!(logger, "Queuing revoke_and_ack to {}", node_id); + pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id, msg: raa }); + log_trace!(logger, "Queuing update_htlcs to {}", node_id); + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id, updates: cu }); + }, + (Some(cu), _) => { + log_trace!(logger, "Queuing update_htlcs to {}", node_id); + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id, updates: cu }) + }, + (_, Some(raa)) => { + log_trace!(logger, "Queuing revoke_and_ack to {}", node_id); + pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id, msg: raa }) + }, + (_, _) => (), + }; } ChannelPhase::UnfundedOutboundV1(chan) => { if let Some(msg) = chan.signer_maybe_unblocked(&self.logger) { @@ -10363,6 +10401,7 @@ where log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}", &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(), monitor.get_latest_update_id()); + channel.context.request_next_holder_per_commitment_point(&args.logger); if let Some(short_channel_id) = channel.context.get_short_channel_id() { short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 8df84000c26..3c862f63578 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -34,6 +34,8 @@ use crate::util::test_channel_signer::TestChannelSigner; use crate::util::test_utils; use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface}; use crate::util::ser::{ReadableArgs, Writeable}; +#[cfg(test)] +use crate::util::test_channel_signer::ops; use bitcoin::blockdata::block::{Block, Header, Version}; use bitcoin::blockdata::locktime::absolute::LockTime; @@ -479,14 +481,14 @@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> { pub fn get_block_header(&self, height: u32) -> Header { 
self.blocks.lock().unwrap()[height as usize].0.header } + /// Changes the channel signer's availability for the specified peer and channel. /// /// When `available` is set to `true`, the channel signer will behave normally. When set to /// `false`, the channel signer will act like an off-line remote signer and will return `Err` for - /// several of the signing methods. Currently, only `get_per_commitment_point` and - /// `release_commitment_secret` are affected by this setting. + /// several of the signing methods. #[cfg(test)] - pub fn set_channel_signer_available(&self, peer_id: &PublicKey, chan_id: &ChannelId, available: bool) { + pub fn set_channel_signer_ops_available(&self, peer_id: &PublicKey, chan_id: &ChannelId, mask: u32, available: bool) { let per_peer_state = self.node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(peer_id).unwrap().lock().unwrap(); let signer = (|| { @@ -495,8 +497,21 @@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> { None => panic!("Couldn't find a channel with id {}", chan_id), } })(); - log_debug!(self.logger, "Setting channel signer for {} as available={}", chan_id, available); - signer.as_ecdsa().unwrap().set_available(available); + log_debug!(self.logger, "Setting channel signer for {} as {}available for {} (mask={})", + chan_id, if available { "" } else { "un" }, ops::string_from(mask), mask); + signer.as_ecdsa().unwrap().set_ops_available(mask, available); + } + + /// Removes signature material from the channel context. + #[cfg(test)] + pub fn forget_signer_material(&mut self, peer_id: &PublicKey, chan_id: &ChannelId) { + let mut per_peer_state = self.node.per_peer_state.write().unwrap(); + let mut chan_lock = per_peer_state.get_mut(peer_id).unwrap().lock().unwrap(); + match chan_lock.channel_by_id.get_mut(chan_id) { + Some(ref mut phase) => phase.context_mut().forget_signer_material(), + None => panic!("Couldn't find a channel with id {}", chan_id), + } + log_debug!(self.logger, "Forgetting signing material for {}", chan_id); } } @@ -3231,7 +3246,7 @@ macro_rules! get_chan_reestablish_msgs { assert_eq!(*node_id, $dst_node.node.get_our_node_id()); announcements.insert(msg.contents.short_channel_id); } else { - panic!("Unexpected event") + panic!("Unexpected event re-establishing channel, {:?}", msg) } } assert!(announcements.is_empty()); @@ -3287,6 +3302,13 @@ macro_rules! handle_chan_reestablish_msgs { RAACommitmentOrder::CommitmentFirst }; + if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, .. }) = msg_events.get(idx) { + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + idx += 1; + assert!(!had_channel_update); + had_channel_update = true; + } + if let Some(ev) = msg_events.get(idx) { match ev { &MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { @@ -3437,7 +3459,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { } else { panic!("Unexpected event! 
{:?}", announcement_event[0]); } } } else { - assert!(chan_msgs.0.is_none()); + assert!(chan_msgs.0.is_none(), "did not expect to have a ChannelReady for node 1"); } if pending_raa.0 { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); @@ -3445,7 +3467,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_a, 1); } else { - assert!(chan_msgs.1.is_none()); + assert!(chan_msgs.1.is_none(), "did not expect to have a RevokeAndACK for node 1"); } if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 || @@ -3478,7 +3500,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { check_added_monitors!(node_b, if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 }); } } else { - assert!(chan_msgs.2.is_none()); + assert!(chan_msgs.2.is_none(), "did not expect to have commitment updates for node 1"); } } @@ -3495,7 +3517,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { } } } else { - assert!(chan_msgs.0.is_none()); + assert!(chan_msgs.0.is_none(), "did not expect to have a ChannelReady for node 2"); } if pending_raa.1 { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); @@ -3503,7 +3525,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_b, 1); } else { - assert!(chan_msgs.1.is_none()); + assert!(chan_msgs.1.is_none(), "did not expect to have a RevokeAndACK for node 2"); } if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 || @@ -3536,7 +3558,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { check_added_monitors!(node_a, if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 }); } } else { - assert!(chan_msgs.2.is_none()); + assert!(chan_msgs.2.is_none(), "did not expect to have commitment updates for node 2"); } } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index be9bfb81f25..6802407d133 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -720,7 +720,7 @@ fn test_update_fee_that_funder_cannot_afford() { let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap(), pubkeys.funding_pubkey) }; @@ -1442,8 +1442,8 @@ fn test_fee_spike_violation_fails_htlc() { let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, - chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER), - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx), + chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx).unwrap(), chan_signer.as_ref().pubkeys().funding_pubkey) }; let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { @@ 
-1455,7 +1455,7 @@ fn test_fee_spike_violation_fails_htlc() { let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap(), chan_signer.as_ref().pubkeys().funding_pubkey) }; @@ -7792,15 +7792,15 @@ fn test_counterparty_raa_skip_no_crash() { // Make signer believe we got a counterparty signature, so that it allows the revocation keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; - per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER); + per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(); // Must revoke without gaps keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; - keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1); + keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("unable to release commitment secret"); keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(), - &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap()); + &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2).unwrap()).unwrap()); } nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index e2a89d8485f..d4c29848c55 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -574,7 +574,10 @@ pub trait ChannelSigner { /// Gets the per-commitment point for a specific commitment number /// /// Note that the commitment number starts at `(1 << 48) - 1` and counts backwards. - fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicKey; + /// + /// If the signer returns `Err`, then the user is responsible for either force-closing the channel + /// or retrying once the requested value is available. + fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<PublicKey, ()>; /// Gets the commitment secret for a specific commitment number as part of the revocation process /// /// /// /// Note that the commitment number starts at `(1 << 48) - 1` and counts backwards. // TODO: return a Result so we can signal a validation error - fn release_commitment_secret(&self, idx: u64) -> [u8; 32]; + fn release_commitment_secret(&self, idx: u64) -> Result<[u8; 32], ()>; /// Validate the counterparty's signatures on the holder commitment transaction and HTLCs. ///
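To make the new fallible contract concrete, here is a minimal sketch (not part of this patch) of a `ChannelSigner` implementation backed by a remote signer; `self.cache` is a hypothetical `Mutex<HashMap<u64, PublicKey>>` that the remote signer fills in asynchronously:

    fn get_per_commitment_point(&self, idx: u64, _secp_ctx: &Secp256k1<secp256k1::All>) -> Result<PublicKey, ()> {
        // Hand back the point if the remote signer has already delivered it;
        // otherwise report unavailability so the channel pauses cleanly.
        self.cache.lock().unwrap().get(&idx).copied().ok_or(())
    }

Once the value arrives, the application calls `ChannelManager::signer_unblocked` (e.g. `node.signer_unblocked(None)`, as the tests below do) and the channel retries the pending operation.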
@@ -1069,13 +1072,13 @@ impl EntropySource for InMemorySigner { } impl ChannelSigner for InMemorySigner { - fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicKey { + fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<PublicKey, ()> { let commitment_secret = SecretKey::from_slice(&chan_utils::build_commitment_secret(&self.commitment_seed, idx)).unwrap(); - PublicKey::from_secret_key(secp_ctx, &commitment_secret) + Ok(PublicKey::from_secret_key(secp_ctx, &commitment_secret)) } - fn release_commitment_secret(&self, idx: u64) -> [u8; 32] { - chan_utils::build_commitment_secret(&self.commitment_seed, idx) + fn release_commitment_secret(&self, idx: u64) -> Result<[u8; 32], ()> { + Ok(chan_utils::build_commitment_secret(&self.commitment_seed, idx)) } fn validate_holder_commitment(&self, _holder_tx: &HolderCommitmentTransaction, _outbound_htlc_preimages: Vec<PaymentPreimage>) -> Result<(), ()> { diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs index a2cbf78b700..e64d67c00a8 100644 --- a/lightning/src/util/test_channel_signer.rs +++ b/lightning/src/util/test_channel_signer.rs @@ -68,9 +68,51 @@ pub struct TestChannelSigner { /// Channel state used for policy enforcement pub state: Arc<Mutex<EnforcementState>>, pub disable_revocation_policy_check: bool, - /// When `true` (the default), the signer will respond immediately with signatures. When `false`, - /// the signer will return an error indicating that it is unavailable. - pub available: Arc<Mutex<bool>>, +} + +/// Channel signer operations that can be individually enabled and disabled. If a particular value +/// is set in the `EnforcementState::unavailable_signer_ops` bitmask, then that operation will return an +/// error. +pub mod ops { + pub const GET_PER_COMMITMENT_POINT: u32 = 1 << 0; + pub const RELEASE_COMMITMENT_SECRET: u32 = 1 << 1; + pub const VALIDATE_HOLDER_COMMITMENT: u32 = 1 << 2; + pub const SIGN_COUNTERPARTY_COMMITMENT: u32 = 1 << 3; + pub const VALIDATE_COUNTERPARTY_REVOCATION: u32 = 1 << 4; + pub const SIGN_HOLDER_COMMITMENT_AND_HTLCS: u32 = 1 << 5; + pub const SIGN_JUSTICE_REVOKED_OUTPUT: u32 = 1 << 6; + pub const SIGN_JUSTICE_REVOKED_HTLC: u32 = 1 << 7; + pub const SIGN_HOLDER_HTLC_TRANSACTION: u32 = 1 << 8; + pub const SIGN_COUNTERPARTY_HTLC_TRANSACTION: u32 = 1 << 9; + pub const SIGN_CLOSING_TRANSACTION: u32 = 1 << 10; + pub const SIGN_HOLDER_ANCHOR_INPUT: u32 = 1 << 11; + pub const SIGN_CHANNEL_ANNOUNCEMENT_WITH_FUNDING_KEY: u32 = 1 << 12; + + #[cfg(test)] + pub fn string_from(mask: u32) -> String { + if mask == 0 { + return "nothing".to_owned(); + } + if mask == !(0 as u32) { + return "everything".to_owned(); + } + + vec![ + if (mask & GET_PER_COMMITMENT_POINT) != 0 { Some("get_per_commitment_point") } else { None }, + if (mask & RELEASE_COMMITMENT_SECRET) != 0 { Some("release_commitment_secret") } else { None }, + if (mask & VALIDATE_HOLDER_COMMITMENT) != 0 { Some("validate_holder_commitment") } else { None }, + if (mask & SIGN_COUNTERPARTY_COMMITMENT) != 0 { Some("sign_counterparty_commitment") } else { None }, + if (mask & VALIDATE_COUNTERPARTY_REVOCATION) != 0 { Some("validate_counterparty_revocation") } else { None }, + if (mask & SIGN_HOLDER_COMMITMENT_AND_HTLCS) != 0 { Some("sign_holder_commitment_and_htlcs") } else { None }, + if (mask & SIGN_JUSTICE_REVOKED_OUTPUT) != 0 { Some("sign_justice_revoked_output") } else { None }, + if (mask & SIGN_JUSTICE_REVOKED_HTLC) != 0 { Some("sign_justice_revoked_htlc") } else { None }, + if (mask & SIGN_HOLDER_HTLC_TRANSACTION) != 0 {
Some("sign_holder_htlc_transaction") } else { None }, + if (mask & SIGN_COUNTERPARTY_HTLC_TRANSATION) != 0 { Some("sign_counterparty_htlc_transation") } else { None }, + if (mask & SIGN_CLOSING_TRANSACTION) != 0 { Some("sign_closing_transaction") } else { None }, + if (mask & SIGN_HOLDER_ANCHOR_INPUT) != 0 { Some("sign_holder_anchor_input") } else { None }, + if (mask & SIGN_CHANNEL_ANNOUNCMENT_WITH_FUNDING_KEY) != 0 { Some("sign_channel_announcment_with_funding_key") } else { None }, + ].iter().flatten().map(|s| s.to_string()).collect::>().join(", ") + } } impl PartialEq for TestChannelSigner { @@ -87,7 +129,6 @@ impl TestChannelSigner { inner, state, disable_revocation_policy_check: false, - available: Arc::new(Mutex::new(true)), } } @@ -101,7 +142,6 @@ impl TestChannelSigner { inner, state, disable_revocation_policy_check, - available: Arc::new(Mutex::new(true)), } } @@ -113,22 +153,33 @@ impl TestChannelSigner { } /// Marks the signer's availability. - /// - /// When `true`, methods are forwarded to the underlying signer as normal. When `false`, some - /// methods will return `Err` indicating that the signer is unavailable. Intended to be used for - /// testing asynchronous signing. #[cfg(test)] - pub fn set_available(&self, available: bool) { - *self.available.lock().unwrap() = available; + pub fn set_ops_available(&self, mask: u32, available: bool) { + let mut state = self.get_enforcement_state(); + if available { + state.unavailable_signer_ops &= !mask; // clear the bits that are now available + } else { + state.unavailable_signer_ops |= mask; // set the bits that are now unavailable + } + } + + fn is_signer_available(&self, ops_mask: u32) -> bool { + self.state.lock().unwrap().is_signer_available(ops_mask) } } impl ChannelSigner for TestChannelSigner { - fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1) -> PublicKey { + fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1) -> Result { + if !self.is_signer_available(ops::GET_PER_COMMITMENT_POINT) { + return Err(()); + } self.inner.get_per_commitment_point(idx, secp_ctx) } - fn release_commitment_secret(&self, idx: u64) -> [u8; 32] { + fn release_commitment_secret(&self, idx: u64) -> Result<[u8; 32], ()> { + if !self.is_signer_available(ops::RELEASE_COMMITMENT_SECRET) { + return Err(()); + } { let mut state = self.state.lock().unwrap(); assert!(idx == state.last_holder_revoked_commitment || idx == state.last_holder_revoked_commitment - 1, "can only revoke the current or next unrevoked commitment - trying {}, last revoked {}", idx, state.last_holder_revoked_commitment); @@ -139,6 +190,9 @@ impl ChannelSigner for TestChannelSigner { } fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _outbound_htlc_preimages: Vec) -> Result<(), ()> { + if !self.is_signer_available(ops::VALIDATE_HOLDER_COMMITMENT) { + return Err(()); + } let mut state = self.state.lock().unwrap(); let idx = holder_tx.commitment_number(); assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, "expecting to validate the current or next holder commitment - trying {}, current {}", idx, state.last_holder_commitment); @@ -147,7 +201,7 @@ impl ChannelSigner for TestChannelSigner { } fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> { - if !*self.available.lock().unwrap() { + if !self.is_signer_available(ops::VALIDATE_COUNTERPARTY_REVOCATION) { return Err(()); } let mut state = self.state.lock().unwrap(); @@ -170,7 +224,7 @@ impl 
@@ -170,7 +224,7 @@ impl EcdsaChannelSigner for TestChannelSigner { self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx); { - if !*self.available.lock().unwrap() { + if !self.is_signer_available(ops::SIGN_COUNTERPARTY_COMMITMENT) { return Err(()); } let mut state = self.state.lock().unwrap(); @@ -189,7 +243,7 @@ impl EcdsaChannelSigner for TestChannelSigner { } fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> { - if !*self.available.lock().unwrap() { + if !self.is_signer_available(ops::SIGN_HOLDER_COMMITMENT_AND_HTLCS) { return Err(()); } let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx); @@ -362,6 +416,10 @@ pub struct EnforcementState { pub last_holder_revoked_commitment: u64, /// The last validated holder commitment number, backwards counting pub last_holder_commitment: u64, + /// A bitmask that indicates which signing operations are currently *not* available in the + /// channel. When a method's bit is set, the signer will act as if the signature is + /// unavailable and return an error result. + pub unavailable_signer_ops: u32, } impl EnforcementState { @@ -372,6 +430,19 @@ impl EnforcementState { last_counterparty_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER, last_holder_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER, last_holder_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER, + unavailable_signer_ops: 0, } } + + pub fn set_signer_available(&mut self, ops_mask: u32) { + self.unavailable_signer_ops &= !ops_mask; // clear the bits that are now available + } + + pub fn set_signer_unavailable(&mut self, ops_mask: u32) { + self.unavailable_signer_ops |= ops_mask; // set the bits that are now unavailable + } + + pub fn is_signer_available(&self, ops_mask: u32) -> bool { + (self.unavailable_signer_ops & ops_mask) == 0 + } } From 8db91d7a71fa3fbbd2834455eaa3f5db26b52d9f Mon Sep 17 00:00:00 2001 From: Chris Waterson Date: Fri, 26 Jan 2024 16:10:07 -0800 Subject: [PATCH 2/2] Allow async sigs during channel setup Introduces an explicit `HolderCommitment` type to manage the current and next commitment points. --- lightning/src/ln/async_signer_tests.rs | 52 ++- lightning/src/ln/channel.rs | 484 +++++++++++++--------- lightning/src/ln/channelmanager.rs | 95 +++-- lightning/src/ln/functional_tests.rs | 3 +- lightning/src/util/test_channel_signer.rs | 31 +- 5 files changed, 436 insertions(+), 229 deletions(-) diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 91d9c0e4165..ca0f5cf08ed 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -18,7 +18,7 @@ use crate::ln::msgs; use crate::ln::msgs::ChannelMessageHandler; use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; use crate::util::ser::Writeable; -use crate::util::test_channel_signer::ops; +use crate::util::test_channel_signer::{EnforcementState, ops}; use crate::util::test_utils; /// Helper to run operations with a simulated asynchronous signer.
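Note the whole-mask semantics of the `EnforcementState` helpers introduced above: `is_signer_available` returns `true` only when every operation named in the queried mask is available. A small sketch:

    let mut state = EnforcementState::new();
    state.set_signer_unavailable(ops::RELEASE_COMMITMENT_SECRET);
    // An op whose bit is clear still reads as available...
    assert!(state.is_signer_available(ops::GET_PER_COMMITMENT_POINT));
    // ...but any mask containing an unavailable bit reads as unavailable as a whole.
    assert!(!state.is_signer_available(ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET));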
@@ -49,6 +49,56 @@ pub fn with_async_signer<'a, DoFn, T>(node: &Node, peer_id: &PublicKey, channel_ res } +#[cfg(feature = "std")] +#[test] +fn test_open_channel() { + // Simulate acquiring the first per-commitment points for `open_channel` and `accept_channel` asynchronously. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + // Open an outbound channel simulating an async signer. + let channel_id_0 = EnforcementState::with_default_unavailable( + ops::GET_PER_COMMITMENT_POINT, + || nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None) + ).expect("Failed to create channel"); + + { + let msgs = nodes[0].node.get_and_clear_pending_msg_events(); + assert!(msgs.is_empty(), "Expected no message events; got {:?}", msgs); + } + + nodes[0].set_channel_signer_ops_available(&nodes[1].node.get_our_node_id(), &channel_id_0, ops::GET_PER_COMMITMENT_POINT, true); + nodes[0].node.signer_unblocked(None); + + // nodes[0] --- open_channel --> nodes[1] + let open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + + // Handle an inbound channel simulating an async signer. + EnforcementState::with_default_unavailable( + ops::GET_PER_COMMITMENT_POINT, + || nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg) + ); + + { + let msgs = nodes[1].node.get_and_clear_pending_msg_events(); + assert!(msgs.is_empty(), "Expected no message events; got {:?}", msgs); + } + + let channel_id_1 = { + let channels = nodes[1].node.list_channels(); + assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len()); + channels[0].channel_id + }; + + nodes[1].set_channel_signer_ops_available(&nodes[0].node.get_our_node_id(), &channel_id_1, ops::GET_PER_COMMITMENT_POINT, true); + nodes[1].node.signer_unblocked(None); + + // nodes[0] <-- accept_channel --- nodes[1] + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); +} + #[cfg(test)] fn do_test_funding_created(masks: &Vec<u32>) { // Simulate acquiring the signature for `funding_created` asynchronously. diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index f0779cd567d..1b3874fb553 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -831,6 +831,17 @@ impl Default for SignerResumeUpdates { } } +#[allow(unused)] +pub(super) struct UnfundedInboundV1SignerResumeUpdates { + pub accept_channel: Option<msgs::AcceptChannel>, +} + +#[allow(unused)] +pub(super) struct UnfundedOutboundV1SignerResumeUpdates { + pub open_channel: Option<msgs::OpenChannel>, + pub funding_created: Option<msgs::FundingCreated>, +} + /// The return value of `channel_reestablish` pub(super) struct ReestablishResponses { pub channel_ready: Option<msgs::ChannelReady>, @@ -981,6 +992,91 @@ impl UnfundedChannelContext { } } +#[derive(Debug, Copy, Clone)] +enum HolderCommitment { + Uninitialized { transaction_number: u64 }, + PendingNext { transaction_number: u64, current: PublicKey }, + Available { transaction_number: u64, current: PublicKey, next: PublicKey }, +}
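    // The enum above is a small state machine over how many per-commitment
    // points the signer has produced so far. Schematically (a sketch of the
    // intended transitions, using the methods defined just below):
    //
    //   Uninitialized { n }            --request_next--> PendingNext { n, current }
    //   PendingNext { n, current }     --request_next--> Available { n, current, next }
    //   Available { n, current, next } --advance-------> PendingNext { n - 1, current: next }
    //
    // If the signer returns Err(()) at any step, the state is left unchanged and
    // request_next() is simply retried later, e.g. from signer_maybe_unblocked().
    // Assuming `holder_signer`, `secp_ctx` and `logger` in scope:
    //
    //   let hc = HolderCommitment::initial();
    //   assert!(hc.is_uninitialized());
    //   let hc = hc.request_next(&holder_signer, &secp_ctx, &logger); // safe to call repeatedly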
+ +impl HolderCommitment { + pub fn initial() -> Self { + HolderCommitment::Uninitialized { transaction_number: INITIAL_COMMITMENT_NUMBER } + } + + pub fn is_available(&self) -> bool { + if let HolderCommitment::Available { .. } = self { true } else { false } + } + + pub fn is_uninitialized(&self) -> bool { + if let HolderCommitment::Uninitialized { .. } = self { true } else { false } + } + + pub fn transaction_number(&self) -> u64 { + match self { + HolderCommitment::Uninitialized { transaction_number } => *transaction_number, + HolderCommitment::PendingNext { transaction_number, .. } => *transaction_number, + HolderCommitment::Available { transaction_number, .. } => *transaction_number, + } + } + + pub fn current_point(&self) -> PublicKey { + match self { + HolderCommitment::Uninitialized { .. } => panic!("holder commitment is uninitialized"), + HolderCommitment::PendingNext { current, .. } => *current, + HolderCommitment::Available { current, .. } => *current, + } + } + + pub fn next_point(&self) -> Option<PublicKey> { + match self { + HolderCommitment::Uninitialized { .. } => panic!("holder commitment is uninitialized"), + HolderCommitment::PendingNext { .. } => None, + HolderCommitment::Available { next, .. } => Some(*next), + } + } + + pub fn advance<SP: Deref, L: Deref>(&self, signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>, logger: &L) -> Self + where SP::Target: SignerProvider, L::Target: Logger + { + match self { + HolderCommitment::Available { transaction_number, next, .. } => { + HolderCommitment::PendingNext { + transaction_number: transaction_number - 1, + current: *next, + }.request_next(signer, secp_ctx, logger) + } + _ => panic!("Cannot advance holder commitment; there is no next point available") + } + } + + pub fn request_next<SP: Deref, L: Deref>(&self, signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>, logger: &L) -> Self + where SP::Target: SignerProvider, L::Target: Logger + { + match self { + HolderCommitment::Uninitialized { transaction_number } => { + if let Ok(current) = signer.as_ref().get_per_commitment_point(*transaction_number, secp_ctx) { + log_trace!(logger, "Retrieved current per-commitment point {}", transaction_number); + HolderCommitment::PendingNext { transaction_number: *transaction_number, current }.request_next(signer, secp_ctx, logger) + } else { + log_trace!(logger, "Current per-commitment point {} is pending", transaction_number); + *self + } + } + HolderCommitment::PendingNext { transaction_number, current } => { + if let Ok(next) = signer.as_ref().get_per_commitment_point(transaction_number - 1, secp_ctx) { + log_trace!(logger, "Retrieved next per-commitment point {}", transaction_number - 1); + HolderCommitment::Available { transaction_number: *transaction_number, current: *current, next } + } else { + log_trace!(logger, "Next per-commitment point {} is pending", transaction_number); + *self + } + } + HolderCommitment::Available { .. } => *self, + } + } +} + /// Contains everything about the channel including state, and various flags. pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider { config: LegacyChannelConfig, @@ -1026,15 +1122,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider { // generation start at 0 and count up...this simplifies some parts of implementation at the // cost of others, but should really just be changed. - cur_holder_commitment_transaction_number: u64, - - // The commitment point corresponding to `cur_holder_commitment_transaction_number`, which is the - // next state. - cur_holder_commitment_point: PublicKey, - // The commitment point corresponding to `cur_holder_commitment_transaction_number - 1`, which is - // the state after the next state. This may be `None` if we are pending retrieval of the - // commitment point from the signer. - next_holder_commitment_point: Option<PublicKey>, + holder_commitment: HolderCommitment, // The commitment secret corresponding to `cur_holder_commitment_transaction_number + 2`, which is // the previous state.
prev_holder_commitment_secret: Option<[u8; 32]>, @@ -1082,9 +1170,6 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send a /// [`msgs::ChannelReady`]. signer_pending_channel_ready: bool, - /// If we attempted to retrieve the per-commitment point for the next transaction but the signer - /// wasn't ready, then this will be set to `true`. - signer_pending_commitment_point: bool, /// If we attempted to release the per-commitment secret for the previous transaction but the /// signer wasn't ready, then this will be set to `true`. signer_pending_released_secret: bool, @@ -1580,58 +1665,21 @@ impl ChannelContext where SP::Target: SignerProvider { self.channel_ready_event_emitted = true; } - /// Caches the next commitment point; i.e., the point at - /// `cur_holder_commitment_transaction_number-1`. - pub fn request_next_holder_per_commitment_point(&mut self, logger: &L) where L::Target: Logger - { - if self.next_holder_commitment_point.is_some() { - return; - } - - let transaction_number = self.cur_holder_commitment_transaction_number - 1; - let signer = self.holder_signer.as_ref(); - - log_trace!(logger, "Retrieving commitment point for {} transaction number {}", self.channel_id(), transaction_number); - self.next_holder_commitment_point = match signer.get_per_commitment_point(transaction_number, &self.secp_ctx) { - Ok(point) => { - if self.signer_pending_commitment_point { - log_trace!(logger, "Commitment point for {} transaction number {} retrieved; clearing signer_pending_commitment_point", - self.channel_id(), transaction_number); - self.signer_pending_commitment_point = false; - } - Some(point) - } + pub fn advance_holder_commitment(&mut self, logger: &L) where L::Target: Logger { + self.holder_commitment = self.holder_commitment.advance(&self.holder_signer, &self.secp_ctx, logger); + } - Err(_) => { - if !self.signer_pending_commitment_point { - log_trace!(logger, "Commitment point for {} transaction number {} is not available; setting signer_pending_commitment_point", - self.channel_id(), transaction_number); - self.signer_pending_commitment_point = true; - } - None - } - }; + pub fn request_next_holder_commitment(&mut self, logger: &L) where L::Target: Logger { + self.holder_commitment = self.holder_commitment.request_next(&self.holder_signer, &self.secp_ctx, logger); } - /// Advance the current commitment point by moving the cached `next_holder_commitment_point` into - /// `cur_holder_commitment_point`. - /// - /// It is necessary that `next_holder_commitment_point` have a value (i.e., not be `None`): - /// failure to honor this constraint likely indicates that we've provided a revoke-and-ack to the - /// counterparty before being able to retrieve the next commitment point from an asynchronous - /// signer. 
- fn advance_holder_per_commitment_point(&mut self, logger: &L) where L::Target: Logger - { - self.cur_holder_commitment_transaction_number -= 1; - log_trace!(logger, "Advancing commitment point for {} transaction number {}", self.channel_id(), self.cur_holder_commitment_transaction_number); - self.cur_holder_commitment_point = self.next_holder_commitment_point.take() - .expect("There is no next holder_commitment point"); - self.request_next_holder_per_commitment_point(logger) + pub fn is_holder_commitment_uninitialized(&self) -> bool { + self.holder_commitment.is_uninitialized() } pub fn update_holder_commitment_secret(&mut self, logger: &L) where L::Target: Logger { - let transaction_number = self.cur_holder_commitment_transaction_number; + let transaction_number = self.holder_commitment.transaction_number(); let signer = self.holder_signer.as_ref(); let releasing_transaction_number = transaction_number + 2; @@ -1661,7 +1709,7 @@ impl ChannelContext where SP::Target: SignerProvider { #[cfg(test)] pub fn forget_signer_material(&mut self) { - self.next_holder_commitment_point = None; + self.holder_commitment = HolderCommitment::Uninitialized { transaction_number: self.holder_commitment.transaction_number() }; self.prev_holder_commitment_secret = None; } @@ -1963,7 +2011,7 @@ impl ChannelContext where SP::Target: SignerProvider { /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction) /// TODO Some magic rust shit to compile-time check this? fn build_holder_transaction_keys(&self) -> TxCreationKeys { - let per_commitment_point = self.cur_holder_commitment_point; + let per_commitment_point = self.holder_commitment.current_point(); let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint; let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; let counterparty_pubkeys = self.get_counterparty_pubkeys(); @@ -3494,7 +3542,7 @@ impl Channel where let keys = self.context.build_holder_transaction_keys(); - let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger); + let commitment_stats = self.context.build_commitment_transaction(self.context.holder_commitment.transaction_number(), &keys, true, false, logger); let commitment_txid = { let trusted_tx = commitment_stats.tx.trust(); let bitcoin_tx = trusted_tx.built_transaction(); @@ -3657,7 +3705,7 @@ impl Channel where }; self.context.expecting_peer_commitment_signed = false; - self.context.advance_holder_per_commitment_point(logger); + self.context.advance_holder_commitment(logger); // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call // build_commitment_no_status_check() next which will reset this to RAAFirst. 
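The ordering constraint the new state machine preserves, as an illustrative sketch (not code from the patch; `ctx` stands in for a `ChannelContext`): we only advance after the signer has yielded the *next* point, so a revoke_and_ack can never commit us to a state we cannot continue from:

    if ctx.holder_commitment.is_available() {
        // Move `next` into `current`; the RAA we are about to send references
        // a point we actually hold.
        ctx.advance_holder_commitment(&logger);
    } else {
        // Hold the RAA back; signer_maybe_unblocked() will retry request_next()
        // and regenerate the message once the point arrives.
    }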
@@ -4157,7 +4205,7 @@ impl Channel where let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw)); let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw)); let keys = self.context.build_holder_transaction_keys(); - let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger); + let commitment_stats = self.context.build_commitment_transaction(self.context.holder_commitment.transaction_number(), &keys, true, true, logger); let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000; let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { @@ -4490,11 +4538,11 @@ impl Channel where #[cfg(async_signing)] pub fn signer_maybe_unblocked(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger { log_trace!(logger, "Signing unblocked in channel {} at sequence {}", - &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number); + &self.context.channel_id(), self.context.holder_commitment.transaction_number()); - if self.context.signer_pending_commitment_point { + if !self.context.holder_commitment.is_available() { log_trace!(logger, "Attempting to update holder per-commitment point..."); - self.context.request_next_holder_per_commitment_point(logger); + self.context.request_next_holder_commitment(logger); } if self.context.signer_pending_released_secret { @@ -4509,7 +4557,7 @@ impl Channel where // Provide a `channel_ready` message if we need to, but only if we're _not_ still pending // funding. - let channel_ready = if self.context.signer_pending_channel_ready && !self.context.signer_pending_funding { + let channel_ready = if self.context.signer_pending_channel_ready && !self.context.signer_pending_funding && self.context.holder_commitment.is_available() { log_trace!(logger, "Generating pending channel_ready..."); self.context.signer_pending_channel_ready = false; Some(self.get_channel_ready()) @@ -4567,7 +4615,7 @@ impl Channel where let order = self.context.resend_order.clone(); log_debug!(logger, "Signing unblocked in channel {} at sequence {} resulted in {} commitment update, {} RAA{}, {} funding_signed, {} channel_ready", - &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number, + &self.context.channel_id(), self.context.holder_commitment.transaction_number(), if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, if commitment_update.is_some() && raa.is_some() { @@ -4586,39 +4634,39 @@ impl Channel where } fn get_last_revoke_and_ack(&self, logger: &L) -> Option where L::Target: Logger { - assert!(self.context.cur_holder_commitment_transaction_number <= INITIAL_COMMITMENT_NUMBER + 2); - match (self.context.next_holder_commitment_point, self.context.prev_holder_commitment_secret) { - (Some(_), Some(per_commitment_secret)) => { + assert!(self.context.holder_commitment.transaction_number() <= INITIAL_COMMITMENT_NUMBER + 2); + match (self.context.holder_commitment, self.context.prev_holder_commitment_secret) { + (HolderCommitment::Available { current, .. 
}, Some(per_commitment_secret)) => { log_debug!(logger, "Regenerated last revoke-and-ack in channel {} for next per-commitment point sequence number {}, releasing secret for {}", - &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number, - self.context.cur_holder_commitment_transaction_number + 2); + &self.context.channel_id(), self.context.holder_commitment.transaction_number(), + self.context.holder_commitment.transaction_number() + 2); Some(msgs::RevokeAndACK { channel_id: self.context.channel_id, per_commitment_secret, - next_per_commitment_point: self.context.cur_holder_commitment_point, + next_per_commitment_point: current, #[cfg(taproot)] next_local_nonce: None, }) }, - (Some(_), None) => { + (HolderCommitment::Available { .. }, None) => { log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the secret for {} is not available", - &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number, - self.context.cur_holder_commitment_transaction_number + 2); + &self.context.channel_id(), self.context.holder_commitment.transaction_number(), + self.context.holder_commitment.transaction_number() + 2); None }, - (None, Some(_)) => { + (_, Some(_)) => { log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available", - &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number); + &self.context.channel_id(), self.context.holder_commitment.transaction_number()); None }, - (None, None) => { + (_, None) => { log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because neither the next per-commitment point nor the secret for {} is available", - &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number, - self.context.cur_holder_commitment_transaction_number + 2); + &self.context.channel_id(), self.context.holder_commitment.transaction_number(), + self.context.holder_commitment.transaction_number() + 2); None }, } @@ -4704,7 +4752,7 @@ impl Channel where } }; log_debug!(logger, "Regenerated latest commitment update in channel {} at {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", - &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number, if update_fee.is_some() { " update_fee," } else { "" }, + &self.context.channel_id(), self.context.holder_commitment.transaction_number(), if update_fee.is_some() { " update_fee," } else { "" }, update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); Ok(msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, @@ -4750,7 +4798,7 @@ impl Channel where return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned())); } - let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1; + let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment.transaction_number() - 1; if msg.next_remote_commitment_number > 0 { // TODO(waterson): figure out how to do this verification when an async signer is provided // with a (more or less) arbitrary state index. Should we require that an async signer cache @@ -4830,12 +4878,12 @@ impl Channel where // If they think we're behind by one state, then we owe them an RAA. 
We may or may not have that // RAA handy depending on the status of the remote signer and the monitor. - let steps_behind = (INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number) - (msg.next_remote_commitment_number + 1); + let steps_behind = (INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment.transaction_number()) - (msg.next_remote_commitment_number + 1); let raa = if steps_behind == 0 { // Remote isn't waiting on any RevokeAndACK from us! // Note that if we need to repeat our ChannelReady we'll do that in the next if block. None - } else if steps_behind == 1 { // msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number { + } else if steps_behind == 1 { // msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.holder_commitment.transaction_number() { if self.context.channel_state.is_monitor_update_in_progress() { self.context.monitor_pending_revoke_and_ack = true; None @@ -4874,7 +4922,7 @@ impl Channel where } let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 }; - let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 { + let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment.transaction_number() == 1 { // We should never have to worry about MonitorUpdateInProgress resending ChannelReady log_debug!(logger, "Reconnecting channel at state 1, re-sending channel_ready"); Some(self.get_channel_ready()) @@ -5431,7 +5479,7 @@ impl Channel where } pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 { - self.context.cur_holder_commitment_transaction_number + 1 + self.context.holder_commitment.transaction_number() + 1 } pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 { @@ -5526,7 +5574,7 @@ impl Channel where debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0); return true; } - if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 && + if self.context.holder_commitment.transaction_number() == INITIAL_COMMITMENT_NUMBER - 1 && self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 { // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while // waiting for the initial monitor persistence. Thus, we check if our commitment @@ -5660,9 +5708,10 @@ impl Channel where } fn get_channel_ready(&self) -> msgs::ChannelReady { + debug_assert!(self.context.holder_commitment.is_available()); msgs::ChannelReady { channel_id: self.context.channel_id(), - next_per_commitment_point: self.context.cur_holder_commitment_point, + next_per_commitment_point: self.context.holder_commitment.current_point(), short_channel_id_alias: Some(self.context.outbound_scid_alias), } } @@ -6100,7 +6149,7 @@ impl Channel where // next_local_commitment_number is the next commitment_signed number we expect to // receive (indicating if they need to resend one that we missed). 
- next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number, + next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment.transaction_number(), // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to // receive, however we track it by the next commitment number for a remote transaction // (which is one further, as they always revoke previous commitment transaction, not @@ -6551,16 +6600,18 @@ impl<SP: Deref> Channel<SP> where pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider { pub context: ChannelContext<SP>, pub unfunded_context: UnfundedChannelContext, + pub signer_pending_open_channel: bool, } impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider { - pub fn new<ES: Deref, F: Deref>( + pub fn new<ES: Deref, F: Deref, L: Deref>( fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, - outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId> + outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>, logger: &L, ) -> Result<OutboundV1Channel<SP>, APIError> where ES::Target: EntropySource, - F::Target: FeeEstimator + F::Target: FeeEstimator, + L::Target: Logger, { let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay; let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id); @@ -6625,16 +6676,8 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider { }; let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source)); - - // TODO(waterson): this assumes that the signer is available and will respond synchronously, two - // constraints we'd ideally like to remove. - let cur_holder_commitment_point = holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx) - .map_err(|_| APIError::ChannelUnavailable { err: "Failed to get initial per-commitment point".to_owned() })?; - - let next_holder_commitment_point = Some( - holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx) - .map_err(|_| APIError::ChannelUnavailable { err: "Failed to get second per-commitment point".to_owned() })?
- ); + let ecdsa_holder_signer = ChannelSignerType::Ecdsa(holder_signer); + let holder_commitment = HolderCommitment::initial().request_next(&ecdsa_holder_signer, &secp_ctx, logger); Ok(Self { context: ChannelContext { @@ -6659,13 +6702,11 @@ impl OutboundV1Channel where SP::Target: SignerProvider { latest_monitor_update_id: 0, - holder_signer: ChannelSignerType::Ecdsa(holder_signer), + holder_signer: ecdsa_holder_signer, shutdown_scriptpubkey, destination_script, - cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - cur_holder_commitment_point, - next_holder_commitment_point, + holder_commitment, prev_holder_commitment_secret: None, cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, value_to_self_msat, @@ -6692,7 +6733,6 @@ impl OutboundV1Channel where SP::Target: SignerProvider { signer_pending_revoke_and_ack: false, signer_pending_funding: false, signer_pending_channel_ready: false, - signer_pending_commitment_point: false, signer_pending_released_secret: false, #[cfg(debug_assertions)] @@ -6772,7 +6812,8 @@ impl OutboundV1Channel where SP::Target: SignerProvider { blocked_monitor_updates: Vec::new(), }, - unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 } + unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }, + signer_pending_open_channel: false, }) } @@ -6828,7 +6869,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { } if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + self.context.holder_commitment.transaction_number() != INITIAL_COMMITMENT_NUMBER { panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); } @@ -6895,7 +6936,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { /// and see if we get a new `OpenChannel` message, otherwise the channel is failed. 
pub(crate) fn maybe_handle_error_without_close( &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F> - ) -> Result<msgs::OpenChannel, ()> + ) -> Result<Option<msgs::OpenChannel>, ()> where F::Target: FeeEstimator { @@ -6930,10 +6971,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider { self.context.channel_type = ChannelTypeFeatures::only_static_remote_key(); } self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone(); - Ok(self.get_open_channel(chain_hash)) + let opt_msg = self.get_open_channel(chain_hash); + if opt_msg.is_none() { + self.signer_pending_open_channel = true; + } + Ok(opt_msg) } - pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel { + pub fn get_open_channel(&self, chain_hash: ChainHash) -> Option<msgs::OpenChannel> { if !self.context.is_outbound() { panic!("Tried to open a channel for an inbound channel?"); } @@ -6941,13 +6986,16 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider { panic!("Cannot generate an open_channel after we've moved forward"); } - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + if self.context.holder_commitment.transaction_number() != INITIAL_COMMITMENT_NUMBER { panic!("Tried to send an open_channel for a channel that has already advanced"); } - let keys = self.context.get_holder_pubkeys(); + if !self.context.holder_commitment.is_available() { + return None; + } - msgs::OpenChannel { + let keys = self.context.get_holder_pubkeys(); + Some(msgs::OpenChannel { chain_hash, temporary_channel_id: self.context.channel_id, funding_satoshis: self.context.channel_value_satoshis, @@ -6964,14 +7012,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider { payment_point: keys.payment_point, delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(), htlc_basepoint: keys.htlc_basepoint.to_public_key(), - first_per_commitment_point: self.context.cur_holder_commitment_point, + first_per_commitment_point: self.context.holder_commitment.current_point(), channel_flags: if self.context.config.announced_channel {1} else {0}, shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey { Some(script) => script.clone().into_inner(), None => Builder::new().into_script(), }), channel_type: Some(self.context.channel_type.clone()), - } + }) } // Message handlers
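A condensed sketch of the call pattern this enables at the call site (names as in the patch; `node_id` and `pending_msg_events` assumed in scope):

    match chan.get_open_channel(chain_hash) {
        // The signer already had the first per-commitment point: send now.
        Some(msg) => pending_msg_events.push(MessageSendEvent::SendOpenChannel { node_id, msg }),
        // Point still pending: emit the message later from signer_maybe_unblocked().
        None => chan.signer_pending_open_channel = true,
    }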
trusted_tx.built_transaction(); @@ -7191,7 +7239,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { } else { self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); } - self.context.advance_holder_per_commitment_point(logger); + self.context.advance_holder_commitment(logger); self.context.cur_counterparty_commitment_transaction_number -= 1; log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); @@ -7207,11 +7255,27 @@ impl OutboundV1Channel where SP::Target: SignerProvider { /// Indicates that the signer may have some signatures for us, so we should retry if we're /// blocked. #[cfg(async_signing)] - pub fn signer_maybe_unblocked(&mut self, logger: &L) -> Option where L::Target: Logger { - if self.context.signer_pending_funding && self.context.is_outbound() { + pub fn signer_maybe_unblocked(&mut self, chain_hash: &ChainHash, logger: &L) -> UnfundedOutboundV1SignerResumeUpdates + where L::Target: Logger + { + self.context.request_next_holder_commitment(logger); + let open_channel = if self.signer_pending_open_channel && self.context.holder_commitment.is_available() { + self.get_open_channel(chain_hash.clone()).map(|msg| { + log_trace!(logger, "Signer unblocked an open_channel; clearing signer_pending_open_channel"); + self.signer_pending_open_channel = false; + msg + }) + } else { None }; + + let funding_created = if self.context.signer_pending_funding { log_trace!(logger, "Signer unblocked a funding_created"); self.get_funding_created_msg(logger) - } else { None } + } else { None }; + + UnfundedOutboundV1SignerResumeUpdates { + open_channel, + funding_created, + } } } @@ -7219,6 +7283,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { pub(super) struct InboundV1Channel where SP::Target: SignerProvider { pub context: ChannelContext, pub unfunded_context: UnfundedChannelContext, + pub signer_pending_accept_channel: bool, } /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given @@ -7439,15 +7504,8 @@ impl InboundV1Channel where SP::Target: SignerProvider { Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)) }; - // TODO(waterson): this assumes that the signer is available and will respond synchronously, two - // constraints we'd ideally like to remove. - let cur_holder_commitment_point = holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx) - .map_err(|_| ChannelError::Close("Failed to get initial per-commitment point".to_owned()))?; - - let next_holder_commitment_point = Some( - holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx) - .map_err(|_| ChannelError::Close("Failed to get second per-commitment point".to_owned()))? 
-		);
+		let ecdsa_holder_signer = ChannelSignerType::Ecdsa(holder_signer);
+		let holder_commitment = HolderCommitment::initial().request_next(&ecdsa_holder_signer, &secp_ctx, &&logger);
 
 		let chan = Self {
 			context: ChannelContext {
@@ -7473,13 +7531,11 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
 				latest_monitor_update_id: 0,
 
-				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
+				holder_signer: ecdsa_holder_signer,
 				shutdown_scriptpubkey,
 				destination_script,
 
-				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
-				cur_holder_commitment_point,
-				next_holder_commitment_point,
+				holder_commitment,
 				prev_holder_commitment_secret: None,
 				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 				value_to_self_msat: msg.push_msat,
@@ -7506,7 +7562,6 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 				signer_pending_revoke_and_ack: false,
 				signer_pending_funding: false,
 				signer_pending_channel_ready: false,
-				signer_pending_commitment_point: false,
 				signer_pending_released_secret: false,
 
 				#[cfg(debug_assertions)]
@@ -7590,7 +7645,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
 				blocked_monitor_updates: Vec::new(),
 			},
-			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
+			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
+			signer_pending_accept_channel: false,
 		};
 
 		Ok(chan)
@@ -7600,7 +7656,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 	/// should be sent back to the counterparty node.
 	///
 	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
-	pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
+	pub fn accept_inbound_channel(&mut self) -> Option<msgs::AcceptChannel> {
 		if self.context.is_outbound() {
 			panic!("Tried to send accept_channel for an outbound channel?");
 		}
@@ -7610,11 +7666,13 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		) {
 			panic!("Tried to send accept_channel after channel had moved forward");
 		}
-		if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+		if self.context.holder_commitment.transaction_number() != INITIAL_COMMITMENT_NUMBER {
 			panic!("Tried to send an accept_channel for a channel that has already advanced");
 		}
 
-		self.generate_accept_channel_message()
+		if self.context.holder_commitment.is_available() {
+			Some(self.generate_accept_channel_message())
+		} else { None }
 	}
 
 	/// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
@@ -7623,6 +7681,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 	///
 	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
 	fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
+		assert!(self.context.holder_commitment.is_available());
 		let keys = self.context.get_holder_pubkeys();
 		msgs::AcceptChannel {
 			temporary_channel_id: self.context.channel_id,
@@ -7638,7 +7697,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 			payment_point: keys.payment_point,
 			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
 			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
-			first_per_commitment_point: self.context.cur_holder_commitment_point,
+			first_per_commitment_point: self.context.holder_commitment.current_point(),
 			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
 				Some(script) => script.clone().into_inner(),
 				None => Builder::new().into_script(),
@@ -7662,7 +7721,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		let funding_script = self.context.get_funding_redeemscript();
 
 		let keys = self.context.build_holder_transaction_keys();
-		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
+		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.holder_commitment.transaction_number(), &keys, true, false, logger).tx;
 		let trusted_tx = initial_commitment_tx.trust();
 		let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
 		let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
@@ -7696,7 +7755,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		}
 		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
 				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+				self.context.holder_commitment.transaction_number() != INITIAL_COMMITMENT_NUMBER {
 			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
 		}
 
@@ -7736,7 +7795,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
 		self.context.channel_id = funding_txo.to_channel_id();
 		self.context.cur_counterparty_commitment_transaction_number -= 1;
-		self.context.advance_holder_per_commitment_point(logger);
+		self.context.advance_holder_commitment(logger);
 
 		let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
 
@@ -7775,6 +7834,22 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 
 		Ok((channel, funding_signed, channel_monitor))
 	}
+
+	/// Indicates that the signer may have some signatures for us, so we should retry if we're
+	/// blocked.
+	#[allow(unused)]
+	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> UnfundedInboundV1SignerResumeUpdates
+	where L::Target: Logger
+	{
+		self.context.request_next_holder_commitment(logger);
+		let accept_channel = if self.signer_pending_accept_channel && self.context.holder_commitment.is_available() {
+			Some(self.generate_accept_channel_message())
+		} else { None };
+
+		UnfundedInboundV1SignerResumeUpdates {
+			accept_channel,
+		}
+	}
 }
 
 const SERIALIZATION_VERSION: u8 = 3;
@@ -7872,7 +7947,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 		}
 		self.context.destination_script.write(writer)?;
 
-		self.context.cur_holder_commitment_transaction_number.write(writer)?;
+		self.context.holder_commitment.transaction_number().write(writer)?;
 		self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
 		self.context.value_to_self_msat.write(writer)?;
 
@@ -8125,6 +8200,9 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 		let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
 
+		let cur_holder_commitment_point = self.context.holder_commitment.current_point();
+		let next_holder_commitment_point = self.context.holder_commitment.next_point();
+
 		write_tlv_fields!(writer, {
 			(0, self.context.announcement_sigs, option),
 			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
@@ -8160,8 +8238,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 			(39, pending_outbound_blinding_points, optional_vec),
 			(41, holding_cell_blinding_points, optional_vec),
 			(43, malformed_htlcs, optional_vec), // Added in 0.0.119
-			(45, self.context.cur_holder_commitment_point, required),
-			(47, self.context.next_holder_commitment_point, option),
+			(45, cur_holder_commitment_point, required),
+			(47, next_holder_commitment_point, option),
 		});
 
 		Ok(())
@@ -8454,7 +8532,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
 		let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
 
 		let mut cur_holder_commitment_point_opt: Option<PublicKey> = None;
-		let mut next_holder_commitment_point: Option<PublicKey> = None;
+		let mut next_holder_commitment_point_opt: Option<PublicKey> = None;
 
 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
@@ -8486,7 +8564,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
 			(41, holding_cell_blinding_points_opt, optional_vec),
 			(43, malformed_htlcs, optional_vec), // Added in 0.0.119
 			(45, cur_holder_commitment_point_opt, option),
-			(47, next_holder_commitment_point, option),
+			(47, next_holder_commitment_point_opt, option),
 		});
 
 		let mut secp_ctx = Secp256k1::new();
@@ -8507,14 +8585,20 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
 			(holder_signer.channel_keys_id(), holder_signer)
 		};
 
+		let ecdsa_holder_signer = ChannelSignerType::Ecdsa(holder_signer);
+
 		// If we're restoring this channel for the first time after an upgrade, then we require that the
 		// signer be available so that we can immediately populate the current commitment point. Channel
 		// restoration will fail if this is not possible.
-		let cur_holder_commitment_point = match cur_holder_commitment_point_opt {
-			Some(point) => point,
-			None => {
-				holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx)
-					.map_err(|_| DecodeError::Io(io::ErrorKind::NotFound))?
+		let holder_commitment = match (cur_holder_commitment_point_opt, next_holder_commitment_point_opt) {
+			(Some(current), Some(next)) => HolderCommitment::Available {
+				transaction_number: cur_holder_commitment_transaction_number, current, next
+			},
+			(Some(current), _) => HolderCommitment::PendingNext {
+				transaction_number: cur_holder_commitment_transaction_number, current
+			},
+			(_, _) => HolderCommitment::Uninitialized {
+				transaction_number: cur_holder_commitment_transaction_number
 			}
 		};
 
@@ -8629,13 +8713,11 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
 
 			latest_monitor_update_id,
 
-			holder_signer: ChannelSignerType::Ecdsa(holder_signer),
+			holder_signer: ecdsa_holder_signer,
 			shutdown_scriptpubkey,
 			destination_script,
 
-			cur_holder_commitment_transaction_number,
-			cur_holder_commitment_point,
-			next_holder_commitment_point,
+			holder_commitment,
 			prev_holder_commitment_secret: None,
 			cur_counterparty_commitment_transaction_number,
 			value_to_self_msat,
@@ -8658,7 +8740,6 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
 			signer_pending_revoke_and_ack: false,
 			signer_pending_funding: false,
 			signer_pending_channel_ready: false,
-			signer_pending_commitment_point: true,
 			signer_pending_released_secret: true,
 
 			pending_update_fee,
@@ -8853,11 +8934,12 @@ mod tests {
 		keys_provider.expect(OnGetShutdownScriptpubkey {
 			returns: non_v0_segwit_shutdown_script.clone(),
 		});
+		let logger = test_utils::TestLogger::new();
 
 		let secp_ctx = Secp256k1::new();
 		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
-		match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
+		match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None, &&logger) {
 			Err(APIError::IncompatibleShutdownScript { script }) => {
 				assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
 			},
@@ -8877,15 +8959,16 @@ mod tests {
 		let seed = [42; 32];
 		let network = Network::Testnet;
 		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+		let logger = test_utils::TestLogger::new();
 
 		let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
-		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &&logger).unwrap();
 
 		// Now change the fee so we can check that the fee in the open_channel message is the
 		// same as the old fee.
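For reference, the hunks above construct `HolderCommitment` variants and call accessors such as `transaction_number()`, `current_point()`, `next_point()`, and `is_available()` without showing the type itself, which this patch defines earlier in channel.rs. The following is a minimal sketch of the state machine implied by that usage; the accessor bodies and the two resume-updates structs' field lists are inferred from their construction sites in this patch, so treat this as an illustration rather than the patch's actual definitions:

// Sketch only: types assumed from usage elsewhere in channel.rs.
use bitcoin::secp256k1::PublicKey;
use crate::ln::msgs;

// The holder-commitment state machine implied by the deserialization match above.
// `Uninitialized` models a channel restored from a pre-upgrade serialization with
// no cached point; `PendingNext` has the current point but is still waiting on the
// signer for the next one; `Available` can advance without consulting the signer.
enum HolderCommitment {
	Uninitialized { transaction_number: u64 },
	PendingNext { transaction_number: u64, current: PublicKey },
	Available { transaction_number: u64, current: PublicKey, next: PublicKey },
}

impl HolderCommitment {
	fn transaction_number(&self) -> u64 {
		match self {
			Self::Uninitialized { transaction_number }
				| Self::PendingNext { transaction_number, .. }
				| Self::Available { transaction_number, .. } => *transaction_number,
		}
	}
	fn is_available(&self) -> bool { matches!(self, Self::Available { .. }) }
}

// The resume-update structs returned by the unfunded `signer_maybe_unblocked`
// methods, with field lists inferred from how the methods above build them.
struct UnfundedOutboundV1SignerResumeUpdates {
	open_channel: Option<msgs::OpenChannel>,
	funding_created: Option<msgs::FundingCreated>,
}
struct UnfundedInboundV1SignerResumeUpdates {
	accept_channel: Option<msgs::AcceptChannel>,
}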
 		fee_est.fee_est = 500;
-		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
 	}
 
@@ -8907,16 +8990,16 @@ mod tests {
 		// Create Node A's channel pointing to Node B's pubkey
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
-		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &&logger).unwrap();
 
 		// Create Node B's channel by receiving Node A's open_channel message
 		// Make sure A's dust limit is as we expect.
-		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
 		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
 
 		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
-		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
+		let mut accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
 		accept_channel_msg.dust_limit_satoshis = 546;
 		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
 		node_a_chan.context.holder_dust_limit_satoshis = 1560;
@@ -8987,10 +9070,11 @@ mod tests {
 		let seed = [42; 32];
 		let network = Network::Testnet;
 		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+		let logger = test_utils::TestLogger::new();
 
 		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
-		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &&logger).unwrap();
 
 		let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
 		let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
@@ -9039,15 +9123,15 @@ mod tests {
 		// Create Node A's channel pointing to Node B's pubkey
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
-		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &&logger).unwrap();
 
 		// Create Node B's channel by receiving Node A's open_channel message
-		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
+		let open_channel_msg = node_a_chan.get_open_channel(chain_hash).unwrap();
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
 		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
 
 		// Node B --> Node A: accept channel
-		let accept_channel_msg = node_b_chan.accept_inbound_channel();
+		let accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
 		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
 
 		// Node A --> Node B: funding created
@@ -9103,16 +9187,16 @@ mod tests {
 		// Test that `OutboundV1Channel::new` creates a channel with the correct value for
 		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
 		// which is set to the lower bound + 1 (2%) of the `channel_value`.
-		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
+		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None, &&logger).unwrap();
 		let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
 
 		// Test with the upper bound - 1 of valid values (99%).
-		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
+		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None, &&logger).unwrap();
 		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
 
-		let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
+		let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 
 		// Test that `InboundV1Channel::new` creates a channel with the correct value for
 		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
@@ -9128,14 +9212,14 @@ mod tests {
 
 		// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
 		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
-		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
+		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None, &&logger).unwrap();
 		let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
 
 		// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
 		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
 		// than 100.
-		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
+		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None, &&logger).unwrap();
 		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
 
@@ -9188,12 +9272,12 @@ mod tests {
 		let mut outbound_node_config = UserConfig::default();
 		outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
-		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
+		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None, &&logger).unwrap();
 
 		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
 		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
 
-		let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
+		let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		let mut inbound_node_config = UserConfig::default();
 		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
@@ -9225,16 +9309,16 @@ mod tests {
 		// Create Node A's channel pointing to Node B's pubkey
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
-		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &&logger).unwrap();
 
 		// Create Node B's channel by receiving Node A's open_channel message
 		// Make sure A's dust limit is as we expect.
-		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
 		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
 
 		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
-		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
+		let mut accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
 		accept_channel_msg.dust_limit_satoshis = 546;
 		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
 		node_a_chan.context.holder_dust_limit_satoshis = 1560;
@@ -9294,11 +9378,12 @@ mod tests {
 		let seed = [42; 32];
 		let network = Network::Testnet;
 		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+		let logger = test_utils::TestLogger::new();
 
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
 		let features = channelmanager::provided_init_features(&config);
-		let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+		let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None, &&logger).unwrap();
 		let mut chan = Channel { context: outbound_chan.context };
 
 		let dummy_htlc_source = HTLCSource::OutboundRoute {
@@ -9403,7 +9488,7 @@ mod tests {
 		use bitcoin::secp256k1::Message;
 		use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
 		use crate::ln::PaymentPreimage;
-		use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
+		use crate::ln::channel::{HolderCommitment, HTLCOutputInCommitment, INITIAL_COMMITMENT_NUMBER, TxCreationKeys};
 		use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
 		use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
 		use crate::util::logger::Logger;
@@ -9438,7 +9523,7 @@ mod tests {
 		let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let mut config = UserConfig::default();
 		config.channel_handshake_config.announced_channel = false;
-		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
+		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None, &logger).unwrap(); // Nothing uses their network key in this test
 		chan.context.holder_dust_limit_satoshis = 546;
 		chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
 
@@ -9474,7 +9559,7 @@ mod tests {
 		let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
 		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
 		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
-		chan.context.cur_holder_commitment_point = per_commitment_point;
+		chan.context.holder_commitment = HolderCommitment::PendingNext { transaction_number: INITIAL_COMMITMENT_NUMBER, current: per_commitment_point };
 		let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
 		let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
@@ -10186,12 +10271,12 @@ mod tests {
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
 		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
-			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &&logger).unwrap();
 
 		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 		channel_type_features.set_zero_conf_required();
 
-		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		open_channel_msg.channel_type = Some(channel_type_features);
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
 		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
@@ -10221,7 +10306,7 @@ mod tests {
 		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
 			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
-			&config, 0, 42, None
+			&config, 0, 42, None, &&logger,
 		).unwrap();
 		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
 
@@ -10232,10 +10317,10 @@ mod tests {
 		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
 			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
-			None
+			None, &&logger
 		).unwrap();
 
-		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
 			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
@@ -10270,11 +10355,11 @@ mod tests {
 		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
 			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
-			None
+			None, &&logger
 		).unwrap();
 
 		// Set `channel_type` to `None` to force the implicit feature negotiation.
-		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		open_channel_msg.channel_type = None;
 
 		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
@@ -10317,10 +10402,10 @@ mod tests {
 		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
 			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
-			None
+			None, &&logger
 		).unwrap();
 
-		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
 
 		let res = InboundV1Channel::<&TestKeysInterface>::new(
@@ -10336,10 +10421,10 @@ mod tests {
 		// LDK.
 		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
-			10000000, 100000, 42, &config, 0, 42, None
+			10000000, 100000, 42, &config, 0, 42, None, &&logger
 		).unwrap();
 
-		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 
 		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
@@ -10386,10 +10471,11 @@ mod tests {
 			&config,
 			0,
 			42,
-			None
+			None,
+			&&logger,
 		).unwrap();
 
-		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
 		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
 			&feeest,
@@ -10406,7 +10492,7 @@ mod tests {
 			true, // Allow node b to send a 0conf channel_ready.
 		).unwrap();
 
-		let accept_channel_msg = node_b_chan.accept_inbound_channel();
+		let accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
 		node_a_chan.accept_channel(
 			&accept_channel_msg,
 			&config.channel_handshake_limits,
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 994341cc02f..d36c69106dd 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -2542,13 +2542,13 @@ where
 			}
 		}
 
-		let channel = {
+		let mut channel = {
 			let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
 			let their_features = &peer_state.latest_features;
 			let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
 			match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
 				their_features, channel_value_satoshis, push_msat, user_channel_id, config,
-				self.best_block.read().unwrap().height(), outbound_scid_alias, temporary_channel_id)
+				self.best_block.read().unwrap().height(), outbound_scid_alias, temporary_channel_id, &self.logger)
 			{
 				Ok(res) => res,
 				Err(e) => {
@@ -2557,7 +2557,11 @@ where
 				},
 			}
 		};
-		let res = channel.get_open_channel(self.chain_hash);
+		let opt_msg = channel.get_open_channel(self.chain_hash);
+		if opt_msg.is_none() {
+			log_trace!(self.logger, "Awaiting signer for open_channel, setting signer_pending_open_channel");
+			channel.signer_pending_open_channel = true;
+		}
 
 		let temporary_channel_id = channel.context.channel_id();
 		match peer_state.channel_by_id.entry(temporary_channel_id) {
@@ -2571,10 +2575,13 @@ where
 			hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); }
 		}
 
-		peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
-			node_id: their_network_key,
-			msg: res,
-		});
+		if let Some(msg) = opt_msg {
+			peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+				node_id: their_network_key,
+				msg,
+			});
+		};
+
 		Ok(temporary_channel_id)
 	}
 
@@ -6091,10 +6098,15 @@ where
 		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
 		channel.context.set_outbound_scid_alias(outbound_scid_alias);
 
-		peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-			node_id: channel.context.get_counterparty_node_id(),
-			msg: channel.accept_inbound_channel(),
-		});
+		if let Some(msg) = channel.accept_inbound_channel() {
+			peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+				node_id: channel.context.get_counterparty_node_id(),
+				msg
+			})
+		} else {
+			log_trace!(self.logger, "Awaiting signer for accept_channel; setting signer_pending_accept_channel");
+			channel.signer_pending_accept_channel = true;
+		}
 
 		peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
 
@@ -6251,10 +6263,16 @@ where
 		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
 		channel.context.set_outbound_scid_alias(outbound_scid_alias);
 
-		peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-			node_id: counterparty_node_id.clone(),
-			msg: channel.accept_inbound_channel(),
-		});
+		if let Some(msg) = channel.accept_inbound_channel() {
+			peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+				node_id: counterparty_node_id.clone(),
+				msg,
+			});
+		} else {
+			log_trace!(self.logger, "Awaiting signer for accept_channel; setting signer_pending_accept_channel");
+			channel.signer_pending_accept_channel = true;
+		}
+
 		peer_state.channel_by_id.insert(channel_id, ChannelPhase::UnfundedInboundV1(channel));
 
 		Ok(())
@@ -7405,6 +7423,7 @@ where
 					});
 				}
 				if let Some(msg) = msgs.channel_ready {
+					log_trace!(logger, "Queuing channel_ready to {}", node_id);
 					send_channel_ready!(self, pending_msg_events, chan, msg);
 				}
 				match (msgs.commitment_update, msgs.raa) {
@@ -7432,14 +7451,27 @@ where
 					};
 				}
 				ChannelPhase::UnfundedOutboundV1(chan) => {
-					if let Some(msg) = chan.signer_maybe_unblocked(&self.logger) {
-						pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
-							node_id,
-							msg,
-						});
+					let logger = WithChannelContext::from(&self.logger, &chan.context);
+					let msgs = chan.signer_maybe_unblocked(&self.chain_hash, &&logger);
+					let node_id = phase.context().get_counterparty_node_id();
+					if let Some(msg) = msgs.open_channel {
+						log_trace!(logger, "Queuing open_channel to {}", node_id);
+						pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { node_id, msg });
+					}
+					if let Some(msg) = msgs.funding_created {
+						log_trace!(logger, "Queuing funding_created to {}", node_id);
+						pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { node_id, msg });
 					}
 				}
+				ChannelPhase::UnfundedInboundV1(chan) => {
+					let logger = WithChannelContext::from(&self.logger, &chan.context);
+					let msgs = chan.signer_maybe_unblocked(&&logger);
+					let node_id = phase.context().get_counterparty_node_id();
+					if let Some(msg) = msgs.accept_channel {
+						log_trace!(logger, "Queuing accept_channel to {}", node_id);
+						pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { node_id, msg });
+					}
+				}
-				ChannelPhase::UnfundedInboundV1(_) => {},
 			}
 		};
 
@@ -9135,11 +9167,13 @@ where
 			let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
-				if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
-					peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
-						node_id: *counterparty_node_id,
-						msg,
-					});
+				if let Ok(opt_msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
+					if let Some(msg) = opt_msg {
+						peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+							node_id: *counterparty_node_id,
+							msg,
+						});
+					}
 					return;
 				}
 			}
@@ -10401,7 +10435,14 @@ where
 				log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
 					&channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
 					monitor.get_latest_update_id());
-				channel.context.request_next_holder_per_commitment_point(&args.logger);
+				channel.context.request_next_holder_commitment(&&logger);
+				// TODO(waterson): maybe we should have a different error here in case the signer is not
+				// available when we need to restore a channel for which the initial point was never
+				// written?
+				if channel.context.is_holder_commitment_uninitialized() {
+					return Err(DecodeError::Io(io::ErrorKind::NotFound));
+				}
+
 				if let Some(short_channel_id) = channel.context.get_short_channel_id() {
 					short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
 				}
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 6802407d133..dcdb0460eff 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -7186,11 +7186,12 @@ fn test_user_configurable_csv_delay() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let logger = test_utils::TestLogger::new();
 
 	// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
 	if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
 		&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
-		&low_our_to_self_config, 0, 42, None)
+		&low_our_to_self_config, 0, 42, None, &&logger)
 	{
 		match error {
 			APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs
index e64d67c00a8..c6901179e2d 100644
--- a/lightning/src/util/test_channel_signer.rs
+++ b/lightning/src/util/test_channel_signer.rs
@@ -40,6 +40,9 @@ use crate::ln::msgs::PartialSignatureWithNonce;
 #[cfg(taproot)]
 use crate::sign::taproot::TaprootChannelSigner;
 
+#[cfg(feature = "std")]
+use std::cell::RefCell;
+
 /// Initial value for revoked commitment downward counter
 pub const INITIAL_REVOKED_COMMITMENT_NUMBER: u64 = 1 << 48;
 
@@ -423,6 +426,11 @@ pub struct EnforcementState {
 }
 
 impl EnforcementState {
+	#[cfg(feature = "std")]
+	thread_local! {
+		static DEFAULT_UNAVAILABLE_SIGNER_OPS: RefCell<u32> = RefCell::new(0);
+	}
+
 	/// Enforcement state for a new channel
 	pub fn new() -> Self {
 		EnforcementState {
@@ -430,7 +438,16 @@ impl EnforcementState {
 			last_counterparty_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
 			last_holder_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
 			last_holder_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-			unavailable_signer_ops: 0,
+			unavailable_signer_ops: {
+				#[cfg(feature = "std")]
+				{
+					EnforcementState::DEFAULT_UNAVAILABLE_SIGNER_OPS.with(|ops| *ops.borrow())
+				}
+				#[cfg(not(feature = "std"))]
+				{
+					0
+				}
+			}
 		}
 	}
 
@@ -445,4 +462,16 @@ impl EnforcementState {
 	pub fn is_signer_available(&self, ops_mask: u32) -> bool {
 		(self.unavailable_signer_ops & ops_mask) == 0
 	}
+
+	#[cfg(feature = "std")]
+	pub fn with_default_unavailable<F, R>(ops: u32, f: F) -> R
+	where F: FnOnce() -> R
+	{
+		EnforcementState::DEFAULT_UNAVAILABLE_SIGNER_OPS.with(|unavailable_ops| {
+			unavailable_ops.replace(ops);
+			let res = f();
+			unavailable_ops.replace(0);
+			res
+		})
+	}
 }
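For testing, `with_default_unavailable` above lets a test arrange for every `EnforcementState` constructed inside a closure to start with some signer operations masked off. A rough usage sketch follows; the `ops::GET_PER_COMMITMENT_POINT` constant is assumed from this patch's `ops` bitmask module and the function name is illustrative:

#[cfg(feature = "std")]
fn force_signer_unavailable_example() {
	use crate::util::test_channel_signer::{ops, EnforcementState};

	// Any EnforcementState created inside the closure starts with the given ops
	// marked unavailable. Note the helper resets the thread-local default to 0 on
	// the way out rather than restoring the prior value, so nesting these calls
	// is not supported.
	EnforcementState::with_default_unavailable(ops::GET_PER_COMMITMENT_POINT, || {
		let state = EnforcementState::new();
		assert!(!state.is_signer_available(ops::GET_PER_COMMITMENT_POINT));
	});
}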