lightning/src/ln/channelmanager.rs (114 additions, 103 deletions)
@@ -18015,33 +18015,32 @@ where
is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id);
}

if is_channel_closed {
for (htlc_source, (htlc, preimage_opt)) in
monitor.get_all_current_outbound_htlcs()
{
let logger = WithChannelMonitor::from(
&args.logger,
monitor,
Some(htlc.payment_hash),
);
let htlc_id = SentHTLCId::from_source(&htlc_source);
match htlc_source {
HTLCSource::PreviousHopData(prev_hop_data) => {
let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
info.prev_funding_outpoint == prev_hop_data.outpoint
&& info.prev_htlc_id == prev_hop_data.htlc_id
};
for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs()
Contributor: Would re-arranging the loop in a separate commit make the diff easier?
{
let logger =
WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
let htlc_id = SentHTLCId::from_source(&htlc_source);
match htlc_source {
HTLCSource::PreviousHopData(prev_hop_data) => {
let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
info.prev_funding_outpoint == prev_hop_data.outpoint
&& info.prev_htlc_id == prev_hop_data.htlc_id
};
// We always add all inbound committed HTLCs to `decode_update_add_htlcs` in the above
// loop, but we need to prune from those added HTLCs if they were already forwarded to
// the outbound edge. Otherwise, we'll double-forward.
dedup_decode_update_add_htlcs(
&mut decode_update_add_htlcs,
&prev_hop_data,
"HTLC was forwarded to the closed channel",
&args.logger,
);
if is_channel_closed {
// The ChannelMonitor is now responsible for this HTLC's
// failure/success and will let us know what its outcome is. If we
// still have an entry for this HTLC in `forward_htlcs`,
// `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not
// persisted after the monitor was when forwarding the payment.
dedup_decode_update_add_htlcs(
&mut decode_update_add_htlcs,
&prev_hop_data,
"HTLC was forwarded to the closed channel",
&args.logger,
);
dedup_decode_update_add_htlcs(
&mut decode_update_add_htlcs_legacy,
&prev_hop_data,
@@ -18072,99 +18071,111 @@ where
false
} else { true }
});
},
HTLCSource::OutboundRoute {
payment_id,
session_priv,
path,
bolt12_invoice,
..
} => {
if let Some(preimage) = preimage_opt {
let pending_events = Mutex::new(pending_events_read);
let update = PaymentCompleteUpdate {
counterparty_node_id: monitor.get_counterparty_node_id(),
channel_funding_outpoint: monitor.get_funding_txo(),
channel_id: monitor.channel_id(),
htlc_id,
};
let mut compl_action = Some(
}
},
HTLCSource::OutboundRoute {
payment_id,
session_priv,
path,
bolt12_invoice,
..
} => {
if !is_channel_closed {
continue;
}
if let Some(preimage) = preimage_opt {
let pending_events = Mutex::new(pending_events_read);
let update = PaymentCompleteUpdate {
counterparty_node_id: monitor.get_counterparty_node_id(),
channel_funding_outpoint: monitor.get_funding_txo(),
channel_id: monitor.channel_id(),
htlc_id,
};
let mut compl_action = Some(
EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update)
);
pending_outbounds.claim_htlc(
payment_id,
preimage,
bolt12_invoice,
session_priv,
path,
true,
&mut compl_action,
&pending_events,
);
// If the completion action was not consumed, then there was no
// payment to claim, and we need to tell the `ChannelMonitor`
// we don't need to hear about the HTLC again, at least as long
// as the PaymentSent event isn't still sitting around in our
// event queue.
let have_action = if compl_action.is_some() {
let pending_events = pending_events.lock().unwrap();
pending_events.iter().any(|(_, act)| *act == compl_action)
} else {
false
};
if !have_action && compl_action.is_some() {
let mut peer_state = per_peer_state
.get(&counterparty_node_id)
.map(|state| state.lock().unwrap())
.expect("Channels originating a preimage must have peer state");
let update_id = peer_state
.closed_channel_monitor_update_ids
.get_mut(channel_id)
.expect("Channels originating a preimage must have a monitor");
// Note that for channels closed pre-0.1, the latest
// update_id is `u64::MAX`.
*update_id = update_id.saturating_add(1);

pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: monitor.get_counterparty_node_id(),
pending_outbounds.claim_htlc(
payment_id,
preimage,
bolt12_invoice,
session_priv,
path,
true,
&mut compl_action,
&pending_events,
);
// If the completion action was not consumed, then there was no
// payment to claim, and we need to tell the `ChannelMonitor`
// we don't need to hear about the HTLC again, at least as long
// as the PaymentSent event isn't still sitting around in our
// event queue.
let have_action = if compl_action.is_some() {
let pending_events = pending_events.lock().unwrap();
pending_events.iter().any(|(_, act)| *act == compl_action)
} else {
false
};
if !have_action && compl_action.is_some() {
let mut peer_state = per_peer_state
.get(&counterparty_node_id)
.map(|state| state.lock().unwrap())
.expect(
"Channels originating a preimage must have peer state",
);
let update_id = peer_state
.closed_channel_monitor_update_ids
.get_mut(channel_id)
.expect(
"Channels originating a preimage must have a monitor",
);
// Note that for channels closed pre-0.1, the latest
// update_id is `u64::MAX`.
*update_id = update_id.saturating_add(1);

pending_background_events.push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: monitor
.get_counterparty_node_id(),
funding_txo: monitor.get_funding_txo(),
channel_id: monitor.channel_id(),
update: ChannelMonitorUpdate {
update_id: *update_id,
channel_id: Some(monitor.channel_id()),
updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete {
updates: vec![
ChannelMonitorUpdateStep::ReleasePaymentComplete {
htlc: htlc_id,
}],
},
],
},
});
}
pending_events_read = pending_events.into_inner().unwrap();
},
);
}
},
}
pending_events_read = pending_events.into_inner().unwrap();
}
},
}
for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() {
log_info!(
args.logger,
"Failing HTLC with payment hash {} as it was resolved on-chain.",
payment_hash
);
let completion_action = Some(PaymentCompleteUpdate {
counterparty_node_id: monitor.get_counterparty_node_id(),
channel_funding_outpoint: monitor.get_funding_txo(),
channel_id: monitor.channel_id(),
htlc_id: SentHTLCId::from_source(&htlc_source),
});
}
for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() {
log_info!(
args.logger,
"Failing HTLC with payment hash {} as it was resolved on-chain.",
payment_hash
);
let completion_action = Some(PaymentCompleteUpdate {
counterparty_node_id: monitor.get_counterparty_node_id(),
channel_funding_outpoint: monitor.get_funding_txo(),
channel_id: monitor.channel_id(),
htlc_id: SentHTLCId::from_source(&htlc_source),
});

failed_htlcs.push((
htlc_source,
payment_hash,
monitor.get_counterparty_node_id(),
monitor.channel_id(),
LocalHTLCFailureReason::OnChainTimeout,
completion_action,
));
}
failed_htlcs.push((
htlc_source,
payment_hash,
monitor.get_counterparty_node_id(),
monitor.channel_id(),
LocalHTLCFailureReason::OnChainTimeout,
completion_action,
));
}

// Whether the downstream channel was closed or not, try to re-apply any payment
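A note on the double-forward comment in the hunk above: on reload, the earlier loop re-adds every inbound committed HTLC to `decode_update_add_htlcs`, so any HTLC the `ChannelMonitor` already shows on the outbound edge must be pruned back out before the queue is replayed. Below is a minimal sketch of that prune under simplified assumptions; `OutPoint`, `PrevHopData`, `PendingAdd`, and `dedup_queue` are illustrative stand-ins, not LDK's definitions.

// Sketch only: types and field names are simplified stand-ins.
#[derive(Clone, Copy, PartialEq, Eq)]
struct OutPoint {
    txid: [u8; 32],
    index: u16,
}

struct PrevHopData {
    outpoint: OutPoint,
    htlc_id: u64,
}

struct PendingAdd {
    prev_funding_outpoint: OutPoint,
    prev_htlc_id: u64,
}

/// Retain only the queued adds that did NOT come from this previous hop;
/// a match means the outbound edge already committed the HTLC, and
/// replaying the queued add would forward it a second time.
fn dedup_queue(queue: &mut Vec<PendingAdd>, prev_hop: &PrevHopData) {
    queue.retain(|info| {
        info.prev_funding_outpoint != prev_hop.outpoint
            || info.prev_htlc_id != prev_hop.htlc_id
    });
}

fn main() {
    let op = OutPoint { txid: [0; 32], index: 1 };
    let mut queue = vec![
        PendingAdd { prev_funding_outpoint: op, prev_htlc_id: 7 },
        PendingAdd { prev_funding_outpoint: op, prev_htlc_id: 8 },
    ];
    // The monitor says HTLC 7 already reached the outbound edge.
    dedup_queue(&mut queue, &PrevHopData { outpoint: op, htlc_id: 7 });
    assert_eq!(queue.len(), 1);
    assert_eq!(queue[0].prev_htlc_id, 8);
}

The retain predicate keeps an entry unless both the previous funding outpoint and the previous HTLC id match, the same pairing the `pending_forward_matches_htlc` closure checks in the real code.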
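The other half of the closed-channel path is the consumed-or-not check on `compl_action`: `claim_htlc` takes the action only when there is actually a pending payment to claim, and if the action survives unconsumed with no queued `PaymentSent` event still referencing it, the code generates a `ReleasePaymentComplete` monitor update so the monitor stops resurrecting the HTLC on every restart. A rough sketch of that take-and-inspect ownership pattern follows; every type and name here is a hypothetical stand-in that only mirrors the shape of the real ones.

// Hypothetical stand-ins: only the Option::take()-based ownership pattern
// mirrors the real code; none of these types are LDK's.
#[derive(Clone, PartialEq, Debug)]
struct CompletionAction {
    htlc_id: u64,
}

/// A claim consumes the action only if a matching pending payment exists,
/// queueing it alongside the event it gates.
fn claim_htlc_sketch(
    payment_pending: bool,
    action: &mut Option<CompletionAction>,
    events: &mut Vec<(&'static str, Option<CompletionAction>)>,
) {
    if payment_pending {
        events.push(("PaymentSent", action.take()));
    }
}

fn main() {
    let mut events = Vec::new();
    let mut action = Some(CompletionAction { htlc_id: 42 });
    claim_htlc_sketch(false, &mut action, &mut events);

    // Unconsumed action and no queued event referencing it: nothing will
    // ever release the monitor's record, so emit the release update now.
    let event_holds_action = events.iter().any(|(_, a)| *a == action);
    if action.is_some() && !event_holds_action {
        println!("generate ReleasePaymentComplete for htlc {}", action.unwrap().htlc_id);
    }
}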
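Finally, the `saturating_add` on the closed-channel `update_id` is deliberate: as the inline comment notes, monitors for channels closed before LDK 0.1 persist their latest update_id as `u64::MAX`, where a plain `+ 1` would panic in debug builds or wrap to 0 in release builds, making the regenerated update look older than every prior one. A two-line demonstration:

fn main() {
    // Normal closed channels keep counting up...
    assert_eq!(42u64.saturating_add(1), 43);
    // ...while pre-0.1 closed channels, persisted with u64::MAX, stay
    // pinned there instead of wrapping or panicking.
    assert_eq!(u64::MAX.saturating_add(1), u64::MAX);
}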
lightning/src/ln/reload_tests.rs (56 additions, 0 deletions)
@@ -1258,6 +1258,62 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) {
expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
}

#[test]
fn test_manager_persisted_post_outbound_edge_forward() {
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let persister;
let new_chain_monitor;
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let nodes_1_deserialized;
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;

// Lock in the HTLC from node_a <> node_b.
let amt_msat = 5000;
let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors(&nodes[0], 1);
let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false);

// Add the HTLC to the outbound edge, node_b <> node_c.
nodes[1].node.process_pending_htlc_forwards();
check_added_monitors(&nodes[1], 1);

// Disconnect peers and reload the forwarding node_b.
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id());

let node_b_encoded = nodes[1].node.encode();
let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);

reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0]));
let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]);
args_b_c.send_channel_ready = (true, true);
args_b_c.send_announcement_sigs = (true, true);
args_b_c.pending_htlc_adds = (0, 1);
// While reconnecting, we re-send node_b's outbound update_add and commit the HTLC to the b<>c
// channel.
reconnect_nodes(args_b_c);

// Ensure node_b won't double-forward the outbound HTLC (this was previously broken).
nodes[1].node.process_pending_htlc_forwards();
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

// Claim the HTLC backwards to node_a.
expect_and_process_pending_htlcs(&nodes[2], false);
expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id());
let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]];
do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage));
expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
}

#[test]
fn test_reload_partial_funding_batch() {
let chanmon_cfgs = create_chanmon_cfgs(3);