2018-04-10 15:53:58 +02:00
|
|
|
#!/usr/bin/env python3
|
2018-10-25 19:34:31 +02:00
|
|
|
#
|
|
|
|
|
# Copyright (C) 2018 The Electrum developers
|
|
|
|
|
# Distributed under the MIT software license, see the accompanying
|
|
|
|
|
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
|
2018-04-10 15:53:58 +02:00
|
|
|
|
2019-02-20 21:03:53 +01:00
|
|
|
import zlib
|
2018-10-05 15:37:47 +02:00
|
|
|
from collections import OrderedDict, defaultdict
|
2018-04-10 15:53:58 +02:00
|
|
|
import asyncio
|
2018-04-11 05:01:34 +02:00
|
|
|
import os
|
2018-04-11 06:11:07 +02:00
|
|
|
import time
|
2021-03-11 19:31:22 +01:00
|
|
|
from typing import Tuple, Dict, TYPE_CHECKING, Optional, Union, Set
|
2019-03-18 11:03:37 +01:00
|
|
|
from datetime import datetime
|
2021-03-04 16:44:13 +01:00
|
|
|
import functools
|
2018-10-05 15:37:47 +02:00
|
|
|
|
2018-10-04 14:03:29 +02:00
|
|
|
import aiorpcx
|
2022-02-08 12:34:49 +01:00
|
|
|
from aiorpcx import ignore_after
|
2018-04-10 15:53:58 +02:00
|
|
|
|
2018-10-25 23:30:36 +02:00
|
|
|
from .crypto import sha256, sha256d
|
2020-04-14 16:12:47 +02:00
|
|
|
from . import bitcoin, util
|
2018-05-26 17:04:55 +02:00
|
|
|
from . import ecc
|
2020-12-19 21:21:24 +01:00
|
|
|
from .ecc import sig_string_from_r_and_s, der_sig_from_sig_string
|
2018-04-12 09:47:09 +02:00
|
|
|
from . import constants
|
2023-02-17 11:35:03 +00:00
|
|
|
from .util import (bfh, log_exceptions, ignore_exceptions, chunks, OldTaskGroup,
|
2020-09-13 16:55:37 +02:00
|
|
|
UnrelatedTransactionException)
|
2020-02-25 17:54:49 +01:00
|
|
|
from . import transaction
|
2021-03-09 09:55:55 +01:00
|
|
|
from .bitcoin import make_op_return
|
2023-02-17 11:07:19 +00:00
|
|
|
from .transaction import PartialTxOutput, match_script_against_template, Sighash
|
2019-05-02 17:59:11 +02:00
|
|
|
from .logging import Logger
|
2020-12-19 21:21:24 +01:00
|
|
|
from .lnonion import (new_onion_packet, OnionFailureCode, calc_hops_data_for_payment,
|
2021-02-10 13:16:33 +01:00
|
|
|
process_onion_packet, OnionPacket, construct_onion_error, OnionRoutingFailure,
|
2020-03-17 18:02:51 +01:00
|
|
|
ProcessedOnionPacket, UnsupportedOnionPacketVersion, InvalidOnionMac, InvalidOnionPubkey,
|
|
|
|
|
OnionFailureCodeMetaFlag)
|
2022-06-07 22:53:05 +02:00
|
|
|
from .lnchannel import Channel, RevokeAndAck, RemoteCtnTooFarInFuture, ChannelState, PeerState, ChanCloseOption
|
2019-08-14 21:41:24 +02:00
|
|
|
from . import lnutil
|
2021-09-24 19:58:32 +02:00
|
|
|
from .lnutil import (Outpoint, LocalConfig, RECEIVED, UpdateAddHtlc, ChannelConfig,
|
2018-10-10 22:54:30 +02:00
|
|
|
RemoteConfig, OnlyPubkeyKeypair, ChannelConstraints, RevocationStore,
|
2018-10-15 11:05:53 +02:00
|
|
|
funding_output_script, get_per_commitment_secret_from_seed,
|
2020-03-16 22:07:00 +01:00
|
|
|
secret_to_pubkey, PaymentFailure, LnFeatures,
|
2020-12-19 21:21:24 +01:00
|
|
|
LOCAL, REMOTE, HTLCOwner,
|
2020-05-02 22:27:28 +02:00
|
|
|
ln_compare_features, privkey_to_pubkey, MIN_FINAL_CLTV_EXPIRY_ACCEPTED,
|
2020-12-19 21:21:24 +01:00
|
|
|
LightningPeerConnectionClosed, HandshakeFailed,
|
2021-03-02 12:10:17 +01:00
|
|
|
RemoteMisbehaving, ShortChannelID,
|
2020-06-08 21:17:23 +02:00
|
|
|
IncompatibleLightningFeatures, derive_payment_secret_from_payment_preimage,
|
2022-01-26 16:59:10 +01:00
|
|
|
ChannelType, LNProtocolWarning)
|
2020-03-13 11:44:29 +01:00
|
|
|
from .lnutil import FeeUpdate, channel_id_from_funding_tx
|
2018-10-22 15:35:57 +02:00
|
|
|
from .lntransport import LNTransport, LNTransportBase
|
2023-01-13 10:37:06 +00:00
|
|
|
from .lnmsg import encode_msg, decode_msg, UnknownOptionalMsgType, FailedToParseMsg
|
2020-12-19 21:21:24 +01:00
|
|
|
from .interface import GracefulDisconnect
|
2019-08-16 22:44:07 +02:00
|
|
|
from .lnrouter import fee_for_edge_msat
|
2019-11-13 09:20:19 +01:00
|
|
|
from .lnutil import ln_dummy_address
|
2020-02-23 20:35:03 +01:00
|
|
|
from .json_db import StoredDict
|
2021-03-09 09:35:43 +01:00
|
|
|
from .invoices import PR_PAID
|
2021-11-26 09:45:06 +01:00
|
|
|
from .simple_config import FEE_LN_ETA_TARGET
|
2018-10-22 15:35:57 +02:00
|
|
|
|
|
|
|
|
if TYPE_CHECKING:
|
2021-03-09 09:55:55 +01:00
|
|
|
from .lnworker import LNGossip, LNWallet
|
2020-12-19 21:21:24 +01:00
|
|
|
from .lnrouter import LNPaymentRoute
|
2019-11-13 09:20:19 +01:00
|
|
|
from .transaction import PartialTransaction
|
2018-10-05 15:37:47 +02:00
|
|
|
|
2018-04-10 15:53:58 +02:00
|
|
|
|
2019-09-05 18:31:51 +02:00
|
|
|
# Timeout (in seconds) used for LN peer-to-peer network operations.
LN_P2P_NETWORK_TIMEOUT = 20
|
|
|
|
|
|
2018-05-29 11:51:48 +02:00
|
|
|
|
2019-05-02 17:59:11 +02:00
|
|
|
class Peer(Logger):
    # note: in general this class is NOT thread-safe. Most methods are assumed to be running on asyncio thread.

    LOGGING_SHORTCUT = 'P'

    # Message types that must be handled strictly in the order received;
    # they are queued per-channel (see process_message / ordered_message_queues).
    # fix: 'accept_channel' was listed twice; the duplicate is redundant for the
    # membership tests this tuple is used for and has been removed.
    ORDERED_MESSAGES = (
        'accept_channel', 'funding_signed', 'funding_created', 'closing_signed')
    # High-frequency message types that are excluded from debug logging.
    SPAMMY_MESSAGES = (
        'ping', 'pong', 'channel_announcement', 'node_announcement', 'channel_update',)

    # Sleep inserted while processing incoming messages — presumably to yield to
    # other asyncio tasks; the consuming loop is outside this chunk (TODO confirm).
    DELAY_INC_MSG_PROCESSING_SLEEP = 0.01
|
|
|
|
|
|
2020-06-19 06:38:00 +02:00
|
|
|
def __init__(
        self,
        lnworker: Union['LNGossip', 'LNWallet'],
        pubkey: bytes,
        transport: LNTransportBase,
        *, is_channel_backup: bool = False):
    """A single LN peer connection.

    :param lnworker: owning worker (gossip daemon or wallet lightning worker)
    :param pubkey: the REMOTE node's public key
    :param transport: transport to the remote node (handshake may still be pending)
    :param is_channel_backup: if True, only the "init" message is processed
    """
    self.lnworker = lnworker
    self.network = lnworker.network
    self.asyncio_loop = self.network.asyncio_loop
    self.is_channel_backup = is_channel_backup
    # "initialized" only resolves after we have both sent AND received "init"
    self._sent_init = False # type: bool
    self._received_init = False # type: bool
    self.initialized = self.asyncio_loop.create_future()
    self.got_disconnected = asyncio.Event()
    self.querying = asyncio.Event()
    self.transport = transport
    self.pubkey = pubkey # remote pubkey
    self.privkey = self.transport.privkey # local privkey
    self.features = self.lnworker.features # type: LnFeatures
    self.their_features = LnFeatures(0) # type: LnFeatures
    self.node_ids = [self.pubkey, privkey_to_pubkey(self.privkey)]
    # sanity check: never connect to ourselves
    assert self.node_ids[0] != self.node_ids[1]
    self.last_message_time = 0
    self.pong_event = asyncio.Event()
    self.reply_channel_range = asyncio.Queue()
    # gossip uses a single queue to preserve message order
    self.gossip_queue = asyncio.Queue()
    self.ordered_message_queues = defaultdict(asyncio.Queue) # for messages that are ordered
    self.temp_id_to_id = {} # type: Dict[bytes, Optional[bytes]] # to forward error messages
    self.funding_created_sent = set() # for channels in PREOPENING
    self.funding_signed_sent = set() # for channels in PREOPENING
    self.shutdown_received = {} # chan_id -> asyncio.Future()
    self.announcement_signatures = defaultdict(asyncio.Queue)
    self.channel_reestablish_msg = defaultdict(self.asyncio_loop.create_future)
    self.orphan_channel_updates = OrderedDict() # type: OrderedDict[ShortChannelID, dict]
    Logger.__init__(self)
    self.taskgroup = OldTaskGroup()
    # HTLCs offered by REMOTE, that we started removing but are still active:
    self.received_htlcs_pending_removal = set() # type: Set[Tuple[Channel, int]]
    self.received_htlc_removed_event = asyncio.Event()
    self._htlc_switch_iterstart_event = asyncio.Event()
    self._htlc_switch_iterdone_event = asyncio.Event()
    self._received_revack_event = asyncio.Event()
    self.received_commitsig_event = asyncio.Event()
    self.downstream_htlc_resolved_event = asyncio.Event()
|
2018-10-14 22:36:23 +02:00
|
|
|
|
2018-10-25 19:53:31 +02:00
|
|
|
def send_message(self, message_name: str, **kwargs):
    """Encode and send a single LN wire message over the transport.

    Must be called on the asyncio thread. Raises if any message other than
    "init" is attempted before the init handshake completed (sending earlier
    would hit an un-keyed transport).
    """
    # fix: dropped the needless f-prefix on a string with no placeholders (lint F541)
    assert util.get_running_loop() == util.get_asyncio_loop(), "this must be run on the asyncio thread!"
    assert type(message_name) is str
    if message_name not in self.SPAMMY_MESSAGES:
        self.logger.debug(f"Sending {message_name.upper()}")
    if message_name.upper() != "INIT" and not self.is_initialized():
        raise Exception("tried to send message before we are initialized")
    raw_msg = encode_msg(message_name, **kwargs)
    # persist outgoing channel updates so they can be replayed on reestablish
    self._store_raw_msg_if_local_update(raw_msg, message_name=message_name, channel_id=kwargs.get("channel_id"))
    self.transport.send_bytes(raw_msg)
|
|
|
|
|
|
|
|
|
|
def _store_raw_msg_if_local_update(self, raw_msg: bytes, *, message_name: str, channel_id: Optional[bytes]):
    """Persist outgoing channel-update / commitment_signed messages on the channel,
    so they can be replayed during channel reestablishment."""
    commitment_signed = (message_name == "commitment_signed")
    is_local_update = message_name.startswith("update_") or commitment_signed
    if not is_local_update:
        return
    assert channel_id
    chan = self.get_channel_by_id(channel_id)
    if not chan:
        raise Exception(f"channel {channel_id.hex()} not found for peer {self.pubkey.hex()}")
    chan.hm.store_local_update_raw_msg(raw_msg, is_commitment_signed=commitment_signed)
    if commitment_signed:
        # saving now, to ensure replaying updates works (in case of channel reestablishment)
        self.lnworker.save_channel(chan)
|
2018-10-14 22:36:23 +02:00
|
|
|
|
2020-02-21 19:45:45 +01:00
|
|
|
def maybe_set_initialized(self):
    """Resolve the `initialized` future once BOTH sides have exchanged "init"."""
    handshake_complete = self._sent_init and self._received_init
    if handshake_complete and not self.initialized.done():
        self.initialized.set_result(True)
|
|
|
|
|
|
lnpeer: Peer.is_initialized() should not raise
Traceback (most recent call last):
File "...\electrum\electrum\gui\qt\main_window.py", line 434, in on_network_qt
self.update_lightning_icon()
File "...\electrum\electrum\gui\qt\main_window.py", line 2092, in update_lightning_icon
cur, total = self.network.lngossip.get_sync_progress_estimate()
File "...\electrum\electrum\lnworker.py", line 373, in get_sync_progress_estimate
if self.num_peers() == 0:
File "...\electrum\electrum\lnworker.py", line 202, in num_peers
return sum([p.is_initialized() for p in self.peers.values()])
File "...\electrum\electrum\lnworker.py", line 202, in
return sum([p.is_initialized() for p in self.peers.values()])
File "...\electrum\electrum\lnpeer.py", line 128, in is_initialized
return self.initialized.done() and self.initialized.result() is True
concurrent.futures._base.CancelledError
2020-02-29 20:10:02 +01:00
|
|
|
def is_initialized(self) -> bool:
    """Return True iff the init handshake completed successfully.

    Deliberately never raises: a cancelled future or one holding an
    exception simply reads as "not initialized".
    """
    fut = self.initialized
    if not fut.done() or fut.cancelled():
        return False
    if fut.exception() is not None:
        return False
    return fut.result() is True
|
2020-02-21 19:45:45 +01:00
|
|
|
|
2018-10-14 22:36:23 +02:00
|
|
|
async def initialize(self):
    """Perform the transport handshake (outgoing connections only) and send our "init"."""
    # If outgoing transport, do handshake now. For incoming, it has already been done.
    if isinstance(self.transport, LNTransport):
        await self.transport.handshake()
    self.logger.info(f"handshake done for {self.transport.peer_addr or self.pubkey.hex()}")
    features = self.features.for_init_message()
    # minimal big-endian byte length of the feature bitvector: ceil(bits / 8)
    flen = (features.bit_length() + 7) // 8
    self.send_message(
        "init", gflen=0, flen=flen,
        features=features,
        init_tlvs={
            'networks':
                {'chains': constants.net.rev_genesis_bytes()}
        })
    self._sent_init = True
    self.maybe_set_initialized()
|
2018-10-14 22:36:23 +02:00
|
|
|
|
|
|
|
|
@property
def channels(self) -> Dict[bytes, Channel]:
    """Channels we have with this remote peer, keyed by channel_id."""
    return self.lnworker.channels_for_peer(self.pubkey)
|
2018-10-14 22:36:23 +02:00
|
|
|
|
2020-04-30 21:13:29 +02:00
|
|
|
def get_channel_by_id(self, channel_id: bytes) -> Optional[Channel]:
    """Return the channel for channel_id if it exists AND belongs to this peer, else None."""
    # note: this is faster than self.channels.get(channel_id)
    chan = self.lnworker.get_channel_by_id(channel_id)
    if chan and chan.node_id == self.pubkey:
        return chan
    return None
|
|
|
|
|
|
2018-10-14 22:36:23 +02:00
|
|
|
def diagnostic_name(self):
    """Short identifier used in log lines for this peer."""
    return f"{self.lnworker.__class__.__name__}, {self.transport.name()}"
|
2018-10-14 22:36:23 +02:00
|
|
|
|
2022-05-30 09:30:18 +02:00
|
|
|
async def ping_if_required(self):
    """If the connection has been quiet for more than 30s, ping and await the pong."""
    quiet_for = time.time() - self.last_message_time
    if quiet_for <= 30:
        return
    self.send_message('ping', num_pong_bytes=4, byteslen=4)
    self.pong_event.clear()
    await self.pong_event.wait()
|
2018-10-14 22:36:23 +02:00
|
|
|
|
2023-01-13 10:37:06 +00:00
|
|
|
def process_message(self, message: bytes):
    """Decode one raw incoming wire message and dispatch it.

    Handlers are methods named ``on_<message_type>``. Unknown OPTIONAL
    message types are ignored; unparseable messages trigger a disconnect.
    Coroutine handlers are scheduled on this peer's taskgroup.
    """
    try:
        message_type, payload = decode_msg(message)
    except UnknownOptionalMsgType as e:
        # per BOLT-1, unknown odd (optional) message types must be ignored
        self.logger.info(f"received unknown message from peer. ignoring: {e!r}")
        return
    except FailedToParseMsg as e:
        self.logger.info(
            f"failed to parse message from peer. disconnecting. "
            f"msg_type={e.msg_type_name}({e.msg_type_int}). exc={e!r}")
        #self.logger.info(f"failed to parse message: message(SECRET?)={message.hex()}")
        raise GracefulDisconnect() from e
    self.last_message_time = time.time()
    if message_type not in self.SPAMMY_MESSAGES:
        self.logger.debug(f"Received {message_type.upper()}")
    # only process INIT if we are a backup
    if self.is_channel_backup is True and message_type != 'init':
        return
    if message_type in self.ORDERED_MESSAGES:
        # ordered messages go into a per-channel queue, consumed in order elsewhere
        chan_id = payload.get('channel_id') or payload["temporary_channel_id"]
        self.ordered_message_queues[chan_id].put_nowait((message_type, payload))
    else:
        # for channel-scoped messages, resolve the channel object and pass it
        # to the handler; error/warning handlers do their own chan_id lookup
        if message_type not in ('error', 'warning') and 'channel_id' in payload:
            chan = self.get_channel_by_id(payload['channel_id'])
            if chan is None:
                self.logger.info(f"Received {message_type} for unknown channel {payload['channel_id'].hex()}")
                return
            args = (chan, payload)
        else:
            args = (payload,)
        try:
            f = getattr(self, 'on_' + message_type)
        except AttributeError:
            # no handler defined for this (known) message type: silently drop it
            #self.logger.info("Received '%s'" % message_type.upper(), payload)
            return
        # raw message is needed to check signature
        if message_type in ['node_announcement', 'channel_announcement', 'channel_update']:
            payload['raw'] = message
        execution_result = f(*args)
        if asyncio.iscoroutinefunction(f):
            # async handlers returned a coroutine: run it on this peer's taskgroup
            asyncio.ensure_future(self.taskgroup.spawn(execution_result))
|
2018-04-11 13:53:51 +02:00
|
|
|
|
2022-01-26 16:51:42 +01:00
|
|
|
def on_warning(self, payload):
    """Handle an incoming "warning" message (BOLT-1).

    Forwards the warning into the relevant per-channel ordered queue (if the
    channel_id — or a temporary channel_id we mapped — is known) and then
    disconnects. A warning for an unknown channel is ignored, per spec.
    """
    chan_id = payload.get("channel_id")
    # NOTE(review): assumes channel_id is present and data is ASCII — otherwise
    # chan_id.hex() / decode('ascii') below would raise; presumably upstream
    # message parsing guarantees the field exists. TODO confirm.
    self.logger.info(f"remote peer sent warning [DO NOT TRUST THIS MESSAGE]: "
                     f"{payload['data'].decode('ascii')}. chan_id={chan_id.hex()}")
    if chan_id in self.channels:
        self.ordered_message_queues[chan_id].put_nowait((None, {'warning': payload['data']}))
    elif chan_id in self.temp_id_to_id:
        # remote referred to the channel by its temporary id; translate to the
        # final id if we already learned it
        chan_id = self.temp_id_to_id[chan_id] or chan_id
        self.ordered_message_queues[chan_id].put_nowait((None, {'warning': payload['data']}))
    else:
        # if no existing channel is referred to by channel_id:
        # - MUST ignore the message.
        return
    # only reached when the warning referred to a channel we know about
    raise GracefulDisconnect
|
2022-01-26 16:51:42 +01:00
|
|
|
|
2018-04-12 11:12:21 +02:00
|
|
|
def on_error(self, payload):
    """Handle a received BOLT-1 'error' message.

    Depending on channel_id, force-close the referenced channel(s), relay the
    error text to any coroutine waiting on that channel's message queue, and
    disconnect. An unknown channel_id is ignored, as the spec requires.
    """
    chan_id = payload.get("channel_id")
    self.logger.info(f"remote peer sent error [DO NOT TRUST THIS MESSAGE]: "
                     f"{payload['data'].decode('ascii')}. chan_id={chan_id.hex()}")

    def relay_error(target_id: bytes) -> None:
        # wake up any coroutine blocked in wait_for_message for this channel
        self.ordered_message_queues[target_id].put_nowait((None, {'error': payload['data']}))

    if chan_id in self.channels:
        self.schedule_force_closing(chan_id)
        relay_error(chan_id)
    elif chan_id in self.temp_id_to_id:
        # translate the temporary channel id to the final one, when already known
        relay_error(self.temp_id_to_id[chan_id] or chan_id)
    elif chan_id == bytes(32):
        # if channel_id is all zero:
        # - MUST fail all channels with the sending node.
        for cid in self.channels:
            self.schedule_force_closing(cid)
            relay_error(cid)
    else:
        # if no existing channel is referred to by channel_id:
        # - MUST ignore the message.
        return
    raise GracefulDisconnect
|
2022-01-26 16:51:42 +01:00
|
|
|
|
|
|
|
|
async def send_warning(self, channel_id: bytes, message: str = None, *, close_connection=True):
    """Send a BOLT-1 'warning' to the peer; disconnect afterwards if close_connection.

    Note:
    * channel_id is the temporary channel id when the channel id is not yet available

    A sending node:
    MAY set channel_id to all zero if the warning is not related to a specific channel.

    when failure was caused by an invalid signature check:
    * SHOULD include the raw, hex-encoded transaction in reply to a funding_created,
      funding_signed, closing_signed, or commitment_signed message.
    """
    assert isinstance(channel_id, bytes)
    warning_bytes = message.encode('ascii') if message else b''
    self.send_message('warning', channel_id=channel_id, data=warning_bytes, len=len(warning_bytes))
    if close_connection:
        raise GracefulDisconnect
|
|
|
|
|
|
|
|
|
|
async def send_error(self, channel_id: bytes, message: str = None, *, force_close_channel=False):
    """Send a BOLT-1 'error' message, then disconnect (optionally force-closing).

    Note:
    * channel_id is the temporary channel id when the channel id is not yet available

    A sending node:
    * SHOULD send error for protocol violations or internal errors that make channels
      unusable or that make further communication unusable.
    * SHOULD send error with the unknown channel_id in reply to messages of type
      32-255 related to unknown channels.
    * MUST fail the channel(s) referred to by the error message.
    * MAY set channel_id to all zero to indicate all channels.

    when failure was caused by an invalid signature check:
    * SHOULD include the raw, hex-encoded transaction in reply to a funding_created,
      funding_signed, closing_signed, or commitment_signed message.
    """
    assert isinstance(channel_id, bytes)
    error_bytes = message.encode('ascii') if message else b''
    self.send_message('error', channel_id=channel_id, data=error_bytes, len=len(error_bytes))
    # MUST fail the channel(s) referred to by the error message:
    # we may violate this with force_close_channel
    if force_close_channel:
        if channel_id in self.channels:
            self.schedule_force_closing(channel_id)
        elif channel_id == bytes(32):
            # all-zero channel_id refers to all channels with this peer
            for cid in self.channels:
                self.schedule_force_closing(cid)
    raise GracefulDisconnect
|
2018-04-12 11:12:21 +02:00
|
|
|
|
2018-04-11 13:53:51 +02:00
|
|
|
def on_ping(self, payload):
    """Answer a received ping with a pong of the requested size."""
    num_pong_bytes = payload['num_pong_bytes']
    self.send_message('pong', byteslen=num_pong_bytes)
|
2018-04-11 13:53:51 +02:00
|
|
|
|
2018-09-25 11:57:37 +02:00
|
|
|
def on_pong(self, payload):
    """Record that the peer answered our ping."""
    self.pong_event.set()
|
2018-09-25 11:57:37 +02:00
|
|
|
|
2020-02-23 17:18:45 +01:00
|
|
|
async def wait_for_message(self, expected_name, channel_id):
    """Await the next message queued for channel_id and return its payload.

    Raises GracefulDisconnect if the peer sent an error/warning for this
    channel, so that callers (such as the GUI) can show user feedback.
    Raises Exception on an unexpected message type.
    """
    msg_queue = self.ordered_message_queues[channel_id]
    name, payload = await asyncio.wait_for(msg_queue.get(), LN_P2P_NETWORK_TIMEOUT)
    # raise exceptions for errors/warnings, so that the caller sees them
    for kind in ('error', 'warning'):
        if payload.get(kind):
            raise GracefulDisconnect(
                f"remote peer sent {kind} [DO NOT TRUST THIS MESSAGE]: {payload[kind].decode('ascii')}")
    if name != expected_name:
        raise Exception(f"Received unexpected '{name}'")
    return payload
|
2018-10-04 14:03:29 +02:00
|
|
|
|
2018-04-15 15:25:31 +02:00
|
|
|
def on_init(self, payload):
    """Handle the BOLT-1 'init' message: negotiate features and verify chains.

    Raises GracefulDisconnect when feature negotiation fails or no common
    chain is found; on success notifies lnworker and marks us initialized.
    """
    # init must only be processed once per connection
    if self._received_init:
        self.logger.info("ALREADY INITIALIZED BUT RECEIVED INIT")
        return
    self.their_features = LnFeatures(int.from_bytes(payload['features'], byteorder="big"))
    their_globalfeatures = int.from_bytes(payload['globalfeatures'], byteorder="big")
    # legacy globalfeatures are merged into the same feature bitvector
    self.their_features |= their_globalfeatures
    # check transitive dependencies for received features
    if not self.their_features.validate_transitive_dependencies():
        raise GracefulDisconnect("remote did not set all dependencies for the features they sent")
    # check if features are compatible, and set self.features to what we negotiated
    try:
        self.features = ln_compare_features(self.features, self.their_features)
    except IncompatibleLightningFeatures as e:
        # propagate the failure to anyone awaiting self.initialized before disconnecting
        self.initialized.set_exception(e)
        raise GracefulDisconnect(f"{str(e)}")
    # check that they are on the same chain as us, if provided
    their_networks = payload["init_tlvs"].get("networks")
    if their_networks:
        # "chains" is a concatenation of 32-byte (reversed) genesis hashes
        their_chains = list(chunks(their_networks["chains"], 32))
        if constants.net.rev_genesis_bytes() not in their_chains:
            raise GracefulDisconnect(f"no common chain found with remote. (they sent: {their_chains})")
    # all checks passed
    self.lnworker.on_peer_successfully_established(self)
    self._received_init = True
    self.maybe_set_initialized()
|
2018-04-15 15:25:31 +02:00
|
|
|
|
2019-02-01 20:59:59 +01:00
|
|
|
def on_node_announcement(self, payload):
    """Forward node gossip for later processing, unless we run trampoline-only."""
    if self.lnworker.uses_trampoline():
        return
    self.gossip_queue.put_nowait(('node_announcement', payload))
|
2018-04-15 15:25:31 +02:00
|
|
|
|
|
|
|
|
def on_channel_announcement(self, payload):
    """Forward channel gossip for later processing, unless we run trampoline-only."""
    if self.lnworker.uses_trampoline():
        return
    self.gossip_queue.put_nowait(('channel_announcement', payload))
|
2019-05-15 10:27:57 +02:00
|
|
|
|
|
|
|
|
def on_channel_update(self, payload):
    """Save the update if it concerns one of our channels, then forward as gossip."""
    self.maybe_save_remote_update(payload)
    if self.lnworker.uses_trampoline():
        return
    self.gossip_queue.put_nowait(('channel_update', payload))
|
2018-04-15 15:25:31 +02:00
|
|
|
|
2019-09-08 19:14:22 +02:00
|
|
|
def maybe_save_remote_update(self, payload):
    """Store a channel_update that refers to one of our own channels with this peer.

    Updates matching no known channel are kept in a small bounded buffer of
    "orphans", as they might concern a channel whose short id we don't know yet.
    """
    if not self.channels:
        return
    scid = payload['short_channel_id']
    matching_chan = next(
        (chan for chan in self.channels.values()
         if scid in [chan.short_channel_id, chan.get_local_alias()]),
        None)
    if matching_chan is not None:
        matching_chan.set_remote_update(payload)
        self.logger.info(f"saved remote channel_update gossip msg for chan {matching_chan.get_id_for_log()}")
        return
    # Save (some bounded number of) orphan channel updates for later
    # as it might be for our own direct channel with this peer
    # (and we might not yet know the short channel id for that)
    # Background: this code is here to deal with a bug in LND,
    # see https://github.com/lightningnetwork/lnd/issues/3651
    # and https://github.com/lightningnetwork/lightning-rfc/pull/657
    # This code assumes gossip_queries is set. BOLT7: "if the
    # gossip_queries feature is negotiated, [a node] MUST NOT
    # send gossip it did not generate itself"
    short_channel_id = ShortChannelID(payload['short_channel_id'])
    self.logger.info(f'received orphan channel update {short_channel_id}')
    self.orphan_channel_updates[short_channel_id] = payload
    while len(self.orphan_channel_updates) > 25:
        self.orphan_channel_updates.popitem(last=False)
|
2019-09-08 19:14:22 +02:00
|
|
|
|
2020-03-17 18:02:51 +01:00
|
|
|
def on_announcement_signatures(self, chan: Channel, payload):
    """Handle a received announcement_signatures message.

    If we already announced the channel, (re)send our own signatures;
    otherwise queue the payload for the coroutine waiting on it.
    """
    if chan.config[LOCAL].was_announced:
        # fix: the returned (h, node_sig, bitcoin_sig) tuple was unpacked into
        # locals that were never used; discard the return value instead
        self.send_announcement_signatures(chan)
    else:
        self.announcement_signatures[chan.channel_id].put_nowait(payload)
|
2018-06-18 15:34:18 +02:00
|
|
|
|
2018-10-05 16:33:35 +02:00
|
|
|
def handle_disconnect(func):
    """Decorator for a peer coroutine: absorb and log the expected
    disconnect-type exceptions, and always run close_and_cleanup() at the end."""
    @functools.wraps(func)
    async def wrapper(self, *args, **kwargs):
        try:
            return await func(self, *args, **kwargs)
        except GracefulDisconnect as exc:
            self.logger.log(exc.log_level, f"Disconnecting: {exc!r}")
        except (LightningPeerConnectionClosed, IncompatibleLightningFeatures,
                aiorpcx.socks.SOCKSError) as exc:
            self.logger.info(f"Disconnecting: {exc!r}")
        finally:
            self.close_and_cleanup()
    return wrapper
|
|
|
|
|
|
2020-02-27 18:50:03 +01:00
|
|
|
@ignore_exceptions  # do not kill outer taskgroup
@log_exceptions
@handle_disconnect
async def main_loop(self):
    """Run this peer's long-lived tasks until the connection ends.

    Spawns the p2p message loop, the HTLC switch and the gossip tasks into
    self.taskgroup; `handle_disconnect` takes care of cleanup when the
    group exits (presumably when any task fails — confirm taskgroup semantics).
    """
    async with self.taskgroup as group:
        await group.spawn(self._message_loop())
        await group.spawn(self.htlc_switch())
        await group.spawn(self.query_gossip())
        await group.spawn(self.process_gossip())
|
2019-05-15 10:27:57 +02:00
|
|
|
|
2019-05-15 12:30:19 +02:00
|
|
|
    async def process_gossip(self):
        """Periodically drain self.gossip_queue, batch the messages by type,
        and forward them to the global lngossip instance."""
        while True:
            await asyncio.sleep(5)
            if not self.network.lngossip:
                # gossip is disabled / not yet started; leave messages queued
                continue
            chan_anns = []
            chan_upds = []
            node_anns = []
            # drain everything currently queued; blocks on the first get()
            # so we don't busy-loop when the queue is empty
            while True:
                name, payload = await self.gossip_queue.get()
                if name == 'channel_announcement':
                    chan_anns.append(payload)
                elif name == 'channel_update':
                    chan_upds.append(payload)
                elif name == 'node_announcement':
                    node_anns.append(payload)
                else:
                    raise Exception('unknown message')
                if self.gossip_queue.empty():
                    break
            # re-check: lngossip may have gone away while we awaited above
            if self.network.lngossip:
                await self.network.lngossip.process_gossip(chan_anns, node_anns, chan_upds)
|
2019-05-15 16:09:23 +02:00
|
|
|
|
2019-05-15 12:30:19 +02:00
|
|
|
    async def query_gossip(self):
        """If this connection belongs to the gossip worker, sync the channel
        graph from the peer: fetch the full short_channel_id list, then keep
        querying details for ids the lnworker still wants."""
        try:
            await asyncio.wait_for(self.initialized, LN_P2P_NETWORK_TIMEOUT)
        except Exception as e:
            raise GracefulDisconnect(f"Failed to initialize: {e!r}") from e
        # only the dedicated gossip lnworker does graph sync on its peers
        if self.lnworker == self.lnworker.network.lngossip:
            try:
                ids, complete = await asyncio.wait_for(self.get_channel_range(), LN_P2P_NETWORK_TIMEOUT)
            except asyncio.TimeoutError as e:
                raise GracefulDisconnect("query_channel_range timed out") from e
            self.logger.info('Received {} channel ids. (complete: {})'.format(len(ids), complete))
            await self.lnworker.add_new_ids(ids)
            while True:
                todo = self.lnworker.get_ids_to_query()
                if not todo:
                    await asyncio.sleep(1)
                    continue
                await self.get_short_channel_ids(todo)
|
2019-05-13 22:33:56 +02:00
|
|
|
|
|
|
|
|
    async def get_channel_range(self):
        """Query the peer for all short_channel_ids since the first possible
        LN block, and collect replies until the requested block range is
        fully covered.

        Returns (ids, complete) where `ids` is a set of raw 8-byte
        short_channel_ids and `complete` is the peer's final completeness flag.
        """
        first_block = constants.net.BLOCK_HEIGHT_FIRST_LIGHTNING_CHANNELS
        num_blocks = self.lnworker.network.get_local_height() - first_block
        self.query_channel_range(first_block, num_blocks)
        intervals = []
        ids = set()
        # note: implementations behave differently...
        # a sane BOLT-07 implementation replies with consecutive sub-ranges,
        # e.g. (497000, +39516), (536516, +19758), ... each complete=True,
        # while lnd repeats the *full* requested range in every reply
        # (first_block 497000, num_blocks 79038) and only the last reply has
        # complete=True.  The interval-merging below handles both.
        while True:
            index, num, complete, _ids = await self.reply_channel_range.get()
            ids.update(_ids)
            intervals.append((index, index+num))
            intervals.sort()
            # greedily merge overlapping/adjacent intervals from the left
            while len(intervals) > 1:
                a,b = intervals[0]
                c,d = intervals[1]
                if not (a <= c and a <= b and c <= d):
                    raise Exception(f"insane reply_channel_range intervals {(a,b,c,d)}")
                if b >= c:  # intervals overlap
                    intervals = [(a,d)] + intervals[2:]
                else:
                    break
            # done once a single interval covers the whole requested range
            # and the peer signalled completion
            if len(intervals) == 1 and complete:
                a, b = intervals[0]
                if a <= first_block and b >= first_block + num_blocks:
                    break
        return ids, complete
|
2019-02-21 18:55:12 +01:00
|
|
|
|
2019-05-13 14:30:02 +02:00
|
|
|
    def request_gossip(self, timestamp=0):
        """Ask the peer to stream gossip, starting at `timestamp` (unix time).

        timestamp == 0 requests the whole channel graph; otherwise only
        gossip newer than the given time.
        """
        if timestamp == 0:
            self.logger.info('requesting whole channel graph')
        else:
            self.logger.info(f'requesting channel graph since {datetime.fromtimestamp(timestamp).ctime()}')
        self.send_message(
            'gossip_timestamp_filter',
            chain_hash=constants.net.rev_genesis_bytes(),
            first_timestamp=timestamp,
            timestamp_range=b'\xff'*4)  # max uint32: no upper bound
|
2019-05-13 14:30:02 +02:00
|
|
|
|
2019-05-26 05:58:29 +02:00
|
|
|
    def query_channel_range(self, first_block, num_blocks):
        """Send a BOLT-07 query_channel_range for `num_blocks` blocks
        starting at height `first_block`.  Replies arrive asynchronously
        via on_reply_channel_range()."""
        self.logger.info(f'query channel range {first_block} {num_blocks}')
        self.send_message(
            'query_channel_range',
            chain_hash=constants.net.rev_genesis_bytes(),
            first_blocknum=first_block,
            number_of_blocks=num_blocks)
|
2019-05-13 14:30:02 +02:00
|
|
|
|
|
|
|
|
def decode_short_ids(self, encoded):
|
|
|
|
|
if encoded[0] == 0:
|
|
|
|
|
decoded = encoded[1:]
|
|
|
|
|
elif encoded[0] == 1:
|
|
|
|
|
decoded = zlib.decompress(encoded[1:])
|
|
|
|
|
else:
|
2019-08-16 22:40:17 +02:00
|
|
|
raise Exception(f'decode_short_ids: unexpected first byte: {encoded[0]}')
|
2019-05-13 14:30:02 +02:00
|
|
|
ids = [decoded[i:i+8] for i in range(0, len(decoded), 8)]
|
|
|
|
|
return ids
|
|
|
|
|
|
|
|
|
|
    def on_reply_channel_range(self, payload):
        """Handle a BOLT-07 reply_channel_range message: decode the ids and
        forward everything to the queue consumed by get_channel_range()."""
        first = payload['first_blocknum']
        num = payload['number_of_blocks']
        complete = bool(int.from_bytes(payload['sync_complete'], 'big'))
        encoded = payload['encoded_short_ids']
        ids = self.decode_short_ids(encoded)
        self.reply_channel_range.put_nowait((first, num, complete, ids))
|
|
|
|
|
|
2019-05-16 11:47:55 +02:00
|
|
|
    async def get_short_channel_ids(self, ids):
        """Query the peer for details of the given short_channel_ids and wait
        for its reply_short_channel_ids_end."""
        self.logger.info(f'Querying {len(ids)} short_channel_ids')
        # only one such query may be in flight at a time on this connection
        assert not self.querying.is_set()
        self.query_short_channel_ids(ids)
        # set by on_reply_short_channel_ids_end()
        await self.querying.wait()
        self.querying.clear()
|
|
|
|
|
|
2019-05-13 22:33:56 +02:00
|
|
|
def query_short_channel_ids(self, ids, compressed=True):
|
2019-05-14 21:24:38 +02:00
|
|
|
ids = sorted(ids)
|
2019-05-13 14:30:02 +02:00
|
|
|
s = b''.join(ids)
|
|
|
|
|
encoded = zlib.compress(s) if compressed else s
|
|
|
|
|
prefix = b'\x01' if compressed else b'\x00'
|
|
|
|
|
self.send_message(
|
|
|
|
|
'query_short_channel_ids',
|
|
|
|
|
chain_hash=constants.net.rev_genesis_bytes(),
|
|
|
|
|
len=1+len(encoded),
|
|
|
|
|
encoded_short_ids=prefix+encoded)
|
2019-02-21 18:55:12 +01:00
|
|
|
|
|
|
|
|
    async def _message_loop(self):
        """Run the transport handshake/init, then read and dispatch incoming
        messages until the connection closes."""
        try:
            await asyncio.wait_for(self.initialize(), LN_P2P_NETWORK_TIMEOUT)
        except (OSError, asyncio.TimeoutError, HandshakeFailed) as e:
            raise GracefulDisconnect(f'initialize failed: {repr(e)}') from e
        async for msg in self.transport.read_messages():
            self.process_message(msg)
            if self.DELAY_INC_MSG_PROCESSING_SLEEP:
                # rate-limit message-processing a bit, to make it harder
                # for a single peer to bog down the event loop / cpu:
                await asyncio.sleep(self.DELAY_INC_MSG_PROCESSING_SLEEP)
|
2018-07-30 13:51:03 +02:00
|
|
|
|
2019-02-20 21:03:53 +01:00
|
|
|
    def on_reply_short_channel_ids_end(self, payload):
        """Peer finished answering our query; wake get_short_channel_ids()."""
        self.querying.set()
|
2019-02-20 21:03:53 +01:00
|
|
|
|
2018-07-30 13:51:03 +02:00
|
|
|
    def close_and_cleanup(self):
        """Close the transport (best-effort) and notify the lnworker and any
        waiters that this peer has disconnected."""
        # note: This method might get called multiple times!
        # E.g. if you call close_and_cleanup() to cause a disconnection from the peer,
        # it will get called a second time in handle_disconnect().
        try:
            if self.transport:
                self.transport.close()
        except:
            # deliberately swallow everything: the transport may already be
            # broken, and the bookkeeping below must run regardless
            pass
        self.lnworker.peer_closed(self)
        self.got_disconnected.set()
|
2018-04-11 11:02:10 +02:00
|
|
|
|
2021-10-22 11:59:44 +02:00
|
|
|
    def is_shutdown_anysegwit(self):
        """Whether the negotiated features allow any-segwit shutdown scripts."""
        return self.features.supports(LnFeatures.OPTION_SHUTDOWN_ANYSEGWIT_OPT)
|
|
|
|
|
|
2022-01-18 14:55:43 +01:00
|
|
|
    def is_channel_type(self):
        """Whether the negotiated features include explicit channel_type negotiation."""
        return self.features.supports(LnFeatures.OPTION_CHANNEL_TYPE_OPT)
|
|
|
|
|
|
2020-12-18 11:06:42 +01:00
|
|
|
    def is_upfront_shutdown_script(self):
        """Whether the negotiated features include option_upfront_shutdown_script."""
        return self.features.supports(LnFeatures.OPTION_UPFRONT_SHUTDOWN_SCRIPT_OPT)
|
2020-12-18 11:06:42 +01:00
|
|
|
|
|
|
|
|
def upfront_shutdown_script_from_payload(self, payload, msg_identifier: str) -> Optional[bytes]:
|
|
|
|
|
if msg_identifier not in ['accept', 'open']:
|
|
|
|
|
raise ValueError("msg_identifier must be either 'accept' or 'open'")
|
|
|
|
|
|
|
|
|
|
uss_tlv = payload[msg_identifier + '_channel_tlvs'].get(
|
|
|
|
|
'upfront_shutdown_script')
|
|
|
|
|
|
|
|
|
|
if uss_tlv and self.is_upfront_shutdown_script():
|
|
|
|
|
upfront_shutdown_script = uss_tlv['shutdown_scriptpubkey']
|
|
|
|
|
else:
|
|
|
|
|
upfront_shutdown_script = b''
|
|
|
|
|
self.logger.info(f"upfront shutdown script received: {upfront_shutdown_script}")
|
|
|
|
|
return upfront_shutdown_script
|
|
|
|
|
|
2022-01-18 14:55:43 +01:00
|
|
|
    def make_local_config(self, funding_sat: int, push_msat: int, initiator: HTLCOwner, channel_type: ChannelType) -> LocalConfig:
        """Create and validate our LocalConfig for a new channel.

        funding_sat/push_msat determine our initial balance depending on
        which side (`initiator`) opens the channel.  `channel_type` must
        include OPTION_STATIC_REMOTEKEY.
        """
        channel_seed = os.urandom(32)
        # our starting balance: everything minus the pushed amount if we open,
        # otherwise just what the opener pushes to us
        initial_msat = funding_sat * 1000 - push_msat if initiator == LOCAL else push_msat

        # sending empty bytes as the upfront_shutdown_script will give us the
        # flexibility to decide an address at closing time
        upfront_shutdown_script = b''

        assert channel_type & channel_type.OPTION_STATIC_REMOTEKEY
        wallet = self.lnworker.wallet
        assert wallet.txin_type == 'p2wpkh'
        # static_remotekey: tie our to_remote output to a wallet address
        addr = wallet.get_new_sweep_address_for_channel()
        static_remotekey = bytes.fromhex(wallet.get_public_key(addr))

        dust_limit_sat = bitcoin.DUST_LIMIT_P2PKH
        # 1% channel reserve, but never below the dust limit
        reserve_sat = max(funding_sat // 100, dust_limit_sat)
        # for comparison of defaults, see
        # https://github.com/ACINQ/eclair/blob/afa378fbb73c265da44856b4ad0f2128a88ae6c6/eclair-core/src/main/resources/reference.conf#L66
        # https://github.com/ElementsProject/lightning/blob/0056dd75572a8857cff36fcbdb1a2295a1ac9253/lightningd/options.c#L657
        # https://github.com/lightningnetwork/lnd/blob/56b61078c5b2be007d318673a5f3b40c6346883a/config.go#L81
        local_config = LocalConfig.from_seed(
            channel_seed=channel_seed,
            static_remotekey=static_remotekey,
            upfront_shutdown_script=upfront_shutdown_script,
            to_self_delay=self.network.config.get('lightning_to_self_delay', 7 * 144),
            dust_limit_sat=dust_limit_sat,
            max_htlc_value_in_flight_msat=funding_sat * 1000,
            max_accepted_htlcs=30,
            initial_msat=initial_msat,
            reserve_sat=reserve_sat,
            funding_locked_received=False,
            was_announced=False,
            current_commitment_signature=None,
            current_htlc_signatures=b'',
            htlc_minimum_msat=1,
        )
        local_config.validate_params(funding_sat=funding_sat)
        return local_config
|
2018-10-04 14:03:29 +02:00
|
|
|
|
2020-05-22 17:11:53 +02:00
|
|
|
    def temporarily_reserve_funding_tx_change_address(func):
        """Decorator: mark the funding tx's change addresses as reserved for
        the duration of the wrapped coroutine.

        The wrapped coroutine must receive the funding tx as the keyword
        argument `funding_tx`.
        """
        # During the channel open flow, if we initiated, we might have used a change address
        # of ours in the funding tx. The funding tx is not part of the wallet history
        # at that point yet, but we should already consider this change address as 'used'.
        @functools.wraps(func)
        async def wrapper(self: 'Peer', *args, **kwargs):
            funding_tx = kwargs['funding_tx']  # type: PartialTransaction
            wallet = self.lnworker.wallet
            change_addresses = [txout.address for txout in funding_tx.outputs()
                                if wallet.is_change(txout.address)]
            for addr in change_addresses:
                wallet.set_reserved_state_of_address(addr, reserved=True)
            try:
                return await func(self, *args, **kwargs)
            finally:
                # always release the reservation, even on failure
                for addr in change_addresses:
                    self.lnworker.wallet.set_reserved_state_of_address(addr, reserved=False)
        return wrapper
|
|
|
|
|
|
|
|
|
|
    @temporarily_reserve_funding_tx_change_address
    async def channel_establishment_flow(
            self, *,
            funding_tx: 'PartialTransaction',
            funding_sat: int,
            push_msat: int,
            temp_channel_id: bytes
    ) -> Tuple[Channel, 'PartialTransaction']:
        """Implements the channel opening flow (we are the funder).

        -> open_channel message
        <- accept_channel message
        -> funding_created message
        <- funding_signed message

        Channel configurations are initialized in this method.
        Returns the new Channel and the finalized (still unbroadcast) funding tx.
        """
        # will raise if init fails
        await asyncio.wait_for(self.initialized, LN_P2P_NETWORK_TIMEOUT)
        # trampoline is not yet in features
        if self.lnworker.uses_trampoline() and not self.lnworker.is_trampoline_peer(self.pubkey):
            raise Exception('Not a trampoline node: ' + str(self.their_features))

        feerate = self.lnworker.current_feerate_per_kw()
        # we set a channel type for internal bookkeeping
        open_channel_tlvs = {}
        assert self.their_features.supports(LnFeatures.OPTION_STATIC_REMOTEKEY_OPT)
        our_channel_type = ChannelType(ChannelType.OPTION_STATIC_REMOTEKEY)
        # We do not set the option_scid_alias bit in channel_type because LND rejects it.
        # Eclair accepts channel_type with that bit, but does not require it.

        # if option_channel_type is negotiated: MUST set channel_type
        if self.is_channel_type():
            # if it includes channel_type: MUST set it to a defined type representing the type it wants.
            open_channel_tlvs['channel_type'] = {
                'type': our_channel_type.to_bytes_minimal()
            }

        local_config = self.make_local_config(funding_sat, push_msat, LOCAL, our_channel_type)
        # if it includes open_channel_tlvs: MUST include upfront_shutdown_script.
        open_channel_tlvs['upfront_shutdown_script'] = {
            'shutdown_scriptpubkey': local_config.upfront_shutdown_script
        }

        # for the first commitment transaction
        per_commitment_secret_first = get_per_commitment_secret_from_seed(
            local_config.per_commitment_secret_seed,
            RevocationStore.START_INDEX
        )
        per_commitment_point_first = secret_to_pubkey(
            int.from_bytes(per_commitment_secret_first, 'big'))

        # store the temp id now, so that it is recognized for e.g. 'error' messages
        # TODO: this is never cleaned up; the dict grows unbounded until disconnect
        self.temp_id_to_id[temp_channel_id] = None
        self.send_message(
            "open_channel",
            temporary_channel_id=temp_channel_id,
            chain_hash=constants.net.rev_genesis_bytes(),
            funding_satoshis=funding_sat,
            push_msat=push_msat,
            dust_limit_satoshis=local_config.dust_limit_sat,
            feerate_per_kw=feerate,
            max_accepted_htlcs=local_config.max_accepted_htlcs,
            funding_pubkey=local_config.multisig_key.pubkey,
            revocation_basepoint=local_config.revocation_basepoint.pubkey,
            htlc_basepoint=local_config.htlc_basepoint.pubkey,
            payment_basepoint=local_config.payment_basepoint.pubkey,
            delayed_payment_basepoint=local_config.delayed_basepoint.pubkey,
            first_per_commitment_point=per_commitment_point_first,
            to_self_delay=local_config.to_self_delay,
            max_htlc_value_in_flight_msat=local_config.max_htlc_value_in_flight_msat,
            channel_flags=0x00,  # not willing to announce channel
            channel_reserve_satoshis=local_config.reserve_sat,
            htlc_minimum_msat=local_config.htlc_minimum_msat,
            open_channel_tlvs=open_channel_tlvs,
        )

        # <- accept_channel
        payload = await self.wait_for_message('accept_channel', temp_channel_id)
        self.logger.debug(f"received accept_channel for temp_channel_id={temp_channel_id.hex()}. {payload=}")
        remote_per_commitment_point = payload['first_per_commitment_point']
        funding_txn_minimum_depth = payload['minimum_depth']
        if funding_txn_minimum_depth <= 0:
            raise Exception(f"minimum depth too low, {funding_txn_minimum_depth}")
        if funding_txn_minimum_depth > 30:
            raise Exception(f"minimum depth too high, {funding_txn_minimum_depth}")

        upfront_shutdown_script = self.upfront_shutdown_script_from_payload(
            payload, 'accept')

        accept_channel_tlvs = payload.get('accept_channel_tlvs')
        their_channel_type = accept_channel_tlvs.get('channel_type') if accept_channel_tlvs else None
        if their_channel_type:
            their_channel_type = ChannelType.from_bytes(their_channel_type['type'], byteorder='big').discard_unknown_and_check()
            # if channel_type is set, and channel_type was set in open_channel,
            # and they are not equal types: MUST reject the channel.
            if open_channel_tlvs.get('channel_type') is not None and their_channel_type != our_channel_type:
                raise Exception("Channel type is not the one that we sent.")

        remote_config = RemoteConfig(
            payment_basepoint=OnlyPubkeyKeypair(payload['payment_basepoint']),
            multisig_key=OnlyPubkeyKeypair(payload["funding_pubkey"]),
            htlc_basepoint=OnlyPubkeyKeypair(payload['htlc_basepoint']),
            delayed_basepoint=OnlyPubkeyKeypair(payload['delayed_payment_basepoint']),
            revocation_basepoint=OnlyPubkeyKeypair(payload['revocation_basepoint']),
            to_self_delay=payload['to_self_delay'],
            dust_limit_sat=payload['dust_limit_satoshis'],
            max_htlc_value_in_flight_msat=payload['max_htlc_value_in_flight_msat'],
            max_accepted_htlcs=payload["max_accepted_htlcs"],
            initial_msat=push_msat,
            reserve_sat=payload["channel_reserve_satoshis"],
            htlc_minimum_msat=payload['htlc_minimum_msat'],
            next_per_commitment_point=remote_per_commitment_point,
            current_per_commitment_point=None,
            upfront_shutdown_script=upfront_shutdown_script,
        )
        ChannelConfig.cross_validate_params(
            local_config=local_config,
            remote_config=remote_config,
            funding_sat=funding_sat,
            is_local_initiator=True,
            initial_feerate_per_kw=feerate,
        )

        # -> funding created
        # replace dummy output in funding tx
        redeem_script = funding_output_script(local_config, remote_config)
        funding_address = bitcoin.redeem_script_to_address('p2wsh', redeem_script)
        funding_output = PartialTxOutput.from_address_and_value(funding_address, funding_sat)
        dummy_output = PartialTxOutput.from_address_and_value(ln_dummy_address(), funding_sat)
        if dummy_output not in funding_tx.outputs(): raise Exception("LN dummy output (err 1)")
        funding_tx._outputs.remove(dummy_output)
        if dummy_output in funding_tx.outputs(): raise Exception("LN dummy output (err 2)")
        funding_tx.add_outputs([funding_output])
        # find and encrypt op_return data associated to funding_address
        has_onchain_backup = self.lnworker and self.lnworker.has_recoverable_channels()
        if has_onchain_backup:
            backup_data = self.lnworker.cb_data(self.pubkey)
            dummy_scriptpubkey = make_op_return(backup_data)
            for o in funding_tx.outputs():
                if o.scriptpubkey == dummy_scriptpubkey:
                    encrypted_data = self.lnworker.encrypt_cb_data(backup_data, funding_address)
                    assert len(encrypted_data) == len(backup_data)
                    o.scriptpubkey = make_op_return(encrypted_data)
                    break
            else:
                raise Exception('op_return output not found in funding tx')
        # must not be malleable
        funding_tx.set_rbf(False)
        if not funding_tx.is_segwit():
            raise Exception('Funding transaction is not segwit')
        funding_txid = funding_tx.txid()
        assert funding_txid
        funding_index = funding_tx.outputs().index(funding_output)
        # build remote commitment transaction
        channel_id, funding_txid_bytes = channel_id_from_funding_tx(funding_txid, funding_index)
        outpoint = Outpoint(funding_txid, funding_index)
        constraints = ChannelConstraints(
            capacity=funding_sat,
            is_initiator=True,
            funding_txn_minimum_depth=funding_txn_minimum_depth
        )
        storage = self.create_channel_storage(
            channel_id, outpoint, local_config, remote_config, constraints, our_channel_type)
        chan = Channel(
            storage,
            lnworker=self.lnworker,
            initial_feerate=feerate
        )
        chan.storage['funding_inputs'] = [txin.prevout.to_json() for txin in funding_tx.inputs()]
        chan.storage['has_onchain_backup'] = has_onchain_backup
        if isinstance(self.transport, LNTransport):
            chan.add_or_update_peer_addr(self.transport.peer_addr)
        sig_64, _ = chan.sign_next_commitment()
        # now that the real channel_id is known, map the temp id to it
        self.temp_id_to_id[temp_channel_id] = channel_id

        self.send_message("funding_created",
            temporary_channel_id=temp_channel_id,
            funding_txid=funding_txid_bytes,
            funding_output_index=funding_index,
            signature=sig_64)
        self.funding_created_sent.add(channel_id)

        # <- funding signed
        payload = await self.wait_for_message('funding_signed', channel_id)
        self.logger.info('received funding_signed')
        remote_sig = payload['signature']
        try:
            chan.receive_new_commitment(remote_sig, [])
        except LNProtocolWarning as e:
            await self.send_warning(channel_id, message=str(e), close_connection=True)
        chan.open_with_first_pcp(remote_per_commitment_point, remote_sig)
        chan.set_state(ChannelState.OPENING)
        self.lnworker.add_new_channel(chan)
        return chan, funding_tx
|
2018-05-03 13:57:33 +02:00
|
|
|
|
2022-01-18 14:55:43 +01:00
|
|
|
def create_channel_storage(self, channel_id, outpoint, local_config, remote_config, constraints, channel_type):
|
2020-01-31 13:33:38 +01:00
|
|
|
chan_dict = {
|
|
|
|
|
"node_id": self.pubkey.hex(),
|
|
|
|
|
"channel_id": channel_id.hex(),
|
|
|
|
|
"short_channel_id": None,
|
|
|
|
|
"funding_outpoint": outpoint,
|
|
|
|
|
"remote_config": remote_config,
|
|
|
|
|
"local_config": local_config,
|
|
|
|
|
"constraints": constraints,
|
|
|
|
|
"remote_update": None,
|
2020-04-13 16:02:05 +02:00
|
|
|
"state": ChannelState.PREOPENING.name,
|
2020-02-04 13:35:58 +01:00
|
|
|
'onion_keys': {},
|
|
|
|
|
'data_loss_protect_remote_pcp': {},
|
|
|
|
|
"log": {},
|
2021-09-20 11:57:12 +02:00
|
|
|
"fail_htlc_reasons": {}, # htlc_id -> onion_packet
|
|
|
|
|
"unfulfilled_htlcs": {}, # htlc_id -> error_bytes, failure_message
|
2020-01-31 13:33:38 +01:00
|
|
|
"revocation_store": {},
|
2022-01-18 14:55:43 +01:00
|
|
|
"channel_type": channel_type,
|
2020-01-31 13:33:38 +01:00
|
|
|
}
|
2021-01-16 19:04:58 +01:00
|
|
|
return StoredDict(chan_dict, self.lnworker.db if self.lnworker else None, [])
|
2020-01-31 13:33:38 +01:00
|
|
|
|
2018-10-04 14:03:29 +02:00
|
|
|
    async def on_open_channel(self, payload):
        """Implements the channel acceptance flow.

        <- open_channel message
        -> accept_channel message
        <- funding_created message
        -> funding_signed message

        Channel configurations are initialized in this method.

        Raises Exception if the channel is not acceptable (wrong chain,
        unsupported channel_type, or we are not accepting channels).
        """
        # NOTE(review): `payload` is assumed to be the decoded 'open_channel'
        # message (field name -> value) -- confirm against the message parser.
        if self.lnworker.has_recoverable_channels():
            # FIXME: we might want to keep the connection open
            raise Exception('not accepting channels')
        # <- open_channel
        if payload['chain_hash'] != constants.net.rev_genesis_bytes():
            raise Exception('wrong chain_hash')
        funding_sat = payload['funding_satoshis']
        push_msat = payload['push_msat']
        feerate = payload['feerate_per_kw']  # note: we are not validating this
        temp_chan_id = payload['temporary_channel_id']
        # store the temp id now, so that it is recognized for e.g. 'error' messages
        # TODO: this is never cleaned up; the dict grows unbounded until disconnect
        self.temp_id_to_id[temp_chan_id] = None

        open_channel_tlvs = payload.get('open_channel_tlvs')
        channel_type = open_channel_tlvs.get('channel_type') if open_channel_tlvs else None
        # The receiving node MAY fail the channel if:
        # option_channel_type was negotiated but the message doesn't include a channel_type
        if self.is_channel_type() and channel_type is None:
            raise Exception("sender has advertized option_channel_type, but hasn't sent the channel type")
        # MUST fail the channel if it supports channel_type,
        # channel_type was set, and the type is not suitable.
        elif self.is_channel_type() and channel_type is not None:
            channel_type = ChannelType.from_bytes(channel_type['type'], byteorder='big').discard_unknown_and_check()
            if not channel_type.complies_with_features(self.features):
                raise Exception("sender has sent a channel type we don't support")

        local_config = self.make_local_config(funding_sat, push_msat, REMOTE, channel_type)

        upfront_shutdown_script = self.upfront_shutdown_script_from_payload(
            payload, 'open')

        # The remote configuration is taken verbatim from the open_channel fields.
        remote_config = RemoteConfig(
            payment_basepoint=OnlyPubkeyKeypair(payload['payment_basepoint']),
            multisig_key=OnlyPubkeyKeypair(payload['funding_pubkey']),
            htlc_basepoint=OnlyPubkeyKeypair(payload['htlc_basepoint']),
            delayed_basepoint=OnlyPubkeyKeypair(payload['delayed_payment_basepoint']),
            revocation_basepoint=OnlyPubkeyKeypair(payload['revocation_basepoint']),
            to_self_delay=payload['to_self_delay'],
            dust_limit_sat=payload['dust_limit_satoshis'],
            max_htlc_value_in_flight_msat=payload['max_htlc_value_in_flight_msat'],
            max_accepted_htlcs=payload['max_accepted_htlcs'],
            # remote funds the channel; they keep everything except what they push to us
            initial_msat=funding_sat * 1000 - push_msat,
            reserve_sat=payload['channel_reserve_satoshis'],
            htlc_minimum_msat=payload['htlc_minimum_msat'],
            next_per_commitment_point=payload['first_per_commitment_point'],
            current_per_commitment_point=None,
            upfront_shutdown_script=upfront_shutdown_script,
        )
        # Sanity-check both configs against each other (raises on violation).
        ChannelConfig.cross_validate_params(
            local_config=local_config,
            remote_config=remote_config,
            funding_sat=funding_sat,
            is_local_initiator=False,
            initial_feerate_per_kw=feerate,
        )

        # note: we ignore payload['channel_flags'], which e.g. contains 'announce_channel'.
        # Notably, if the remote sets 'announce_channel' to True, we will ignore that too,
        # but we will not play along with actually announcing the channel (so we keep it private).

        # -> accept channel
        # for the first commitment transaction
        per_commitment_secret_first = get_per_commitment_secret_from_seed(
            local_config.per_commitment_secret_seed,
            RevocationStore.START_INDEX
        )
        per_commitment_point_first = secret_to_pubkey(
            int.from_bytes(per_commitment_secret_first, 'big'))
        min_depth = 3
        accept_channel_tlvs = {
            'upfront_shutdown_script': {
                'shutdown_scriptpubkey': local_config.upfront_shutdown_script
            },
        }
        # The sender: if it sets channel_type: MUST set it to the channel_type from open_channel
        if self.is_channel_type():
            accept_channel_tlvs['channel_type'] = {
                'type': channel_type.to_bytes_minimal()
            }

        self.send_message(
            'accept_channel',
            temporary_channel_id=temp_chan_id,
            dust_limit_satoshis=local_config.dust_limit_sat,
            max_htlc_value_in_flight_msat=local_config.max_htlc_value_in_flight_msat,
            channel_reserve_satoshis=local_config.reserve_sat,
            htlc_minimum_msat=local_config.htlc_minimum_msat,
            minimum_depth=min_depth,
            to_self_delay=local_config.to_self_delay,
            max_accepted_htlcs=local_config.max_accepted_htlcs,
            funding_pubkey=local_config.multisig_key.pubkey,
            revocation_basepoint=local_config.revocation_basepoint.pubkey,
            payment_basepoint=local_config.payment_basepoint.pubkey,
            delayed_payment_basepoint=local_config.delayed_basepoint.pubkey,
            htlc_basepoint=local_config.htlc_basepoint.pubkey,
            first_per_commitment_point=per_commitment_point_first,
            accept_channel_tlvs=accept_channel_tlvs,
        )

        # <- funding created
        funding_created = await self.wait_for_message('funding_created', temp_chan_id)

        # -> funding signed
        funding_idx = funding_created['funding_output_index']
        # The wire encoding of the txid has reversed byte order vs the usual hex txid.
        funding_txid = funding_created['funding_txid'][::-1].hex()
        channel_id, funding_txid_bytes = channel_id_from_funding_tx(funding_txid, funding_idx)
        constraints = ChannelConstraints(
            capacity=funding_sat,
            is_initiator=False,
            funding_txn_minimum_depth=min_depth
        )
        outpoint = Outpoint(funding_txid, funding_idx)
        chan_dict = self.create_channel_storage(
            channel_id, outpoint, local_config, remote_config, constraints, channel_type)
        chan = Channel(
            chan_dict,
            lnworker=self.lnworker,
            initial_feerate=feerate
        )
        chan.storage['init_timestamp'] = int(time.time())
        # Remember the remote's network address if we have a direct transport.
        if isinstance(self.transport, LNTransport):
            chan.add_or_update_peer_addr(self.transport.peer_addr)
        remote_sig = funding_created['signature']
        try:
            chan.receive_new_commitment(remote_sig, [])
        except LNProtocolWarning as e:
            # invalid commitment signature: warn the peer and disconnect
            await self.send_warning(channel_id, message=str(e), close_connection=True)
        sig_64, _ = chan.sign_next_commitment()
        self.send_message('funding_signed',
            channel_id=channel_id,
            signature=sig_64,
        )
        # Map the temporary id to the final channel_id now that it is known.
        self.temp_id_to_id[temp_chan_id] = channel_id
        self.funding_signed_sent.add(chan.channel_id)
        chan.open_with_first_pcp(payload['first_per_commitment_point'], remote_sig)
        chan.set_state(ChannelState.OPENING)
        self.lnworker.add_new_channel(chan)
|
2018-10-04 14:03:29 +02:00
|
|
|
|
2022-06-10 17:13:11 +02:00
|
|
|
    async def request_force_close(self, channel_id: bytes):
        """Try to trigger the remote peer to force-close.

        Sends two messages that are each known to trigger a force-close in
        (different subsets of) common implementations.
        """
        await self.initialized
        # First, we intentionally send a "channel_reestablish" msg with an old state.
        # Many nodes (but not all) automatically force-close when seeing this.
        latest_point = secret_to_pubkey(42)  # we need a valid point (BOLT2)
        self.send_message(
            "channel_reestablish",
            channel_id=channel_id,
            next_commitment_number=0,
            next_revocation_number=0,
            your_last_per_commitment_secret=0,
            my_current_per_commitment_point=latest_point)
        # Newish nodes that have lightning/bolts/pull/950 force-close upon receiving an "error" msg,
        # so send that too. E.g. old "channel_reestablish" is not enough for eclair 0.7+,
        # but "error" is. see https://github.com/ACINQ/eclair/pull/2036
        # The receiving node:
        #   - upon receiving `error`:
        #     - MUST fail the channel referred to by `channel_id`, if that channel is with the sending node.
        # note: empty `data` -- we have no human-readable reason to convey.
        self.send_message("error", channel_id=channel_id, data=b"", len=0)
|
2020-03-13 11:44:29 +01:00
|
|
|
|
2022-01-26 16:51:42 +01:00
|
|
|
def schedule_force_closing(self, channel_id: bytes):
|
2022-03-09 15:52:46 +01:00
|
|
|
""" wrapper of lnworker's method, that raises if channel is not with this peer """
|
2022-01-26 16:51:42 +01:00
|
|
|
channels_with_peer = list(self.channels.keys())
|
|
|
|
|
channels_with_peer.extend(self.temp_id_to_id.values())
|
|
|
|
|
if channel_id not in channels_with_peer:
|
|
|
|
|
raise ValueError(f"channel {channel_id.hex()} does not belong to this peer")
|
2022-06-07 22:53:05 +02:00
|
|
|
chan = self.channels.get(channel_id)
|
|
|
|
|
if not chan:
|
|
|
|
|
self.logger.warning(f"tried to force-close channel {channel_id.hex()} but it is not in self.channels yet")
|
|
|
|
|
if ChanCloseOption.LOCAL_FCLOSE in chan.get_close_options():
|
lnpeer: some rework of error/warning message handling
- rm the `_get_channel_ids` abstraction as each of its usages needs subtle differences.
Some code duplication is preferable in this case.
- raise exceptions in `wait_for_message`, so that callers such as the GUI can show user-feedback
- on_error/on_warning were dropping messages with temp_chan_ids if they were not stored in
`temp_id_to_id` - which was only done once the mapping was known (so the normal chan_id was known).
To fix this, we now store temp_chan_ids into `temp_id_to_id` early.
- `schedule_force_closing` only works if the chan_id is already in `channels`
related:
https://github.com/spesmilo/electrum/pull/7645 (and related commits)
-----
example before commit:
```
D/P | lnpeer.Peer.[LNWallet, 03933884aa-3b53e4ab] | Sending OPEN_CHANNEL
D/P | lnpeer.Peer.[LNWallet, 03933884aa-3b53e4ab] | Received ERROR
I/P | lnpeer.Peer.[LNWallet, 03933884aa-3b53e4ab] | remote peer sent error [DO NOT TRUST THIS MESSAGE]: invalid funding_satoshis=10000 sat (min=400000 sat max=1500000000 sat)
E | gui.qt.main_window.[test_segwit_2] | Could not open channel
Traceback (most recent call last):
File "...\electrum\electrum\util.py", line 1160, in wrapper
return await func(*args, **kwargs)
File "...\electrum\electrum\lnpeer.py", line 661, in wrapper
return await func(self, *args, **kwargs)
File "...\electrum\electrum\lnpeer.py", line 742, in channel_establishment_flow
payload = await self.wait_for_message('accept_channel', temp_channel_id) #
File "...\electrum\electrum\lnpeer.py", line 315, in wait_for_message
name, payload = await asyncio.wait_for(q.get(), LN_P2P_NETWORK_TIMEOUT)
File "...\Python39\lib\asyncio\tasks.py", line 468, in wait_for
await waiter
asyncio.exceptions.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "...\Python39\lib\asyncio\tasks.py", line 492, in wait_for
fut.result()
asyncio.exceptions.CancelledError
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "...\electrum\electrum\gui\qt\util.py", line 914, in run
result = task.task()
File "...\electrum\electrum\gui\qt\main_window.py", line 1875, in task
return self.wallet.lnworker.open_channel(
File "...\electrum\electrum\lnworker.py", line 1075, in open_channel
chan, funding_tx = fut.result()
File "...\Python39\lib\concurrent\futures\_base.py", line 445, in result
return self.__get_result()
File "...\Python39\lib\concurrent\futures\_base.py", line 390, in __get_result
raise self._exception
File "...\electrum\electrum\util.py", line 1160, in wrapper
return await func(*args, **kwargs)
File "...\electrum\electrum\lnworker.py", line 1006, in _open_channel_coroutine
chan, funding_tx = await asyncio.wait_for(coro, LN_P2P_NETWORK_TIMEOUT)
File "...\Python39\lib\asyncio\tasks.py", line 494, in wait_for
raise exceptions.TimeoutError() from exc
asyncio.exceptions.TimeoutError
```
example after commit:
```
D/P | lnpeer.Peer.[LNWallet, 03933884aa-ff3a866f] | Sending OPEN_CHANNEL
D/P | lnpeer.Peer.[LNWallet, 03933884aa-ff3a866f] | Received ERROR
I/P | lnpeer.Peer.[LNWallet, 03933884aa-ff3a866f] | remote peer sent error [DO NOT TRUST THIS MESSAGE]: invalid funding_satoshis=10000 sat (min=400000 sat max=1500000000 sat). chan_id=124ca21fa6aa2993430ad71f465f0d44731ef87f7478e4b31327e4459b5a3988
E | lnworker.LNWallet.[test_segwit_2] | Exception in _open_channel_coroutine: GracefulDisconnect('remote peer sent error [DO NOT TRUST THIS MESSAGE]: invalid funding_satoshis=10000 sat (min=400000 sat max=1500000000 sat)')
Traceback (most recent call last):
File "...\electrum\electrum\util.py", line 1160, in wrapper
return await func(*args, **kwargs)
File "...\electrum\electrum\lnworker.py", line 1006, in _open_channel_coroutine
chan, funding_tx = await asyncio.wait_for(coro, LN_P2P_NETWORK_TIMEOUT)
File "...\Python39\lib\asyncio\tasks.py", line 481, in wait_for
return fut.result()
File "...\electrum\electrum\lnpeer.py", line 673, in wrapper
return await func(self, *args, **kwargs)
File "...\electrum\electrum\lnpeer.py", line 755, in channel_establishment_flow
payload = await self.wait_for_message('accept_channel', temp_channel_id)
File "...\electrum\electrum\lnpeer.py", line 326, in wait_for_message
raise GracefulDisconnect(
electrum.interface.GracefulDisconnect: remote peer sent error [DO NOT TRUST THIS MESSAGE]: invalid funding_satoshis=10000 sat (min=400000 sat max=1500000000 sat)
I/P | lnpeer.Peer.[LNWallet, 03933884aa-ff3a866f] | Disconnecting: GracefulDisconnect()
```
2022-03-17 17:14:51 +01:00
|
|
|
self.lnworker.schedule_force_closing(channel_id)
|
|
|
|
|
else:
|
2022-06-07 22:53:05 +02:00
|
|
|
self.logger.info(f"tried to force-close channel {chan.get_id_for_log()} "
|
|
|
|
|
f"but close option is not allowed. {chan.get_state()=!r}")
|
2022-01-26 16:51:42 +01:00
|
|
|
|
2022-02-18 18:52:02 +01:00
|
|
|
    def on_channel_reestablish(self, chan, msg):
        """Validate a received `channel_reestablish` message for `chan`.

        Compares the peer's claimed commitment numbers against our own view of
        the channel state, and resolves the future stored in
        self.channel_reestablish_msg[chan.channel_id]:
        - result (we_must_resend_revoke_and_ack, their_next_local_ctn) if in sync,
        - exception RemoteMisbehaving if either side is ahead of the other.

        Raises RemoteMisbehaving directly for invalid field values or invalid
        data-loss-protect fields.
        """
        their_next_local_ctn = msg["next_commitment_number"]
        their_oldest_unrevoked_remote_ctn = msg["next_revocation_number"]
        # option_data_loss_protect fields (optional in the message encoding)
        their_local_pcp = msg.get("my_current_per_commitment_point")
        their_claim_of_our_last_per_commitment_secret = msg.get("your_last_per_commitment_secret")
        self.logger.info(
            f'channel_reestablish ({chan.get_id_for_log()}): received channel_reestablish with '
            f'(their_next_local_ctn={their_next_local_ctn}, '
            f'their_oldest_unrevoked_remote_ctn={their_oldest_unrevoked_remote_ctn})')
        # sanity checks of received values
        if their_next_local_ctn < 0:
            raise RemoteMisbehaving(f"channel reestablish: their_next_local_ctn < 0")
        if their_oldest_unrevoked_remote_ctn < 0:
            raise RemoteMisbehaving(f"channel reestablish: their_oldest_unrevoked_remote_ctn < 0")
        # ctns -- our local view of both sides' commitment numbers
        oldest_unrevoked_local_ctn = chan.get_oldest_unrevoked_ctn(LOCAL)
        latest_local_ctn = chan.get_latest_ctn(LOCAL)
        next_local_ctn = chan.get_next_ctn(LOCAL)
        oldest_unrevoked_remote_ctn = chan.get_oldest_unrevoked_ctn(REMOTE)
        latest_remote_ctn = chan.get_latest_ctn(REMOTE)
        next_remote_ctn = chan.get_next_ctn(REMOTE)
        # compare remote ctns
        we_are_ahead = False
        they_are_ahead = False
        we_must_resend_revoke_and_ack = False
        if next_remote_ctn != their_next_local_ctn:
            if their_next_local_ctn == latest_remote_ctn and chan.hm.is_revack_pending(REMOTE):
                # We will replay the local updates (see reestablish_channel), which should contain a commitment_signed
                # (due to is_revack_pending being true), and this should remedy this situation.
                pass
            else:
                self.logger.warning(
                    f"channel_reestablish ({chan.get_id_for_log()}): "
                    f"expected remote ctn {next_remote_ctn}, got {their_next_local_ctn}")
                if their_next_local_ctn < next_remote_ctn:
                    we_are_ahead = True
                else:
                    they_are_ahead = True
        # compare local ctns
        if oldest_unrevoked_local_ctn != their_oldest_unrevoked_remote_ctn:
            if oldest_unrevoked_local_ctn - 1 == their_oldest_unrevoked_remote_ctn:
                # A node:
                #    if next_revocation_number is equal to the commitment number of the last revoke_and_ack
                #    the receiving node sent, AND the receiving node hasn't already received a closing_signed:
                #        MUST re-send the revoke_and_ack.
                we_must_resend_revoke_and_ack = True
            else:
                self.logger.warning(
                    f"channel_reestablish ({chan.get_id_for_log()}): "
                    f"expected local ctn {oldest_unrevoked_local_ctn}, got {their_oldest_unrevoked_remote_ctn}")
                if their_oldest_unrevoked_remote_ctn < oldest_unrevoked_local_ctn:
                    we_are_ahead = True
                else:
                    they_are_ahead = True
        # option_data_loss_protect
        assert self.features.supports(LnFeatures.OPTION_DATA_LOSS_PROTECT_OPT)
        def are_datalossprotect_fields_valid() -> bool:
            # Both DLP fields must be present, and the peer's claim of our last
            # per-commitment secret must match what we actually revealed.
            if their_local_pcp is None or their_claim_of_our_last_per_commitment_secret is None:
                return False
            if their_oldest_unrevoked_remote_ctn > 0:
                our_pcs, __ = chan.get_secret_and_point(LOCAL, their_oldest_unrevoked_remote_ctn - 1)
            else:
                # nothing revoked yet -> the expected "last secret" is all-zeroes
                assert their_oldest_unrevoked_remote_ctn == 0
                our_pcs = bytes(32)
            if our_pcs != their_claim_of_our_last_per_commitment_secret:
                self.logger.error(
                    f"channel_reestablish ({chan.get_id_for_log()}): "
                    f"(DLP) local PCS mismatch: {our_pcs.hex()} != {their_claim_of_our_last_per_commitment_secret.hex()}")
                return False
            assert chan.is_static_remotekey_enabled()
            return True
        if not are_datalossprotect_fields_valid():
            raise RemoteMisbehaving("channel_reestablish: data loss protect fields invalid")
        fut = self.channel_reestablish_msg[chan.channel_id]
        if they_are_ahead:
            self.logger.warning(
                f"channel_reestablish ({chan.get_id_for_log()}): "
                f"remote is ahead of us! They should force-close. Remote PCP: {their_local_pcp.hex()}")
            # data_loss_protect_remote_pcp is used in lnsweep
            chan.set_data_loss_protect_remote_pcp(their_next_local_ctn - 1, their_local_pcp)
            chan.set_state(ChannelState.WE_ARE_TOXIC)
            self.lnworker.save_channel(chan)
            chan.peer_state = PeerState.BAD
            # raise after we send channel_reestablish, so the remote can realize they are ahead
            fut.set_exception(RemoteMisbehaving("remote ahead of us"))
        elif we_are_ahead:
            self.logger.warning(f"channel_reestablish ({chan.get_id_for_log()}): we are ahead of remote! trying to force-close.")
            self.schedule_force_closing(chan.channel_id)
            fut.set_exception(RemoteMisbehaving("we are ahead of remote"))
        else:
            # all good
            fut.set_result((we_must_resend_revoke_and_ack, their_next_local_ctn))
|
2022-02-18 18:52:02 +01:00
|
|
|
|
|
|
|
|
async def reestablish_channel(self, chan: Channel):
    """Perform the channel_reestablish handshake for `chan` after a (re)connect.

    Sends our `channel_reestablish` message, waits for the remote's (delivered
    through ``self.channel_reestablish_msg``), then replays any revoke_and_ack /
    update messages the remote claims not to have received. On success the
    channel's peer_state goes DISCONNECTED -> REESTABLISHING -> GOOD.
    Raises (via the awaited future) if the two sides are out of sync.
    """
    await self.initialized
    chan_id = chan.channel_id
    if chan.should_request_force_close:
        # We decided earlier to ask the remote to force-close instead of
        # doing a normal reestablish; do that and stop here.
        chan.set_state(ChannelState.REQUESTED_FCLOSE)
        await self.request_force_close(chan_id)
        chan.should_request_force_close = False
        return
    assert ChannelState.PREOPENING < chan.get_state() < ChannelState.FORCE_CLOSING
    if chan.peer_state != PeerState.DISCONNECTED:
        # another task already (re)established this channel; nothing to do
        self.logger.info(
            f'reestablish_channel was called but channel {chan.get_id_for_log()} '
            f'already in peer_state {chan.peer_state!r}')
        return
    chan.peer_state = PeerState.REESTABLISHING
    util.trigger_callback('channel', self.lnworker.wallet, chan)
    # ctns
    oldest_unrevoked_local_ctn = chan.get_oldest_unrevoked_ctn(LOCAL)
    latest_local_ctn = chan.get_latest_ctn(LOCAL)
    next_local_ctn = chan.get_next_ctn(LOCAL)
    oldest_unrevoked_remote_ctn = chan.get_oldest_unrevoked_ctn(REMOTE)
    latest_remote_ctn = chan.get_latest_ctn(REMOTE)
    next_remote_ctn = chan.get_next_ctn(REMOTE)
    # BOLT-02: "A node [...] upon disconnection [...] MUST reverse any uncommitted updates sent by the other side"
    chan.hm.discard_unsigned_remote_updates()
    # send message
    assert chan.is_static_remotekey_enabled()
    latest_secret, latest_point = chan.get_secret_and_point(LOCAL, 0)
    if oldest_unrevoked_remote_ctn == 0:
        # the remote has not revoked anything yet
        last_rev_secret = 0
    else:
        last_rev_index = oldest_unrevoked_remote_ctn - 1
        last_rev_secret = chan.revocation_store.retrieve_secret(RevocationStore.START_INDEX - last_rev_index)
    self.send_message(
        "channel_reestablish",
        channel_id=chan_id,
        next_commitment_number=next_local_ctn,
        next_revocation_number=oldest_unrevoked_remote_ctn,
        your_last_per_commitment_secret=last_rev_secret,
        my_current_per_commitment_point=latest_point)
    self.logger.info(
        f'channel_reestablish ({chan.get_id_for_log()}): sent channel_reestablish with '
        f'(next_local_ctn={next_local_ctn}, '
        f'oldest_unrevoked_remote_ctn={oldest_unrevoked_remote_ctn})')

    # wait until we receive their channel_reestablish
    fut = self.channel_reestablish_msg[chan_id]
    await fut
    # the future is resolved by the message handler after validating their message
    we_must_resend_revoke_and_ack, their_next_local_ctn = fut.result()

    def replay_updates_and_commitsig():
        # Replay un-acked local updates (including commitment_signed) byte-for-byte.
        # If we have sent them a commitment signature that they "lost" (due to disconnect),
        # we need to make sure we replay the same local updates, as otherwise they could
        # end up with two (or more) signed valid commitment transactions at the same ctn.
        # Multiple valid ctxs at the same ctn is a major headache for pre-signing spending txns,
        # e.g. for watchtowers, hence we must ensure these ctxs coincide.
        # We replay the local updates even if they were not yet committed.
        unacked = chan.hm.get_unacked_local_updates()
        replayed_msgs = []
        for ctn, messages in unacked.items():
            if ctn < their_next_local_ctn:
                # They claim to have received these messages and the corresponding
                # commitment_signed, hence we must not replay them.
                continue
            for raw_upd_msg in messages:
                self.transport.send_bytes(raw_upd_msg)
                replayed_msgs.append(raw_upd_msg)
        self.logger.info(f'channel_reestablish ({chan.get_id_for_log()}): replayed {len(replayed_msgs)} unacked messages. '
                         f'{[decode_msg(raw_upd_msg)[0] for raw_upd_msg in replayed_msgs]}')

    def resend_revoke_and_ack():
        # Re-send the revoke_and_ack for our oldest unrevoked ctx.
        last_secret, last_point = chan.get_secret_and_point(LOCAL, oldest_unrevoked_local_ctn - 1)
        next_secret, next_point = chan.get_secret_and_point(LOCAL, oldest_unrevoked_local_ctn + 1)
        self.send_message(
            "revoke_and_ack",
            channel_id=chan.channel_id,
            per_commitment_secret=last_secret,
            next_per_commitment_point=next_point)

    # We need to preserve relative order of last revack and commitsig.
    # note: it is not possible to recover and reestablish a channel if we are out-of-sync by
    # more than one ctns, i.e. we will only ever retransmit up to one commitment_signed message.
    # Hence, if we need to retransmit a revack, without loss of generality, we can either replay
    # it as the first message or as the last message.
    was_revoke_last = chan.hm.was_revoke_last()
    if we_must_resend_revoke_and_ack and not was_revoke_last:
        self.logger.info(f'channel_reestablish ({chan.get_id_for_log()}): replaying a revoke_and_ack first.')
        resend_revoke_and_ack()
    replay_updates_and_commitsig()
    if we_must_resend_revoke_and_ack and was_revoke_last:
        self.logger.info(f'channel_reestablish ({chan.get_id_for_log()}): replaying a revoke_and_ack last.')
        resend_revoke_and_ack()

    chan.peer_state = PeerState.GOOD
    if chan.is_funded() and their_next_local_ctn == next_local_ctn == 1:
        # brand-new channel: exchange channel_ready now
        self.send_channel_ready(chan)
    # checks done
    if chan.is_funded() and chan.config[LOCAL].funding_locked_received:
        self.mark_open(chan)
    util.trigger_callback('channel', self.lnworker.wallet, chan)
    # if we have sent a previous shutdown, it must be retransmitted (Bolt2)
    if chan.get_state() == ChannelState.SHUTDOWN:
        await self.send_shutdown(chan)
|
2023-01-07 12:20:03 +01:00
|
|
|
def send_channel_ready(self, chan: Channel):
    """Send 'channel_ready' for this channel; mark it open if the remote's
    channel_ready was already received."""
    cid = chan.channel_id
    secret_index = RevocationStore.START_INDEX - 1
    per_commitment_secret = get_per_commitment_secret_from_seed(
        chan.config[LOCAL].per_commitment_secret_seed, secret_index)
    second_per_commitment_point = secret_to_pubkey(
        int.from_bytes(per_commitment_secret, 'big'))

    tlvs = {}
    if self.their_features.supports(LnFeatures.OPTION_SCID_ALIAS_OPT):
        # LND requires that we send an alias if the option has been negotiated in INIT.
        # otherwise, the channel will not be marked as active.
        # This does not apply if the channel was previously marked active without an alias.
        tlvs['short_channel_id'] = {'alias': chan.get_local_alias()}

    # note: if 'channel_ready' was not yet received, we might send it multiple times
    self.send_message(
        "channel_ready",
        channel_id=cid,
        second_per_commitment_point=second_per_commitment_point,
        channel_ready_tlvs=tlvs)
    if chan.is_funded() and chan.config[LOCAL].funding_locked_received:
        self.mark_open(chan)
2023-01-07 12:20:03 +01:00
|
|
|
def on_channel_ready(self, chan: Channel, payload):
    """Handle the remote's 'channel_ready' message for `chan`."""
    self.logger.info(f"on_channel_ready. channel: {chan.channel_id.hex()}")
    # save remote alias for use in invoices
    alias = payload.get('channel_ready_tlvs', {}).get('short_channel_id', {}).get('alias')
    if alias:
        chan.save_remote_alias(alias)

    if not chan.config[LOCAL].funding_locked_received:
        # first time we see their channel_ready: record their next commitment point
        chan.config[REMOTE].next_per_commitment_point = payload["second_per_commitment_point"]
        chan.config[LOCAL].funding_locked_received = True
        self.lnworker.save_channel(chan)
    if chan.is_funded():
        self.mark_open(chan)
|
|
2018-10-25 19:53:31 +02:00
|
|
|
def on_network_update(self, chan: Channel, funding_tx_depth: int):
    """
    Only called when the channel is OPEN.

    Runs on the Network thread.
    """
    # don't announce our channels
    # FIXME should this be a field in chan.local_state maybe?
    if funding_tx_depth >= 6 and not chan.config[LOCAL].was_announced:
        return
    chan.config[LOCAL].was_announced = True
    self.lnworker.save_channel(chan)
    # hop onto the asyncio loop to do the announcement handshake
    asyncio.run_coroutine_threadsafe(self.handle_announcements(chan), self.asyncio_loop)
|
|
2018-10-12 19:40:12 +02:00
|
|
|
@log_exceptions
async def handle_announcements(self, chan: Channel):
    """Exchange announcement_signatures with the peer and broadcast
    'channel_announcement' for `chan`.

    Sends our signatures, waits for theirs, verifies both of their signatures
    against the announcement hash, then sends the combined announcement with
    the fields ordered by node_id (as the gossip message format requires that
    node_id_1 is the lexicographically lesser key).
    """
    h, local_node_sig, local_bitcoin_sig = self.send_announcement_signatures(chan)
    announcement_signatures_msg = await self.announcement_signatures[chan.channel_id].get()
    remote_node_sig = announcement_signatures_msg["node_signature"]
    remote_bitcoin_sig = announcement_signatures_msg["bitcoin_signature"]
    if not ecc.verify_signature(chan.config[REMOTE].multisig_key.pubkey, remote_bitcoin_sig, h):
        raise Exception("bitcoin_sig invalid in announcement_signatures")
    if not ecc.verify_signature(self.pubkey, remote_node_sig, h):
        raise Exception("node_sig invalid in announcement_signatures")

    # start with [remote, local] ordering, then flip if our node_id sorts first
    node_sigs = [remote_node_sig, local_node_sig]
    bitcoin_sigs = [remote_bitcoin_sig, local_bitcoin_sig]
    bitcoin_keys = [chan.config[REMOTE].multisig_key.pubkey, chan.config[LOCAL].multisig_key.pubkey]

    if self.node_ids[0] > self.node_ids[1]:
        node_sigs.reverse()
        bitcoin_sigs.reverse()
        node_ids = list(reversed(self.node_ids))
        bitcoin_keys.reverse()
    else:
        node_ids = self.node_ids

    self.send_message("channel_announcement",
        node_signatures_1=node_sigs[0],
        node_signatures_2=node_sigs[1],
        bitcoin_signature_1=bitcoin_sigs[0],
        bitcoin_signature_2=bitcoin_sigs[1],
        len=0,
        #features not set (defaults to zeros)
        chain_hash=constants.net.rev_genesis_bytes(),
        short_channel_id=chan.short_channel_id,
        node_id_1=node_ids[0],
        node_id_2=node_ids[1],
        bitcoin_key_1=bitcoin_keys[0],
        bitcoin_key_2=bitcoin_keys[1]
    )
|
|
2018-10-25 19:53:31 +02:00
|
|
|
def mark_open(self, chan: Channel):
    """Transition `chan` from FUNDED to OPEN (no-op if already OPEN)."""
    assert chan.is_funded()
    # only allow state transition from "FUNDED" to "OPEN"
    old_state = chan.get_state()
    if old_state == ChannelState.OPEN:
        return
    if old_state != ChannelState.FUNDED:
        self.logger.info(f"cannot mark open ({chan.get_id_for_log()}), current state: {repr(old_state)}")
        return
    assert chan.config[LOCAL].funding_locked_received
    chan.set_state(ChannelState.OPEN)
    util.trigger_callback('channel', self.lnworker.wallet, chan)
    # peer may have sent us a channel update for the incoming direction previously
    pending_upd = self.orphan_channel_updates.get(chan.short_channel_id)
    if pending_upd:
        chan.set_remote_update(pending_upd)
    self.logger.info(f"CHANNEL OPENING COMPLETED ({chan.get_id_for_log()})")
    if self.network.config.get('lightning_forward_payments', False):
        # send channel_update of outgoing edge to peer,
        # so that channel can be used to receive payments
        self.logger.info(f"sending channel update for outgoing edge ({chan.get_id_for_log()})")
        self.transport.send_bytes(chan.get_outgoing_gossip_channel_update())
|
|
2018-10-25 19:53:31 +02:00
|
|
|
def send_announcement_signatures(self, chan: Channel):
    """Sign our channel_announcement for `chan` and send 'announcement_signatures'.

    Returns (msg_hash, node_signature, bitcoin_signature) so the caller can
    later verify the remote's signatures against the same hash.
    """
    chan_ann = chan.construct_channel_announcement_without_sigs()
    # skip the leading 256+2 bytes of the unsigned announcement; the rest is signed
    preimage = chan_ann[256 + 2:]
    msg_hash = sha256d(preimage)
    funding_privkey = chan.config[LOCAL].multisig_key.privkey
    bitcoin_signature = ecc.ECPrivkey(funding_privkey).sign(msg_hash, sig_string_from_r_and_s)
    node_signature = ecc.ECPrivkey(self.privkey).sign(msg_hash, sig_string_from_r_and_s)
    self.send_message(
        "announcement_signatures",
        channel_id=chan.channel_id,
        short_channel_id=chan.short_channel_id,
        node_signature=node_signature,
        bitcoin_signature=bitcoin_signature,
    )
    return msg_hash, node_signature, bitcoin_signature
|
|
|
|
2020-03-17 18:02:51 +01:00
|
|
|
def on_update_fail_htlc(self, chan: Channel, payload):
    """Handle the remote's 'update_fail_htlc' for one of our offered HTLCs."""
    htlc_id = payload["id"]
    error_reason = payload["reason"]
    self.logger.info(f"on_update_fail_htlc. chan {chan.short_channel_id}. htlc_id {htlc_id}")
    # TODO handle exc and maybe fail channel (e.g. bad htlc_id)
    chan.receive_fail_htlc(htlc_id, error_bytes=error_reason)
    self.maybe_send_commitment(chan)
|
|
2021-11-04 19:16:02 +01:00
|
|
|
def maybe_send_commitment(self, chan: Channel) -> bool:
    """Send 'commitment_signed' for `chan` if allowed; return whether one was sent."""
    assert util.get_running_loop() == util.get_asyncio_loop(), f"this must be run on the asyncio thread!"
    # REMOTE should revoke first before we can sign a new ctx
    if chan.hm.is_revack_pending(REMOTE):
        return False
    # if there are no changes, we will not (and must not) send a new commitment
    if not chan.has_pending_changes(REMOTE):
        return False
    self.logger.info(f'send_commitment. chan {chan.short_channel_id}. ctn: {chan.get_next_ctn(REMOTE)}.')
    sig_64, htlc_sigs = chan.sign_next_commitment()
    self.send_message(
        "commitment_signed",
        channel_id=chan.channel_id,
        signature=sig_64,
        num_htlcs=len(htlc_sigs),
        htlc_signature=b"".join(htlc_sigs))
    return True
|
|
2021-02-27 09:45:19 +01:00
|
|
|
def pay(self, *,
        route: 'LNPaymentRoute',
        chan: Channel,
        amount_msat: int,
        total_msat: int,
        payment_hash: bytes,
        min_final_cltv_expiry: int,
        payment_secret: bytes = None,
        trampoline_onion=None) -> UpdateAddHtlc:
    """Initiate an outgoing payment: build the onion for `route`, add the HTLC
    to `chan`, and send 'update_add_htlc' to the peer.

    amount_msat is the amount for this HTLC; total_msat is the full invoice
    amount (they differ for multi-part payments). If `trampoline_onion` is
    given, it is embedded into the payload of the second-to-last hop.
    Returns the added UpdateAddHtlc. Raises PaymentFailure if the channel
    cannot currently carry the HTLC or the expiry is too far in the future.
    """

    assert amount_msat > 0, "amount_msat is not greater zero"
    assert len(route) > 0
    if not chan.can_send_update_add_htlc():
        raise PaymentFailure("Channel cannot send update_add_htlc")
    # add features learned during "init" for direct neighbour:
    route[0].node_features |= self.features
    local_height = self.network.get_local_height()
    final_cltv = local_height + min_final_cltv_expiry
    hops_data, amount_msat, cltv = calc_hops_data_for_payment(
        route,
        amount_msat,
        final_cltv,
        total_msat=total_msat,
        payment_secret=payment_secret)
    num_hops = len(hops_data)
    self.logger.info(f"lnpeer.pay len(route)={len(route)}")
    for i in range(len(route)):
        self.logger.info(f"  {i}: edge={route[i].short_channel_id} hop_data={hops_data[i]!r}")
    assert final_cltv <= cltv, (final_cltv, cltv)
    session_key = os.urandom(32)  # session_key
    # if we are forwarding a trampoline payment, add trampoline onion
    if trampoline_onion:
        self.logger.info(f'adding trampoline onion to final payload')
        # the trampoline onion goes into the payload of the hop before the last
        trampoline_payload = hops_data[num_hops-2].payload
        trampoline_payload["trampoline_onion_packet"] = {
            "version": trampoline_onion.version,
            "public_key": trampoline_onion.public_key,
            "hops_data": trampoline_onion.hops_data,
            "hmac": trampoline_onion.hmac
        }
    # create onion packet
    payment_path_pubkeys = [x.node_id for x in route]
    onion = new_onion_packet(payment_path_pubkeys, session_key, hops_data, associated_data=payment_hash)  # must use another sessionkey
    self.logger.info(f"starting payment. len(route)={len(hops_data)}.")
    # create htlc
    if cltv > local_height + lnutil.NBLOCK_CLTV_EXPIRY_TOO_FAR_INTO_FUTURE:
        raise PaymentFailure(f"htlc expiry too far into future. (in {cltv-local_height} blocks)")
    htlc = UpdateAddHtlc(amount_msat=amount_msat, payment_hash=payment_hash, cltv_expiry=cltv, timestamp=int(time.time()))
    htlc = chan.add_htlc(htlc)
    chan.set_onion_key(htlc.htlc_id, session_key)  # should it be the outer onion secret?
    self.logger.info(f"starting payment. htlc: {htlc}")
    self.send_message(
        "update_add_htlc",
        channel_id=chan.channel_id,
        id=htlc.htlc_id,
        cltv_expiry=htlc.cltv_expiry,
        amount_msat=htlc.amount_msat,
        payment_hash=htlc.payment_hash,
        onion_routing_packet=onion.to_bytes())
    self.maybe_send_commitment(chan)
    return htlc
|
2018-07-24 19:31:00 +02:00
|
|
|
|
2019-02-03 15:27:48 +01:00
|
|
|
def send_revoke_and_ack(self, chan: Channel):
    """Revoke our current commitment and send 'revoke_and_ack' for `chan`."""
    self.logger.info(f'send_revoke_and_ack. chan {chan.short_channel_id}. ctn: {chan.get_oldest_unrevoked_ctn(LOCAL)}')
    rev = chan.revoke_current_commitment()
    self.lnworker.save_channel(chan)
    self.send_message(
        "revoke_and_ack",
        channel_id=chan.channel_id,
        per_commitment_secret=rev.per_commitment_secret,
        next_per_commitment_point=rev.next_per_commitment_point)
    # revoking may have unblocked sending our next commitment
    self.maybe_send_commitment(chan)
|
|
|
2020-03-17 18:02:51 +01:00
|
|
|
def on_commitment_signed(self, chan: Channel, payload):
    """Handle the remote's 'commitment_signed': verify it and respond with revoke_and_ack."""
    if chan.peer_state == PeerState.BAD:
        return
    self.logger.info(f'on_commitment_signed. chan {chan.short_channel_id}. ctn: {chan.get_next_ctn(LOCAL)}.')
    # make sure there were changes to the ctx, otherwise the remote peer is misbehaving
    if not chan.has_pending_changes(LOCAL):
        # TODO if feerate changed A->B->A; so there were updates but the value is identical,
        # then it might be legal to send a commitment_signature
        # see https://github.com/lightningnetwork/lightning-rfc/pull/618
        raise RemoteMisbehaving('received commitment_signed without pending changes')
    # REMOTE should wait until we have revoked
    if chan.hm.is_revack_pending(LOCAL):
        raise RemoteMisbehaving('received commitment_signed before we revoked previous ctx')
    # the htlc signatures arrive concatenated, 64 bytes each
    htlc_sigs = list(chunks(payload["htlc_signature"], 64))
    chan.receive_new_commitment(payload["signature"], htlc_sigs)
    self.send_revoke_and_ack(chan)
    self.received_commitsig_event.set()
    self.received_commitsig_event.clear()
2018-04-11 11:02:10 +02:00
|
|
|
|
2020-03-17 18:02:51 +01:00
|
|
|
def on_update_fulfill_htlc(self, chan: Channel, payload):
    """Handle the remote's 'update_fulfill_htlc': settle the HTLC and persist the preimage."""
    preimage = payload["payment_preimage"]
    payment_hash = sha256(preimage)
    htlc_id = payload["id"]
    self.logger.info(f"on_update_fulfill_htlc. chan {chan.short_channel_id}. htlc_id {htlc_id}")
    # TODO handle exc and maybe fail channel (e.g. bad htlc_id)
    chan.receive_htlc_settle(preimage, htlc_id)
    self.lnworker.save_preimage(payment_hash, preimage)
    self.maybe_send_commitment(chan)
|
|
|
2020-03-17 18:02:51 +01:00
|
|
|
def on_update_fail_malformed_htlc(self, chan: Channel, payload):
    """Handle the remote's 'update_fail_malformed_htlc'."""
    htlc_id = payload["id"]
    failure_code = payload["failure_code"]
    self.logger.info(f"on_update_fail_malformed_htlc. chan {chan.get_id_for_log()}. "
                     f"htlc_id {htlc_id}. failure_code={failure_code}")
    # the failure code must carry the BADONION flag; anything else is a protocol violation
    if failure_code & OnionFailureCodeMetaFlag.BADONION == 0:
        self.schedule_force_closing(chan.channel_id)
        raise RemoteMisbehaving(f"received update_fail_malformed_htlc with unexpected failure code: {failure_code}")
    reason = OnionRoutingFailure(code=failure_code, data=payload["sha256_of_onion"])
    chan.receive_fail_htlc(htlc_id, error_bytes=None, reason=reason)
    self.maybe_send_commitment(chan)
|
|
|
2020-03-17 18:02:51 +01:00
|
|
|
def on_update_add_htlc(self, chan: Channel, payload):
    """Handle the remote's 'update_add_htlc': validate and record the incoming HTLC."""
    onion_packet = payload["onion_routing_packet"]
    htlc = UpdateAddHtlc(
        amount_msat=payload["amount_msat"],
        payment_hash=payload["payment_hash"],
        cltv_expiry=payload["cltv_expiry"],
        timestamp=int(time.time()),
        htlc_id=payload["id"])
    self.logger.info(f"on_update_add_htlc. chan {chan.short_channel_id}. htlc={str(htlc)}")
    if chan.get_state() != ChannelState.OPEN:
        raise RemoteMisbehaving(f"received update_add_htlc while chan.get_state() != OPEN. state was {chan.get_state()!r}")
    if htlc.cltv_expiry > bitcoin.NLOCKTIME_BLOCKHEIGHT_MAX:
        self.schedule_force_closing(chan.channel_id)
        raise RemoteMisbehaving(f"received update_add_htlc with cltv_expiry > BLOCKHEIGHT_MAX. value was {htlc.cltv_expiry}")
    # add htlc
    chan.receive_htlc(htlc, onion_packet)
    util.trigger_callback('htlc_added', chan, htlc, RECEIVED)
|
|
2021-02-24 20:03:12 +01:00
|
|
|
def maybe_forward_htlc(
        self, *,
        htlc: UpdateAddHtlc,
        processed_onion: ProcessedOnionPacket) -> Tuple[bytes, int]:
    """Try to forward an incoming HTLC to the next channel given by the onion.

    Validates the next hop (channel exists, peer online, amounts/fees/expiries
    per our forwarding policy), adds the HTLC on the next channel, and sends
    'update_add_htlc' to the next peer. Returns (next_chan_scid, next_htlc_id).
    Raises OnionRoutingFailure with the appropriate BOLT-04 failure code when
    forwarding is not possible; the checks below are ordered so the most
    specific failure code wins.
    """

    # Forward HTLC
    # FIXME: there are critical safety checks MISSING here
    # - for example; atm we forward first and then persist "forwarding_info",
    #   so if we segfault in-between and restart, we might forward an HTLC twice...
    #   (same for trampoline forwarding)
    # - we could check for the exposure to dust HTLCs, see:
    #   https://github.com/ACINQ/eclair/pull/1985
    forwarding_enabled = self.network.config.get('lightning_forward_payments', False)
    if not forwarding_enabled:
        self.logger.info(f"forwarding is disabled. failing htlc.")
        raise OnionRoutingFailure(code=OnionFailureCode.PERMANENT_CHANNEL_FAILURE, data=b'')
    chain = self.network.blockchain()
    if chain.is_tip_stale():
        # we do not know the current chain height reliably; refuse to forward
        raise OnionRoutingFailure(code=OnionFailureCode.TEMPORARY_NODE_FAILURE, data=b'')
    try:
        next_chan_scid = processed_onion.hop_data.payload["short_channel_id"]["short_channel_id"]
    except:
        raise OnionRoutingFailure(code=OnionFailureCode.INVALID_ONION_PAYLOAD, data=b'\x00\x00\x00')
    next_chan = self.lnworker.get_channel_by_short_id(next_chan_scid)
    local_height = chain.height()
    if next_chan is None:
        self.logger.info(f"cannot forward htlc. cannot find next_chan {next_chan_scid}")
        raise OnionRoutingFailure(code=OnionFailureCode.UNKNOWN_NEXT_PEER, data=b'')
    # channel_update of the outgoing edge, included in several failure messages
    outgoing_chan_upd = next_chan.get_outgoing_gossip_channel_update()[2:]
    outgoing_chan_upd_len = len(outgoing_chan_upd).to_bytes(2, byteorder="big")
    outgoing_chan_upd_message = outgoing_chan_upd_len + outgoing_chan_upd
    if not next_chan.can_send_update_add_htlc():
        self.logger.info(f"cannot forward htlc. next_chan {next_chan_scid} cannot send ctx updates. "
                         f"chan state {next_chan.get_state()!r}, peer state: {next_chan.peer_state!r}")
        raise OnionRoutingFailure(code=OnionFailureCode.TEMPORARY_CHANNEL_FAILURE, data=outgoing_chan_upd_message)
    try:
        next_amount_msat_htlc = processed_onion.hop_data.payload["amt_to_forward"]["amt_to_forward"]
    except:
        raise OnionRoutingFailure(code=OnionFailureCode.INVALID_ONION_PAYLOAD, data=b'\x00\x00\x00')
    if not next_chan.can_pay(next_amount_msat_htlc):
        self.logger.info(f"cannot forward htlc due to transient errors (likely due to insufficient funds)")
        raise OnionRoutingFailure(code=OnionFailureCode.TEMPORARY_CHANNEL_FAILURE, data=outgoing_chan_upd_message)
    try:
        next_cltv_expiry = processed_onion.hop_data.payload["outgoing_cltv_value"]["outgoing_cltv_value"]
    except:
        raise OnionRoutingFailure(code=OnionFailureCode.INVALID_ONION_PAYLOAD, data=b'\x00\x00\x00')
    # enforce our cltv delta for the outgoing edge
    if htlc.cltv_expiry - next_cltv_expiry < next_chan.forwarding_cltv_expiry_delta:
        data = htlc.cltv_expiry.to_bytes(4, byteorder="big") + outgoing_chan_upd_message
        raise OnionRoutingFailure(code=OnionFailureCode.INCORRECT_CLTV_EXPIRY, data=data)
    if htlc.cltv_expiry - lnutil.MIN_FINAL_CLTV_EXPIRY_ACCEPTED <= local_height \
            or next_cltv_expiry <= local_height:
        raise OnionRoutingFailure(code=OnionFailureCode.EXPIRY_TOO_SOON, data=outgoing_chan_upd_message)
    if max(htlc.cltv_expiry, next_cltv_expiry) > local_height + lnutil.NBLOCK_CLTV_EXPIRY_TOO_FAR_INTO_FUTURE:
        raise OnionRoutingFailure(code=OnionFailureCode.EXPIRY_TOO_FAR, data=b'')
    # enforce our fee policy for the outgoing edge
    forwarding_fees = fee_for_edge_msat(
        forwarded_amount_msat=next_amount_msat_htlc,
        fee_base_msat=next_chan.forwarding_fee_base_msat,
        fee_proportional_millionths=next_chan.forwarding_fee_proportional_millionths)
    if htlc.amount_msat - next_amount_msat_htlc < forwarding_fees:
        data = next_amount_msat_htlc.to_bytes(8, byteorder="big") + outgoing_chan_upd_message
        raise OnionRoutingFailure(code=OnionFailureCode.FEE_INSUFFICIENT, data=data)
    self.logger.info(f'forwarding htlc to {next_chan.node_id.hex()}')
    next_peer = self.lnworker.peers.get(next_chan.node_id)
    if next_peer is None:
        self.logger.info(f"failed to forward htlc: next_peer offline ({next_chan.node_id.hex()})")
        raise OnionRoutingFailure(code=OnionFailureCode.TEMPORARY_CHANNEL_FAILURE, data=outgoing_chan_upd_message)
    next_htlc = UpdateAddHtlc(
        amount_msat=next_amount_msat_htlc,
        payment_hash=htlc.payment_hash,
        cltv_expiry=next_cltv_expiry,
        timestamp=int(time.time()))
    next_htlc = next_chan.add_htlc(next_htlc)
    try:
        next_peer.send_message(
            "update_add_htlc",
            channel_id=next_chan.channel_id,
            id=next_htlc.htlc_id,
            cltv_expiry=next_cltv_expiry,
            amount_msat=next_amount_msat_htlc,
            payment_hash=next_htlc.payment_hash,
            onion_routing_packet=processed_onion.next_packet.to_bytes()
        )
    except BaseException as e:
        self.logger.info(f"failed to forward htlc: error sending message. {e}")
        raise OnionRoutingFailure(code=OnionFailureCode.TEMPORARY_CHANNEL_FAILURE, data=outgoing_chan_upd_message)
    next_peer.maybe_send_commitment(next_chan)
    return next_chan_scid, next_htlc.htlc_id
|
|
|
2021-02-09 15:09:27 +01:00
|
|
|
    def maybe_forward_trampoline(
            self, *,
            chan: Channel,
            htlc: UpdateAddHtlc,
            trampoline_onion: ProcessedOnionPacket):
        """As a trampoline node, forward an incoming trampoline HTLC onwards.

        Raises OnionRoutingFailure if forwarding is disabled or the trampoline
        payload is malformed. The actual payment is launched as a fire-and-forget
        asyncio task; its failure (if any) is recorded in
        lnworker.trampoline_forwarding_failures rather than raised here.
        """
        # both plain forwarding and trampoline forwarding must be enabled in the config
        forwarding_enabled = self.network.config.get('lightning_forward_payments', False)
        forwarding_trampoline_enabled = self.network.config.get('lightning_forward_trampoline_payments', False)
        if not (forwarding_enabled and forwarding_trampoline_enabled):
            self.logger.info(f"trampoline forwarding is disabled. failing htlc.")
            raise OnionRoutingFailure(code=OnionFailureCode.PERMANENT_CHANNEL_FAILURE, data=b'')

        payload = trampoline_onion.hop_data.payload
        payment_hash = htlc.payment_hash
        payment_data = payload.get('payment_data')
        if payment_data: # legacy case
            payment_secret = payment_data['payment_secret']
        else:
            # no payment_secret supplied: generate a fresh one for the outgoing payment
            payment_secret = os.urandom(32)

        try:
            outgoing_node_id = payload["outgoing_node_id"]["outgoing_node_id"]
            amt_to_forward = payload["amt_to_forward"]["amt_to_forward"]
            cltv_from_onion = payload["outgoing_cltv_value"]["outgoing_cltv_value"]
            if "invoice_features" in payload:
                # legacy trampoline: the final receiver does not understand trampoline onions,
                # so the sender packed the invoice details into the payload instead
                self.logger.info('forward_trampoline: legacy')
                next_trampoline_onion = None
                invoice_features = payload["invoice_features"]["invoice_features"]
                invoice_routing_info = payload["invoice_routing_info"]["invoice_routing_info"]
                # TODO use invoice_routing_info
                # TODO legacy mpp payment, use total_msat from trampoline onion
            else:
                # end-to-end trampoline: pass the peeled trampoline onion onwards
                self.logger.info('forward_trampoline: end-to-end')
                invoice_features = LnFeatures.BASIC_MPP_OPT
                next_trampoline_onion = trampoline_onion.next_packet
        except Exception as e:
            self.logger.exception('')
            raise OnionRoutingFailure(code=OnionFailureCode.INVALID_ONION_PAYLOAD, data=b'\x00\x00\x00')

        # these are the fee/cltv paid by the sender
        # pay_to_node will raise if they are not sufficient
        trampoline_cltv_delta = htlc.cltv_expiry - cltv_from_onion
        trampoline_fee = htlc.amount_msat - amt_to_forward

        @log_exceptions
        async def forward_trampoline_payment():
            try:
                await self.lnworker.pay_to_node(
                    node_pubkey=outgoing_node_id,
                    payment_hash=payment_hash,
                    payment_secret=payment_secret,
                    amount_to_pay=amt_to_forward,
                    min_cltv_expiry=cltv_from_onion,
                    r_tags=[],
                    invoice_features=invoice_features,
                    fwd_trampoline_onion=next_trampoline_onion,
                    fwd_trampoline_fee=trampoline_fee,
                    fwd_trampoline_cltv_delta=trampoline_cltv_delta,
                    attempts=1)
            except OnionRoutingFailure as e:
                # FIXME: cannot use payment_hash as key
                self.lnworker.trampoline_forwarding_failures[payment_hash] = e
            except PaymentFailure as e:
                # FIXME: adapt the error code
                error_reason = OnionRoutingFailure(code=OnionFailureCode.UNKNOWN_NEXT_PEER, data=b'')
                self.lnworker.trampoline_forwarding_failures[payment_hash] = error_reason

        # fire-and-forget: failures are reported via trampoline_forwarding_failures
        asyncio.ensure_future(forward_trampoline_payment())
2021-01-28 13:10:43 +01:00
|
|
|
def maybe_fulfill_htlc(
|
|
|
|
|
self, *,
|
|
|
|
|
chan: Channel,
|
|
|
|
|
htlc: UpdateAddHtlc,
|
2020-11-11 11:03:31 +01:00
|
|
|
processed_onion: ProcessedOnionPacket,
|
2021-03-08 22:18:06 +01:00
|
|
|
is_trampoline: bool = False) -> Tuple[Optional[bytes], Optional[OnionPacket]]:
|
2021-03-02 14:26:16 +01:00
|
|
|
|
2021-02-25 12:12:44 +01:00
|
|
|
"""As a final recipient of an HTLC, decide if we should fulfill it.
|
2021-03-03 20:36:48 +01:00
|
|
|
Return (preimage, trampoline_onion_packet) with at most a single element not None
|
2021-02-25 12:12:44 +01:00
|
|
|
"""
|
2021-03-02 18:53:08 +01:00
|
|
|
def log_fail_reason(reason: str):
|
|
|
|
|
self.logger.info(f"maybe_fulfill_htlc. will FAIL HTLC: chan {chan.short_channel_id}. "
|
|
|
|
|
f"{reason}. htlc={str(htlc)}. onion_payload={processed_onion.hop_data.payload}")
|
2021-02-25 12:12:44 +01:00
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
amt_to_forward = processed_onion.hop_data.payload["amt_to_forward"]["amt_to_forward"]
|
|
|
|
|
except:
|
2021-03-02 18:53:08 +01:00
|
|
|
log_fail_reason(f"'amt_to_forward' missing from onion")
|
2021-02-25 12:12:44 +01:00
|
|
|
raise OnionRoutingFailure(code=OnionFailureCode.INVALID_ONION_PAYLOAD, data=b'\x00\x00\x00')
|
2021-01-28 13:10:43 +01:00
|
|
|
|
2020-04-13 17:04:27 +02:00
|
|
|
# Check that our blockchain tip is sufficiently recent so that we have an approx idea of the height.
|
|
|
|
|
# We should not release the preimage for an HTLC that its sender could already time out as
|
|
|
|
|
# then they might try to force-close and it becomes a race.
|
|
|
|
|
chain = self.network.blockchain()
|
|
|
|
|
if chain.is_tip_stale():
|
2021-03-02 18:53:08 +01:00
|
|
|
log_fail_reason(f"our chain tip is stale")
|
2021-02-10 13:16:33 +01:00
|
|
|
raise OnionRoutingFailure(code=OnionFailureCode.TEMPORARY_NODE_FAILURE, data=b'')
|
2020-04-13 17:04:27 +02:00
|
|
|
local_height = chain.height()
|
2021-02-25 12:12:44 +01:00
|
|
|
exc_incorrect_or_unknown_pd = OnionRoutingFailure(
|
2021-03-02 14:26:16 +01:00
|
|
|
code=OnionFailureCode.INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS,
|
|
|
|
|
data=amt_to_forward.to_bytes(8, byteorder="big") + local_height.to_bytes(4, byteorder="big"))
|
2019-02-14 17:53:09 +01:00
|
|
|
if local_height + MIN_FINAL_CLTV_EXPIRY_ACCEPTED > htlc.cltv_expiry:
|
2021-03-02 18:53:08 +01:00
|
|
|
log_fail_reason(f"htlc.cltv_expiry is unreasonably close")
|
2021-02-25 12:12:44 +01:00
|
|
|
raise exc_incorrect_or_unknown_pd
|
2020-03-24 17:48:06 +01:00
|
|
|
try:
|
|
|
|
|
cltv_from_onion = processed_onion.hop_data.payload["outgoing_cltv_value"]["outgoing_cltv_value"]
|
|
|
|
|
except:
|
2021-03-02 18:53:08 +01:00
|
|
|
log_fail_reason(f"'outgoing_cltv_value' missing from onion")
|
2021-02-10 13:16:33 +01:00
|
|
|
raise OnionRoutingFailure(code=OnionFailureCode.INVALID_ONION_PAYLOAD, data=b'\x00\x00\x00')
|
2021-02-09 15:09:27 +01:00
|
|
|
|
|
|
|
|
if not is_trampoline:
|
|
|
|
|
if cltv_from_onion != htlc.cltv_expiry:
|
2021-03-02 18:53:08 +01:00
|
|
|
log_fail_reason(f"cltv_from_onion != htlc.cltv_expiry")
|
2021-02-09 15:09:27 +01:00
|
|
|
raise OnionRoutingFailure(
|
|
|
|
|
code=OnionFailureCode.FINAL_INCORRECT_CLTV_EXPIRY,
|
|
|
|
|
data=htlc.cltv_expiry.to_bytes(4, byteorder="big"))
|
2020-03-24 20:07:00 +01:00
|
|
|
try:
|
2021-01-27 19:27:06 +01:00
|
|
|
total_msat = processed_onion.hop_data.payload["payment_data"]["total_msat"]
|
2020-03-24 20:07:00 +01:00
|
|
|
except:
|
2021-01-27 19:27:06 +01:00
|
|
|
total_msat = amt_to_forward # fall back to "amt_to_forward"
|
|
|
|
|
|
2020-11-11 11:03:31 +01:00
|
|
|
if not is_trampoline and amt_to_forward != htlc.amount_msat:
|
2021-03-02 18:53:08 +01:00
|
|
|
log_fail_reason(f"amt_to_forward != htlc.amount_msat")
|
2021-02-10 13:16:33 +01:00
|
|
|
raise OnionRoutingFailure(
|
2021-01-27 19:27:06 +01:00
|
|
|
code=OnionFailureCode.FINAL_INCORRECT_HTLC_AMOUNT,
|
2021-02-25 12:49:06 +01:00
|
|
|
data=htlc.amount_msat.to_bytes(8, byteorder="big"))
|
2020-11-11 11:03:31 +01:00
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
payment_secret_from_onion = processed_onion.hop_data.payload["payment_data"]["payment_secret"]
|
|
|
|
|
except:
|
2021-02-24 20:34:34 +01:00
|
|
|
if total_msat > amt_to_forward:
|
|
|
|
|
# payment_secret is required for MPP
|
2021-03-02 18:53:08 +01:00
|
|
|
log_fail_reason(f"'payment_secret' missing from onion")
|
2021-02-25 12:12:44 +01:00
|
|
|
raise exc_incorrect_or_unknown_pd
|
2021-02-24 20:34:34 +01:00
|
|
|
# TODO fail here if invoice has set PAYMENT_SECRET_REQ
|
2021-03-03 20:36:48 +01:00
|
|
|
payment_secret_from_onion = None
|
|
|
|
|
|
2021-03-05 17:02:10 +01:00
|
|
|
if total_msat > amt_to_forward:
|
2021-03-11 16:53:55 +01:00
|
|
|
mpp_status = self.lnworker.check_received_mpp_htlc(payment_secret_from_onion, chan.short_channel_id, htlc, total_msat)
|
2021-03-05 17:02:10 +01:00
|
|
|
if mpp_status is None:
|
|
|
|
|
return None, None
|
|
|
|
|
if mpp_status is False:
|
|
|
|
|
log_fail_reason(f"MPP_TIMEOUT")
|
|
|
|
|
raise OnionRoutingFailure(code=OnionFailureCode.MPP_TIMEOUT, data=b'')
|
|
|
|
|
assert mpp_status is True
|
2021-03-03 20:36:48 +01:00
|
|
|
|
|
|
|
|
# if there is a trampoline_onion, maybe_fulfill_htlc will be called again
|
|
|
|
|
if processed_onion.trampoline_onion_packet:
|
2021-03-11 16:53:55 +01:00
|
|
|
# TODO: we should check that all trampoline_onions are the same
|
2021-03-03 20:36:48 +01:00
|
|
|
return None, processed_onion.trampoline_onion_packet
|
|
|
|
|
|
2021-03-23 17:30:40 +01:00
|
|
|
# TODO don't accept payments twice for same invoice
|
|
|
|
|
# TODO check invoice expiry
|
2021-03-03 20:36:48 +01:00
|
|
|
info = self.lnworker.get_payment_info(htlc.payment_hash)
|
|
|
|
|
if info is None:
|
|
|
|
|
log_fail_reason(f"no payment_info found for RHASH {htlc.payment_hash.hex()}")
|
|
|
|
|
raise exc_incorrect_or_unknown_pd
|
|
|
|
|
preimage = self.lnworker.get_preimage(htlc.payment_hash)
|
|
|
|
|
if payment_secret_from_onion:
|
2020-11-11 11:03:31 +01:00
|
|
|
if payment_secret_from_onion != derive_payment_secret_from_payment_preimage(preimage):
|
2021-03-02 18:53:08 +01:00
|
|
|
log_fail_reason(f'incorrect payment secret {payment_secret_from_onion.hex()} != {derive_payment_secret_from_payment_preimage(preimage).hex()}')
|
2021-02-25 12:12:44 +01:00
|
|
|
raise exc_incorrect_or_unknown_pd
|
2021-02-24 20:40:49 +01:00
|
|
|
invoice_msat = info.amount_msat
|
|
|
|
|
if not (invoice_msat is None or invoice_msat <= total_msat <= 2 * invoice_msat):
|
2021-03-02 18:53:08 +01:00
|
|
|
log_fail_reason(f"total_msat={total_msat} too different from invoice_msat={invoice_msat}")
|
2021-02-25 12:12:44 +01:00
|
|
|
raise exc_incorrect_or_unknown_pd
|
2021-03-08 18:51:52 +01:00
|
|
|
self.logger.info(f"maybe_fulfill_htlc. will FULFILL HTLC: chan {chan.short_channel_id}. htlc={str(htlc)}")
|
2021-03-09 09:35:43 +01:00
|
|
|
self.lnworker.set_request_status(htlc.payment_hash, PR_PAID)
|
2021-03-03 20:36:48 +01:00
|
|
|
return preimage, None
|
2018-10-18 22:56:40 +02:00
|
|
|
|
2020-03-02 15:06:45 +01:00
|
|
|
def fulfill_htlc(self, chan: Channel, htlc_id: int, preimage: bytes):
|
2019-09-07 07:29:22 +02:00
|
|
|
self.logger.info(f"_fulfill_htlc. chan {chan.short_channel_id}. htlc_id {htlc_id}")
|
2020-02-29 16:38:33 +01:00
|
|
|
assert chan.can_send_ctx_updates(), f"cannot send updates: {chan.short_channel_id}"
|
2021-03-11 19:09:35 +01:00
|
|
|
assert chan.hm.is_htlc_irrevocably_added_yet(htlc_proposer=REMOTE, htlc_id=htlc_id)
|
2021-03-11 19:31:22 +01:00
|
|
|
self.received_htlcs_pending_removal.add((chan, htlc_id))
|
2018-10-18 12:41:47 +02:00
|
|
|
chan.settle_htlc(preimage, htlc_id)
|
2021-03-09 09:35:43 +01:00
|
|
|
self.send_message(
|
|
|
|
|
"update_fulfill_htlc",
|
|
|
|
|
channel_id=chan.channel_id,
|
|
|
|
|
id=htlc_id,
|
|
|
|
|
payment_preimage=preimage)
|
2018-04-19 11:50:43 +02:00
|
|
|
|
2020-06-05 11:42:22 +02:00
|
|
|
def fail_htlc(self, *, chan: Channel, htlc_id: int, error_bytes: bytes):
|
|
|
|
|
self.logger.info(f"fail_htlc. chan {chan.short_channel_id}. htlc_id {htlc_id}.")
|
2020-02-29 16:38:33 +01:00
|
|
|
assert chan.can_send_ctx_updates(), f"cannot send updates: {chan.short_channel_id}"
|
2021-03-11 19:31:22 +01:00
|
|
|
self.received_htlcs_pending_removal.add((chan, htlc_id))
|
2018-10-18 22:56:40 +02:00
|
|
|
chan.fail_htlc(htlc_id)
|
2020-06-05 11:42:22 +02:00
|
|
|
self.send_message(
|
|
|
|
|
"update_fail_htlc",
|
|
|
|
|
channel_id=chan.channel_id,
|
|
|
|
|
id=htlc_id,
|
|
|
|
|
len=len(error_bytes),
|
|
|
|
|
reason=error_bytes)
|
|
|
|
|
|
2021-02-10 13:16:33 +01:00
|
|
|
def fail_malformed_htlc(self, *, chan: Channel, htlc_id: int, reason: OnionRoutingFailure):
|
2020-06-05 11:42:22 +02:00
|
|
|
self.logger.info(f"fail_malformed_htlc. chan {chan.short_channel_id}. htlc_id {htlc_id}.")
|
|
|
|
|
assert chan.can_send_ctx_updates(), f"cannot send updates: {chan.short_channel_id}"
|
|
|
|
|
if not (reason.code & OnionFailureCodeMetaFlag.BADONION and len(reason.data) == 32):
|
|
|
|
|
raise Exception(f"unexpected reason when sending 'update_fail_malformed_htlc': {reason!r}")
|
2021-03-11 19:31:22 +01:00
|
|
|
self.received_htlcs_pending_removal.add((chan, htlc_id))
|
|
|
|
|
chan.fail_htlc(htlc_id)
|
2020-06-05 11:42:22 +02:00
|
|
|
self.send_message(
|
|
|
|
|
"update_fail_malformed_htlc",
|
|
|
|
|
channel_id=chan.channel_id,
|
|
|
|
|
id=htlc_id,
|
|
|
|
|
sha256_of_onion=reason.data,
|
|
|
|
|
failure_code=reason.code)
|
2020-03-17 18:02:51 +01:00
|
|
|
|
|
|
|
|
def on_revoke_and_ack(self, chan: Channel, payload):
|
2020-04-13 16:02:05 +02:00
|
|
|
if chan.peer_state == PeerState.BAD:
|
2020-02-12 10:22:22 +01:00
|
|
|
return
|
2019-09-07 07:29:22 +02:00
|
|
|
self.logger.info(f'on_revoke_and_ack. chan {chan.short_channel_id}. ctn: {chan.get_oldest_unrevoked_ctn(REMOTE)}')
|
2019-06-24 11:13:18 +02:00
|
|
|
rev = RevokeAndAck(payload["per_commitment_secret"], payload["next_per_commitment_point"])
|
|
|
|
|
chan.receive_revocation(rev)
|
2019-02-04 12:37:30 +01:00
|
|
|
self.lnworker.save_channel(chan)
|
2019-05-30 22:17:38 +02:00
|
|
|
self.maybe_send_commitment(chan)
|
2021-11-04 16:32:40 +01:00
|
|
|
self._received_revack_event.set()
|
|
|
|
|
self._received_revack_event.clear()
|
2018-04-27 14:24:19 +02:00
|
|
|
|
2020-03-17 18:02:51 +01:00
|
|
|
def on_update_fee(self, chan: Channel, payload):
|
2020-03-12 04:08:13 +01:00
|
|
|
feerate = payload["feerate_per_kw"]
|
2019-02-12 11:26:38 +01:00
|
|
|
chan.update_fee(feerate, False)
|
2018-06-27 18:33:55 +02:00
|
|
|
|
2020-03-06 18:14:00 +01:00
|
|
|
    async def maybe_update_fee(self, chan: Channel):
        """Called when our fee estimates change; possibly send 'update_fee'.

        Only applies a new feerate when it diverged by more than a factor of 2
        from the channel's pending feerate (hysteresis). If we are not the
        channel initiator we cannot send 'update_fee'; in that case we only
        warn when the channel feerate looks dangerously low.
        """
        if not chan.can_send_ctx_updates():
            return
        feerate_per_kw = self.lnworker.current_feerate_per_kw()
        if not chan.constraints.is_initiator:
            # non-initiator: cannot update the fee, only monitor it
            if constants.net is not constants.BitcoinRegtest:
                chan_feerate = chan.get_latest_feerate(LOCAL)
                ratio = chan_feerate / feerate_per_kw
                if ratio < 0.5:
                    # Note that we trust the Electrum server about fee rates
                    # Thus, automated force-closing might not be a good idea
                    # Maybe we should display something in the GUI instead
                    self.logger.warning(
                        f"({chan.get_id_for_log()}) feerate is {chan_feerate} sat/kw, "
                        f"current recommended feerate is {feerate_per_kw} sat/kw, consider force closing!")
            return
        chan_fee = chan.get_next_feerate(REMOTE)
        if feerate_per_kw < chan_fee / 2:
            self.logger.info("FEES HAVE FALLEN")
        elif feerate_per_kw > chan_fee * 2:
            self.logger.info("FEES HAVE RISEN")
        elif chan.get_latest_ctn(REMOTE) == 0:
            # workaround eclair issue https://github.com/ACINQ/eclair/issues/1730
            self.logger.info("updating fee to bump remote ctn")
            if feerate_per_kw == chan_fee:
                # make sure the update is not a no-op, so the remote ctn advances
                feerate_per_kw += 1
        else:
            # feerate close enough to the pending one: nothing to do
            return
        self.logger.info(f"(chan: {chan.get_id_for_log()}) current pending feerate {chan_fee}. "
                         f"new feerate {feerate_per_kw}")
        chan.update_fee(feerate_per_kw, True)
        self.send_message(
            "update_fee",
            channel_id=chan.channel_id,
            feerate_per_kw=feerate_per_kw)
        self.maybe_send_commitment(chan)
|
2018-07-19 13:10:41 +02:00
|
|
|
|
2018-10-24 17:36:07 +02:00
|
|
|
@log_exceptions
|
2018-10-25 19:53:31 +02:00
|
|
|
async def close_channel(self, chan_id: bytes):
|
2018-10-24 17:36:07 +02:00
|
|
|
chan = self.channels[chan_id]
|
2022-04-26 18:05:45 +02:00
|
|
|
self.shutdown_received[chan_id] = self.asyncio_loop.create_future()
|
2020-02-26 09:04:54 +01:00
|
|
|
await self.send_shutdown(chan)
|
2018-10-24 17:36:07 +02:00
|
|
|
payload = await self.shutdown_received[chan_id]
|
2021-03-26 09:21:49 +01:00
|
|
|
try:
|
|
|
|
|
txid = await self._shutdown(chan, payload, is_local=True)
|
|
|
|
|
self.logger.info(f'({chan.get_id_for_log()}) Channel closed {txid}')
|
|
|
|
|
except asyncio.TimeoutError:
|
|
|
|
|
txid = chan.unconfirmed_closing_txid
|
|
|
|
|
self.logger.info(f'({chan.get_id_for_log()}) did not send closing_signed, {txid}')
|
|
|
|
|
if txid is None:
|
|
|
|
|
raise Exception('The remote peer did not send their final signature. The channel may not have been be closed')
|
2018-10-24 18:26:05 +02:00
|
|
|
return txid
|
2018-10-24 17:36:07 +02:00
|
|
|
|
2020-03-17 18:02:51 +01:00
|
|
|
    async def on_shutdown(self, chan: Channel, payload):
        """Handle a received 'shutdown' message.

        Validates the remote's scriptpubkey (against their upfront shutdown
        script if one was negotiated, otherwise against the allowed BOLT-02
        templates), then either resolves the pending close_channel future or
        — if the remote initiated the close — replies with our own 'shutdown'
        and runs the closing negotiation.
        """
        # TODO: A receiving node: if it hasn't received a funding_signed (if it is a
        # funder) or a funding_created (if it is a fundee):
        # SHOULD send an error and fail the channel.
        their_scriptpubkey = payload['scriptpubkey']
        their_upfront_scriptpubkey = chan.config[REMOTE].upfront_shutdown_script
        # BOLT-02 check if they use the upfront shutdown script they advertized
        if self.is_upfront_shutdown_script() and their_upfront_scriptpubkey:
            if not (their_scriptpubkey == their_upfront_scriptpubkey):
                await self.send_warning(
                    chan.channel_id,
                    "remote didn't use upfront shutdown script it commited to in channel opening",
                    close_connection=True)
        else:
            # BOLT-02 restrict the scriptpubkey to some templates:
            if self.is_shutdown_anysegwit() and match_script_against_template(their_scriptpubkey, transaction.SCRIPTPUBKEY_TEMPLATE_ANYSEGWIT):
                pass
            elif match_script_against_template(their_scriptpubkey, transaction.SCRIPTPUBKEY_TEMPLATE_WITNESS_V0):
                pass
            else:
                await self.send_warning(
                    chan.channel_id,
                    f'scriptpubkey in received shutdown message does not conform to any template: {their_scriptpubkey.hex()}',
                    close_connection=True)

        chan_id = chan.channel_id
        if chan_id in self.shutdown_received:
            # we initiated the close: hand the payload to the waiting close_channel()
            self.shutdown_received[chan_id].set_result(payload)
        else:
            # remote initiated the close: reply with our 'shutdown' and negotiate
            chan = self.channels[chan_id]
            await self.send_shutdown(chan)
            txid = await self._shutdown(chan, payload, is_local=False)
            self.logger.info(f'({chan.get_id_for_log()}) Channel closed by remote peer {txid}')
|
2018-10-24 17:36:07 +02:00
|
|
|
|
2021-02-24 20:03:12 +01:00
|
|
|
def can_send_shutdown(self, chan: Channel):
|
2020-04-13 16:02:05 +02:00
|
|
|
if chan.get_state() >= ChannelState.OPENING:
|
2020-02-27 19:17:58 +01:00
|
|
|
return True
|
|
|
|
|
if chan.constraints.is_initiator and chan.channel_id in self.funding_created_sent:
|
|
|
|
|
return True
|
|
|
|
|
if not chan.constraints.is_initiator and chan.channel_id in self.funding_signed_sent:
|
|
|
|
|
return True
|
|
|
|
|
return False
|
|
|
|
|
|
2020-02-26 09:04:54 +01:00
|
|
|
    async def send_shutdown(self, chan: Channel):
        """Send our 'shutdown' message for *chan* and move it to SHUTDOWN state.

        Uses the upfront shutdown script if one was negotiated, otherwise pays
        to the channel's sweep address. Waits for pending channel updates to
        settle first, as required by BOLT-02.
        """
        if not self.can_send_shutdown(chan):
            raise Exception('cannot send shutdown')
        if chan.config[LOCAL].upfront_shutdown_script:
            scriptpubkey = chan.config[LOCAL].upfront_shutdown_script
        else:
            scriptpubkey = bfh(bitcoin.address_to_script(chan.sweep_address))
        assert scriptpubkey
        # wait until no more pending updates (bolt2)
        chan.set_can_send_ctx_updates(False)
        while chan.has_pending_changes(REMOTE):
            await asyncio.sleep(0.1)
        self.send_message('shutdown', channel_id=chan.channel_id, len=len(scriptpubkey), scriptpubkey=scriptpubkey)
        chan.set_state(ChannelState.SHUTDOWN)
        # can fullfill or fail htlcs. cannot add htlcs, because state != OPEN
        chan.set_can_send_ctx_updates(True)
|
2018-10-24 17:36:07 +02:00
|
|
|
|
2022-03-08 11:34:57 +01:00
|
|
|
def get_shutdown_fee_range(self, chan, closing_tx, is_local):
|
|
|
|
|
""" return the closing fee and fee range we initially try to enforce """
|
|
|
|
|
config = self.network.config
|
|
|
|
|
if config.get('test_shutdown_fee'):
|
|
|
|
|
our_fee = config.get('test_shutdown_fee')
|
|
|
|
|
else:
|
|
|
|
|
fee_rate_per_kb = config.eta_target_to_fee(FEE_LN_ETA_TARGET)
|
|
|
|
|
if not fee_rate_per_kb: # fallback
|
|
|
|
|
fee_rate_per_kb = self.network.config.fee_per_kb()
|
|
|
|
|
our_fee = fee_rate_per_kb * closing_tx.estimated_size() // 1000
|
|
|
|
|
# TODO: anchors: remove this, as commitment fee rate can be below chain head fee rate?
|
|
|
|
|
# BOLT2: The sending node MUST set fee less than or equal to the base fee of the final ctx
|
|
|
|
|
max_fee = chan.get_latest_fee(LOCAL if is_local else REMOTE)
|
|
|
|
|
our_fee = min(our_fee, max_fee)
|
|
|
|
|
# config modern_fee_negotiation can be set in tests
|
|
|
|
|
if config.get('test_shutdown_legacy'):
|
|
|
|
|
our_fee_range = None
|
|
|
|
|
elif config.get('test_shutdown_fee_range'):
|
|
|
|
|
our_fee_range = config.get('test_shutdown_fee_range')
|
|
|
|
|
else:
|
|
|
|
|
# we aim at a fee between next block inclusion and some lower value
|
|
|
|
|
our_fee_range = {'min_fee_satoshis': our_fee // 2, 'max_fee_satoshis': our_fee * 2}
|
|
|
|
|
self.logger.info(f"Our fee range: {our_fee_range} and fee: {our_fee}")
|
|
|
|
|
return our_fee, our_fee_range
|
|
|
|
|
|
2018-10-24 17:36:07 +02:00
|
|
|
@log_exceptions
|
2021-02-24 20:03:12 +01:00
|
|
|
async def _shutdown(self, chan: Channel, payload, *, is_local: bool):
|
2019-08-14 21:47:57 +02:00
|
|
|
# wait until no HTLCs remain in either commitment transaction
|
2022-07-08 12:27:04 +02:00
|
|
|
while chan.has_unsettled_htlcs():
|
2020-02-24 16:32:18 +01:00
|
|
|
self.logger.info(f'(chan: {chan.short_channel_id}) waiting for htlcs to settle...')
|
2018-11-22 16:18:28 +01:00
|
|
|
await asyncio.sleep(1)
|
2020-02-27 14:40:58 +01:00
|
|
|
# if no HTLCs remain, we must not send updates
|
|
|
|
|
chan.set_can_send_ctx_updates(False)
|
2020-02-25 12:35:07 +01:00
|
|
|
their_scriptpubkey = payload['scriptpubkey']
|
2020-12-18 11:06:42 +01:00
|
|
|
if chan.config[LOCAL].upfront_shutdown_script:
|
|
|
|
|
our_scriptpubkey = chan.config[LOCAL].upfront_shutdown_script
|
|
|
|
|
else:
|
|
|
|
|
our_scriptpubkey = bfh(bitcoin.address_to_script(chan.sweep_address))
|
|
|
|
|
assert our_scriptpubkey
|
2020-02-25 12:35:07 +01:00
|
|
|
# estimate fee of closing tx
|
2022-03-15 09:15:48 +01:00
|
|
|
dummy_sig, dummy_tx = chan.make_closing_tx(our_scriptpubkey, their_scriptpubkey, fee_sat=0)
|
2023-02-17 11:07:19 +00:00
|
|
|
our_sig = None # type: Optional[bytes]
|
|
|
|
|
closing_tx = None # type: Optional[PartialTransaction]
|
2022-02-22 18:25:24 +01:00
|
|
|
is_initiator = chan.constraints.is_initiator
|
2022-03-15 09:15:48 +01:00
|
|
|
our_fee, our_fee_range = self.get_shutdown_fee_range(chan, dummy_tx, is_local)
|
2022-02-22 18:25:24 +01:00
|
|
|
|
|
|
|
|
def send_closing_signed(our_fee, our_fee_range, drop_remote):
|
2022-03-15 09:15:48 +01:00
|
|
|
nonlocal our_sig, closing_tx
|
2022-02-22 18:25:24 +01:00
|
|
|
if our_fee_range:
|
|
|
|
|
closing_signed_tlvs = {'fee_range': our_fee_range}
|
2021-11-26 09:45:06 +01:00
|
|
|
else:
|
|
|
|
|
closing_signed_tlvs = {}
|
2022-02-22 18:25:24 +01:00
|
|
|
our_sig, closing_tx = chan.make_closing_tx(our_scriptpubkey, their_scriptpubkey, fee_sat=our_fee, drop_remote=drop_remote)
|
2021-11-26 09:45:06 +01:00
|
|
|
self.logger.info(f"Sending fee range: {closing_signed_tlvs} and fee: {our_fee}")
|
|
|
|
|
self.send_message(
|
|
|
|
|
'closing_signed',
|
|
|
|
|
channel_id=chan.channel_id,
|
|
|
|
|
fee_satoshis=our_fee,
|
|
|
|
|
signature=our_sig,
|
|
|
|
|
closing_signed_tlvs=closing_signed_tlvs,
|
|
|
|
|
)
|
2022-02-22 18:25:24 +01:00
|
|
|
|
2020-02-26 14:16:21 +01:00
|
|
|
def verify_signature(tx, sig):
|
|
|
|
|
their_pubkey = chan.config[REMOTE].multisig_key.pubkey
|
|
|
|
|
preimage_hex = tx.serialize_preimage(0)
|
|
|
|
|
pre_hash = sha256d(bfh(preimage_hex))
|
|
|
|
|
return ecc.verify_signature(their_pubkey, sig, pre_hash)
|
2021-11-26 09:45:06 +01:00
|
|
|
|
2022-02-22 18:25:24 +01:00
|
|
|
async def receive_closing_signed():
|
2022-03-15 09:15:48 +01:00
|
|
|
nonlocal our_sig, closing_tx
|
2021-11-26 09:45:06 +01:00
|
|
|
try:
|
|
|
|
|
cs_payload = await self.wait_for_message('closing_signed', chan.channel_id)
|
|
|
|
|
except asyncio.exceptions.TimeoutError:
|
2022-03-09 15:52:46 +01:00
|
|
|
self.schedule_force_closing(chan.channel_id)
|
2022-02-22 18:25:24 +01:00
|
|
|
raise Exception("closing_signed not received, force closing.")
|
2020-03-12 04:08:13 +01:00
|
|
|
their_fee = cs_payload['fee_satoshis']
|
2022-02-22 18:25:24 +01:00
|
|
|
their_fee_range = cs_payload['closing_signed_tlvs'].get('fee_range')
|
2020-02-26 11:01:53 +01:00
|
|
|
their_sig = cs_payload['signature']
|
2022-02-22 18:25:24 +01:00
|
|
|
# perform checks
|
2020-02-26 11:01:53 +01:00
|
|
|
our_sig, closing_tx = chan.make_closing_tx(our_scriptpubkey, their_scriptpubkey, fee_sat=their_fee, drop_remote=False)
|
2020-02-26 15:49:55 +01:00
|
|
|
if verify_signature(closing_tx, their_sig):
|
2022-02-22 18:25:24 +01:00
|
|
|
drop_remote = False
|
2020-02-26 11:01:53 +01:00
|
|
|
else:
|
|
|
|
|
our_sig, closing_tx = chan.make_closing_tx(our_scriptpubkey, their_scriptpubkey, fee_sat=their_fee, drop_remote=True)
|
2020-02-26 15:49:55 +01:00
|
|
|
if verify_signature(closing_tx, their_sig):
|
2022-02-22 18:25:24 +01:00
|
|
|
drop_remote = True
|
2020-02-26 11:01:53 +01:00
|
|
|
else:
|
2021-10-22 12:58:04 +02:00
|
|
|
# this can happen if we consider our output too valuable to drop,
|
|
|
|
|
# but the remote drops it because it violates their dust limit
|
2020-02-26 11:01:53 +01:00
|
|
|
raise Exception('failed to verify their signature')
|
2021-10-22 12:58:04 +02:00
|
|
|
# at this point we know how the closing tx looks like
|
|
|
|
|
# check that their output is above their scriptpubkey's network dust limit
|
2021-12-06 16:53:44 +01:00
|
|
|
to_remote_set = closing_tx.get_output_idxs_from_scriptpubkey(their_scriptpubkey.hex())
|
2022-02-22 18:25:24 +01:00
|
|
|
if not drop_remote and to_remote_set:
|
2021-12-06 16:53:44 +01:00
|
|
|
to_remote_idx = to_remote_set.pop()
|
2021-10-22 12:58:04 +02:00
|
|
|
to_remote_amount = closing_tx.outputs()[to_remote_idx].value
|
|
|
|
|
transaction.check_scriptpubkey_template_and_dust(their_scriptpubkey, to_remote_amount)
|
2022-02-22 18:25:24 +01:00
|
|
|
return their_fee, their_fee_range, their_sig, drop_remote
|
2021-10-22 12:58:04 +02:00
|
|
|
|
2022-02-22 18:25:24 +01:00
|
|
|
def choose_new_fee(our_fee, our_fee_range, their_fee, their_fee_range, their_previous_fee):
    """Pick our next closing-fee proposal per the BOLT-2 closing negotiation.

    Handles both the 'modern' fee_range-based negotiation and the legacy
    alternating-proposal scheme.  Returns (our_fee, our_fee_range); note that
    our_fee_range may be mutated in place.  Raises (possibly after scheduling
    a force-close) if the negotiation must be aborted.
    Closes over `is_initiator`, `self` and `chan` from the enclosing scope.
    """
    assert our_fee != their_fee
    # whether we have already revealed our fee_range to the peer:
    # the initiator sends first; the non-initiator has sent iff it already replied once
    fee_range_sent = our_fee_range and (is_initiator or (their_previous_fee is not None))

    # The sending node, if it is not the funder:
    if our_fee_range and their_fee_range and not is_initiator and not self.network.config.get('test_shutdown_fee_range'):
        # SHOULD set max_fee_satoshis to at least the max_fee_satoshis received
        our_fee_range['max_fee_satoshis'] = max(their_fee_range['max_fee_satoshis'], our_fee_range['max_fee_satoshis'])
        # SHOULD set min_fee_satoshis to a fairly low value
        our_fee_range['min_fee_satoshis'] = min(their_fee_range['min_fee_satoshis'], our_fee_range['min_fee_satoshis'])
        # Note: the BOLT describes what the sending node SHOULD do.
        # However, this assumes that we have decided to send 'funding_signed' in response to their fee_range.
        # In practice, we might prefer to fail the channel in some cases (TODO)

    # the receiving node, if fee_satoshis matches its previously sent fee_range,
    if fee_range_sent and (our_fee_range['min_fee_satoshis'] <= their_fee <= our_fee_range['max_fee_satoshis']):
        # SHOULD reply with a closing_signed with the same fee_satoshis value if it is different from its previously sent fee_satoshis
        our_fee = their_fee
    # the receiving node, if the message contains a fee_range
    elif our_fee_range and their_fee_range:
        overlap_min = max(our_fee_range['min_fee_satoshis'], their_fee_range['min_fee_satoshis'])
        overlap_max = min(our_fee_range['max_fee_satoshis'], their_fee_range['max_fee_satoshis'])
        # if there is no overlap between that and its own fee_range
        if overlap_min > overlap_max:
            # TODO: the receiving node should first send a warning, and fail the channel
            # only if it doesn't receive a satisfying fee_range after a reasonable amount of time
            self.schedule_force_closing(chan.channel_id)
            raise Exception("There is no overlap between their and our fee range.")
        # otherwise, if it is the funder
        if is_initiator:
            # if fee_satoshis is not in the overlap between the sent and received fee_range:
            if not (overlap_min <= their_fee <= overlap_max):
                # MUST fail the channel
                self.schedule_force_closing(chan.channel_id)
                raise Exception("Their fee is not in the overlap region, we force closed.")
            # otherwise, MUST reply with the same fee_satoshis.
            our_fee = their_fee
        # otherwise (it is not the funder):
        else:
            # if it has already sent a closing_signed:
            if fee_range_sent:
                # fee_satoshis is not the same as the value we sent, we MUST fail the channel
                self.schedule_force_closing(chan.channel_id)
                raise Exception("Expected the same fee as ours, we force closed.")
            # otherwise:
            # MUST propose a fee_satoshis in the overlap between received and (about-to-be) sent fee_range.
            our_fee = (overlap_min + overlap_max) // 2
    else:
        # otherwise, if fee_satoshis is not strictly between its last-sent fee_satoshis
        # and its previously-received fee_satoshis, UNLESS it has since reconnected:
        if their_previous_fee and not (min(our_fee, their_previous_fee) < their_fee < max(our_fee, their_previous_fee)):
            # SHOULD fail the connection.
            raise Exception('Their fee is not between our last sent and their last sent fee.')
        # accept their fee if they are very close
        if abs(their_fee - our_fee) < 2:
            our_fee = their_fee
        else:
            # this will be "strictly between" (as in BOLT2) previous values because of the above
            our_fee = (our_fee + their_fee) // 2
    return our_fee, our_fee_range
|
|
|
|
|
|
|
|
|
|
# Fee negotiation: both parties exchange 'funding_signed' messages.
|
|
|
|
|
# The funder sends the first message, the non-funder sends the last message.
|
|
|
|
|
# In the 'modern' case, at most 3 messages are exchanged, because choose_new_fee of the funder either returns their_fee or fails
|
|
|
|
|
their_fee = None
|
|
|
|
|
drop_remote = False # does the peer drop its to_local output or not?
|
|
|
|
|
if is_initiator:
|
|
|
|
|
send_closing_signed(our_fee, our_fee_range, drop_remote)
|
|
|
|
|
while True:
|
|
|
|
|
their_previous_fee = their_fee
|
|
|
|
|
their_fee, their_fee_range, their_sig, drop_remote = await receive_closing_signed()
|
|
|
|
|
if our_fee == their_fee:
|
|
|
|
|
break
|
|
|
|
|
our_fee, our_fee_range = choose_new_fee(our_fee, our_fee_range, their_fee, their_fee_range, their_previous_fee)
|
|
|
|
|
if not is_initiator and our_fee == their_fee:
|
|
|
|
|
break
|
|
|
|
|
send_closing_signed(our_fee, our_fee_range, drop_remote)
|
|
|
|
|
if is_initiator and our_fee == their_fee:
|
|
|
|
|
break
|
2021-11-26 09:45:06 +01:00
|
|
|
if not is_initiator:
|
2022-02-22 18:25:24 +01:00
|
|
|
send_closing_signed(our_fee, our_fee_range, drop_remote)
|
2021-11-26 09:45:06 +01:00
|
|
|
|
2019-02-01 18:28:35 +01:00
|
|
|
# add signatures
|
2020-05-29 11:30:08 +02:00
|
|
|
closing_tx.add_signature_to_txin(
|
|
|
|
|
txin_idx=0,
|
|
|
|
|
signing_pubkey=chan.config[LOCAL].multisig_key.pubkey.hex(),
|
2023-02-17 11:35:03 +00:00
|
|
|
sig=(der_sig_from_sig_string(our_sig) + Sighash.to_sigbytes(Sighash.ALL)).hex())
|
2020-05-29 11:30:08 +02:00
|
|
|
closing_tx.add_signature_to_txin(
|
|
|
|
|
txin_idx=0,
|
|
|
|
|
signing_pubkey=chan.config[REMOTE].multisig_key.pubkey.hex(),
|
2023-02-17 11:35:03 +00:00
|
|
|
sig=(der_sig_from_sig_string(their_sig) + Sighash.to_sigbytes(Sighash.ALL)).hex())
|
2020-05-29 11:30:08 +02:00
|
|
|
# save local transaction and set state
|
2020-09-13 16:55:37 +02:00
|
|
|
try:
|
2022-06-01 23:03:35 +02:00
|
|
|
self.lnworker.wallet.adb.add_transaction(closing_tx)
|
2020-09-13 16:55:37 +02:00
|
|
|
except UnrelatedTransactionException:
|
|
|
|
|
pass # this can happen if (~all the balance goes to REMOTE)
|
2020-05-29 11:30:08 +02:00
|
|
|
chan.set_state(ChannelState.CLOSING)
|
2018-11-22 16:18:28 +01:00
|
|
|
# broadcast
|
2020-02-25 12:35:07 +01:00
|
|
|
await self.network.try_broadcasting(closing_tx, 'closing')
|
2018-11-22 16:18:28 +01:00
|
|
|
return closing_tx.txid()
|
2020-03-02 15:41:50 +01:00
|
|
|
|
|
|
|
|
async def htlc_switch(self):
    """Main HTLC processing loop for this peer: settles, fails, or forwards
    pending incoming HTLCs across all channels with this peer.

    Runs forever (until the surrounding task is cancelled).  Each iteration
    either times out after 0.1 sec or is woken early by a received
    revoke_and_ack or by an HTLC being resolved on a downstream peer.
    """
    await self.initialized
    while True:
        await self.ping_if_required()
        # signal waiters (see wait_one_htlc_switch_iteration) that an iteration finished
        self._htlc_switch_iterdone_event.set()
        self._htlc_switch_iterdone_event.clear()
        # We poll every 0.1 sec to check if there is work to do,
        # or we can also be triggered via events.
        # When forwarding an HTLC originating from this peer (the upstream),
        # we can get triggered for events that happen on the downstream peer.
        # TODO: trampoline forwarding relies on the polling
        async with ignore_after(0.1):
            async with OldTaskGroup(wait=any) as group:
                await group.spawn(self._received_revack_event.wait())
                await group.spawn(self.downstream_htlc_resolved_event.wait())
        # signal waiters that a new iteration is starting
        self._htlc_switch_iterstart_event.set()
        self._htlc_switch_iterstart_event.clear()
        self._maybe_cleanup_received_htlcs_pending_removal()
        for chan_id, chan in self.channels.items():
            if not chan.can_send_ctx_updates():
                continue
            self.maybe_send_commitment(chan)
            done = set()  # htlc_ids fully handled in this iteration
            unfulfilled = chan.unfulfilled_htlcs
            for htlc_id, (local_ctn, remote_ctn, onion_packet_hex, forwarding_info) in unfulfilled.items():
                if forwarding_info:
                    # record that HTLCs downstream of forwarding_info map back to this (upstream) peer
                    self.lnworker.downstream_htlc_to_upstream_peer_map[forwarding_info] = self.pubkey
                # only act on HTLCs that are irrevocably committed
                if not chan.hm.is_htlc_irrevocably_added_yet(htlc_proposer=REMOTE, htlc_id=htlc_id):
                    continue
                htlc = chan.hm.get_htlc_by_id(REMOTE, htlc_id)
                error_reason = None  # type: Optional[OnionRoutingFailure]
                error_bytes = None  # type: Optional[bytes]
                preimage = None
                fw_info = None
                onion_packet_bytes = bytes.fromhex(onion_packet_hex)
                onion_packet = None
                try:
                    onion_packet = OnionPacket.from_bytes(onion_packet_bytes)
                except OnionRoutingFailure as e:
                    # onion could not even be decoded -> fail_malformed_htlc branch below
                    error_reason = e
                else:
                    try:
                        preimage, fw_info, error_bytes = self.process_unfulfilled_htlc(
                            chan=chan,
                            htlc=htlc,
                            forwarding_info=forwarding_info,
                            onion_packet_bytes=onion_packet_bytes,
                            onion_packet=onion_packet)
                    except OnionRoutingFailure as e:
                        # wrap the failure in an onion error blob for the upstream
                        error_bytes = construct_onion_error(e, onion_packet, our_onion_private_key=self.privkey)
                if fw_info:
                    # HTLC was just forwarded: persist the forwarding info and the upstream mapping
                    unfulfilled[htlc_id] = local_ctn, remote_ctn, onion_packet_hex, fw_info
                    self.lnworker.downstream_htlc_to_upstream_peer_map[fw_info] = self.pubkey
                elif preimage or error_reason or error_bytes:
                    if preimage:
                        if not self.lnworker.enable_htlc_settle:
                            # settling disabled: leave the HTLC pending, retry on a later iteration
                            continue
                        self.fulfill_htlc(chan, htlc.htlc_id, preimage)
                    elif error_bytes:
                        self.fail_htlc(
                            chan=chan,
                            htlc_id=htlc.htlc_id,
                            error_bytes=error_bytes)
                    else:
                        self.fail_malformed_htlc(
                            chan=chan,
                            htlc_id=htlc.htlc_id,
                            reason=error_reason)
                    done.add(htlc_id)
            # cleanup
            for htlc_id in done:
                local_ctn, remote_ctn, onion_packet_hex, forwarding_info = unfulfilled.pop(htlc_id)
                if forwarding_info:
                    self.lnworker.downstream_htlc_to_upstream_peer_map.pop(forwarding_info, None)
            self.maybe_send_commitment(chan)
|
2021-02-10 13:16:33 +01:00
|
|
|
|
2021-03-11 19:31:22 +01:00
|
|
|
def _maybe_cleanup_received_htlcs_pending_removal(self) -> None:
    """Forget received HTLCs whose removal has become irrevocable,
    and pulse received_htlc_removed_event if any were dropped.
    """
    removed = {
        (chan, htlc_id)
        for chan, htlc_id in self.received_htlcs_pending_removal
        if chan.hm.is_htlc_irrevocably_removed_yet(htlc_proposer=REMOTE, htlc_id=htlc_id)
    }
    if not removed:
        return
    for entry in removed:
        self.received_htlcs_pending_removal.remove(entry)
    # wake up anyone awaiting this event, then re-arm it
    self.received_htlc_removed_event.set()
    self.received_htlc_removed_event.clear()
|
|
|
|
|
|
|
|
|
|
async def wait_one_htlc_switch_iteration(self) -> None:
    """Wait until the HTLC switch completes one full iteration, or until the
    peer disconnects — whichever comes first.
    """
    async def one_full_iteration():
        # a "full" iteration: we must see it start and then finish
        await self._htlc_switch_iterstart_event.wait()
        await self._htlc_switch_iterdone_event.wait()

    async with OldTaskGroup(wait=any) as group:
        await group.spawn(one_full_iteration())
        await group.spawn(self.got_disconnected.wait())
|
|
|
|
|
|
2021-03-18 07:48:30 +01:00
|
|
|
def process_unfulfilled_htlc(
        self, *,
        chan: Channel,
        htlc: UpdateAddHtlc,
        forwarding_info: Tuple[str, int],
        onion_packet_bytes: bytes,
        onion_packet: OnionPacket) -> Tuple[Optional[bytes], Union[bool, None, Tuple[str, int]], Optional[bytes]]:
    """Decide what to do with one irrevocably-added incoming HTLC:
    fulfill it, forward it, or fail it.

    return (preimage, fw_info, error_bytes) with at most a single element that is not None
    raise an OnionRoutingFailure if we need to fail the htlc
    """
    payment_hash = htlc.payment_hash
    processed_onion = self.process_onion_packet(
        onion_packet,
        payment_hash=payment_hash,
        onion_packet_bytes=onion_packet_bytes)
    if processed_onion.are_we_final:
        # either we are final recipient; or if trampoline, see cases below
        preimage, trampoline_onion_packet = self.maybe_fulfill_htlc(
            chan=chan,
            htlc=htlc,
            processed_onion=processed_onion)
        if trampoline_onion_packet:
            # trampoline- recipient or forwarding
            if not forwarding_info:
                # peel the inner (trampoline) onion to decide between the two
                trampoline_onion = self.process_onion_packet(
                    trampoline_onion_packet,
                    payment_hash=htlc.payment_hash,
                    onion_packet_bytes=onion_packet_bytes,
                    is_trampoline=True)
                if trampoline_onion.are_we_final:
                    # trampoline- we are final recipient of HTLC
                    preimage, _ = self.maybe_fulfill_htlc(
                        chan=chan,
                        htlc=htlc,
                        processed_onion=trampoline_onion,
                        is_trampoline=True)
                else:
                    # trampoline- HTLC we are supposed to forward, but haven't forwarded yet
                    if not self.lnworker.enable_htlc_forwarding:
                        return None, None, None
                    self.maybe_forward_trampoline(
                        chan=chan,
                        htlc=htlc,
                        trampoline_onion=trampoline_onion)
                    # return True so that this code gets executed only once
                    return None, True, None
            else:
                # trampoline- HTLC we are supposed to forward, and have already forwarded
                preimage = self.lnworker.get_preimage(payment_hash)
                error_reason = self.lnworker.trampoline_forwarding_failures.pop(payment_hash, None)
                if error_reason:
                    self.logger.info(f'trampoline forwarding failure: {error_reason.code_name()}')
                    raise error_reason

    elif not forwarding_info:
        # HTLC we are supposed to forward, but haven't forwarded yet
        if not self.lnworker.enable_htlc_forwarding:
            return None, None, None
        next_chan_id, next_htlc_id = self.maybe_forward_htlc(
            htlc=htlc,
            processed_onion=processed_onion)
        # fw_info identifies the downstream (channel, htlc) pair we created
        fw_info = (next_chan_id.hex(), next_htlc_id)
        return None, fw_info, None
    else:
        # HTLC we are supposed to forward, and have already forwarded
        preimage = self.lnworker.get_preimage(payment_hash)
        next_chan_id_hex, htlc_id = forwarding_info
        next_chan = self.lnworker.get_channel_by_short_id(bytes.fromhex(next_chan_id_hex))
        if next_chan:
            # propagate any failure reported by the downstream channel back upstream
            error_bytes, error_reason = next_chan.pop_fail_htlc_reason(htlc_id)
            if error_bytes:
                return None, None, error_bytes
            if error_reason:
                raise error_reason
    if preimage:
        return preimage, None, None
    return None, None, None
|
|
|
|
|
|
2021-02-24 20:03:12 +01:00
|
|
|
def process_onion_packet(
        self,
        onion_packet: OnionPacket, *,
        payment_hash: bytes,
        onion_packet_bytes: bytes,
        is_trampoline: bool = False) -> ProcessedOnionPacket:
    """Peel one layer off *onion_packet*, mapping decode errors to BOLT-4 failures.

    Raises OnionRoutingFailure when the onion cannot be processed
    (or when a test-only config flag forces a failure).
    """
    fail_data = sha256(onion_packet_bytes)

    def _err(code, data):
        # helper: build the failure that gets propagated upstream
        return OnionRoutingFailure(code=code, data=data)

    try:
        peeled = process_onion_packet(
            onion_packet,
            associated_data=payment_hash,
            our_onion_private_key=self.privkey,
            is_trampoline=is_trampoline)
    except UnsupportedOnionPacketVersion:
        raise _err(OnionFailureCode.INVALID_ONION_VERSION, fail_data)
    except InvalidOnionPubkey:
        raise _err(OnionFailureCode.INVALID_ONION_KEY, fail_data)
    except InvalidOnionMac:
        raise _err(OnionFailureCode.INVALID_ONION_HMAC, fail_data)
    except Exception as e:
        self.logger.info(f"error processing onion packet: {e!r}")
        raise _err(OnionFailureCode.INVALID_ONION_VERSION, fail_data)
    # test-only hooks (config flags) to force failure modes:
    if self.network.config.get('test_fail_malformed_htlc'):
        raise _err(OnionFailureCode.INVALID_ONION_VERSION, fail_data)
    if self.network.config.get('test_fail_htlcs_with_temp_node_failure'):
        raise _err(OnionFailureCode.TEMPORARY_NODE_FAILURE, b'')
    return peeled
|