2018-10-25 18:28:18 +02:00
|
|
|
import asyncio
|
2022-08-10 18:16:17 +02:00
|
|
|
import shutil
|
2018-10-25 18:28:18 +02:00
|
|
|
import tempfile
|
2018-10-25 21:59:16 +02:00
|
|
|
from decimal import Decimal
|
|
|
|
|
import os
|
|
|
|
|
from contextlib import contextmanager
|
|
|
|
|
from collections import defaultdict
|
2019-05-02 18:09:11 +02:00
|
|
|
import logging
|
2019-12-11 23:07:47 +01:00
|
|
|
import concurrent
|
|
|
|
|
from concurrent import futures
|
2020-03-06 21:54:05 +01:00
|
|
|
import unittest
|
2021-03-17 09:32:23 +01:00
|
|
|
from typing import Iterable, NamedTuple, Tuple, List, Dict
|
2020-03-06 21:54:05 +01:00
|
|
|
|
2022-02-08 12:34:49 +01:00
|
|
|
from aiorpcx import timeout_after, TaskTimeout
|
2018-10-25 21:59:16 +02:00
|
|
|
|
2021-07-02 18:44:39 +02:00
|
|
|
import electrum
|
|
|
|
|
import electrum.trampoline
|
2020-12-29 17:40:01 +01:00
|
|
|
from electrum import bitcoin
|
asyncio: stop using get_event_loop(). introduce ~singleton loop.
asyncio.get_event_loop() became deprecated in python3.10. (see https://github.com/python/cpython/issues/83710)
```
.../electrum/electrum/daemon.py:470: DeprecationWarning: There is no current event loop
self.asyncio_loop = asyncio.get_event_loop()
.../electrum/electrum/network.py:276: DeprecationWarning: There is no current event loop
self.asyncio_loop = asyncio.get_event_loop()
```
Also, according to that thread, "set_event_loop() [... is] not deprecated by oversight".
So, we stop using get_event_loop() and set_event_loop() in our own code.
Note that libraries we use (such as the stdlib for python <3.10), might call get_event_loop,
which then relies on us having called set_event_loop e.g. for the GUI thread. To work around
this, a custom event loop policy providing a get_event_loop implementation is used.
Previously, we have been using a single asyncio event loop, created with
util.create_and_start_event_loop, and code in many places got a reference to this loop
using asyncio.get_event_loop().
Now, we still use a single asyncio event loop, but it is now stored as a global in
util._asyncio_event_loop (access with util.get_asyncio_loop()).
I believe these changes also fix https://github.com/spesmilo/electrum/issues/5376
2022-04-29 18:24:49 +02:00
|
|
|
from electrum import util
|
2020-02-27 13:41:40 +01:00
|
|
|
from electrum import constants
|
2018-10-25 21:59:16 +02:00
|
|
|
from electrum.network import Network
|
|
|
|
|
from electrum.ecc import ECPrivkey
|
|
|
|
|
from electrum import simple_config, lnutil
|
|
|
|
|
from electrum.lnaddr import lnencode, LnAddr, lndecode
|
|
|
|
|
from electrum.bitcoin import COIN, sha256
|
2023-02-17 11:35:03 +00:00
|
|
|
from electrum.util import NetworkRetryManager, bfh, OldTaskGroup, EventListener
|
2022-01-26 16:59:10 +01:00
|
|
|
from electrum.lnpeer import Peer
|
2018-10-25 21:59:16 +02:00
|
|
|
from electrum.lnutil import LNPeerAddr, Keypair, privkey_to_pubkey
|
2020-03-16 22:07:00 +01:00
|
|
|
from electrum.lnutil import PaymentFailure, LnFeatures, HTLCOwner
|
2020-04-13 16:02:05 +02:00
|
|
|
from electrum.lnchannel import ChannelState, PeerState, Channel
|
2020-05-06 11:00:58 +02:00
|
|
|
from electrum.lnrouter import LNPathFinder, PathEdge, LNPathInconsistent
|
2019-06-18 13:49:31 +02:00
|
|
|
from electrum.channel_db import ChannelDB
|
2019-10-09 19:23:09 +02:00
|
|
|
from electrum.lnworker import LNWallet, NoPathFound
|
2019-02-05 17:56:01 +01:00
|
|
|
from electrum.lnmsg import encode_msg, decode_msg
|
2021-03-19 20:51:38 +01:00
|
|
|
from electrum import lnmsg
|
2020-03-06 21:54:05 +01:00
|
|
|
from electrum.logging import console_stderr_handler, Logger
|
2021-03-08 22:18:06 +01:00
|
|
|
from electrum.lnworker import PaymentInfo, RECEIVED
|
2020-05-06 11:00:58 +02:00
|
|
|
from electrum.lnonion import OnionFailureCode
|
2022-05-24 23:49:58 +02:00
|
|
|
from electrum.lnutil import derive_payment_secret_from_payment_preimage, UpdateAddHtlc
|
2021-03-02 18:35:07 +01:00
|
|
|
from electrum.lnutil import LOCAL, REMOTE
|
2021-03-08 22:18:06 +01:00
|
|
|
from electrum.invoices import PR_PAID, PR_UNPAID
|
2022-01-26 16:51:42 +01:00
|
|
|
from electrum.interface import GracefulDisconnect
|
2022-08-10 18:16:17 +02:00
|
|
|
from electrum.simple_config import SimpleConfig
|
2018-10-25 21:59:16 +02:00
|
|
|
|
2019-02-09 10:29:33 +01:00
|
|
|
from .test_lnchannel import create_test_channels
|
2020-03-04 18:54:20 +01:00
|
|
|
from .test_bitcoin import needs_test_with_all_chacha20_implementations
|
2023-02-18 06:44:30 +00:00
|
|
|
from . import ElectrumTestCase
|
2018-10-25 18:28:18 +02:00
|
|
|
|
2018-10-25 21:59:16 +02:00
|
|
|
def keypair():
    """Generate and return a fresh random node ``Keypair`` for tests."""
    secret = ECPrivkey.generate_random_key().get_secret_bytes()
    return Keypair(
        pubkey=privkey_to_pubkey(secret),
        privkey=secret)
|
|
|
|
|
|
|
|
|
|
@contextmanager
def noop_lock():
    """A do-nothing stand-in for a lock: usable in ``with`` blocks, holds nothing."""
    yield
|
|
|
|
|
|
2018-10-25 18:28:18 +02:00
|
|
|
class MockNetwork:
    """Test double for ``electrum.network.Network`` as used by LN code.

    Broadcast transactions are collected into *tx_queue* instead of going to
    the network, while a real in-memory ChannelDB/LNPathFinder is served so
    pathfinding code can run against it.
    """
    def __init__(self, tx_queue, *, config: SimpleConfig):
        self.callbacks = defaultdict(list)
        self.lnwatcher = None
        self.interface = None
        self.config = config
        # the shared singleton loop (see util.get_asyncio_loop docstring)
        self.asyncio_loop = util.get_asyncio_loop()
        # ChannelDB expects a Network instance; this mock quacks enough like one
        self.channel_db = ChannelDB(self)
        # pretend the gossip DB has finished loading so pathfinding does not block
        self.channel_db.data_loaded.set()
        self.path_finder = LNPathFinder(self.channel_db)
        self.tx_queue = tx_queue
        self._blockchain = MockBlockchain()

    @property
    def callback_lock(self):
        # no real cross-thread concurrency in these tests -> a no-op lock suffices
        return noop_lock()

    def get_local_height(self):
        # the test chain is pinned at height 0
        return 0

    def blockchain(self):
        return self._blockchain

    async def broadcast_transaction(self, tx):
        # instead of broadcasting, hand the tx to the test via the queue
        if self.tx_queue:
            await self.tx_queue.put(tx)

    async def try_broadcasting(self, tx, name):
        # *name* is unused here (real impl uses it for logging)
        await self.broadcast_transaction(tx)
|
2020-03-06 12:40:42 +01:00
|
|
|
|
2020-04-13 17:04:27 +02:00
|
|
|
|
|
|
|
|
class MockBlockchain:
    """Trivial blockchain stub: pinned at height zero with a never-stale tip."""

    def height(self):
        # tests always run at block height zero
        return 0

    def is_tip_stale(self):
        # the mock tip is always considered fresh
        return False
|
|
|
|
|
|
|
|
|
|
|
2022-06-01 23:03:35 +02:00
|
|
|
class MockADB:
    """Stand-in for the wallet's address-synchronizer db; discards transactions."""

    def add_transaction(self, tx):
        """Accept *tx* and drop it; nothing is persisted in tests."""
|
|
|
|
|
|
2018-11-07 18:00:28 +01:00
|
|
|
class MockWallet:
    """Bare-bones wallet double exposing only what LN code touches."""

    # NOTE: class-level, i.e. shared between all instances -- fine for these tests
    receive_requests = {}
    adb = MockADB()

    def get_request(self, key):
        """No stored payment requests in tests; always returns None."""

    def get_key_for_receive_request(self, x):
        """Not needed by these tests."""

    def set_label(self, x, y):
        """Labels are discarded."""

    def save_db(self):
        """Nothing to persist."""

    def is_lightning_backup(self):
        return False

    def is_mine(self, addr):
        # treat every address as belonging to this wallet
        return True

    def get_fingerprint(self):
        return ''
|
|
|
|
|
|
2020-05-29 11:30:08 +02:00
|
|
|
|
2022-06-16 12:05:05 +02:00
|
|
|
class MockLNWallet(Logger, EventListener, NetworkRetryManager[LNPeerAddr]):
    """Test double for LNWallet.

    Keeps just enough mutable state for lnpeer/payment tests, and reuses the
    real LNWallet payment logic by aliasing many of its (unbound) methods at
    class scope -- see the block of assignments at the bottom of the class.
    """
    MPP_EXPIRY = 2  # HTLC timestamps are cast to int, so this cannot be 1
    PAYMENT_TIMEOUT = 120
    TIMEOUT_SHUTDOWN_FAIL_PENDING_HTLCS = 0
    INITIAL_TRAMPOLINE_FEE_LEVEL = 0

    def __init__(self, *, local_keypair: Keypair, chans: Iterable['Channel'], tx_queue, name):
        self.name = name
        Logger.__init__(self)
        NetworkRetryManager.__init__(self, max_retry_delay_normal=1, init_retry_delay_normal=1)
        self.node_keypair = local_keypair
        # per-worker temp dir for SimpleConfig; removed by the test's tearDown
        self._user_dir = tempfile.mkdtemp(prefix="electrum-lnpeer-test-")
        self.config = SimpleConfig({}, read_user_dir_function=lambda: self._user_dir)
        self.network = MockNetwork(tx_queue, config=self.config)
        self.taskgroup = OldTaskGroup()
        self.lnwatcher = None
        self.listen_server = None
        self._channels = {chan.channel_id: chan for chan in chans}
        self.payment_info = {}
        self.logs = defaultdict(list)
        self.wallet = MockWallet()
        # feature bits this node will advertise to its peers
        self.features = LnFeatures(0)
        self.features |= LnFeatures.OPTION_DATA_LOSS_PROTECT_OPT
        self.features |= LnFeatures.OPTION_UPFRONT_SHUTDOWN_SCRIPT_OPT
        self.features |= LnFeatures.VAR_ONION_OPT
        self.features |= LnFeatures.PAYMENT_SECRET_OPT
        self.features |= LnFeatures.OPTION_TRAMPOLINE_ROUTING_OPT_ELECTRUM
        self.features |= LnFeatures.OPTION_CHANNEL_TYPE_OPT
        self.features |= LnFeatures.OPTION_SCID_ALIAS_OPT
        self.pending_payments = defaultdict(asyncio.Future)
        # make this worker act as lnworker for all its channels
        for chan in chans:
            chan.lnworker = self
        self._peers = {}  # bytes -> Peer
        # used in tests
        self.enable_htlc_settle = True
        self.enable_htlc_forwarding = True
        self.received_mpp_htlcs = dict()
        self.sent_htlcs = defaultdict(asyncio.Queue)
        self.sent_htlcs_info = dict()
        self.sent_buckets = defaultdict(set)
        self.trampoline_forwarding_failures = {}
        self.inflight_payments = set()
        self.preimages = {}
        self.stopping_soon = False
        self.downstream_htlc_to_upstream_peer_map = {}

        self.logger.info(f"created LNWallet[{name}] with nodeID={local_keypair.pubkey.hex()}")

    def pay_scheduled_invoices(self):
        # no invoice scheduling in tests
        pass

    def get_invoice_status(self, key):
        # not needed by these tests
        pass

    @property
    def lock(self):
        # no real concurrency in tests -> a no-op lock suffices
        return noop_lock()

    @property
    def channel_db(self):
        return self.network.channel_db if self.network else None

    def uses_trampoline(self):
        # without a gossip channel_db, payments must be routed via trampoline
        return not bool(self.channel_db)

    @property
    def channels(self):
        return self._channels

    @property
    def peers(self):
        return self._peers

    def get_channel_by_short_id(self, short_channel_id):
        # linear scan is fine for the handful of channels used in tests
        with self.lock:
            for chan in self._channels.values():
                if chan.short_channel_id == short_channel_id:
                    return chan

    def channel_state_changed(self, chan):
        pass

    def save_channel(self, chan):
        print("Ignoring channel save")

    def diagnostic_name(self):
        # used by Logger to label this worker's log lines
        return self.name

    async def stop(self):
        await LNWallet.stop(self)
        if self.channel_db:
            # also stop the channel_db's SQL thread, otherwise it may try to log
            # after the test's log stream is closed
            self.channel_db.stop()
            await self.channel_db.stopped_event.wait()

    async def create_routes_from_invoice(self, amount_msat: int, decoded_invoice: LnAddr, *, full_path=None):
        # convenience wrapper around the (real) create_routes_for_payment
        return [r async for r in self.create_routes_for_payment(
            amount_msat=amount_msat,
            final_total_msat=amount_msat,
            invoice_pubkey=decoded_invoice.pubkey.serialize(),
            min_cltv_expiry=decoded_invoice.get_min_final_cltv_expiry(),
            r_tags=decoded_invoice.get_routing_info('r'),
            invoice_features=decoded_invoice.get_features(),
            trampoline_fee_level=0,
            use_two_trampolines=False,
            payment_hash=decoded_invoice.paymenthash,
            payment_secret=decoded_invoice.payment_secret,
            full_path=full_path)]

    # Borrow the real LNWallet implementations -- these are the code paths
    # actually under test; everything above only provides their environment.
    get_payments = LNWallet.get_payments
    get_payment_info = LNWallet.get_payment_info
    save_payment_info = LNWallet.save_payment_info
    set_invoice_status = LNWallet.set_invoice_status
    set_request_status = LNWallet.set_request_status
    set_payment_status = LNWallet.set_payment_status
    get_payment_status = LNWallet.get_payment_status
    check_received_mpp_htlc = LNWallet.check_received_mpp_htlc
    htlc_fulfilled = LNWallet.htlc_fulfilled
    htlc_failed = LNWallet.htlc_failed
    save_preimage = LNWallet.save_preimage
    get_preimage = LNWallet.get_preimage
    create_route_for_payment = LNWallet.create_route_for_payment
    create_routes_for_payment = LNWallet.create_routes_for_payment
    _check_invoice = staticmethod(LNWallet._check_invoice)
    pay_to_route = LNWallet.pay_to_route
    pay_to_node = LNWallet.pay_to_node
    pay_invoice = LNWallet.pay_invoice
    force_close_channel = LNWallet.force_close_channel
    schedule_force_closing = LNWallet.schedule_force_closing
    get_first_timestamp = lambda self: 0
    on_peer_successfully_established = LNWallet.on_peer_successfully_established
    get_channel_by_id = LNWallet.get_channel_by_id
    channels_for_peer = LNWallet.channels_for_peer
    calc_routing_hints_for_invoice = LNWallet.calc_routing_hints_for_invoice
    get_channels_for_receiving = LNWallet.get_channels_for_receiving
    handle_error_code_from_failed_htlc = LNWallet.handle_error_code_from_failed_htlc
    is_trampoline_peer = LNWallet.is_trampoline_peer
    wait_for_received_pending_htlcs_to_get_removed = LNWallet.wait_for_received_pending_htlcs_to_get_removed
    #on_event_proxy_set = LNWallet.on_event_proxy_set
    _decode_channel_update_msg = LNWallet._decode_channel_update_msg
    _handle_chanupd_from_failed_htlc = LNWallet._handle_chanupd_from_failed_htlc
    _on_maybe_forwarded_htlc_resolved = LNWallet._on_maybe_forwarded_htlc_resolved
    _force_close_channel = LNWallet._force_close_channel
    suggest_splits = LNWallet.suggest_splits
|
2020-03-04 18:09:43 +01:00
|
|
|
|
2018-10-25 18:28:18 +02:00
|
|
|
|
|
|
|
|
class MockTransport:
    """In-memory LN transport double: incoming messages arrive via a queue."""

    def __init__(self, name):
        self._name = name
        self.peer_addr = None
        self.queue = asyncio.Queue()  # holds incoming messages

    def name(self):
        """Return the label identifying this transport in logs."""
        return self._name

    async def read_messages(self):
        """Yield incoming messages from the queue, forever.

        An ``asyncio.Event`` placed on the queue is not yielded; it is awaited
        instead, which lets tests artificially delay delivery of whatever was
        queued after it.
        """
        while True:
            item = await self.queue.get()
            if not isinstance(item, asyncio.Event):
                yield item
            else:
                await item.wait()
|
2018-10-25 18:28:18 +02:00
|
|
|
|
2018-10-25 21:59:16 +02:00
|
|
|
class NoFeaturesTransport(MockTransport):
    """
    Answers the init message with an init that doesn't signal any features.
    Used for testing that we require DATA_LOSS_PROTECT.
    """
    def send_bytes(self, data):
        msg = decode_msg(data)
        print(msg)
        if msg[0] == 'init':
            # reply with all feature bits cleared
            self.queue.put_nowait(encode_msg('init', lflen=1, gflen=1, localfeatures=b"\x00", globalfeatures=b"\x00"))
|
2018-10-25 18:28:18 +02:00
|
|
|
|
2018-10-25 21:59:16 +02:00
|
|
|
class PutIntoOthersQueueTransport(MockTransport):
    """Transport cross-wired to a sibling: sent bytes land in its queue."""

    def __init__(self, keypair, name):
        super().__init__(name)
        self.privkey = keypair.privkey
        self.other_mock_transport = None  # wired up by transport_pair()

    def send_bytes(self, data):
        # deliver synchronously into the paired transport's incoming queue
        self.other_mock_transport.queue.put_nowait(data)
|
|
|
|
|
|
2020-04-06 19:06:27 +02:00
|
|
|
def transport_pair(k1, k2, name1, name2):
    """Create two transports cross-wired so each one sends into the other's queue."""
    left = PutIntoOthersQueueTransport(k1, name1)
    right = PutIntoOthersQueueTransport(k2, name2)
    left.other_mock_transport, right.other_mock_transport = right, left
    return left, right
|
|
|
|
|
|
2020-05-06 10:44:38 +02:00
|
|
|
|
2021-11-04 18:04:16 +01:00
|
|
|
class PeerInTests(Peer):
    """Peer subclass for tests, with incoming-message rate-limiting disabled."""
    DELAY_INC_MSG_PROCESSING_SLEEP = 0  # disable rate-limiting
|
|
|
|
|
|
|
|
|
|
|
2021-07-16 14:18:29 +02:00
|
|
|
# Channel parameter templates used to build test graphs.
# Balances are in millisatoshis: 10 BTC total capacity, split evenly.
high_fee_channel = {
    'local_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
    'remote_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
    'local_base_fee_msat': 500_000,
    'local_fee_rate_millionths': 500,
    'remote_base_fee_msat': 500_000,
    'remote_fee_rate_millionths': 500,
}

# Same capacity as high_fee_channel, but with much cheaper forwarding fees.
low_fee_channel = {
    'local_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
    'remote_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
    'local_base_fee_msat': 1_000,
    'local_fee_rate_millionths': 1,
    'remote_base_fee_msat': 1_000,
    'remote_fee_rate_millionths': 1,
}

# All funds on the remote side: nothing can be sent over this channel locally.
depleted_channel = {
    'local_balance_msat': 0,
    'remote_balance_msat': 10 * bitcoin.COIN * 1000,
    'local_base_fee_msat': 1_000,
    'local_fee_rate_millionths': 1,
    'remote_base_fee_msat': 1_000,
    'remote_fee_rate_millionths': 1,
}

# Topologies for prepare_chans_and_peers_in_graph.
# square_graph: alice--{bob,carol}--dave; bob and carol are configured to forward.
GRAPH_DEFINITIONS = {
    'square_graph': {
        'alice': {
            'channels': {
                # we should use copies of channel definitions if
                # we want to independently alter them in a test
                'bob': high_fee_channel.copy(),
                'carol': low_fee_channel.copy(),
            },
        },
        'bob': {
            'channels': {
                'dave': high_fee_channel.copy(),
            },
            'config': {
                'lightning_forward_payments': True,
                'lightning_forward_trampoline_payments': True,
            },
        },
        'carol': {
            'channels': {
                'dave': low_fee_channel.copy(),
            },
            'config': {
                'lightning_forward_payments': True,
                'lightning_forward_trampoline_payments': True,
            },
        },
        'dave': {
        },
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Graph(NamedTuple):
    """The workers, peers and channels of a test graph, keyed by node name(s)."""
    workers: Dict[str, MockLNWallet]
    # (a, b) -> a's Peer object towards b
    peers: Dict[Tuple[str, str], Peer]
    # (a, b) -> a's side of the a<->b channel
    channels: Dict[Tuple[str, str], Channel]
|
tests: fix tearDown() issue in test_lnrouter.py
similar to 05fd42454842bdce853e96d6b3ffbb043960f7c4
from logs when running tests:
--- Logging error ---
Traceback (most recent call last):
File "...\Python39\lib\logging\__init__.py", line 1082, in emit
stream.write(msg + self.terminator)
ValueError: I/O operation on closed file.
Call stack:
File "...\Python39\lib\threading.py", line 912, in _bootstrap
self._bootstrap_inner()
File "...\Python39\lib\threading.py", line 954, in _bootstrap_inner
self.run()
File "...\Python39\lib\threading.py", line 892, in run
self._target(*self._args, **self._kwargs)
File "...\electrum\electrum\sql_db.py", line 71, in run_sql
self.logger.info("SQL thread terminated")
Message: 'SQL thread terminated'
Arguments: ()
2021-03-10 21:23:41 +01:00
|
|
|
|
2020-05-06 11:00:58 +02:00
|
|
|
|
2020-05-06 10:44:38 +02:00
|
|
|
class PaymentDone(Exception):
    """Raised by test coroutines to signal that a payment completed."""
|
2021-03-23 17:17:43 +01:00
|
|
|
class SuccessfulTest(Exception):
    """Raised by test coroutines to break out of a task group on success."""
|
2020-05-06 10:44:38 +02:00
|
|
|
|
|
|
|
|
|
2023-02-18 06:44:30 +00:00
|
|
|
class TestPeer(ElectrumTestCase):
|
|
|
|
|
TESTNET = True
|
2019-03-06 06:17:52 +01:00
|
|
|
|
|
|
|
|
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # show debug-level logs on stderr while these tests run
        console_stderr_handler.setLevel(logging.DEBUG)
|
2019-02-10 19:17:04 +01:00
|
|
|
|
2018-10-25 18:28:18 +02:00
|
|
|
    def setUp(self):
        super().setUp()
        # lnworkers created by the prepare_* helpers; stopped and their temp
        # dirs removed in asyncTearDown
        self._lnworkers_created = []  # type: List[MockLNWallet]
|
2018-10-25 21:59:16 +02:00
|
|
|
|
2023-02-18 10:01:21 +00:00
|
|
|
    async def asyncTearDown(self):
        # clean up lnworkers
        async with OldTaskGroup() as group:
            # stop all workers concurrently (also stops their channel_db SQL threads)
            for lnworker in self._lnworkers_created:
                await group.spawn(lnworker.stop())
        for lnworker in self._lnworkers_created:
            # remove the per-worker temp dir created in MockLNWallet.__init__
            shutil.rmtree(lnworker._user_dir)
        self._lnworkers_created.clear()
        # reset any trampoline-node overrides installed by individual tests
        electrum.trampoline._TRAMPOLINE_NODES_UNITTESTS = {}
        await super().asyncTearDown()
|
2019-03-06 06:17:52 +01:00
|
|
|
|
2022-05-24 23:49:58 +02:00
|
|
|
    def prepare_peers(
            self, alice_channel: Channel, bob_channel: Channel,
            *, k1: Keypair = None, k2: Keypair = None,
    ):
        """Wire two MockLNWallets together over an in-memory transport pair.

        *alice_channel*/*bob_channel* are the two sides of the same channel
        (as produced by create_test_channels). Returns
        (peer1, peer2, worker1, worker2, tx_queue1, tx_queue2).
        """
        if k1 is None:
            k1 = keypair()
        if k2 is None:
            k2 = keypair()
        alice_channel.node_id = k2.pubkey
        bob_channel.node_id = k1.pubkey
        t1, t2 = transport_pair(k1, k2, alice_channel.name, bob_channel.name)
        q1, q2 = asyncio.Queue(), asyncio.Queue()
        # each worker is named after its remote, matching the channel naming
        w1 = MockLNWallet(local_keypair=k1, chans=[alice_channel], tx_queue=q1, name=bob_channel.name)
        w2 = MockLNWallet(local_keypair=k2, chans=[bob_channel], tx_queue=q2, name=alice_channel.name)
        self._lnworkers_created.extend([w1, w2])
        p1 = PeerInTests(w1, k2.pubkey, t1)
        p2 = PeerInTests(w2, k1.pubkey, t2)
        w1._peers[p1.pubkey] = p1
        w2._peers[p2.pubkey] = p2
        # mark_open won't work if state is already OPEN.
        # so set it to FUNDED
        alice_channel._state = ChannelState.FUNDED
        bob_channel._state = ChannelState.FUNDED
        # this populates the channel graph:
        p1.mark_open(alice_channel)
        p2.mark_open(bob_channel)
        return p1, p2, w1, w2, q1, q2
|
|
|
|
|
|
2021-07-16 14:18:29 +02:00
|
|
|
def prepare_chans_and_peers_in_graph(self, graph_definition) -> Graph:
    """Build a small Lightning network in-memory from *graph_definition*.

    graph_definition maps node-name -> {'channels': {peer_name: channel_def},
    'config': {...}}. For every channel definition both directions are
    created, wired to mock transports, funded, and announced via mark_open()
    so the channel graph is populated. Returns a Graph(workers, peers, channels).
    """
    keys = {k: keypair() for k in graph_definition}
    txs_queues = {k: asyncio.Queue() for k in graph_definition}
    channels = {}  # type: Dict[Tuple[str, str], Channel]
    transports = {}
    workers = {}  # type: Dict[str, MockLNWallet]
    peers = {}

    # create channels
    for a, definition in graph_definition.items():
        for b, channel_def in definition.get('channels', {}).items():
            channel_ab, channel_ba = create_test_channels(
                alice_name=a,
                bob_name=b,
                alice_pubkey=keys[a].pubkey,
                bob_pubkey=keys[b].pubkey,
                local_msat=channel_def['local_balance_msat'],
                remote_msat=channel_def['remote_balance_msat'],
            )
            channels[(a, b)], channels[(b, a)] = channel_ab, channel_ba
            transport_ab, transport_ba = transport_pair(keys[a], keys[b], channel_ab.name, channel_ba.name)
            transports[(a, b)], transports[(b, a)] = transport_ab, transport_ba
            # set fees
            channel_ab.forwarding_fee_proportional_millionths = channel_def['local_fee_rate_millionths']
            channel_ab.forwarding_fee_base_msat = channel_def['local_base_fee_msat']
            channel_ba.forwarding_fee_proportional_millionths = channel_def['remote_fee_rate_millionths']
            channel_ba.forwarding_fee_base_msat = channel_def['remote_base_fee_msat']

    # create workers and peers
    for a, definition in graph_definition.items():
        channels_of_node = [c for k, c in channels.items() if k[0] == a]
        workers[a] = MockLNWallet(local_keypair=keys[a], chans=channels_of_node, tx_queue=txs_queues[a], name=a)
    # track the wallets so the test tearDown can stop them
    self._lnworkers_created.extend(list(workers.values()))

    # create peers
    for ab in channels.keys():
        peers[ab] = Peer(workers[ab[0]], keys[ab[1]].pubkey, transports[ab])

    # add peers to workers
    for a, w in workers.items():
        for ab, peer_ab in peers.items():
            if ab[0] == a:
                w._peers[peer_ab.pubkey] = peer_ab

    # set forwarding properties
    # (renamed loop var from 'property' so as not to shadow the builtin)
    for a, definition in graph_definition.items():
        for prop in definition.get('config', {}).items():
            workers[a].network.config.set_key(*prop)

    # mark_open won't work if state is already OPEN.
    # so set it to FUNDED
    for channel_ab in channels.values():
        channel_ab._state = ChannelState.FUNDED

    # this populates the channel graph:
    for ab, peer_ab in peers.items():
        peer_ab.mark_open(channels[ab])

    graph = Graph(
        workers=workers,
        peers=peers,
        channels=channels,
    )
    # debug aid: print node name -> pubkey mapping
    for a in workers:
        print(f"{a:5s}: {keys[a].pubkey}")
        print(f"       {keys[a].pubkey.hex()}")
    return graph
|
2020-05-06 11:00:58 +02:00
|
|
|
|
2018-11-02 19:16:42 +01:00
|
|
|
@staticmethod
def prepare_invoice(
        w2: MockLNWallet,  # receiver
        *,
        amount_msat=100_000_000,
        include_routing_hints=False,
) -> Tuple[LnAddr, str]:
    """Create a BOLT-11 invoice paid *to* w2 and register it with w2.

    Saves the preimage and payment info in w2 (so it can fulfill the HTLC),
    then encodes and re-decodes the invoice. Returns (lnaddr, invoice_str);
    the returned lnaddr is the decoded one, which has the pubkey set.
    """
    amount_btc = amount_msat/Decimal(COIN*1000)
    payment_preimage = os.urandom(32)
    RHASH = sha256(payment_preimage)
    info = PaymentInfo(RHASH, amount_msat, RECEIVED, PR_UNPAID)
    w2.save_preimage(RHASH, payment_preimage)
    w2.save_payment_info(info)
    if include_routing_hints:
        routing_hints, trampoline_hints = w2.calc_routing_hints_for_invoice(amount_msat)
    else:
        routing_hints = []
        trampoline_hints = []
    invoice_features = w2.features.for_invoice()
    # only include a payment secret if the receiver's features advertise support
    if invoice_features.supports(LnFeatures.PAYMENT_SECRET_OPT):
        payment_secret = derive_payment_secret_from_payment_preimage(payment_preimage)
    else:
        payment_secret = None
    lnaddr1 = LnAddr(
        paymenthash=RHASH,
        amount=amount_btc,
        tags=[('c', lnutil.MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE),
              ('d', 'coffee'),
              ('9', invoice_features),
             ] + routing_hints + trampoline_hints,
        payment_secret=payment_secret,
    )
    invoice = lnencode(lnaddr1, w2.node_keypair.privkey)
    lnaddr2 = lndecode(invoice)  # unlike lnaddr1, this now has a pubkey set
    return lnaddr2, invoice
|
2018-11-02 19:16:42 +01:00
|
|
|
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_reestablish(self):
    """Both peers disconnect and then successfully re-establish their channel."""
    alice_channel, bob_channel = create_test_channels()
    p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
    for chan in (alice_channel, bob_channel):
        chan.peer_state = PeerState.DISCONNECTED
    async def reestablish():
        await asyncio.gather(
            p1.reestablish_channel(alice_channel),
            p2.reestablish_channel(bob_channel))
        self.assertEqual(alice_channel.peer_state, PeerState.GOOD)
        self.assertEqual(bob_channel.peer_state, PeerState.GOOD)
        # test is done; tear down the message loops / htlc switches
        gath.cancel()
    # fix: the original started p1.htlc_switch() twice and never started
    # p2.htlc_switch(); run one switch per peer like the sibling tests do.
    gath = asyncio.gather(reestablish(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
    with self.assertRaises(asyncio.CancelledError):
        await gath
|
|
|
|
|
|
2020-03-04 18:54:20 +01:00
|
|
|
@needs_test_with_all_chacha20_implementations
async def test_reestablish_with_old_state(self):
    """Alice reconnects with a revoked (pre-payment) channel state.

    Bob must detect the stale state during channel_reestablish, treat Alice
    as misbehaving, and force-close the channel.
    """
    random_seed = os.urandom(32)
    alice_channel, bob_channel = create_test_channels(random_seed=random_seed)
    alice_channel_0, bob_channel_0 = create_test_channels(random_seed=random_seed)  # these are identical
    p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
    lnaddr, pay_req = self.prepare_invoice(w2)
    async def pay():
        result, log = await w1.pay_invoice(pay_req)
        self.assertEqual(result, True)
        gath.cancel()
    gath = asyncio.gather(pay(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
    with self.assertRaises(asyncio.CancelledError):
        await gath
    # reconnect, but Alice now uses the snapshot taken *before* the payment,
    # i.e. a revoked state (alice_channel_0), against Bob's current state
    p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel_0, bob_channel)
    for chan in (alice_channel_0, bob_channel):
        chan.peer_state = PeerState.DISCONNECTED
    async def reestablish():
        await asyncio.gather(
            p1.reestablish_channel(alice_channel_0),
            p2.reestablish_channel(bob_channel))
    gath = asyncio.gather(reestablish(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
    with self.assertRaises(lnutil.RemoteMisbehaving):
        await gath
    # Bob refuses the stale state and force-closes
    self.assertEqual(alice_channel_0.peer_state, PeerState.BAD)
    self.assertEqual(bob_channel._state, ChannelState.FORCE_CLOSING)
|
2020-02-12 10:22:22 +01:00
|
|
|
|
2022-05-24 23:49:58 +02:00
|
|
|
@staticmethod
def _send_fake_htlc(peer: Peer, chan: Channel) -> UpdateAddHtlc:
    """Add a dummy HTLC to *chan* and send the matching update_add_htlc via *peer*.

    Returns the HTLC as stored in the channel (with its assigned htlc_id).
    """
    proposed = UpdateAddHtlc(amount_msat=10000, payment_hash=os.urandom(32), cltv_expiry=999, timestamp=1)
    added = chan.add_htlc(proposed)
    msg_kwargs = dict(
        channel_id=chan.channel_id,
        id=added.htlc_id,
        cltv_expiry=added.cltv_expiry,
        amount_msat=added.amount_msat,
        payment_hash=added.payment_hash,
        onion_routing_packet=1366 * b"0",
    )
    peer.send_message("update_add_htlc", **msg_kwargs)
    return added
|
|
|
|
|
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_reestablish_replay_messages_rev_then_sig(self):
    """
    See https://github.com/lightning/bolts/pull/810#issue-728299277

    Rev then Sig
    A            B
     <---add-----
     ----add---->
     <---sig-----
     ----rev----x
     ----sig----x

    A needs to retransmit:
     ----rev--> (note that 'add' can be first too)
     ----add-->
     ----sig-->
    """
    chan_AB, chan_BA = create_test_channels()
    k1, k2 = keypair(), keypair()
    # note: we don't start peer.htlc_switch() so that the fake htlcs are left alone.
    async def f():
        p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(chan_AB, chan_BA, k1=k1, k2=k2)
        async with OldTaskGroup() as group:
            await group.spawn(p1._message_loop())
            await group.spawn(p2._message_loop())
            await p1.initialized
            await p2.initialized
            # exchange one fake HTLC in each direction (add / add)
            self._send_fake_htlc(p2, chan_BA)
            self._send_fake_htlc(p1, chan_AB)
            p2.transport.queue.put_nowait(asyncio.Event())  # break Bob's incoming pipe
            # Bob signs (B->A sig); Alice's rev+sig replies will be lost
            self.assertTrue(p2.maybe_send_commitment(chan_BA))
            await p1.received_commitsig_event.wait()
            await group.cancel_remaining()
        # simulating disconnection. recreate transports.
        p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(chan_AB, chan_BA, k1=k1, k2=k2)
        for chan in (chan_AB, chan_BA):
            chan.peer_state = PeerState.DISCONNECTED
        async with OldTaskGroup() as group:
            await group.spawn(p1._message_loop())
            await group.spawn(p2._message_loop())
            with self.assertLogs('electrum', level='INFO') as logs:
                async with OldTaskGroup() as group2:
                    await group2.spawn(p1.reestablish_channel(chan_AB))
                    await group2.spawn(p2.reestablish_channel(chan_BA))
            # Alice must replay rev first, then the unacked add + sig
            self.assertTrue(any(("alice->bob" in msg and
                                 "replaying a revoke_and_ack first" in msg) for msg in logs.output))
            self.assertTrue(any(("alice->bob" in msg and
                                 "replayed 2 unacked messages. ['update_add_htlc', 'commitment_signed']" in msg) for msg in logs.output))
            self.assertEqual(chan_AB.peer_state, PeerState.GOOD)
            self.assertEqual(chan_BA.peer_state, PeerState.GOOD)
            raise SuccessfulTest()
    with self.assertRaises(SuccessfulTest):
        await f()
|
2022-05-24 23:49:58 +02:00
|
|
|
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_reestablish_replay_messages_sig_then_rev(self):
    """
    See https://github.com/lightning/bolts/pull/810#issue-728299277

    Sig then Rev
    A            B
     <---add-----
     ----add---->
     ----sig----x
     <---sig-----
     ----rev----x

    A needs to retransmit:
     ----add-->
     ----sig-->
     ----rev-->
    """
    chan_AB, chan_BA = create_test_channels()
    k1, k2 = keypair(), keypair()
    # note: we don't start peer.htlc_switch() so that the fake htlcs are left alone.
    async def f():
        p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(chan_AB, chan_BA, k1=k1, k2=k2)
        async with OldTaskGroup() as group:
            await group.spawn(p1._message_loop())
            await group.spawn(p2._message_loop())
            await p1.initialized
            await p2.initialized
            # exchange one fake HTLC in each direction (add / add)
            self._send_fake_htlc(p2, chan_BA)
            self._send_fake_htlc(p1, chan_AB)
            p2.transport.queue.put_nowait(asyncio.Event())  # break Bob's incoming pipe
            # both sign; Alice's sig and rev are lost on the broken pipe
            self.assertTrue(p1.maybe_send_commitment(chan_AB))
            self.assertTrue(p2.maybe_send_commitment(chan_BA))
            await p1.received_commitsig_event.wait()
            await group.cancel_remaining()
        # simulating disconnection. recreate transports.
        p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(chan_AB, chan_BA, k1=k1, k2=k2)
        for chan in (chan_AB, chan_BA):
            chan.peer_state = PeerState.DISCONNECTED
        async with OldTaskGroup() as group:
            await group.spawn(p1._message_loop())
            await group.spawn(p2._message_loop())
            with self.assertLogs('electrum', level='INFO') as logs:
                async with OldTaskGroup() as group2:
                    await group2.spawn(p1.reestablish_channel(chan_AB))
                    await group2.spawn(p2.reestablish_channel(chan_BA))
            # Alice must replay the unacked add + sig, then rev last
            self.assertTrue(any(("alice->bob" in msg and
                                 "replaying a revoke_and_ack last" in msg) for msg in logs.output))
            self.assertTrue(any(("alice->bob" in msg and
                                 "replayed 2 unacked messages. ['update_add_htlc', 'commitment_signed']" in msg) for msg in logs.output))
            self.assertEqual(chan_AB.peer_state, PeerState.GOOD)
            self.assertEqual(chan_BA.peer_state, PeerState.GOOD)
            raise SuccessfulTest()
    with self.assertRaises(SuccessfulTest):
        await f()
|
2022-05-24 23:49:58 +02:00
|
|
|
|
2023-02-18 10:01:21 +00:00
|
|
|
async def _test_simple_payment(self, trampoline: bool):
    """Alice pays Bob a single HTLC via direct channel.

    If *trampoline* is True, Alice's gossip/channel_db is disabled so she
    must route via a trampoline node (Bob is declared as one below).
    """
    alice_channel, bob_channel = create_test_channels()
    p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
    async def turn_on_trampoline_alice():
        # no channel_db == trampoline routing for Alice
        if w1.network.channel_db:
            w1.network.channel_db.stop()
            await w1.network.channel_db.stopped_event.wait()
            w1.network.channel_db = None
    async def pay(lnaddr, pay_req):
        self.assertEqual(PR_UNPAID, w2.get_payment_status(lnaddr.paymenthash))
        result, log = await w1.pay_invoice(pay_req)
        self.assertTrue(result)
        self.assertEqual(PR_PAID, w2.get_payment_status(lnaddr.paymenthash))
        # signal success by raising; caught by assertRaises below
        raise PaymentDone()
    async def f():
        if trampoline:
            await turn_on_trampoline_alice()
        async with OldTaskGroup() as group:
            await group.spawn(p1._message_loop())
            await group.spawn(p1.htlc_switch())
            await group.spawn(p2._message_loop())
            await group.spawn(p2.htlc_switch())
            await asyncio.sleep(0.01)
            lnaddr, pay_req = self.prepare_invoice(w2)
            invoice_features = lnaddr.get_features()
            self.assertFalse(invoice_features.supports(LnFeatures.BASIC_MPP_OPT))
            await group.spawn(pay(lnaddr, pay_req))
    # declare bob as trampoline node
    electrum.trampoline._TRAMPOLINE_NODES_UNITTESTS = {
        'bob': LNPeerAddr(host="127.0.0.1", port=9735, pubkey=w2.node_keypair.pubkey),
    }
    with self.assertRaises(PaymentDone):
        await f()
|
2018-11-02 19:16:42 +01:00
|
|
|
|
2022-09-20 11:28:51 +02:00
|
|
|
@needs_test_with_all_chacha20_implementations
async def test_simple_payment(self):
    # single-HTLC direct-channel payment, normal (non-trampoline) routing
    await self._test_simple_payment(trampoline=False)
|
2022-09-20 11:28:51 +02:00
|
|
|
|
|
|
|
|
@needs_test_with_all_chacha20_implementations
async def test_simple_payment_trampoline(self):
    # same payment, but Alice has no channel_db and routes via trampoline
    await self._test_simple_payment(trampoline=True)
|
2022-09-20 11:28:51 +02:00
|
|
|
|
2021-01-28 20:00:48 +01:00
|
|
|
@needs_test_with_all_chacha20_implementations
async def test_payment_race(self):
    """Alice and Bob pay each other simultaneously.
    They both send 'update_add_htlc' and receive each other's update
    before sending 'commitment_signed'. Neither party should fulfill
    the respective HTLCs until those are irrevocably committed to.
    """
    alice_channel, bob_channel = create_test_channels()
    p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
    async def pay():
        await asyncio.wait_for(p1.initialized, 1)
        await asyncio.wait_for(p2.initialized, 1)
        # prep
        _maybe_send_commitment1 = p1.maybe_send_commitment
        _maybe_send_commitment2 = p2.maybe_send_commitment
        lnaddr2, pay_req2 = self.prepare_invoice(w2)
        lnaddr1, pay_req1 = self.prepare_invoice(w1)
        # create the htlc queues now (side-effecting defaultdict)
        q1 = w1.sent_htlcs[lnaddr2.paymenthash]
        q2 = w2.sent_htlcs[lnaddr1.paymenthash]
        # alice sends htlc BUT NOT COMMITMENT_SIGNED
        # (commitment sending is stubbed out, so only update_add_htlc goes out)
        p1.maybe_send_commitment = lambda x: None
        route1 = (await w1.create_routes_from_invoice(lnaddr2.get_amount_msat(), decoded_invoice=lnaddr2))[0][0]
        amount_msat = lnaddr2.get_amount_msat()
        await w1.pay_to_route(
            route=route1,
            amount_msat=amount_msat,
            total_msat=amount_msat,
            amount_receiver_msat=amount_msat,
            payment_hash=lnaddr2.paymenthash,
            min_cltv_expiry=lnaddr2.get_min_final_cltv_expiry(),
            payment_secret=lnaddr2.payment_secret,
            trampoline_fee_level=0,
            trampoline_route=None,
        )
        p1.maybe_send_commitment = _maybe_send_commitment1
        # bob sends htlc BUT NOT COMMITMENT_SIGNED
        p2.maybe_send_commitment = lambda x: None
        route2 = (await w2.create_routes_from_invoice(lnaddr1.get_amount_msat(), decoded_invoice=lnaddr1))[0][0]
        amount_msat = lnaddr1.get_amount_msat()
        await w2.pay_to_route(
            route=route2,
            amount_msat=amount_msat,
            total_msat=amount_msat,
            amount_receiver_msat=amount_msat,
            payment_hash=lnaddr1.paymenthash,
            min_cltv_expiry=lnaddr1.get_min_final_cltv_expiry(),
            payment_secret=lnaddr1.payment_secret,
            trampoline_fee_level=0,
            trampoline_route=None,
        )
        p2.maybe_send_commitment = _maybe_send_commitment2
        # sleep a bit so that they both receive msgs sent so far
        await asyncio.sleep(0.2)
        # now they both send COMMITMENT_SIGNED
        p1.maybe_send_commitment(alice_channel)
        p2.maybe_send_commitment(bob_channel)

        # both payments must eventually succeed
        htlc_log1 = await q1.get()
        assert htlc_log1.success
        htlc_log2 = await q2.get()
        assert htlc_log2.success
        raise PaymentDone()

    async def f():
        async with OldTaskGroup() as group:
            await group.spawn(p1._message_loop())
            await group.spawn(p1.htlc_switch())
            await group.spawn(p2._message_loop())
            await group.spawn(p2.htlc_switch())
            await asyncio.sleep(0.01)
            await group.spawn(pay())
    with self.assertRaises(PaymentDone):
        await f()
|
2021-01-28 20:00:48 +01:00
|
|
|
|
2020-03-07 05:05:05 +01:00
|
|
|
#@unittest.skip("too expensive")
#@needs_test_with_all_chacha20_implementations
async def test_payments_stresstest(self):
    """Alice sends many concurrent payments to Bob over one channel,
    then both sides' balances are checked against the expected totals."""
    alice_channel, bob_channel = create_test_channels()
    p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
    alice_init_balance_msat = alice_channel.balance(HTLCOwner.LOCAL)
    bob_init_balance_msat = bob_channel.balance(HTLCOwner.LOCAL)
    num_payments = 50
    payment_value_msat = 10_000_000  # make it large enough so that there are actually HTLCs on the ctx
    # bound concurrency so we don't exceed the channel's max in-flight HTLCs
    max_htlcs_in_flight = asyncio.Semaphore(5)
    async def single_payment(pay_req):
        async with max_htlcs_in_flight:
            await w1.pay_invoice(pay_req)
    async def many_payments():
        async with OldTaskGroup() as group:
            for i in range(num_payments):
                lnaddr, pay_req = self.prepare_invoice(w2, amount_msat=payment_value_msat)
                await group.spawn(single_payment(pay_req))
        gath.cancel()
    gath = asyncio.gather(many_payments(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
    with self.assertRaises(asyncio.CancelledError):
        await gath
    # both views of both channels must agree on the final balances
    self.assertEqual(alice_init_balance_msat - num_payments * payment_value_msat, alice_channel.balance(HTLCOwner.LOCAL))
    self.assertEqual(alice_init_balance_msat - num_payments * payment_value_msat, bob_channel.balance(HTLCOwner.REMOTE))
    self.assertEqual(bob_init_balance_msat + num_payments * payment_value_msat, bob_channel.balance(HTLCOwner.LOCAL))
    self.assertEqual(bob_init_balance_msat + num_payments * payment_value_msat, alice_channel.balance(HTLCOwner.REMOTE))
|
2020-03-06 21:54:05 +01:00
|
|
|
|
2020-05-06 11:00:58 +02:00
|
|
|
@needs_test_with_all_chacha20_implementations
async def test_payment_multihop(self):
    """Alice pays Dave across the multi-hop 'square' test graph."""
    graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
    peers = graph.peers.values()
    async def pay(lnaddr, pay_req):
        self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
        result, log = await graph.workers['alice'].pay_invoice(pay_req)
        self.assertTrue(result)
        self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
        # signal success by raising; caught by assertRaises below
        raise PaymentDone()
    async def f():
        async with OldTaskGroup() as group:
            for peer in peers:
                await group.spawn(peer._message_loop())
                await group.spawn(peer.htlc_switch())
            # wait for all peers to finish their init handshake before paying
            for peer in peers:
                await peer.initialized
            lnaddr, pay_req = self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
            await group.spawn(pay(lnaddr, pay_req))
    with self.assertRaises(PaymentDone):
        await f()
|
2020-05-06 11:00:58 +02:00
|
|
|
|
|
|
|
|
    @needs_test_with_all_chacha20_implementations
    async def test_payment_multihop_with_preselected_path(self):
        """Pay an invoice while forcing the route via ``full_path``.

        Two malformed paths must raise LNPathInconsistent; a well-formed
        a->b->d path must be used verbatim for the successful payment.
        """
        graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
        peers = graph.peers.values()

        async def pay(pay_req):
            with self.subTest(msg="bad path: edges do not chain together"):
                # First edge ends at carol but second edge starts at bob.
                path = [PathEdge(start_node=graph.workers['alice'].node_keypair.pubkey,
                                 end_node=graph.workers['carol'].node_keypair.pubkey,
                                 short_channel_id=graph.channels[('alice', 'bob')].short_channel_id),
                        PathEdge(start_node=graph.workers['bob'].node_keypair.pubkey,
                                 end_node=graph.workers['dave'].node_keypair.pubkey,
                                 short_channel_id=graph.channels['bob', 'dave'].short_channel_id)]
                with self.assertRaises(LNPathInconsistent):
                    await graph.workers['alice'].pay_invoice(pay_req, full_path=path)
            with self.subTest(msg="bad path: last node id differs from invoice pubkey"):
                # Path stops at bob, but the invoice was issued by dave.
                path = [PathEdge(start_node=graph.workers['alice'].node_keypair.pubkey,
                                 end_node=graph.workers['bob'].node_keypair.pubkey,
                                 short_channel_id=graph.channels[('alice', 'bob')].short_channel_id)]
                with self.assertRaises(LNPathInconsistent):
                    await graph.workers['alice'].pay_invoice(pay_req, full_path=path)
            with self.subTest(msg="good path"):
                path = [PathEdge(start_node=graph.workers['alice'].node_keypair.pubkey,
                                 end_node=graph.workers['bob'].node_keypair.pubkey,
                                 short_channel_id=graph.channels[('alice', 'bob')].short_channel_id),
                        PathEdge(start_node=graph.workers['bob'].node_keypair.pubkey,
                                 end_node=graph.workers['dave'].node_keypair.pubkey,
                                 short_channel_id=graph.channels['bob', 'dave'].short_channel_id)]
                result, log = await graph.workers['alice'].pay_invoice(pay_req, full_path=path)
                self.assertTrue(result)
                # The route actually taken must match the preselected path exactly.
                self.assertEqual(
                    [edge.short_channel_id for edge in path],
                    [edge.short_channel_id for edge in log[0].route])
            raise PaymentDone()

        async def f():
            async with OldTaskGroup() as group:
                for peer in peers:
                    await group.spawn(peer._message_loop())
                    await group.spawn(peer.htlc_switch())
                for peer in peers:
                    await peer.initialized
                lnaddr, pay_req = self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
                await group.spawn(pay(pay_req))

        with self.assertRaises(PaymentDone):
            await f()
|
2020-05-06 11:00:58 +02:00
|
|
|
|
|
|
|
|
    @needs_test_with_all_chacha20_implementations
    async def test_payment_multihop_temp_node_failure(self):
        """Both intermediate nodes (bob and carol) are configured to fail
        HTLCs with TEMPORARY_NODE_FAILURE, so the payment must fail and the
        invoice stays unpaid.
        """
        graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
        # Test-only config knob: make these nodes fail every forwarded HTLC.
        graph.workers['bob'].network.config.set_key('test_fail_htlcs_with_temp_node_failure', True)
        graph.workers['carol'].network.config.set_key('test_fail_htlcs_with_temp_node_failure', True)
        peers = graph.peers.values()

        async def pay(lnaddr, pay_req):
            self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
            result, log = await graph.workers['alice'].pay_invoice(pay_req)
            # Payment must fail and leave the invoice unpaid.
            self.assertFalse(result)
            self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
            # The failure reported back must be the configured onion error.
            self.assertEqual(OnionFailureCode.TEMPORARY_NODE_FAILURE, log[0].failure_msg.code)
            raise PaymentDone()

        async def f():
            async with OldTaskGroup() as group:
                for peer in peers:
                    await group.spawn(peer._message_loop())
                    await group.spawn(peer.htlc_switch())
                for peer in peers:
                    await peer.initialized
                lnaddr, pay_req = self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
                await group.spawn(pay(lnaddr, pay_req))

        with self.assertRaises(PaymentDone):
            await f()
|
2020-05-06 11:00:58 +02:00
|
|
|
|
2021-03-02 18:35:07 +01:00
|
|
|
    @needs_test_with_all_chacha20_implementations
    async def test_payment_multihop_route_around_failure(self):
        # Alice will pay Dave. Alice first tries A->C->D route, due to lower fees, but Carol
        # will fail the htlc and get blacklisted. Alice will then try A->B->D and succeed.
        graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
        graph.workers['carol'].network.config.set_key('test_fail_htlcs_with_temp_node_failure', True)
        peers = graph.peers.values()

        async def pay(lnaddr, pay_req):
            # Sanity: both channels start at their initial local balance.
            self.assertEqual(500000000000, graph.channels[('alice', 'bob')].balance(LOCAL))
            self.assertEqual(500000000000, graph.channels[('dave', 'bob')].balance(LOCAL))
            self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
            result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=2)
            # Exactly two attempts: one failed, one successful.
            self.assertEqual(2, len(log))
            self.assertTrue(result)
            self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
            # First attempt went via carol (cheaper) and failed ...
            self.assertEqual([graph.channels[('alice', 'carol')].short_channel_id, graph.channels[('carol', 'dave')].short_channel_id],
                             [edge.short_channel_id for edge in log[0].route])
            # ... second attempt routed around the failure via bob.
            self.assertEqual([graph.channels[('alice', 'bob')].short_channel_id, graph.channels[('bob', 'dave')].short_channel_id],
                             [edge.short_channel_id for edge in log[1].route])
            self.assertEqual(OnionFailureCode.TEMPORARY_NODE_FAILURE, log[0].failure_msg.code)
            # Alice's balance dropped by amount + routing fees.
            self.assertEqual(499899450000, graph.channels[('alice', 'bob')].balance(LOCAL))
            await asyncio.sleep(0.2)  # wait for COMMITMENT_SIGNED / REVACK msgs to update balance
            self.assertEqual(500100000000, graph.channels[('dave', 'bob')].balance(LOCAL))
            raise PaymentDone()

        async def f():
            async with OldTaskGroup() as group:
                for peer in peers:
                    await group.spawn(peer._message_loop())
                    await group.spawn(peer.htlc_switch())
                for peer in peers:
                    await peer.initialized
                lnaddr, pay_req = self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
                # This scenario assumes a single-part payment; the invoice
                # must not advertise MPP support.
                invoice_features = lnaddr.get_features()
                self.assertFalse(invoice_features.supports(LnFeatures.BASIC_MPP_OPT))
                await group.spawn(pay(lnaddr, pay_req))

        with self.assertRaises(PaymentDone):
            await f()
|
2021-03-02 18:35:07 +01:00
|
|
|
|
2021-03-17 09:32:23 +01:00
|
|
|
@needs_test_with_all_chacha20_implementations
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_payment_with_temp_channel_failure_and_liquidity_hints(self):
|
2021-03-17 09:32:23 +01:00
|
|
|
# prepare channels such that a temporary channel failure happens at c->d
|
2021-07-16 14:18:29 +02:00
|
|
|
graph_definition = GRAPH_DEFINITIONS['square_graph'].copy()
|
|
|
|
|
graph_definition['alice']['channels']['carol']['local_balance_msat'] = 200_000_000
|
|
|
|
|
graph_definition['alice']['channels']['carol']['remote_balance_msat'] = 200_000_000
|
|
|
|
|
graph_definition['carol']['channels']['dave']['local_balance_msat'] = 50_000_000
|
|
|
|
|
graph_definition['carol']['channels']['dave']['remote_balance_msat'] = 200_000_000
|
|
|
|
|
graph_definition['alice']['channels']['bob']['local_balance_msat'] = 200_000_000
|
|
|
|
|
graph_definition['alice']['channels']['bob']['remote_balance_msat'] = 200_000_000
|
|
|
|
|
graph_definition['bob']['channels']['dave']['local_balance_msat'] = 200_000_000
|
|
|
|
|
graph_definition['bob']['channels']['dave']['remote_balance_msat'] = 200_000_000
|
|
|
|
|
graph = self.prepare_chans_and_peers_in_graph(graph_definition)
|
|
|
|
|
|
2021-03-26 10:53:46 +01:00
|
|
|
# the payment happens in two attempts:
|
|
|
|
|
# 1. along a->c->d due to low fees with temp channel failure:
|
2021-03-17 09:32:23 +01:00
|
|
|
# with chanupd: ORPHANED, private channel update
|
2021-03-26 10:53:46 +01:00
|
|
|
# c->d gets a liquidity hint and gets blocked
|
|
|
|
|
# 2. along a->b->d with success
|
2021-03-17 09:32:23 +01:00
|
|
|
amount_to_pay = 100_000_000
|
2021-07-16 14:18:29 +02:00
|
|
|
peers = graph.peers.values()
|
2021-03-17 09:32:23 +01:00
|
|
|
async def pay(lnaddr, pay_req):
|
2021-07-16 14:18:29 +02:00
|
|
|
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
|
|
|
|
|
result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=3)
|
2021-03-17 09:32:23 +01:00
|
|
|
self.assertTrue(result)
|
2021-03-26 10:53:46 +01:00
|
|
|
self.assertEqual(2, len(log))
|
2021-07-16 14:18:29 +02:00
|
|
|
self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
|
2021-03-17 09:32:23 +01:00
|
|
|
self.assertEqual(OnionFailureCode.TEMPORARY_CHANNEL_FAILURE, log[0].failure_msg.code)
|
2021-03-09 08:47:30 +01:00
|
|
|
|
2021-07-16 14:18:29 +02:00
|
|
|
liquidity_hints = graph.workers['alice'].network.path_finder.liquidity_hints
|
|
|
|
|
pubkey_a = graph.workers['alice'].node_keypair.pubkey
|
|
|
|
|
pubkey_b = graph.workers['bob'].node_keypair.pubkey
|
|
|
|
|
pubkey_c = graph.workers['carol'].node_keypair.pubkey
|
|
|
|
|
pubkey_d = graph.workers['dave'].node_keypair.pubkey
|
2021-03-09 08:47:30 +01:00
|
|
|
# check liquidity hints for failing route:
|
2021-07-16 14:18:29 +02:00
|
|
|
hint_ac = liquidity_hints.get_hint(graph.channels[('alice', 'carol')].short_channel_id)
|
|
|
|
|
hint_cd = liquidity_hints.get_hint(graph.channels[('carol', 'dave')].short_channel_id)
|
2021-03-09 08:47:30 +01:00
|
|
|
self.assertEqual(amount_to_pay, hint_ac.can_send(pubkey_a < pubkey_c))
|
|
|
|
|
self.assertEqual(None, hint_ac.cannot_send(pubkey_a < pubkey_c))
|
|
|
|
|
self.assertEqual(None, hint_cd.can_send(pubkey_c < pubkey_d))
|
|
|
|
|
self.assertEqual(amount_to_pay, hint_cd.cannot_send(pubkey_c < pubkey_d))
|
|
|
|
|
# check liquidity hints for successful route:
|
2021-07-16 14:18:29 +02:00
|
|
|
hint_ab = liquidity_hints.get_hint(graph.channels[('alice', 'bob')].short_channel_id)
|
|
|
|
|
hint_bd = liquidity_hints.get_hint(graph.channels[('bob', 'dave')].short_channel_id)
|
2021-03-09 08:47:30 +01:00
|
|
|
self.assertEqual(amount_to_pay, hint_ab.can_send(pubkey_a < pubkey_b))
|
|
|
|
|
self.assertEqual(None, hint_ab.cannot_send(pubkey_a < pubkey_b))
|
|
|
|
|
self.assertEqual(amount_to_pay, hint_bd.can_send(pubkey_b < pubkey_d))
|
|
|
|
|
self.assertEqual(None, hint_bd.cannot_send(pubkey_b < pubkey_d))
|
|
|
|
|
|
2021-03-17 09:32:23 +01:00
|
|
|
raise PaymentDone()
|
|
|
|
|
async def f():
|
2022-02-08 12:34:49 +01:00
|
|
|
async with OldTaskGroup() as group:
|
2021-03-17 09:32:23 +01:00
|
|
|
for peer in peers:
|
|
|
|
|
await group.spawn(peer._message_loop())
|
|
|
|
|
await group.spawn(peer.htlc_switch())
|
2022-09-23 12:41:42 +02:00
|
|
|
for peer in peers:
|
|
|
|
|
await peer.initialized
|
2022-03-29 17:42:04 +02:00
|
|
|
lnaddr, pay_req = self.prepare_invoice(graph.workers['dave'], amount_msat=amount_to_pay, include_routing_hints=True)
|
2021-03-17 09:32:23 +01:00
|
|
|
await group.spawn(pay(lnaddr, pay_req))
|
|
|
|
|
with self.assertRaises(PaymentDone):
|
2023-02-18 10:01:21 +00:00
|
|
|
await f()
|
2021-03-17 09:32:23 +01:00
|
|
|
|
2023-02-18 10:01:21 +00:00
|
|
|
    async def _run_mpp(self, graph, fail_kwargs, success_kwargs):
        """Tests a multipart payment scenario for failing and successful cases.

        Alice pays Dave 600k sat, which exceeds each single channel's 500k sat
        capacity, so the payment must be split. ``fail_kwargs`` /
        ``success_kwargs`` are kwarg dicts for the inner ``pay`` coroutine;
        the failing run must end in NoPathFound, the successful one in
        PaymentDone. Pass a falsy dict to skip either phase.
        """
        self.assertEqual(500_000_000_000, graph.channels[('alice', 'bob')].balance(LOCAL))
        self.assertEqual(500_000_000_000, graph.channels[('alice', 'carol')].balance(LOCAL))
        # more than the capacity of either single channel -> forces MPP
        amount_to_pay = 600_000_000_000
        peers = graph.peers.values()

        async def pay(
                attempts=1,
                alice_uses_trampoline=False,
                bob_forwarding=True,
                mpp_invoice=True,
                disable_trampoline_receiving=False,
        ):
            if mpp_invoice:
                graph.workers['dave'].features |= LnFeatures.BASIC_MPP_OPT
            if disable_trampoline_receiving:
                graph.workers['dave'].features &= ~LnFeatures.OPTION_TRAMPOLINE_ROUTING_OPT_ELECTRUM
            if not bob_forwarding:
                graph.workers['bob'].enable_htlc_forwarding = False
            if alice_uses_trampoline:
                # Trampoline mode implies no local channel_db: stop and drop it.
                if graph.workers['alice'].network.channel_db:
                    graph.workers['alice'].network.channel_db.stop()
                    await graph.workers['alice'].network.channel_db.stopped_event.wait()
                    graph.workers['alice'].network.channel_db = None
            else:
                assert graph.workers['alice'].network.channel_db is not None
            lnaddr, pay_req = self.prepare_invoice(graph.workers['dave'], include_routing_hints=True, amount_msat=amount_to_pay)
            self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
            result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=attempts)
            if not bob_forwarding:
                # reset to previous state, sleep 2s so that the second htlc can time out
                graph.workers['bob'].enable_htlc_forwarding = True
                await asyncio.sleep(2)
            if result:
                self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
                raise PaymentDone()
            else:
                raise NoPathFound()

        async def f(kwargs):
            async with OldTaskGroup() as group:
                for peer in peers:
                    await group.spawn(peer._message_loop())
                    await group.spawn(peer.htlc_switch())
                for peer in peers:
                    await peer.initialized
                await group.spawn(pay(**kwargs))

        if fail_kwargs:
            with self.assertRaises(NoPathFound):
                await f(fail_kwargs)
        if success_kwargs:
            with self.assertRaises(PaymentDone):
                await f(success_kwargs)
|
2021-03-10 17:09:07 +01:00
|
|
|
|
|
|
|
|
@needs_test_with_all_chacha20_implementations
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_payment_multipart_with_timeout(self):
|
2021-07-16 14:18:29 +02:00
|
|
|
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
|
2023-02-18 10:01:21 +00:00
|
|
|
await self._run_mpp(graph, {'bob_forwarding': False}, {'bob_forwarding': True})
|
2021-03-03 12:52:52 +01:00
|
|
|
|
2021-03-05 13:00:24 +01:00
|
|
|
@needs_test_with_all_chacha20_implementations
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_payment_multipart(self):
|
2021-07-16 14:18:29 +02:00
|
|
|
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
|
2023-02-18 10:01:21 +00:00
|
|
|
await self._run_mpp(graph, {'mpp_invoice': False}, {'mpp_invoice': True})
|
2021-03-05 13:00:24 +01:00
|
|
|
|
2023-02-18 10:01:21 +00:00
|
|
|
async def _run_trampoline_payment(self, is_legacy, direct, drop_dave=None):
|
|
|
|
|
if drop_dave is None: drop_dave = []
|
2021-07-20 16:53:58 +02:00
|
|
|
async def turn_on_trampoline_alice():
|
|
|
|
|
if graph.workers['alice'].network.channel_db:
|
|
|
|
|
graph.workers['alice'].network.channel_db.stop()
|
|
|
|
|
await graph.workers['alice'].network.channel_db.stopped_event.wait()
|
|
|
|
|
graph.workers['alice'].network.channel_db = None
|
|
|
|
|
|
|
|
|
|
async def pay(lnaddr, pay_req):
|
|
|
|
|
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
|
|
|
|
|
result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=10)
|
2022-09-09 19:52:36 +02:00
|
|
|
if result:
|
|
|
|
|
self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
|
|
|
|
|
raise PaymentDone()
|
|
|
|
|
else:
|
|
|
|
|
raise NoPathFound()
|
|
|
|
|
|
|
|
|
|
def do_drop_dave(t):
|
|
|
|
|
# this will trigger UNKNOWN_NEXT_PEER
|
|
|
|
|
dave_node_id = graph.workers['dave'].node_keypair.pubkey
|
|
|
|
|
graph.workers[t].peers.pop(dave_node_id)
|
2021-07-20 16:53:58 +02:00
|
|
|
|
|
|
|
|
async def f():
|
|
|
|
|
await turn_on_trampoline_alice()
|
2022-02-08 12:34:49 +01:00
|
|
|
async with OldTaskGroup() as group:
|
2021-07-20 16:53:58 +02:00
|
|
|
for peer in peers:
|
|
|
|
|
await group.spawn(peer._message_loop())
|
|
|
|
|
await group.spawn(peer.htlc_switch())
|
2022-09-23 12:41:42 +02:00
|
|
|
for peer in peers:
|
|
|
|
|
await peer.initialized
|
2022-03-29 17:42:04 +02:00
|
|
|
lnaddr, pay_req = self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
|
2022-09-09 19:52:36 +02:00
|
|
|
for p in drop_dave:
|
|
|
|
|
do_drop_dave(p)
|
2021-07-20 16:53:58 +02:00
|
|
|
await group.spawn(pay(lnaddr, pay_req))
|
|
|
|
|
|
2022-09-03 11:16:37 +02:00
|
|
|
graph_definition = GRAPH_DEFINITIONS['square_graph'].copy()
|
2022-09-09 19:52:36 +02:00
|
|
|
if not direct:
|
|
|
|
|
# deplete channel from alice to carol
|
|
|
|
|
graph_definition['alice']['channels']['carol'] = depleted_channel
|
|
|
|
|
# insert a channel from bob to carol
|
|
|
|
|
graph_definition['bob']['channels']['carol'] = high_fee_channel
|
|
|
|
|
|
2022-09-03 11:16:37 +02:00
|
|
|
graph = self.prepare_chans_and_peers_in_graph(graph_definition)
|
|
|
|
|
peers = graph.peers.values()
|
|
|
|
|
if is_legacy:
|
2022-09-09 19:52:36 +02:00
|
|
|
# turn off trampoline features in invoice
|
2023-01-13 12:46:55 +01:00
|
|
|
graph.workers['dave'].features = graph.workers['dave'].features ^ LnFeatures.OPTION_TRAMPOLINE_ROUTING_OPT_ELECTRUM
|
2021-07-20 16:53:58 +02:00
|
|
|
|
2022-09-03 11:16:37 +02:00
|
|
|
# declare routing nodes as trampoline nodes
|
|
|
|
|
electrum.trampoline._TRAMPOLINE_NODES_UNITTESTS = {
|
|
|
|
|
graph.workers['bob'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['bob'].node_keypair.pubkey),
|
|
|
|
|
graph.workers['carol'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['carol'].node_keypair.pubkey),
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-18 10:01:21 +00:00
|
|
|
await f()
|
2022-09-09 19:52:36 +02:00
|
|
|
|
|
|
|
|
@needs_test_with_all_chacha20_implementations
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_payment_trampoline_legacy(self):
|
2022-09-03 11:16:37 +02:00
|
|
|
with self.assertRaises(PaymentDone):
|
2023-02-18 10:01:21 +00:00
|
|
|
await self._run_trampoline_payment(is_legacy=True, direct=False)
|
2022-09-03 11:16:37 +02:00
|
|
|
|
|
|
|
|
@needs_test_with_all_chacha20_implementations
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_payment_trampoline_e2e_direct(self):
|
2022-09-09 19:52:36 +02:00
|
|
|
with self.assertRaises(PaymentDone):
|
2023-02-18 10:01:21 +00:00
|
|
|
await self._run_trampoline_payment(is_legacy=False, direct=True)
|
2022-09-03 11:16:37 +02:00
|
|
|
|
|
|
|
|
@needs_test_with_all_chacha20_implementations
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_payment_trampoline_e2e_indirect(self):
|
2022-09-09 19:52:36 +02:00
|
|
|
# must use two trampolines
|
|
|
|
|
with self.assertRaises(PaymentDone):
|
2023-02-18 10:01:21 +00:00
|
|
|
await self._run_trampoline_payment(is_legacy=False, direct=False, drop_dave=['bob'])
|
2022-09-09 19:52:36 +02:00
|
|
|
# both trampolines drop dave
|
|
|
|
|
with self.assertRaises(NoPathFound):
|
2023-02-18 10:01:21 +00:00
|
|
|
await self._run_trampoline_payment(is_legacy=False, direct=False, drop_dave=['bob', 'carol'])
|
2021-07-20 16:53:58 +02:00
|
|
|
|
2021-03-05 13:00:24 +01:00
|
|
|
    @needs_test_with_all_chacha20_implementations
    async def test_payment_multipart_trampoline_e2e(self):
        """MPP combined with end-to-end trampoline routing."""
        graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
        # Declare bob and carol as trampoline nodes for this test run.
        electrum.trampoline._TRAMPOLINE_NODES_UNITTESTS = {
            graph.workers['bob'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['bob'].node_keypair.pubkey),
            graph.workers['carol'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['carol'].node_keypair.pubkey),
        }
        # end-to-end trampoline: we attempt
        # * a payment with one trial: fails, because
        #   we need at least one trial because the initial fees are too low
        # * a payment with several trials: should succeed
        await self._run_mpp(
            graph,
            fail_kwargs={'alice_uses_trampoline': True, 'attempts': 1},
            success_kwargs={'alice_uses_trampoline': True, 'attempts': 30})
|
2022-01-10 15:45:29 +01:00
|
|
|
|
|
|
|
|
    @needs_test_with_all_chacha20_implementations
    async def test_payment_multipart_trampoline_legacy(self):
        """MPP via trampoline to a receiver without trampoline support must
        fail; there is no success phase for this scenario.
        """
        graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
        # Declare bob and carol as trampoline nodes for this test run.
        electrum.trampoline._TRAMPOLINE_NODES_UNITTESTS = {
            graph.workers['bob'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['bob'].node_keypair.pubkey),
            graph.workers['carol'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['carol'].node_keypair.pubkey),
        }
        # trampoline-to-legacy: this is restricted, as there are no forwarders capable of doing this
        await self._run_mpp(
            graph,
            fail_kwargs={'alice_uses_trampoline': True, 'attempts': 30, 'disable_trampoline_receiving': True},
            success_kwargs={})
|
2021-03-05 13:00:24 +01:00
|
|
|
|
2021-03-11 20:35:21 +01:00
|
|
|
    @needs_test_with_all_chacha20_implementations
    async def test_fail_pending_htlcs_on_shutdown(self):
        """Alice tries to pay Dave via MPP. Dave receives some HTLCs but not all.
        Dave shuts down (stops wallet).
        We test if Dave fails the pending HTLCs during shutdown.
        """
        graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
        self.assertEqual(500_000_000_000, graph.channels[('alice', 'bob')].balance(LOCAL))
        self.assertEqual(500_000_000_000, graph.channels[('alice', 'carol')].balance(LOCAL))
        # exceeds either single channel's capacity -> forces a multipart payment
        amount_to_pay = 600_000_000_000
        peers = graph.peers.values()
        # long MPP expiry so the incomplete set does not expire on its own;
        # short shutdown timeout so stop() fails the HTLCs quickly.
        graph.workers['dave'].MPP_EXPIRY = 120
        graph.workers['dave'].TIMEOUT_SHUTDOWN_FAIL_PENDING_HTLCS = 3

        async def pay():
            graph.workers['dave'].features |= LnFeatures.BASIC_MPP_OPT
            graph.workers['bob'].enable_htlc_forwarding = False  # Bob will hold forwarded HTLCs
            assert graph.workers['alice'].network.channel_db is not None
            lnaddr, pay_req = self.prepare_invoice(graph.workers['dave'], include_routing_hints=True, amount_msat=amount_to_pay)
            result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=1)

        async def stop():
            hm = graph.channels[('dave', 'carol')].hm
            # Poll until the carol->dave channel carries pending HTLCs in both
            # directions (the partial MPP arrived).
            while len(hm.htlcs(LOCAL)) == 0 or len(hm.htlcs(REMOTE)) == 0:
                await asyncio.sleep(0.1)
            self.assertTrue(len(hm.htlcs(LOCAL)) > 0)
            self.assertTrue(len(hm.htlcs(REMOTE)) > 0)
            await graph.workers['dave'].stop()
            # Dave is supposed to have failed the pending incomplete MPP HTLCs
            self.assertEqual(0, len(hm.htlcs(LOCAL)))
            self.assertEqual(0, len(hm.htlcs(REMOTE)))
            raise SuccessfulTest()

        async def f():
            async with OldTaskGroup() as group:
                for peer in peers:
                    await group.spawn(peer._message_loop())
                    await group.spawn(peer.htlc_switch())
                for peer in peers:
                    await peer.initialized
                await group.spawn(pay())
                await group.spawn(stop())

        with self.assertRaises(SuccessfulTest):
            await f()
|
2021-03-11 20:35:21 +01:00
|
|
|
|
2020-03-04 18:54:20 +01:00
|
|
|
@needs_test_with_all_chacha20_implementations
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_legacy_shutdown_low(self):
|
|
|
|
|
await self._test_shutdown(alice_fee=100, bob_fee=150)
|
2022-02-22 18:25:24 +01:00
|
|
|
|
|
|
|
|
@needs_test_with_all_chacha20_implementations
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_legacy_shutdown_high(self):
|
|
|
|
|
await self._test_shutdown(alice_fee=2000, bob_fee=100)
|
2022-02-22 18:25:24 +01:00
|
|
|
|
2022-03-08 11:34:57 +01:00
|
|
|
@needs_test_with_all_chacha20_implementations
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_modern_shutdown_with_overlap(self):
|
|
|
|
|
await self._test_shutdown(
|
2022-03-08 11:34:57 +01:00
|
|
|
alice_fee=1,
|
|
|
|
|
bob_fee=200,
|
|
|
|
|
alice_fee_range={'min_fee_satoshis': 1, 'max_fee_satoshis': 10},
|
|
|
|
|
bob_fee_range={'min_fee_satoshis': 10, 'max_fee_satoshis': 300})
|
|
|
|
|
|
|
|
|
|
## This test works but it is too slow (LN_P2P_NETWORK_TIMEOUT)
|
|
|
|
|
## because tests do not use a proper LNWorker object
|
|
|
|
|
#@needs_test_with_all_chacha20_implementations
|
|
|
|
|
#def test_modern_shutdown_no_overlap(self):
|
|
|
|
|
# self.assertRaises(Exception, lambda: asyncio.run(
|
|
|
|
|
# self._test_shutdown(
|
|
|
|
|
# alice_fee=1,
|
|
|
|
|
# bob_fee=200,
|
|
|
|
|
# alice_fee_range={'min_fee_satoshis': 1, 'max_fee_satoshis': 10},
|
|
|
|
|
# bob_fee_range={'min_fee_satoshis': 50, 'max_fee_satoshis': 300})
|
|
|
|
|
# ))
|
|
|
|
|
|
2023-02-18 10:01:21 +00:00
|
|
|
    async def _test_shutdown(self, alice_fee, bob_fee, alice_fee_range=None, bob_fee_range=None):
        """Drive a cooperative channel close between two peers.

        Each side uses a legacy fixed closing fee unless a fee range dict is
        given (modern fee-range negotiation). Bob's HTLC settling is initially
        disabled so an HTLC is in flight when alice initiates the close.
        The gathered tasks are cancelled on success, so callers assert
        asyncio.CancelledError as the pass signal.
        """
        alice_channel, bob_channel = create_test_channels()
        p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
        w1.network.config.set_key('test_shutdown_fee', alice_fee)
        w2.network.config.set_key('test_shutdown_fee', bob_fee)
        if alice_fee_range is not None:
            w1.network.config.set_key('test_shutdown_fee_range', alice_fee_range)
        else:
            w1.network.config.set_key('test_shutdown_legacy', True)
        if bob_fee_range is not None:
            w2.network.config.set_key('test_shutdown_fee_range', bob_fee_range)
        else:
            w2.network.config.set_key('test_shutdown_legacy', True)
        # Hold bob's settlement so the close happens with an HTLC in flight.
        w2.enable_htlc_settle = False
        lnaddr, pay_req = self.prepare_invoice(w2)

        async def pay():
            await asyncio.wait_for(p1.initialized, 1)
            await asyncio.wait_for(p2.initialized, 1)
            # alice sends htlc
            route, amount_msat = (await w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))[0][0:2]
            p1.pay(route=route,
                   chan=alice_channel,
                   amount_msat=lnaddr.get_amount_msat(),
                   total_msat=lnaddr.get_amount_msat(),
                   payment_hash=lnaddr.paymenthash,
                   min_final_cltv_expiry=lnaddr.get_min_final_cltv_expiry(),
                   payment_secret=lnaddr.payment_secret)
            # alice closes
            await p1.close_channel(alice_channel.channel_id)
            # Cancel everything once the close completes (late-bound `gath`).
            gath.cancel()

        async def set_settle():
            # Re-enable settling shortly after startup so the HTLC resolves.
            await asyncio.sleep(0.1)
            w2.enable_htlc_settle = True

        gath = asyncio.gather(pay(), set_settle(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
        with self.assertRaises(asyncio.CancelledError):
            await gath
|
|
|
|
|
|
2022-01-26 16:51:42 +01:00
|
|
|
@needs_test_with_all_chacha20_implementations
|
2023-02-18 10:01:21 +00:00
|
|
|
async def test_warning(self):
|
2022-01-26 16:51:42 +01:00
|
|
|
alice_channel, bob_channel = create_test_channels()
|
|
|
|
|
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
|
|
|
|
|
|
|
|
|
|
async def action():
|
|
|
|
|
await asyncio.wait_for(p1.initialized, 1)
|
|
|
|
|
await asyncio.wait_for(p2.initialized, 1)
|
|
|
|
|
await p1.send_warning(alice_channel.channel_id, 'be warned!', close_connection=True)
|
|
|
|
|
gath = asyncio.gather(action(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
|
|
|
|
|
with self.assertRaises(GracefulDisconnect):
|
2023-02-18 10:01:21 +00:00
|
|
|
await gath
|
2022-01-26 16:51:42 +01:00
|
|
|
|
|
|
|
|
    @needs_test_with_all_chacha20_implementations
    async def test_error(self):
        """Sending an error message with force_close_channel=True must
        force-close the channel and disconnect with GracefulDisconnect.
        """
        alice_channel, bob_channel = create_test_channels()
        p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)

        async def action():
            await asyncio.wait_for(p1.initialized, 1)
            await asyncio.wait_for(p2.initialized, 1)
            await p1.send_error(alice_channel.channel_id, 'some error happened!', force_close_channel=True)
            # The error must have force-closed the channel locally.
            assert alice_channel.is_closed()
            # `gath` is bound after this coroutine is defined (late binding).
            gath.cancel()

        gath = asyncio.gather(action(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
        with self.assertRaises(GracefulDisconnect):
            await gath
|
2022-01-26 16:51:42 +01:00
|
|
|
|
2020-12-29 17:40:01 +01:00
|
|
|
@needs_test_with_all_chacha20_implementations
async def test_close_upfront_shutdown_script(self):
    """Cooperative close with BOLT-02 upfront_shutdown_script.

    Scenario 1: bob committed to an upfront shutdown script but then tries
    to close to a different script -> alice must fail the connection
    (GracefulDisconnect).
    Scenario 2: bob closes to the script he committed to -> close proceeds
    normally (test loop ends via CancelledError from gath.cancel()).
    """
    alice_channel, bob_channel = create_test_channels()

    # create upfront shutdown script for bob, alice doesn't use upfront
    # shutdown script
    bob_uss_pub = lnutil.privkey_to_pubkey(os.urandom(32))
    bob_uss_addr = bitcoin.pubkey_to_address('p2wpkh', bob_uss_pub.hex())
    bob_uss = bfh(bitcoin.address_to_script(bob_uss_addr))

    # bob commits to close to bob_uss
    alice_channel.config[HTLCOwner.REMOTE].upfront_shutdown_script = bob_uss
    # but bob closes to some receiving address, which we achieve by not
    # setting the upfront shutdown script in the channel config
    bob_channel.config[HTLCOwner.LOCAL].upfront_shutdown_script = b''

    p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
    # pin fee rates on both sides so the close negotiation is deterministic
    w1.network.config.set_key('dynamic_fees', False)
    w2.network.config.set_key('dynamic_fees', False)
    w1.network.config.set_key('fee_per_kb', 5000)
    w2.network.config.set_key('fee_per_kb', 1000)

    async def test():
        async def close():
            await asyncio.wait_for(p1.initialized, 1)
            await asyncio.wait_for(p2.initialized, 1)
            # bob closes channel with different shutdown script
            await p1.close_channel(alice_channel.channel_id)
            gath.cancel()

        async def main_loop(peer):
            # run message loop and htlc switch inside the peer's taskgroup
            async with peer.taskgroup as group:
                await group.spawn(peer._message_loop())
                await group.spawn(peer.htlc_switch())

        coros = [close(), main_loop(p1), main_loop(p2)]
        gath = asyncio.gather(*coros)
        await gath

    # scenario 1: mismatching shutdown script must kill the connection
    with self.assertRaises(GracefulDisconnect):
        await test()

    # bob sends the same upfront_shutdown_script as he announced
    alice_channel.config[HTLCOwner.REMOTE].upfront_shutdown_script = bob_uss
    bob_channel.config[HTLCOwner.LOCAL].upfront_shutdown_script = bob_uss

    # fresh peers/transports for the second scenario
    p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
    w1.network.config.set_key('dynamic_fees', False)
    w2.network.config.set_key('dynamic_fees', False)
    w1.network.config.set_key('fee_per_kb', 5000)
    w2.network.config.set_key('fee_per_kb', 1000)

    async def test():
        async def close():
            await asyncio.wait_for(p1.initialized, 1)
            await asyncio.wait_for(p2.initialized, 1)
            await p1.close_channel(alice_channel.channel_id)
            gath.cancel()

        async def main_loop(peer):
            async with peer.taskgroup as group:
                await group.spawn(peer._message_loop())
                await group.spawn(peer.htlc_switch())

        coros = [close(), main_loop(p1), main_loop(p2)]
        gath = asyncio.gather(*coros)
        await gath

    # scenario 2: matching script -> close succeeds; the CancelledError
    # comes from gath.cancel() after close_channel returned
    with self.assertRaises(asyncio.CancelledError):
        await test()
|
|
|
|
|
|
|
|
|
|
async def test_channel_usage_after_closing(self):
    """After force-closing a channel, route finding over it must fail and
    paying a stale (pre-close) route must raise PaymentFailure."""
    alice_channel, bob_channel = create_test_channels()
    p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
    lnaddr, pay_req = self.prepare_invoice(w2)

    lnaddr = w1._check_invoice(pay_req)
    # compute a route while the channel is still open; we reuse it below
    route, amount_msat = (await w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))[0][0:2]
    assert amount_msat == lnaddr.get_amount_msat()

    await w1.force_close_channel(alice_channel.channel_id)
    # check if a tx (commitment transaction) was broadcasted:
    assert q1.qsize() == 1

    # route finding must now fail, since the only channel is closed
    with self.assertRaises(NoPathFound) as e:
        await w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr)

    peer = w1.peers[route[0].node_id]
    # AssertionError is ok since we shouldn't use old routes, and the
    # route finding should fail when channel is closed
    async def f():
        min_cltv_expiry = lnaddr.get_min_final_cltv_expiry()
        payment_hash = lnaddr.paymenthash
        payment_secret = lnaddr.payment_secret
        # attempt to pay over the stale route computed before the close
        pay = w1.pay_to_route(
            route=route,
            amount_msat=amount_msat,
            total_msat=amount_msat,
            amount_receiver_msat=amount_msat,
            payment_hash=payment_hash,
            payment_secret=payment_secret,
            min_cltv_expiry=min_cltv_expiry,
            trampoline_fee_level=0,
            trampoline_route=None,
        )
        await asyncio.gather(pay, p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
    with self.assertRaises(PaymentFailure):
        await f()
|
2018-11-02 19:16:42 +01:00
|
|
|
|
2021-03-19 20:51:38 +01:00
|
|
|
@needs_test_with_all_chacha20_implementations
async def test_sending_weird_messages_that_should_be_ignored(self):
    """Send tolerably-malformed messages that BOLT-01 says the recipient
    must ignore; the test ends via the sentinel SuccessfulTest exception."""
    alice_channel, bob_channel = create_test_channels()
    p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)

    async def inject_messages():
        for peer in (p1, p2):
            await asyncio.wait_for(peer.initialized, 1)
        # known message with trailing garbage:
        # BOLT-01 says peer2 should ignore the extra bytes
        msg_with_garbage = encode_msg('ping', num_pong_bytes=4, byteslen=4) + bytes(range(55))
        p1.transport.send_bytes(msg_with_garbage)
        await asyncio.sleep(0.05)
        # unknown 'odd-type' message:
        # BOLT-01 says peer2 should ignore the whole message ("it's ok to be odd")
        odd_type_msg = (43333).to_bytes(length=2, byteorder="big") + bytes(range(55))
        p1.transport.send_bytes(odd_type_msg)
        await asyncio.sleep(0.05)
        raise SuccessfulTest()

    async def run_peers():
        async with OldTaskGroup() as group:
            for peer in (p1, p2):
                await group.spawn(peer._message_loop())
                await group.spawn(peer.htlc_switch())
            for peer in (p1, p2):
                await peer.initialized
            await group.spawn(inject_messages())

    with self.assertRaises(SuccessfulTest):
        await run_peers()
|
2021-03-19 20:51:38 +01:00
|
|
|
|
|
|
|
|
@needs_test_with_all_chacha20_implementations
async def test_sending_weird_messages__unknown_even_type(self):
    """Send an unknown 'even-type' message; BOLT-01 mandates that the
    recipient fails the connection (UnknownMandatoryMsgType as cause)."""
    alice_channel, bob_channel = create_test_channels()
    p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)

    async def inject_message():
        for peer in (p1, p2):
            await asyncio.wait_for(peer.initialized, 1)
        # unknown even-type message:
        # BOLT-01 says peer2 should close the connection
        even_type_msg = (43334).to_bytes(length=2, byteorder="big") + bytes(range(55))
        p1.transport.send_bytes(even_type_msg)
        await asyncio.sleep(0.05)

    failing_task = None

    async def run_peers():
        nonlocal failing_task
        async with OldTaskGroup() as group:
            await group.spawn(p1._message_loop())
            await group.spawn(p1.htlc_switch())
            # p2's message loop is the task expected to die on the bad message
            failing_task = await group.spawn(p2._message_loop())
            await group.spawn(p2.htlc_switch())
            for peer in (p1, p2):
                await peer.initialized
            await group.spawn(inject_message())

    with self.assertRaises(GracefulDisconnect):
        await run_peers()
    self.assertTrue(isinstance(failing_task.exception().__cause__, lnmsg.UnknownMandatoryMsgType))
|
2021-03-19 20:51:38 +01:00
|
|
|
|
|
|
|
|
@needs_test_with_all_chacha20_implementations
async def test_sending_weird_messages__known_msg_with_insufficient_length(self):
    """Send a known message that is one byte too short; BOLT-01 mandates
    that the recipient fails the connection (UnexpectedEndOfStream as cause)."""
    alice_channel, bob_channel = create_test_channels()
    p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)

    async def inject_message():
        for peer in (p1, p2):
            await asyncio.wait_for(peer.initialized, 1)
        # known message with insufficient length for the contents:
        # BOLT-01 says peer2 should fail the connection
        truncated_msg = encode_msg('ping', num_pong_bytes=4, byteslen=4)[:-1]
        p1.transport.send_bytes(truncated_msg)
        await asyncio.sleep(0.05)

    failing_task = None

    async def run_peers():
        nonlocal failing_task
        async with OldTaskGroup() as group:
            await group.spawn(p1._message_loop())
            await group.spawn(p1.htlc_switch())
            # p2's message loop is the task expected to die on the bad message
            failing_task = await group.spawn(p2._message_loop())
            await group.spawn(p2.htlc_switch())
            for peer in (p1, p2):
                await peer.initialized
            await group.spawn(inject_message())

    with self.assertRaises(GracefulDisconnect):
        await run_peers()
    self.assertTrue(isinstance(failing_task.exception().__cause__, lnmsg.UnexpectedEndOfStream))
|