2019-06-22 09:47:08 +02:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
|
#
|
|
|
|
|
# Electrum - lightweight Bitcoin client
|
|
|
|
|
# Copyright (C) 2018 The Electrum developers
|
|
|
|
|
#
|
|
|
|
|
# Permission is hereby granted, free of charge, to any person
|
|
|
|
|
# obtaining a copy of this software and associated documentation files
|
|
|
|
|
# (the "Software"), to deal in the Software without restriction,
|
|
|
|
|
# including without limitation the rights to use, copy, modify, merge,
|
|
|
|
|
# publish, distribute, sublicense, and/or sell copies of the Software,
|
|
|
|
|
# and to permit persons to whom the Software is furnished to do so,
|
|
|
|
|
# subject to the following conditions:
|
|
|
|
|
#
|
|
|
|
|
# The above copyright notice and this permission notice shall be
|
|
|
|
|
# included in all copies or substantial portions of the Software.
|
|
|
|
|
#
|
|
|
|
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
|
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
|
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
|
|
|
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
|
|
|
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
|
|
|
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
|
|
|
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
|
# SOFTWARE.
|
|
|
|
|
|
|
|
|
|
import time
|
|
|
|
|
import random
|
|
|
|
|
import os
|
|
|
|
|
from collections import defaultdict
|
|
|
|
|
from typing import Sequence, List, Tuple, Optional, Dict, NamedTuple, TYPE_CHECKING, Set
|
|
|
|
|
import binascii
|
|
|
|
|
import base64
|
2019-09-05 18:32:45 +02:00
|
|
|
import asyncio
|
2020-02-29 18:32:47 +01:00
|
|
|
import threading
|
2020-04-24 11:45:39 +02:00
|
|
|
from enum import IntEnum
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2021-01-09 19:56:05 +01:00
|
|
|
from aiorpcx import NetAddress
|
2019-06-22 09:47:08 +02:00
|
|
|
|
|
|
|
|
from .sql_db import SqlDB, sql
|
2020-04-14 16:12:47 +02:00
|
|
|
from . import constants, util
|
2023-05-16 15:08:26 +02:00
|
|
|
from .util import profiler, get_headers_dir, is_ip_address, json_normalize, UserFacingException
|
2019-06-22 09:47:08 +02:00
|
|
|
from .logging import Logger
|
2020-03-24 15:31:19 +01:00
|
|
|
from .lnutil import (LNPeerAddr, format_short_channel_id, ShortChannelID,
|
2021-03-15 20:21:37 +01:00
|
|
|
validate_features, IncompatibleOrInsaneFeatures, InvalidGossipMsg)
|
2019-06-22 09:47:08 +02:00
|
|
|
from .lnverifier import LNChannelVerifier, verify_sig_for_channel_update
|
2020-02-17 20:38:41 +01:00
|
|
|
from .lnmsg import decode_msg
|
2021-03-15 20:21:37 +01:00
|
|
|
from . import ecc
|
|
|
|
|
from .crypto import sha256d
|
2023-01-13 15:47:02 +01:00
|
|
|
from .lnmsg import FailedToParseMsg
|
2019-06-22 09:47:08 +02:00
|
|
|
|
|
|
|
|
if TYPE_CHECKING:
|
|
|
|
|
from .network import Network
|
2020-02-17 20:38:41 +01:00
|
|
|
from .lnchannel import Channel
|
2021-03-02 18:00:31 +01:00
|
|
|
from .lnrouter import RouteEdge
|
2022-06-02 18:28:21 +02:00
|
|
|
from .simple_config import SimpleConfig
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2019-08-16 22:37:33 +02:00
|
|
|
|
2019-06-22 09:47:08 +02:00
|
|
|
# Bit masks for the channel_flags field of a channel_update (BOLT-07).
FLAG_DISABLE = 1 << 1    # set when the channel is disabled in the given direction
FLAG_DIRECTION = 1 << 0  # direction bit: 0 -> update is from node1, 1 -> from node2
|
|
|
|
|
|
2020-09-22 08:47:58 +02:00
|
|
|
|
2023-05-16 15:08:26 +02:00
|
|
|
class ChannelDBNotLoaded(UserFacingException): pass
|
|
|
|
|
|
|
|
|
|
|
2019-06-18 13:49:31 +02:00
|
|
|
class ChannelInfo(NamedTuple):
    """Static information about a channel, taken from a channel_announcement."""
    short_channel_id: 'ShortChannelID'
    node1_id: bytes              # pubkey of first endpoint (ascending order, per BOLT-07)
    node2_id: bytes              # pubkey of second endpoint
    capacity_sat: Optional[int]  # None if unknown (announcements do not carry capacity)

    @staticmethod
    def from_msg(payload: dict) -> 'ChannelInfo':
        """Build a ChannelInfo from a decoded channel_announcement payload.

        Raises IncompatibleOrInsaneFeatures (via validate_features) for
        unknown-required or oversized feature bits.
        """
        features = int.from_bytes(payload['features'], 'big')
        features = validate_features(features)
        channel_id = payload['short_channel_id']
        node_id_1 = payload['node_id_1']
        node_id_2 = payload['node_id_2']
        # BOLT-07 requires the node ids in the announcement to be in ascending order
        # (note: sorted() already returns a list; no need to wrap it in list())
        assert sorted([node_id_1, node_id_2]) == [node_id_1, node_id_2]
        return ChannelInfo(
            short_channel_id=ShortChannelID.normalize(channel_id),
            node1_id=node_id_1,
            node2_id=node_id_2,
            capacity_sat=None,  # capacity is not part of the gossip message
        )

    @staticmethod
    def from_raw_msg(raw: bytes) -> 'ChannelInfo':
        """Build a ChannelInfo from an undecoded (raw bytes) channel_announcement."""
        payload_dict = decode_msg(raw)[1]
        return ChannelInfo.from_msg(payload_dict)

    @staticmethod
    def from_route_edge(route_edge: 'RouteEdge') -> 'ChannelInfo':
        """Build a partial ChannelInfo (no capacity) from a RouteEdge."""
        node1_id, node2_id = sorted([route_edge.start_node, route_edge.end_node])
        return ChannelInfo(
            short_channel_id=route_edge.short_channel_id,
            node1_id=node1_id,
            node2_id=node2_id,
            capacity_sat=None,
        )
|
|
|
|
|
|
2019-06-18 13:49:31 +02:00
|
|
|
|
|
|
|
|
class Policy(NamedTuple):
    """One direction's routing policy for a channel, from a channel_update.

    The key is short_channel_id (8 bytes) concatenated with the start node's
    pubkey (33 bytes); the other fields mirror the channel_update message.
    """
    key: bytes
    cltv_expiry_delta: int
    htlc_minimum_msat: int
    htlc_maximum_msat: Optional[int]
    fee_base_msat: int
    fee_proportional_millionths: int
    channel_flags: int
    message_flags: int
    timestamp: int

    @staticmethod
    def from_msg(payload: dict) -> 'Policy':
        """Build a Policy from a decoded channel_update payload ('start_node' must be set)."""
        return Policy(
            key=payload['short_channel_id'] + payload['start_node'],
            cltv_expiry_delta=payload['cltv_expiry_delta'],
            htlc_minimum_msat=payload['htlc_minimum_msat'],
            htlc_maximum_msat=payload.get('htlc_maximum_msat', None),
            fee_base_msat=payload['fee_base_msat'],
            fee_proportional_millionths=payload['fee_proportional_millionths'],
            message_flags=int.from_bytes(payload['message_flags'], "big"),
            channel_flags=int.from_bytes(payload['channel_flags'], "big"),
            timestamp=payload['timestamp'],
        )

    @staticmethod
    def from_raw_msg(key: bytes, raw: bytes) -> 'Policy':
        """Build a Policy from an undecoded channel_update; *key* supplies the start node."""
        payload = decode_msg(raw)[1]
        payload['start_node'] = key[8:]
        return Policy.from_msg(payload)

    @staticmethod
    def from_route_edge(route_edge: 'RouteEdge') -> 'Policy':
        """Build a synthetic Policy (zeroed flags/limits/timestamp) from a RouteEdge."""
        return Policy(
            key=route_edge.short_channel_id + route_edge.start_node,
            cltv_expiry_delta=route_edge.cltv_expiry_delta,
            htlc_minimum_msat=0,
            htlc_maximum_msat=None,
            fee_base_msat=route_edge.fee_base_msat,
            fee_proportional_millionths=route_edge.fee_proportional_millionths,
            channel_flags=0,
            message_flags=0,
            timestamp=0,
        )

    def is_disabled(self):
        """Truthy iff the disable bit is set in channel_flags."""
        return self.channel_flags & FLAG_DISABLE

    @property
    def short_channel_id(self) -> 'ShortChannelID':
        """The 8-byte short channel id part of the key."""
        return ShortChannelID.normalize(self.key[:8])

    @property
    def start_node(self) -> bytes:
        """The 33-byte pubkey part of the key (the update's originating node)."""
        return self.key[8:]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class NodeInfo(NamedTuple):
    """Per-node gossip data, from a node_announcement."""
    node_id: bytes
    features: int
    timestamp: int
    alias: str

    @staticmethod
    def from_msg(payload) -> Tuple['NodeInfo', Sequence['LNPeerAddr']]:
        """Parse a decoded node_announcement into (NodeInfo, usable peer addresses).

        Raises IncompatibleOrInsaneFeatures (via validate_features).
        """
        node_id = payload['node_id']
        features = validate_features(int.from_bytes(payload['features'], "big"))
        peer_addrs = []
        for host, port in NodeInfo.parse_addresses_field(payload['addresses']):
            try:
                peer_addrs.append(LNPeerAddr(host=host, port=port, pubkey=node_id))
            except ValueError:
                pass  # skip addresses LNPeerAddr considers invalid
        raw_alias = payload['alias'].rstrip(b'\x00')
        try:
            alias = raw_alias.decode('utf8')
        except Exception:
            alias = ''  # non-utf8 alias: drop it rather than reject the whole msg
        node_info = NodeInfo(
            node_id=node_id,
            features=features,
            timestamp=payload['timestamp'],
            alias=alias,
        )
        return node_info, peer_addrs

    @staticmethod
    def from_raw_msg(raw: bytes) -> Tuple['NodeInfo', Sequence['LNPeerAddr']]:
        """Parse an undecoded (raw bytes) node_announcement."""
        return NodeInfo.from_msg(decode_msg(raw)[1])

    @staticmethod
    def parse_addresses_field(addresses_field):
        """Decode the BOLT-07 'addresses' byte-field into a list of (host, port)."""
        buf = addresses_field

        def read(n):
            nonlocal buf
            chunk, buf = buf[:n], buf[n:]
            return chunk

        addresses = []
        while buf:
            atype = ord(read(1))
            if atype == 0:
                pass  # padding byte
            elif atype == 1:  # IPv4
                host = '.'.join('%d' % b for b in read(4))
                port = int.from_bytes(read(2), 'big')
                if is_ip_address(host) and port != 0:
                    addresses.append((host, port))
            elif atype == 2:  # IPv6
                host = b':'.join(binascii.hexlify(read(2)) for _ in range(8)).decode('ascii')
                port = int.from_bytes(read(2), 'big')
                if is_ip_address(host) and port != 0:
                    addresses.append((host, port))
            elif atype == 3:  # onion v2
                host = (base64.b32encode(read(10)) + b'.onion').decode('ascii').lower()
                port = int.from_bytes(read(2), 'big')
                addresses.append((host, port))
            elif atype == 4:  # onion v3
                host = (base64.b32encode(read(35)) + b'.onion').decode('ascii').lower()
                port = int.from_bytes(read(2), 'big')
                addresses.append((host, port))
            else:
                # Unknown address type: its length is unknown, so we cannot skip it.
                # Any addresses that might follow are lost.
                break
        return addresses
|
|
|
|
|
|
2019-06-18 13:49:31 +02:00
|
|
|
|
2020-04-24 11:45:39 +02:00
|
|
|
class UpdateStatus(IntEnum):
    """Outcome of processing one channel_update (see CategorizedChannelUpdates)."""
    ORPHANED = 0    # no channel_announcement known for the channel
    EXPIRED = 1     # update is older than the max-age cutoff
    DEPRECATED = 2  # update is not newer than the entry we already have
    UNCHANGED = 3   # update does not change the stored policy
    GOOD = 4        # new, useful update
|
2020-04-24 11:45:39 +02:00
|
|
|
|
2019-08-16 22:24:26 +02:00
|
|
|
class CategorizedChannelUpdates(NamedTuple):
    """channel_updates bucketed by the UpdateStatus they were assigned."""
    orphaned: List    # no channel announcement for channel update
    expired: List     # update older than two weeks
    deprecated: List  # update older than database entry
    unchanged: List   # unchanged policies
    good: List        # good updates
|
|
|
|
|
|
2019-08-16 22:31:11 +02:00
|
|
|
|
2020-11-23 14:57:14 +01:00
|
|
|
def get_mychannel_info(short_channel_id: ShortChannelID,
                       my_channels: Dict[ShortChannelID, 'Channel']) -> Optional[ChannelInfo]:
    """Construct a ChannelInfo for one of our own channels, or None if unknown."""
    chan = my_channels.get(short_channel_id)
    if not chan:
        return None
    raw_ann = chan.construct_channel_announcement_without_sigs()
    ci = ChannelInfo.from_raw_msg(raw_ann)
    # announcements carry no capacity; fill it in from our own channel state
    return ci._replace(capacity_sat=chan.constraints.capacity)
|
|
|
|
|
|
|
|
|
|
def get_mychannel_policy(short_channel_id: bytes, node_id: bytes,
                         my_channels: Dict[ShortChannelID, 'Channel']) -> Optional[Policy]:
    """Return the Policy of one of our own channels, for the direction starting at node_id.

    Returns None if we don't have the channel, if node_id matches neither
    endpoint, or if we never received the remote's channel_update.
    """
    chan = my_channels.get(short_channel_id)  # type: Optional[Channel]
    if not chan:
        return
    if node_id == chan.node_id:  # incoming direction (to us)
        remote_update_raw = chan.get_remote_update()
        if not remote_update_raw:
            return
        now = int(time.time())
        remote_update_decoded = decode_msg(remote_update_raw)[1]
        # overwrite the update's own timestamp with the current time
        remote_update_decoded['timestamp'] = now
        remote_update_decoded['start_node'] = node_id
        return Policy.from_msg(remote_update_decoded)
    elif node_id == chan.get_local_pubkey():  # outgoing direction (from us)
        local_update_decoded = decode_msg(chan.get_outgoing_gossip_channel_update())[1]
        local_update_decoded['start_node'] = node_id
        return Policy.from_msg(local_update_decoded)
|
|
|
|
|
|
|
|
|
|
|
2019-06-27 09:03:34 +02:00
|
|
|
# -- SQL schema for the on-disk gossip database (used by ChannelDB/SqlDB) --

# raw channel_announcement messages, keyed by short_channel_id
create_channel_info = """
CREATE TABLE IF NOT EXISTS channel_info (
short_channel_id BLOB(8),
msg BLOB,
PRIMARY KEY(short_channel_id)
)"""

# raw channel_update messages, keyed by short_channel_id||start_node (8+33 bytes)
create_policy = """
CREATE TABLE IF NOT EXISTS policy (
key BLOB(41),
msg BLOB,
PRIMARY KEY(key)
)"""

# last-seen network addresses for nodes
create_address = """
CREATE TABLE IF NOT EXISTS address (
node_id BLOB(33),
host STRING(256),
port INTEGER NOT NULL,
timestamp INTEGER,
PRIMARY KEY(node_id, host, port)
)"""

# raw node_announcement messages, keyed by node_id
create_node_info = """
CREATE TABLE IF NOT EXISTS node_info (
node_id BLOB(33),
msg BLOB,
PRIMARY KEY(node_id)
)"""
|
2019-06-22 09:47:08 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
class ChannelDB(SqlDB):
|
|
|
|
|
|
|
|
|
|
NUM_MAX_RECENT_PEERS = 20
|
2023-08-15 16:33:24 +00:00
|
|
|
PRIVATE_CHAN_UPD_CACHE_TTL_NORMAL = 600
|
|
|
|
|
PRIVATE_CHAN_UPD_CACHE_TTL_SHORT = 120
|
2019-06-22 09:47:08 +02:00
|
|
|
|
|
|
|
|
    def __init__(self, network: 'Network'):
        """Create the gossip database, backed by sqlite at get_file_path()."""
        path = self.get_file_path(network.config)
        super().__init__(network.asyncio_loop, path, commit_interval=100)
        self.lock = threading.RLock()
        # gossip statistics, recomputed by update_counts()
        self.num_nodes = 0
        self.num_channels = 0
        self._channel_updates_for_private_channels = {}  # type: Dict[Tuple[bytes, bytes], Tuple[dict, int]]
        # note: ^ we could maybe move this cache into PaySession instead of being global.
        #       That would only make sense though if PaySessions were never too short
        #       (e.g. consider trampoline forwarding).
        self.ca_verifier = LNChannelVerifier(network, self)

        # initialized in load_data
        # note: modify/iterate needs self.lock
        self._channels = {}  # type: Dict[ShortChannelID, ChannelInfo]
        self._policies = {}  # type: Dict[Tuple[bytes, ShortChannelID], Policy]  # (node_id, scid) -> Policy
        self._nodes = {}  # type: Dict[bytes, NodeInfo]  # node_id -> NodeInfo
        # node_id -> NetAddress -> timestamp
        self._addresses = defaultdict(dict)  # type: Dict[bytes, Dict[NetAddress, int]]
        self._channels_for_node = defaultdict(set)  # type: Dict[bytes, Set[ShortChannelID]]
        self._recent_peers = []  # type: List[bytes]  # list of node_ids
        # channels bucketed by how many of their two directed policies we have
        self._chans_with_0_policies = set()  # type: Set[ShortChannelID]
        self._chans_with_1_policies = set()  # type: Set[ShortChannelID]
        self._chans_with_2_policies = set()  # type: Set[ShortChannelID]

        self.data_loaded = asyncio.Event()
        self.network = network  # only for callback
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2022-06-02 18:28:21 +02:00
|
|
|
@classmethod
|
|
|
|
|
def get_file_path(cls, config: 'SimpleConfig') -> str:
|
|
|
|
|
return os.path.join(get_headers_dir(config), 'gossip_db')
|
|
|
|
|
|
2019-06-22 09:47:08 +02:00
|
|
|
def update_counts(self):
|
2019-10-12 19:15:51 +02:00
|
|
|
self.num_nodes = len(self._nodes)
|
2019-06-18 13:49:31 +02:00
|
|
|
self.num_channels = len(self._channels)
|
|
|
|
|
self.num_policies = len(self._policies)
|
2020-04-14 16:12:47 +02:00
|
|
|
util.trigger_callback('channel_db', self.num_nodes, self.num_channels, self.num_policies)
|
|
|
|
|
util.trigger_callback('ln_gossip_sync_progress')
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2019-06-18 13:49:31 +02:00
|
|
|
def get_channel_ids(self):
|
2020-02-29 18:32:47 +01:00
|
|
|
with self.lock:
|
|
|
|
|
return set(self._channels.keys())
|
2019-06-22 09:47:08 +02:00
|
|
|
|
|
|
|
|
def add_recent_peer(self, peer: LNPeerAddr):
|
|
|
|
|
now = int(time.time())
|
2019-06-18 13:49:31 +02:00
|
|
|
node_id = peer.pubkey
|
2020-02-29 18:32:47 +01:00
|
|
|
with self.lock:
|
2021-01-09 19:56:05 +01:00
|
|
|
self._addresses[node_id][peer.net_addr()] = now
|
2020-03-02 02:19:13 +01:00
|
|
|
# list is ordered
|
|
|
|
|
if node_id in self._recent_peers:
|
|
|
|
|
self._recent_peers.remove(node_id)
|
|
|
|
|
self._recent_peers.insert(0, node_id)
|
|
|
|
|
self._recent_peers = self._recent_peers[:self.NUM_MAX_RECENT_PEERS]
|
2020-03-03 03:54:36 +01:00
|
|
|
self._db_save_node_address(peer, now)
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2019-06-18 13:49:31 +02:00
|
|
|
def get_200_randomly_sorted_nodes_not_in(self, node_ids):
|
2020-02-29 18:32:47 +01:00
|
|
|
with self.lock:
|
|
|
|
|
unshuffled = set(self._nodes.keys()) - node_ids
|
2021-09-22 10:59:33 +02:00
|
|
|
return random.sample(list(unshuffled), min(200, len(unshuffled)))
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2021-01-09 18:41:41 +01:00
|
|
|
def get_last_good_address(self, node_id: bytes) -> Optional[LNPeerAddr]:
|
|
|
|
|
"""Returns latest address we successfully connected to, for given node."""
|
2021-01-09 19:56:05 +01:00
|
|
|
addr_to_ts = self._addresses.get(node_id)
|
|
|
|
|
if not addr_to_ts:
|
2019-06-22 09:47:08 +02:00
|
|
|
return None
|
2021-01-09 19:56:05 +01:00
|
|
|
addr = sorted(list(addr_to_ts), key=lambda a: addr_to_ts[a], reverse=True)[0]
|
2019-11-26 00:15:33 +01:00
|
|
|
try:
|
2021-01-09 19:56:05 +01:00
|
|
|
return LNPeerAddr(str(addr.host), addr.port, node_id)
|
2019-11-26 00:15:33 +01:00
|
|
|
except ValueError:
|
|
|
|
|
return None
|
2019-06-22 09:47:08 +02:00
|
|
|
|
|
|
|
|
def get_recent_peers(self):
|
2020-03-10 15:11:16 +01:00
|
|
|
if not self.data_loaded.is_set():
|
2023-05-16 15:08:26 +02:00
|
|
|
raise ChannelDBNotLoaded("channelDB data not loaded yet!")
|
2020-02-29 18:32:47 +01:00
|
|
|
with self.lock:
|
2020-03-02 02:19:13 +01:00
|
|
|
ret = [self.get_last_good_address(node_id)
|
|
|
|
|
for node_id in self._recent_peers]
|
|
|
|
|
return ret
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2019-12-10 01:14:38 +01:00
|
|
|
# note: currently channel announcements are trusted by default (trusted=True);
|
2020-03-12 01:44:42 +01:00
|
|
|
# they are not SPV-verified. Verifying them would make the gossip sync
|
2019-12-10 01:14:38 +01:00
|
|
|
# even slower; especially as servers will start throttling us.
|
|
|
|
|
# It would probably put significant strain on servers if all clients
|
|
|
|
|
# verified the complete gossip.
|
2021-03-15 20:21:37 +01:00
|
|
|
def add_channel_announcements(self, msg_payloads, *, trusted=True):
|
2020-03-12 01:44:42 +01:00
|
|
|
# note: signatures have already been verified.
|
2019-06-22 09:47:08 +02:00
|
|
|
if type(msg_payloads) is dict:
|
|
|
|
|
msg_payloads = [msg_payloads]
|
2019-06-18 13:49:31 +02:00
|
|
|
added = 0
|
2019-06-22 09:47:08 +02:00
|
|
|
for msg in msg_payloads:
|
2019-09-06 18:09:05 +02:00
|
|
|
short_channel_id = ShortChannelID(msg['short_channel_id'])
|
2019-06-18 13:49:31 +02:00
|
|
|
if short_channel_id in self._channels:
|
2019-06-22 09:47:08 +02:00
|
|
|
continue
|
|
|
|
|
if constants.net.rev_genesis_bytes() != msg['chain_hash']:
|
2023-02-17 11:35:03 +00:00
|
|
|
self.logger.info("ChanAnn has unexpected chain_hash {}".format(msg['chain_hash'].hex()))
|
2019-06-22 09:47:08 +02:00
|
|
|
continue
|
|
|
|
|
try:
|
|
|
|
|
channel_info = ChannelInfo.from_msg(msg)
|
2020-03-25 13:44:39 +01:00
|
|
|
except IncompatibleOrInsaneFeatures as e:
|
|
|
|
|
self.logger.info(f"unknown or insane feature bits: {e!r}")
|
2019-06-22 09:47:08 +02:00
|
|
|
continue
|
2019-12-10 01:14:38 +01:00
|
|
|
if trusted:
|
|
|
|
|
added += 1
|
|
|
|
|
self.add_verified_channel_info(msg)
|
|
|
|
|
else:
|
|
|
|
|
added += self.ca_verifier.add_new_channel_info(short_channel_id, msg)
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2019-06-18 13:49:31 +02:00
|
|
|
self.update_counts()
|
|
|
|
|
self.logger.debug('add_channel_announcement: %d/%d'%(added, len(msg_payloads)))
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2019-12-10 01:14:38 +01:00
|
|
|
    def add_verified_channel_info(self, msg: dict, *, capacity_sat: Optional[int] = None) -> None:
        """Store a verified channel_announcement: update the in-memory maps,
        the per-channel policy buckets, and (if raw bytes are present) sqlite."""
        try:
            channel_info = ChannelInfo.from_msg(msg)
        except IncompatibleOrInsaneFeatures:
            # silently drop announcements with unacceptable feature bits
            return
        channel_info = channel_info._replace(capacity_sat=capacity_sat)
        with self.lock:
            self._channels[channel_info.short_channel_id] = channel_info
            self._channels_for_node[channel_info.node1_id].add(channel_info.short_channel_id)
            self._channels_for_node[channel_info.node2_id].add(channel_info.short_channel_id)
        self._update_num_policies_for_chan(channel_info.short_channel_id)
        if 'raw' in msg:
            self._db_save_channel(channel_info.short_channel_id, msg['raw'])
|
2019-12-10 01:14:38 +01:00
|
|
|
|
2020-04-24 12:16:21 +02:00
|
|
|
def policy_changed(self, old_policy: Policy, new_policy: Policy, verbose: bool) -> bool:
|
|
|
|
|
changed = False
|
2019-06-22 09:47:08 +02:00
|
|
|
if old_policy.cltv_expiry_delta != new_policy.cltv_expiry_delta:
|
2020-04-24 12:16:21 +02:00
|
|
|
changed |= True
|
|
|
|
|
if verbose:
|
|
|
|
|
self.logger.info(f'cltv_expiry_delta: {old_policy.cltv_expiry_delta} -> {new_policy.cltv_expiry_delta}')
|
2019-06-22 09:47:08 +02:00
|
|
|
if old_policy.htlc_minimum_msat != new_policy.htlc_minimum_msat:
|
2020-04-24 12:16:21 +02:00
|
|
|
changed |= True
|
|
|
|
|
if verbose:
|
|
|
|
|
self.logger.info(f'htlc_minimum_msat: {old_policy.htlc_minimum_msat} -> {new_policy.htlc_minimum_msat}')
|
2019-06-22 09:47:08 +02:00
|
|
|
if old_policy.htlc_maximum_msat != new_policy.htlc_maximum_msat:
|
2020-04-24 12:16:21 +02:00
|
|
|
changed |= True
|
|
|
|
|
if verbose:
|
|
|
|
|
self.logger.info(f'htlc_maximum_msat: {old_policy.htlc_maximum_msat} -> {new_policy.htlc_maximum_msat}')
|
2019-06-22 09:47:08 +02:00
|
|
|
if old_policy.fee_base_msat != new_policy.fee_base_msat:
|
2020-04-24 12:16:21 +02:00
|
|
|
changed |= True
|
|
|
|
|
if verbose:
|
|
|
|
|
self.logger.info(f'fee_base_msat: {old_policy.fee_base_msat} -> {new_policy.fee_base_msat}')
|
2019-06-22 09:47:08 +02:00
|
|
|
if old_policy.fee_proportional_millionths != new_policy.fee_proportional_millionths:
|
2020-04-24 12:16:21 +02:00
|
|
|
changed |= True
|
|
|
|
|
if verbose:
|
|
|
|
|
self.logger.info(f'fee_proportional_millionths: {old_policy.fee_proportional_millionths} -> {new_policy.fee_proportional_millionths}')
|
2019-06-22 09:47:08 +02:00
|
|
|
if old_policy.channel_flags != new_policy.channel_flags:
|
2020-04-24 12:16:21 +02:00
|
|
|
changed |= True
|
|
|
|
|
if verbose:
|
|
|
|
|
self.logger.info(f'channel_flags: {old_policy.channel_flags} -> {new_policy.channel_flags}')
|
2019-08-16 22:31:11 +02:00
|
|
|
if old_policy.message_flags != new_policy.message_flags:
|
2020-04-24 12:16:21 +02:00
|
|
|
changed |= True
|
|
|
|
|
if verbose:
|
|
|
|
|
self.logger.info(f'message_flags: {old_policy.message_flags} -> {new_policy.message_flags}')
|
|
|
|
|
if not changed and verbose:
|
|
|
|
|
self.logger.info(f'policy unchanged: {old_policy.timestamp} -> {new_policy.timestamp}')
|
|
|
|
|
return changed
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2021-03-15 20:21:37 +01:00
|
|
|
def add_channel_update(
        self, payload, *, max_age=None, verify=True, verbose=True) -> UpdateStatus:
    """Process a single 'channel_update' message and store the resulting Policy.

    Args:
        payload: decoded 'channel_update' message dict. Mutated: 'start_node' is set.
        max_age: if set, updates older than this many seconds are classified EXPIRED.
        verify: whether to check the message signature (raises InvalidGossipMsg on failure).
        verbose: passed to policy_changed() to control field-level diff logging.

    Returns an UpdateStatus classifying how the update was handled.
    """
    now = int(time.time())
    short_channel_id = ShortChannelID(payload['short_channel_id'])
    timestamp = payload['timestamp']
    if max_age and now - timestamp > max_age:
        return UpdateStatus.EXPIRED
    if timestamp - now > 60:
        # timestamp lies too far in the future; treat as not-newer
        return UpdateStatus.DEPRECATED
    channel_info = self._channels.get(short_channel_id)
    if not channel_info:
        # update references a channel we have no announcement for
        return UpdateStatus.ORPHANED
    # lowest bit of channel_flags selects which endpoint signed this update
    flags = int.from_bytes(payload['channel_flags'], 'big')
    direction = flags & FLAG_DIRECTION
    start_node = channel_info.node1_id if direction == 0 else channel_info.node2_id
    payload['start_node'] = start_node
    # compare updates to existing database entries
    # (fix: short_channel_id was previously recomputed here redundantly)
    key = (start_node, short_channel_id)
    old_policy = self._policies.get(key)
    if old_policy and timestamp <= old_policy.timestamp + 60:
        return UpdateStatus.DEPRECATED
    if verify:
        self.verify_channel_update(payload)
    policy = Policy.from_msg(payload)
    with self.lock:
        self._policies[key] = policy
    self._update_num_policies_for_chan(short_channel_id)
    if 'raw' in payload:
        self._db_save_policy(policy.key, payload['raw'])
    if old_policy and not self.policy_changed(old_policy, policy, verbose):
        return UpdateStatus.UNCHANGED
    else:
        return UpdateStatus.GOOD
|
2020-04-24 11:45:39 +02:00
|
|
|
|
|
|
|
|
def add_channel_updates(self, payloads, max_age=None) -> CategorizedChannelUpdates:
    """Apply a batch of 'channel_update' payloads and sort them by outcome.

    Each payload is handed to add_channel_update (with signature verification
    on and verbose logging off) and filed under the status it came back with.
    """
    buckets = {
        UpdateStatus.ORPHANED: [],
        UpdateStatus.EXPIRED: [],
        UpdateStatus.DEPRECATED: [],
        UpdateStatus.UNCHANGED: [],
        UpdateStatus.GOOD: [],
    }
    for payload in payloads:
        status = self.add_channel_update(payload, max_age=max_age, verbose=False, verify=True)
        bucket = buckets.get(status)
        if bucket is not None:
            bucket.append(payload)
    self.update_counts()
    return CategorizedChannelUpdates(
        orphaned=buckets[UpdateStatus.ORPHANED],
        expired=buckets[UpdateStatus.EXPIRED],
        deprecated=buckets[UpdateStatus.DEPRECATED],
        unchanged=buckets[UpdateStatus.UNCHANGED],
        good=buckets[UpdateStatus.GOOD])
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2020-02-20 11:26:19 +01:00
|
|
|
|
2019-06-27 09:03:34 +02:00
|
|
|
def create_database(self):
    """Create the gossip tables (nodes, addresses, policies, channels) and commit."""
    cursor = self.conn.cursor()
    # module-level DDL strings; each is expected to be idempotent (CREATE ... IF NOT EXISTS style)
    for ddl in (create_node_info, create_address, create_policy, create_channel_info):
        cursor.execute(ddl)
    self.conn.commit()
|
|
|
|
|
|
2019-06-22 09:47:08 +02:00
|
|
|
@sql
def _db_save_policy(self, key: bytes, msg: bytes):
    """Persist a raw 'channel_update' message under its policy key (upsert)."""
    # 'msg' is a 'channel_update' message
    cursor = self.conn.cursor()
    cursor.execute("""REPLACE INTO policy (key, msg) VALUES (?,?)""", (key, msg))
|
2019-06-22 09:47:08 +02:00
|
|
|
|
|
|
|
|
@sql
def _db_delete_policy(self, node_id: bytes, short_channel_id: ShortChannelID):
    """Delete the persisted policy keyed by short_channel_id || node_id."""
    row_key = short_channel_id + node_id
    cursor = self.conn.cursor()
    cursor.execute("""DELETE FROM policy WHERE key=?""", (row_key,))
|
2019-06-18 13:49:31 +02:00
|
|
|
|
|
|
|
|
@sql
def _db_save_channel(self, short_channel_id: ShortChannelID, msg: bytes):
    """Persist a raw 'channel_announcement' message for this channel (upsert)."""
    # 'msg' is a 'channel_announcement' message
    cursor = self.conn.cursor()
    cursor.execute(
        "REPLACE INTO channel_info (short_channel_id, msg) VALUES (?,?)",
        (short_channel_id, msg))
|
2019-06-27 09:03:34 +02:00
|
|
|
|
2019-07-11 13:50:21 +02:00
|
|
|
@sql
def _db_delete_channel(self, short_channel_id: ShortChannelID):
    """Remove the persisted 'channel_announcement' for this channel."""
    cursor = self.conn.cursor()
    cursor.execute("""DELETE FROM channel_info WHERE short_channel_id=?""", (short_channel_id,))
|
|
|
|
|
|
2019-06-27 09:03:34 +02:00
|
|
|
@sql
def _db_save_node_info(self, node_id: bytes, msg: bytes):
    """Persist a raw 'node_announcement' message for this node (upsert)."""
    # 'msg' is a 'node_announcement' message
    cursor = self.conn.cursor()
    cursor.execute("REPLACE INTO node_info (node_id, msg) VALUES (?,?)", (node_id, msg))
|
2019-06-27 09:03:34 +02:00
|
|
|
|
|
|
|
|
@sql
def _db_save_node_address(self, peer: LNPeerAddr, timestamp: int):
    """Persist (or refresh) a single peer address with its last-seen timestamp."""
    cursor = self.conn.cursor()
    row = (peer.pubkey, peer.host, peer.port, timestamp)
    cursor.execute(
        "REPLACE INTO address (node_id, host, port, timestamp) VALUES (?,?,?,?)", row)
|
2019-06-27 09:03:34 +02:00
|
|
|
|
|
|
|
|
@sql
def _db_save_node_addresses(self, node_addresses: Sequence[LNPeerAddr]):
    """Persist node addresses, inserting only rows not already present.

    Newly inserted rows get timestamp 0 ("never seen"); existing rows are
    left untouched so their last-seen timestamp survives.
    """
    c = self.conn.cursor()
    for addr in node_addresses:
        # existence check: SELECT 1 + fetchone() instead of fetching all columns/rows
        c.execute("SELECT 1 FROM address WHERE node_id=? AND host=? AND port=?",
                  (addr.pubkey, addr.host, addr.port))
        if c.fetchone() is None:
            c.execute("INSERT INTO address (node_id, host, port, timestamp) VALUES (?,?,?,?)",
                      (addr.pubkey, addr.host, addr.port, 0))
|
2019-06-18 13:49:31 +02:00
|
|
|
|
2021-03-15 20:21:37 +01:00
|
|
|
@classmethod
def verify_channel_update(cls, payload, *, start_node: bytes = None) -> None:
    """Validate a 'channel_update' message.

    Checks the chain hash against our network and verifies the signature
    against the originating node. Raises InvalidGossipMsg on any failure.
    """
    scid = ShortChannelID(payload['short_channel_id'])
    if payload['chain_hash'] != constants.net.rev_genesis_bytes():
        raise InvalidGossipMsg('wrong chain hash')
    # the signer may be given in the payload itself or by the caller
    signer = payload.get('start_node', None) or start_node
    assert signer is not None
    if not verify_sig_for_channel_update(payload, signer):
        raise InvalidGossipMsg(f'failed verifying channel update for {scid}')
|
|
|
|
|
|
|
|
|
|
@classmethod
def verify_channel_announcement(cls, payload) -> None:
    """Verify all four signatures on a 'channel_announcement'.

    Raises InvalidGossipMsg if any of the node/bitcoin-key signatures fails.
    """
    # the signed portion of the message starts after msg-type + 4 signatures
    digest = sha256d(payload['raw'][2 + 256:])
    key_sig_pairs = (
        ('node_id_1', 'node_signature_1'),
        ('node_id_2', 'node_signature_2'),
        ('bitcoin_key_1', 'bitcoin_signature_1'),
        ('bitcoin_key_2', 'bitcoin_signature_2'),
    )
    for key_field, sig_field in key_sig_pairs:
        if not ecc.verify_signature(payload[key_field], payload[sig_field], digest):
            raise InvalidGossipMsg('signature failed')
|
|
|
|
|
|
|
|
|
|
@classmethod
def verify_node_announcement(cls, payload) -> None:
    """Verify the signature on a 'node_announcement'; raise InvalidGossipMsg if invalid."""
    # signed portion starts after msg-type (2) + signature (64) = 66 bytes
    digest = sha256d(payload['raw'][66:])
    if not ecc.verify_signature(payload['node_id'], payload['signature'], digest):
        raise InvalidGossipMsg('signature failed')
|
|
|
|
|
|
|
|
|
|
def add_node_announcements(self, msg_payloads):
    """Process one or more 'node_announcement' payloads into self._nodes / self._addresses.

    Accepts either a single payload dict or a list of payload dicts.
    """
    # note: signatures have already been verified.
    if type(msg_payloads) is dict:
        msg_payloads = [msg_payloads]
    # NOTE(review): new_nodes is never written to below, so the debug log at the
    # end always reports 0 -- presumably a leftover; confirm before relying on it.
    new_nodes = {}
    for msg_payload in msg_payloads:
        try:
            node_info, node_addresses = NodeInfo.from_msg(msg_payload)
        except IncompatibleOrInsaneFeatures:
            # announced feature bits we cannot (or refuse to) handle
            continue
        node_id = node_info.node_id
        # Ignore node if it has no associated channel (DoS protection)
        if node_id not in self._channels_for_node:
            #self.logger.info('ignoring orphan node_announcement')
            continue
        # skip if we already hold an announcement at least as recent
        node = self._nodes.get(node_id)
        if node and node.timestamp >= node_info.timestamp:
            continue
        node = new_nodes.get(node_id)
        if node and node.timestamp >= node_info.timestamp:
            continue
        # save
        with self.lock:
            self._nodes[node_id] = node_info
        if 'raw' in msg_payload:
            self._db_save_node_info(node_id, msg_payload['raw'])
        # record addresses in memory, keeping any previously-known last-seen timestamp
        with self.lock:
            for addr in node_addresses:
                net_addr = NetAddress(addr.host, addr.port)
                self._addresses[node_id][net_addr] = self._addresses[node_id].get(net_addr) or 0
        self._db_save_node_addresses(node_addresses)

    self.logger.debug("on_node_announcement: %d/%d"%(len(new_nodes), len(msg_payloads)))
    self.update_counts()
|
|
|
|
|
|
2020-03-02 16:56:15 +01:00
|
|
|
def get_old_policies(self, delta) -> Sequence[Tuple[bytes, ShortChannelID]]:
    """Return keys of policies whose timestamp is at least `delta` seconds in the past."""
    with self.lock:
        snapshot = self._policies.copy()
    cutoff = int(time.time()) - delta
    return [key for key, policy in snapshot.items() if policy.timestamp <= cutoff]
|
2019-06-22 09:47:08 +02:00
|
|
|
|
|
|
|
|
def prune_old_policies(self, delta):
    """Delete policies older than `delta` seconds from memory and from the DB."""
    stale_keys = self.get_old_policies(delta)
    if not stale_keys:
        return
    for key in stale_keys:
        _node_id, scid = key
        with self.lock:
            self._policies.pop(key)
        self._db_delete_policy(*key)
        # channel may have moved between the 0/1/2-policy buckets
        self._update_num_policies_for_chan(scid)
    self.update_counts()
    self.logger.info(f'Deleting {len(stale_keys)} old policies')
|
2019-06-22 09:47:08 +02:00
|
|
|
|
|
|
|
|
def prune_orphaned_channels(self):
    """Remove channels for which we currently hold no policy in either direction."""
    with self.lock:
        orphaned = self._chans_with_0_policies.copy()
    if not orphaned:
        return
    for scid in orphaned:
        self.remove_channel(scid)
    self.update_counts()
    self.logger.info(f'Deleting {len(orphaned)} orphaned channels')
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2023-08-15 16:33:24 +00:00
|
|
|
def _get_channel_update_for_private_channel(
        self,
        start_node_id: bytes,
        short_channel_id: ShortChannelID,
        *,
        now: int = None,  # unix ts
) -> Optional[dict]:
    """Return the cached 'channel_update' payload for a private channel.

    Returns None when nothing is cached or the cached entry has expired.
    """
    now = int(time.time()) if now is None else now
    cache_key = (start_node_id, short_channel_id)
    chan_upd_dict, expires_at = self._channel_updates_for_private_channels.get(cache_key, (None, 0))
    if expires_at < now:
        chan_upd_dict = None  # already expired
        # TODO rm expired entries from cache (note: perf vs thread-safety)
    return chan_upd_dict
|
|
|
|
|
|
|
|
|
|
def add_channel_update_for_private_channel(
        self,
        msg_payload: dict,
        start_node_id: bytes,
        *,
        cache_ttl: int = None,  # seconds
) -> bool:
    """Returns True iff the channel update was successfully added and it was different than
    what we had before (if any).
    """
    if not verify_sig_for_channel_update(msg_payload, start_node_id):
        return False  # ignore
    now = int(time.time())
    scid = ShortChannelID(msg_payload['short_channel_id'])
    msg_payload['start_node'] = start_node_id
    # no-op if the cache already holds this exact payload (and it hasn't expired)
    prev_payload = self._get_channel_update_for_private_channel(start_node_id, scid, now=now)
    if prev_payload == msg_payload:
        return False
    ttl = self.PRIVATE_CHAN_UPD_CACHE_TTL_NORMAL if cache_ttl is None else cache_ttl
    with self.lock:
        self._channel_updates_for_private_channels[(start_node_id, scid)] = msg_payload, now + ttl
    return True
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2019-09-06 18:09:05 +02:00
|
|
|
def remove_channel(self, short_channel_id: ShortChannelID):
    """Forget a channel: drop it from the in-memory maps and from the DB."""
    # FIXME what about rm-ing policies?
    with self.lock:
        channel_info = self._channels.pop(short_channel_id, None)
        if channel_info:
            for endpoint in (channel_info.node1_id, channel_info.node2_id):
                self._channels_for_node[endpoint].remove(channel_info.short_channel_id)
    self._update_num_policies_for_chan(short_channel_id)
    # delete from database
    self._db_delete_channel(short_channel_id)
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2021-01-09 19:56:05 +01:00
|
|
|
def get_node_addresses(self, node_id: bytes) -> Sequence[Tuple[str, int, int]]:
|
|
|
|
|
"""Returns list of (host, port, timestamp)."""
|
|
|
|
|
addr_to_ts = self._addresses.get(node_id)
|
|
|
|
|
if not addr_to_ts:
|
|
|
|
|
return []
|
|
|
|
|
return [(str(net_addr.host), net_addr.port, ts)
|
|
|
|
|
for net_addr, ts in addr_to_ts.items()]
|
2019-06-22 09:47:08 +02:00
|
|
|
|
|
|
|
|
@sql
@profiler
def load_data(self):
    """Load the gossip DB (addresses, channels, node infos, policies) into memory.

    Idempotent: returns immediately if data was already loaded. On completion,
    signals `data_loaded` on the asyncio loop and fires 'gossip_db_loaded'.
    """
    if self.data_loaded.is_set():
        return
    # Note: this method takes several seconds... mostly due to lnmsg.decode_msg being slow.
    c = self.conn.cursor()
    # 1) node addresses
    c.execute("""SELECT * FROM address""")
    for x in c:
        node_id, host, port, timestamp = x
        try:
            net_addr = NetAddress(host, port)
        except Exception:
            # skip rows whose host/port cannot be parsed
            continue
        self._addresses[node_id][net_addr] = int(timestamp or 0)
    # rank nodes by the most recent last-seen timestamp of any of their addresses
    def newest_ts_for_node_id(node_id):
        newest_ts = 0
        for addr, ts in self._addresses[node_id].items():
            newest_ts = max(newest_ts, ts)
        return newest_ts
    sorted_node_ids = sorted(self._addresses.keys(), key=newest_ts_for_node_id, reverse=True)
    self._recent_peers = sorted_node_ids[:self.NUM_MAX_RECENT_PEERS]
    # 2) channel announcements
    c.execute("""SELECT * FROM channel_info""")
    for short_channel_id, msg in c:
        try:
            ci = ChannelInfo.from_raw_msg(msg)
        except IncompatibleOrInsaneFeatures:
            continue
        except FailedToParseMsg:
            continue
        self._channels[ShortChannelID.normalize(short_channel_id)] = ci
    # 3) node announcements
    c.execute("""SELECT * FROM node_info""")
    for node_id, msg in c:
        try:
            node_info, node_addresses = NodeInfo.from_raw_msg(msg)
        except IncompatibleOrInsaneFeatures:
            continue
        except FailedToParseMsg:
            continue
        # don't load node_addresses because they dont have timestamps
        self._nodes[node_id] = node_info
    # 4) channel policies
    c.execute("""SELECT * FROM policy""")
    for key, msg in c:
        try:
            p = Policy.from_raw_msg(key, msg)
        except FailedToParseMsg:
            continue
        self._policies[(p.start_node, p.short_channel_id)] = p
    # 5) derived indices: node -> channels, and the 0/1/2-policy buckets
    for channel_info in self._channels.values():
        self._channels_for_node[channel_info.node1_id].add(channel_info.short_channel_id)
        self._channels_for_node[channel_info.node2_id].add(channel_info.short_channel_id)
        self._update_num_policies_for_chan(channel_info.short_channel_id)
    self.logger.info(f'data loaded. {len(self._channels)} chans. {len(self._policies)} policies. '
                     f'{len(self._channels_for_node)} nodes.')
    self.update_counts()
    (nchans_with_0p, nchans_with_1p, nchans_with_2p) = self.get_num_channels_partitioned_by_policy_count()
    self.logger.info(f'num_channels_partitioned_by_policy_count. '
                     f'0p: {nchans_with_0p}, 1p: {nchans_with_1p}, 2p: {nchans_with_2p}')
    self.asyncio_loop.call_soon_threadsafe(self.data_loaded.set)
    util.trigger_callback('gossip_db_loaded')
|
2019-07-13 08:46:17 +02:00
|
|
|
|
2020-03-02 16:56:15 +01:00
|
|
|
def _update_num_policies_for_chan(self, short_channel_id: ShortChannelID) -> None:
    """Re-bucket `short_channel_id` into the 0/1/2-known-policies channel sets."""
    channel_info = self.get_channel_info(short_channel_id)
    if channel_info is None:
        # unknown channel: make sure it is in no bucket at all
        with self.lock:
            for bucket in (self._chans_with_0_policies,
                           self._chans_with_1_policies,
                           self._chans_with_2_policies):
                bucket.discard(short_channel_id)
        return
    p1 = self.get_policy_for_node(short_channel_id, channel_info.node1_id)
    p2 = self.get_policy_for_node(short_channel_id, channel_info.node2_id)
    num_known = sum(1 for p in (p1, p2) if p is not None)
    with self.lock:
        for bucket in (self._chans_with_0_policies,
                       self._chans_with_1_policies,
                       self._chans_with_2_policies):
            bucket.discard(short_channel_id)
        if num_known == 2:
            self._chans_with_2_policies.add(short_channel_id)
        elif num_known == 0:
            self._chans_with_0_policies.add(short_channel_id)
        else:
            self._chans_with_1_policies.add(short_channel_id)
|
|
|
|
|
|
|
|
|
|
def get_num_channels_partitioned_by_policy_count(self) -> Tuple[int, int, int]:
    """Return counts of channels with 0, 1 and 2 known policies, in that order."""
    return (len(self._chans_with_0_policies),
            len(self._chans_with_1_policies),
            len(self._chans_with_2_policies))
|
2019-06-22 09:47:08 +02:00
|
|
|
|
2021-03-02 18:00:31 +01:00
|
|
|
def get_policy_for_node(
        self,
        short_channel_id: ShortChannelID,
        node_id: bytes,
        *,
        my_channels: Dict[ShortChannelID, 'Channel'] = None,
        private_route_edges: Dict[ShortChannelID, 'RouteEdge'] = None,
        now: int = None,  # unix ts
) -> Optional['Policy']:
    """Return the Policy that `node_id` advertises for `short_channel_id`, or None.

    Lookup order: gossip policy (public channel) / cached private channel_update,
    then our own channels, then caller-supplied private route edges.
    Note: falls through (returning None implicitly) when nothing matches.
    """
    channel_info = self.get_channel_info(short_channel_id)
    if channel_info is not None:  # publicly announced channel
        policy = self._policies.get((node_id, short_channel_id))
        if policy:
            return policy
    elif chan_upd_dict := self._get_channel_update_for_private_channel(node_id, short_channel_id, now=now):
        # only consulted when the channel is NOT publicly announced
        return Policy.from_msg(chan_upd_dict)
    # check if it's one of our own channels
    if my_channels:
        policy = get_mychannel_policy(short_channel_id, node_id, my_channels)
        if policy:
            return policy
    if private_route_edges:
        route_edge = private_route_edges.get(short_channel_id, None)
        if route_edge:
            return Policy.from_route_edge(route_edge)
|
|
|
|
|
|
|
|
|
|
def get_channel_info(
        self,
        short_channel_id: ShortChannelID,
        *,
        my_channels: Dict[ShortChannelID, 'Channel'] = None,
        private_route_edges: Dict[ShortChannelID, 'RouteEdge'] = None,
) -> Optional[ChannelInfo]:
    """Look up ChannelInfo: gossip first, then own channels, then private route edges.

    Returns None (implicitly) when the channel is unknown everywhere.
    """
    public_info = self._channels.get(short_channel_id)
    if public_info:
        return public_info
    # check if it's one of our own channels
    if my_channels:
        own_info = get_mychannel_info(short_channel_id, my_channels)
        if own_info:
            return own_info
    if private_route_edges:
        edge = private_route_edges.get(short_channel_id)
        if edge:
            return ChannelInfo.from_route_edge(edge)
|
|
|
|
|
|
|
|
|
|
def get_channels_for_node(
        self,
        node_id: bytes,
        *,
        my_channels: Dict[ShortChannelID, 'Channel'] = None,
        private_route_edges: Dict[ShortChannelID, 'RouteEdge'] = None,
) -> Set[ShortChannelID]:
    """Returns the set of short channel IDs where node_id is one of the channel participants."""
    if not self.data_loaded.is_set():
        raise ChannelDBNotLoaded("channelDB data not loaded yet!")
    # copy the gossip-derived set so callers can't mutate our index
    found = set(self._channels_for_node.get(node_id) or set())
    # add our own channels  # TODO maybe slow?
    for chan in (my_channels or {}).values():
        if node_id in (chan.node_id, chan.get_local_pubkey()):
            found.add(chan.short_channel_id)
    # add private channels  # TODO maybe slow?
    for route_edge in (private_route_edges or {}).values():
        if node_id in (route_edge.start_node, route_edge.end_node):
            found.add(route_edge.short_channel_id)
    return found
|
2020-03-02 04:31:21 +01:00
|
|
|
|
2020-05-06 10:56:33 +02:00
|
|
|
def get_endnodes_for_chan(self, short_channel_id: ShortChannelID, *,
                          my_channels: Dict[ShortChannelID, 'Channel'] = None) -> Optional[Tuple[bytes, bytes]]:
    """Return the two endpoint node ids of a channel, or None if unknown.

    Consults gossip first, then the caller's own channels.
    """
    channel_info = self.get_channel_info(short_channel_id)
    if channel_info is not None:  # publicly announced channel
        return channel_info.node1_id, channel_info.node2_id
    # check if it's one of our own channels
    chan = (my_channels or {}).get(short_channel_id)  # type: Optional[Channel]
    if not chan:
        return None
    return chan.get_local_pubkey(), chan.node_id
|
|
|
|
|
|
2020-03-02 04:31:21 +01:00
|
|
|
def get_node_info_for_node_id(self, node_id: bytes) -> Optional['NodeInfo']:
    """Return the NodeInfo for `node_id`, or None if we hold no announcement for it."""
    return self._nodes.get(node_id, None)
|
2020-09-22 08:47:58 +02:00
|
|
|
|
2020-10-28 08:17:25 +01:00
|
|
|
def get_node_infos(self) -> Dict[bytes, NodeInfo]:
    """Return a snapshot copy of all known node announcements, keyed by node id."""
    with self.lock:
        snapshot = self._nodes.copy()
    return snapshot
|
2020-10-28 08:17:25 +01:00
|
|
|
|
|
|
|
|
def get_node_policies(self) -> Dict[Tuple[bytes, ShortChannelID], Policy]:
    """Return a snapshot copy of all known policies, keyed by (node_id, scid)."""
    with self.lock:
        snapshot = self._policies.copy()
    return snapshot
|
2020-10-28 08:17:25 +01:00
|
|
|
|
def get_node_by_prefix(self, prefix):
    """Return the first known node id that starts with `prefix`.

    Raises a generic Exception when no known node id matches.
    """
    with self.lock:
        for node_id in self._addresses.keys():
            if node_id.startswith(prefix):
                return node_id
        raise Exception('node not found')
|
|
|
|
|
|
2020-09-22 08:47:58 +02:00
|
|
|
def to_dict(self) -> dict:
    """ Generates a graph representation in terms of a dictionary.

    The dictionary contains only native python types and can be encoded
    to json.
    """
    with self.lock:
        graph = {'nodes': [], 'channels': []}

        # gather nodes
        for pk, nodeinfo in self._nodes.items():
            # use _asdict() to convert NamedTuples to json encodable dicts
            graph['nodes'].append(
                nodeinfo._asdict(),
            )
            # attach this node's known addresses with their last-seen timestamps
            graph['nodes'][-1]['addresses'] = [
                {'host': str(addr.host), 'port': addr.port, 'timestamp': ts}
                for addr, ts in self._addresses[pk].items()
            ]

        # gather channels
        for cid, channelinfo in self._channels.items():
            graph['channels'].append(
                channelinfo._asdict(),
            )
            # attach per-direction policies (None when a direction is unknown)
            policy1 = self._policies.get(
                (channelinfo.node1_id, channelinfo.short_channel_id))
            policy2 = self._policies.get(
                (channelinfo.node2_id, channelinfo.short_channel_id))
            graph['channels'][-1]['policy1'] = policy1._asdict() if policy1 else None
            graph['channels'][-1]['policy2'] = policy2._asdict() if policy2 else None

    # need to use json_normalize otherwise json encoding in rpc server fails
    graph = json_normalize(graph)
    return graph
|