2018-10-25 19:34:31 +02:00
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
2018-04-16 10:24:03 +02:00
import asyncio
2018-04-30 23:34:33 +02:00
import os
2018-04-23 15:11:56 +02:00
from decimal import Decimal
2018-07-13 17:05:04 +02:00
import random
2018-07-27 20:59:04 +02:00
import time
2018-10-22 15:35:57 +02:00
from typing import Optional , Sequence , Tuple , List , Dict , TYPE_CHECKING
2018-07-30 13:51:03 +02:00
import threading
2018-09-27 16:43:33 +02:00
import socket
2018-10-25 00:22:42 +02:00
import json
2018-11-07 17:44:49 +01:00
from datetime import datetime , timezone
2018-11-19 18:09:43 +01:00
from functools import partial
2019-02-27 11:42:09 +01:00
from collections import defaultdict
2019-05-30 21:46:30 +02:00
import concurrent
2018-07-27 20:59:04 +02:00
import dns . resolver
import dns . exception
2018-05-28 09:39:05 +02:00
2018-05-28 18:22:45 +02:00
from . import constants
2018-10-05 15:37:47 +02:00
from . import keystore
2019-10-09 19:23:09 +02:00
from . util import profiler
from . util import PR_UNPAID , PR_EXPIRED , PR_PAID , PR_INFLIGHT , PR_FAILED
2019-08-11 14:47:06 +02:00
from . util import PR_TYPE_LN
2018-10-05 15:37:47 +02:00
from . keystore import BIP32_KeyStore
2018-10-25 23:30:36 +02:00
from . bitcoin import COIN
2018-12-12 18:02:55 +01:00
from . transaction import Transaction
2018-10-25 23:30:36 +02:00
from . crypto import sha256
2019-03-06 16:13:28 +01:00
from . bip32 import BIP32Node
2019-05-02 17:59:11 +02:00
from . util import bh2u , bfh , InvoiceError , resolve_dns_srv , is_ip_address , log_exceptions
2019-09-19 18:50:57 +02:00
from . util import ignore_exceptions , make_aiohttp_session
2019-01-31 16:41:43 +01:00
from . util import timestamp_to_datetime
2019-05-02 17:59:11 +02:00
from . logging import Logger
2019-02-01 20:21:59 +01:00
from . lntransport import LNTransport , LNResponderTransport
2019-09-05 18:31:51 +02:00
from . lnpeer import Peer , LN_P2P_NETWORK_TIMEOUT
2018-06-29 12:33:16 +02:00
from . lnaddr import lnencode , LnAddr , lndecode
2018-06-28 15:50:45 +02:00
from . ecc import der_sig_from_sig_string
2019-09-06 15:08:15 +02:00
from . ecc_fast import is_using_fast_ecc
2019-02-09 10:29:33 +01:00
from . lnchannel import Channel , ChannelJsonEncoder
2019-08-14 21:38:02 +02:00
from . import lnutil
2019-09-06 18:09:05 +02:00
from . lnutil import ( Outpoint , LNPeerAddr ,
2018-09-27 16:43:33 +02:00
get_compressed_pubkey_from_bech32 , extract_nodeid ,
2018-10-05 15:37:47 +02:00
PaymentFailure , split_host_port , ConnStringFormatError ,
2018-10-18 22:56:40 +02:00
generate_keypair , LnKeyFamily , LOCAL , REMOTE ,
2018-10-19 21:47:51 +02:00
UnknownPaymentHash , MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE ,
2019-01-21 21:27:27 +01:00
NUM_MAX_EDGES_IN_PAYMENT_PATH , SENT , RECEIVED , HTLCOwner ,
2019-09-06 18:09:05 +02:00
UpdateAddHtlc , Direction , LnLocalFeatures , format_short_channel_id ,
ShortChannelID )
2019-10-12 18:22:19 +02:00
from . lnonion import OnionFailureCode
from . lnmsg import decode_msg
2018-07-17 21:27:59 +02:00
from . i18n import _
2018-10-19 21:47:51 +02:00
from . lnrouter import RouteEdge , is_route_sane_to_use
2018-11-05 17:23:49 +01:00
from . address_synchronizer import TX_HEIGHT_LOCAL
2019-05-23 16:13:28 +02:00
from . import lnsweep
2019-07-03 08:46:00 +02:00
from . lnwatcher import LNWatcher
2018-05-28 09:39:05 +02:00
2018-10-22 15:35:57 +02:00
if TYPE_CHECKING :
from . network import Network
from . wallet import Abstract_Wallet
2019-09-09 19:38:35 +02:00
from . lnsweep import SweepInfo
2018-10-22 15:35:57 +02:00
2018-07-27 20:59:04 +02:00
NUM_PEERS_TARGET = 4  # try to keep at least this many peer connections open
PEER_RETRY_INTERVAL = 600  # seconds
PEER_RETRY_INTERVAL_FOR_CHANNELS = 30  # seconds
GRAPH_DOWNLOAD_SECONDS = 600

# Hardcoded bootstrap peers, used when the channel db yields no candidates.
FALLBACK_NODE_LIST_TESTNET = (
    LNPeerAddr('ecdsa.net', 9735, bfh('038370f0e7a03eded3e1d41dc081084a87f0afa1c5b22090b4f3abb391eb15d8ff')),
    LNPeerAddr('148.251.87.112', 9735, bfh('021a8bd8d8f1f2e208992a2eb755cdc74d44e66b6a0c924d3a3cce949123b9ce40')),  # janus test server
    LNPeerAddr('122.199.61.90', 9735, bfh('038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9')),  # popular node https://1ml.com/testnet/node/038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9
)
2019-03-18 11:03:37 +01:00
FALLBACK_NODE_LIST_MAINNET = [
LNPeerAddr ( host = ' 52.168.166.221 ' , port = 9735 , pubkey = b ' \x02 \x14 8+ \xdc \xe7 u \r \xfc \xb8 \x12 m \xf8 \xe2 \xb1 - \xe3 \x85 6 \x90 - \xc3 j \xbc \xeb \xda \xee \xfd \xec \xa1 \xdf \x82 \x84 ' ) ,
LNPeerAddr ( host = ' 35.230.100.60 ' , port = 9735 , pubkey = b ' \x02 ?^5 \x82 qk \xed \x96 \xf6 \xf2 l \xfc \xd8 \x03 ~ \x07 GM { GC \xaf \xdc \x8b \x07 \xe6 \x92 \xdf cFM~ ' ) ,
LNPeerAddr ( host = ' 40.69.71.114 ' , port = 9735 , pubkey = b ' \x02 \x83 \x03 \x18 , \x98 \x85 \xda \x93 \xb3 \xb2 \\ \x96 ! \xd2 , \xf3 Du \xe6 < \x12 9B \xe4 \x02 \xab S \x0c \x05 V \xe6 u ' ) ,
LNPeerAddr ( host = ' 62.210.110.5 ' , port = 9735 , pubkey = b ' \x02 v \xe0 \x9a &u \x92 \xe7 E \x1a \x93 \x9c \x93 , \xf6 \x85 \xf0 uM \xe3 \x82 \xa3 \xca \x85 \xd2 \xfb : \x86 ML6Z \xd5 ' ) ,
LNPeerAddr ( host = ' 34.236.113.58 ' , port = 9735 , pubkey = b ' \x02 \xfa P \xc7 . \xe1 \xe2 \xeb _ \x1b m \x9c 02 \x08 \x0c L \x86 Cs \xc4 \x1d \xfa )f \xaa 4 \xee \xe1 \x05 \x1f \x97 ' ) ,
LNPeerAddr ( host = ' 52.168.166.221 ' , port = 9735 , pubkey = b ' \x02 \x14 8+ \xdc \xe7 u \r \xfc \xb8 \x12 m \xf8 \xe2 \xb1 - \xe3 \x85 6 \x90 - \xc3 j \xbc \xeb \xda \xee \xfd \xec \xa1 \xdf \x82 \x84 ' ) ,
LNPeerAddr ( host = ' 34.236.113.58 ' , port = 9735 , pubkey = b ' \x02 \xfa P \xc7 . \xe1 \xe2 \xeb _ \x1b m \x9c 02 \x08 \x0c L \x86 Cs \xc4 \x1d \xfa )f \xaa 4 \xee \xe1 \x05 \x1f \x97 ' ) ,
]
2018-07-27 20:59:04 +02:00
2018-11-07 17:44:49 +01:00
encoder = ChannelJsonEncoder ( )
2019-09-20 17:15:49 +02:00
from typing import NamedTuple

class PaymentInfo(NamedTuple):
    """Status record for a single lightning payment."""
    payment_hash: bytes
    amount: int     # presumably satoshis; may be falsy for no-amount invoices -- TODO confirm
    direction: int  # SENT or RECEIVED (see lnutil)
    status: int     # one of the PR_* constants from util
2019-10-09 19:23:09 +02:00
class NoPathFound(PaymentFailure):
    """Raised when no route to the payment destination could be found."""
    pass
2019-05-02 17:59:11 +02:00
class LNWorker(Logger):
    """Base class for lightning workers (LNWallet, LNGossip).

    Owns the dict of connected peers, keeps the number of outgoing
    connections at NUM_PEERS_TARGET, and finds new peer addresses from
    (in order): recent peers in the channel db, random graph nodes,
    hardcoded fallback lists, and (currently disabled) DNS seeds.
    """

    def __init__(self, xprv):
        Logger.__init__(self)
        # static node identity key, derived from the lightning xprv
        self.node_keypair = generate_keypair(keystore.from_xprv(xprv), LnKeyFamily.NODE_KEY, 0)
        self.peers = {}  # type: Dict[bytes, Peer]  # pubkey -> Peer
        # set some feature flags as baseline for both LNWallet and LNGossip
        # note that e.g. DATA_LOSS_PROTECT is needed for LNGossip as many peers require it
        self.localfeatures = LnLocalFeatures(0)
        self.localfeatures |= LnLocalFeatures.OPTION_DATA_LOSS_PROTECT_OPT

    def channels_for_peer(self, node_id):
        # overridden in LNWallet; the gossip worker has no channels
        return {}

    async def maybe_listen(self):
        """Start listening for incoming LN connections, if 'lightning_listen' is set."""
        listen_addr = self.config.get('lightning_listen')
        if listen_addr:
            # fix: split only on the LAST colon so that a bracketed ipv6
            # address ("[::1]:9735") keeps its internal colons.
            # rsplit(':', 2) would yield three parts and raise ValueError.
            addr, port = listen_addr.rsplit(':', 1)
            if addr[0] == '[':
                # ipv6: strip the brackets
                addr = addr[1:-1]
            async def cb(reader, writer):
                transport = LNResponderTransport(self.node_keypair.privkey, reader, writer)
                try:
                    node_id = await transport.handshake()
                except Exception:
                    # narrowed from a bare except so task cancellation
                    # (CancelledError) is not swallowed here
                    self.logger.info('handshake failure from incoming connection')
                    return
                peer = Peer(self, node_id, transport)
                self.peers[node_id] = peer
                await self.network.main_taskgroup.spawn(peer.main_loop())
            await asyncio.start_server(cb, addr, int(port))

    @log_exceptions
    async def main_loop(self):
        """Periodically top up the peer set to NUM_PEERS_TARGET."""
        while True:
            await asyncio.sleep(1)
            now = time.time()
            if len(self.peers) >= NUM_PEERS_TARGET:
                continue
            peers = await self._get_next_peers_to_try()
            for peer in peers:
                last_tried = self._last_tried_peer.get(peer, 0)
                if last_tried + PEER_RETRY_INTERVAL < now:
                    await self._add_peer(peer.host, peer.port, peer.pubkey)

    async def _add_peer(self, host, port, node_id):
        """Connect to (host, port, node_id); no-op if already connected.

        Returns the (possibly pre-existing) Peer object.
        """
        if node_id in self.peers:
            return self.peers[node_id]
        port = int(port)
        peer_addr = LNPeerAddr(host, port, node_id)
        transport = LNTransport(self.node_keypair.privkey, peer_addr)
        self._last_tried_peer[peer_addr] = time.time()
        self.logger.info(f"adding peer {peer_addr}")
        peer = Peer(self, node_id, transport)
        await self.network.main_taskgroup.spawn(peer.main_loop())
        self.peers[node_id] = peer
        return peer

    def num_peers(self):
        """Number of peers that completed the init handshake."""
        return sum(p.initialized.is_set() for p in self.peers.values())

    def start_network(self, network: 'Network'):
        self.network = network
        self.config = network.config
        self.channel_db = self.network.channel_db
        self._last_tried_peer = {}  # LNPeerAddr -> unix timestamp
        self._add_peers_from_config()
        asyncio.run_coroutine_threadsafe(self.network.main_taskgroup.spawn(self.main_loop()), self.network.asyncio_loop)

    def _add_peers_from_config(self):
        """Schedule connections to user-configured 'lightning_peers'."""
        peer_list = self.config.get('lightning_peers', [])
        for host, port, pubkey in peer_list:
            asyncio.run_coroutine_threadsafe(
                self._add_peer(host, int(port), bfh(pubkey)),
                self.network.asyncio_loop)

    async def _get_next_peers_to_try(self) -> Sequence[LNPeerAddr]:
        now = time.time()
        await self.channel_db.data_loaded.wait()
        recent_peers = self.channel_db.get_recent_peers()
        # maintenance for last tried times
        # due to this, below we can just test membership in _last_tried_peer
        for peer in list(self._last_tried_peer):
            if now >= self._last_tried_peer[peer] + PEER_RETRY_INTERVAL:
                del self._last_tried_peer[peer]
        # first try from recent peers
        for peer in recent_peers:
            if peer.pubkey in self.peers:
                continue
            if peer in self._last_tried_peer:
                continue
            return [peer]
        # try random peer from graph
        unconnected_nodes = self.channel_db.get_200_randomly_sorted_nodes_not_in(self.peers.keys())
        if unconnected_nodes:
            for node_id in unconnected_nodes:
                addrs = self.channel_db.get_node_addresses(node_id)
                if not addrs:
                    continue
                host, port, timestamp = self.choose_preferred_address(list(addrs))
                peer = LNPeerAddr(host, port, node_id)
                if peer in self._last_tried_peer:
                    continue
                #self.logger.info('taking random ln peer from our channel db')
                return [peer]
        # TODO remove this. For some reason the dns seeds seem to ignore the realm byte
        # and only return mainnet nodes. so for the time being dns seeding is disabled:
        if constants.net in (constants.BitcoinTestnet,):
            return [random.choice(FALLBACK_NODE_LIST_TESTNET)]
        elif constants.net in (constants.BitcoinMainnet,):
            return [random.choice(FALLBACK_NODE_LIST_MAINNET)]
        else:
            return []
        # NOTE: everything below is unreachable while dns seeding is disabled above.
        # try peers from dns seed.
        # return several peers to reduce the number of dns queries.
        if not constants.net.LN_DNS_SEEDS:
            return []
        dns_seed = random.choice(constants.net.LN_DNS_SEEDS)
        self.logger.info('asking dns seed "{}" for ln peers'.format(dns_seed))
        try:
            # note: this might block for several seconds
            # this will include bech32-encoded-pubkeys and ports
            srv_answers = resolve_dns_srv('r{}.{}'.format(
                constants.net.LN_REALM_BYTE, dns_seed))
        except dns.exception.DNSException as e:
            return []
        random.shuffle(srv_answers)
        num_peers = 2 * NUM_PEERS_TARGET
        srv_answers = srv_answers[:num_peers]
        # we now have pubkeys and ports but host is still needed
        peers = []
        for srv_ans in srv_answers:
            try:
                # note: this might block for several seconds
                answers = dns.resolver.query(srv_ans['host'])
            except dns.exception.DNSException:
                continue
            try:
                ln_host = str(answers[0])
                port = int(srv_ans['port'])
                bech32_pubkey = srv_ans['host'].split('.')[0]
                pubkey = get_compressed_pubkey_from_bech32(bech32_pubkey)
                peers.append(LNPeerAddr(ln_host, port, pubkey))
            except Exception as e:
                self.logger.info('error with parsing peer from dns seed: {}'.format(e))
                continue
        self.logger.info('got {} ln peers from dns seed'.format(len(peers)))
        return peers

    @staticmethod
    def choose_preferred_address(addr_list: Sequence[Tuple[str, int, int]]) -> Tuple[str, int, int]:
        """Pick one (host, port, timestamp) triple: first IP address if any, else random."""
        assert len(addr_list) >= 1
        # choose first one that is an IP
        for host, port, timestamp in addr_list:
            if is_ip_address(host):
                return host, port, timestamp
        # otherwise choose one at random
        # TODO maybe filter out onion if not on tor?
        choice = random.choice(addr_list)
        return choice
2019-04-26 12:48:02 +02:00
class LNGossip(LNWorker):
    """Worker that downloads and maintains the public channel graph."""
    max_age = 14*24*3600  # drop policies older than two weeks

    def __init__(self, network):
        # the gossip identity is throwaway: a fresh random key each start
        seed = os.urandom(32)
        node = BIP32Node.from_rootseed(seed, xtype='standard')
        xprv = node.to_xprv()
        super().__init__(xprv)
        self.localfeatures |= LnLocalFeatures.GOSSIP_QUERIES_OPT
        self.localfeatures |= LnLocalFeatures.GOSSIP_QUERIES_REQ
        self.unknown_ids = set()  # channel ids announced but not yet downloaded
        assert is_using_fast_ecc(), "verifying LN gossip msgs without libsecp256k1 is hopeless"

    def start_network(self, network: 'Network'):
        super().start_network(network)
        asyncio.run_coroutine_threadsafe(self.network.main_taskgroup.spawn(self.maintain_db()), self.network.asyncio_loop)

    async def maintain_db(self):
        """Load the channel db, then periodically prune it while idle."""
        await self.channel_db.load_data()
        while True:
            # only prune when there is nothing left to query
            if not self.unknown_ids:
                self.channel_db.prune_old_policies(self.max_age)
                self.channel_db.prune_orphaned_channels()
            await asyncio.sleep(120)

    async def add_new_ids(self, ids):
        """Queue channel ids we have not seen yet and notify the GUI."""
        known = self.channel_db.get_channel_ids()
        fresh = set(ids) - set(known)
        self.unknown_ids.update(fresh)
        self.network.trigger_callback('unknown_channels', len(self.unknown_ids))
        self.network.trigger_callback('gossip_peers', self.num_peers())

    def get_ids_to_query(self):
        """Pop up to 500 pending channel ids for the next gossip query."""
        batch_size = 500
        pending = list(self.unknown_ids)
        self.unknown_ids = set(pending[batch_size:])
        self.network.trigger_callback('unknown_channels', len(self.unknown_ids))
        return pending[:batch_size]

    def peer_closed(self, peer):
        self.peers.pop(peer.pubkey)
2019-05-13 14:30:02 +02:00
2019-04-26 12:48:02 +02:00
class LNWallet ( LNWorker ) :
2019-10-13 20:34:38 +02:00
    def __init__(self, wallet: 'Abstract_Wallet', xprv):
        """Per-wallet lightning state: channels, payments, preimages.

        `xprv` is the wallet's dedicated lightning master key; node and
        channel keys are derived from it.
        """
        Logger.__init__(self)
        self.wallet = wallet
        self.storage = wallet.storage
        self.config = wallet.config
        LNWorker.__init__(self, xprv)
        self.ln_keystore = keystore.from_xprv(xprv)
        self.localfeatures |= LnLocalFeatures.OPTION_DATA_LOSS_PROTECT_REQ
        self.payments = self.storage.get('lightning_payments', {})  # RHASH -> amount, direction, is_paid
        self.preimages = self.storage.get('lightning_preimages', {})  # RHASH -> preimage
        self.sweep_address = wallet.get_receiving_address()
        self.lock = threading.RLock()
        self.logs = defaultdict(list)  # lists keyed per payment; populated outside this chunk -- TODO confirm key type
        # note: accessing channels (besides simple lookup) needs self.lock!
        self.channels = {}  # type: Dict[bytes, Channel]
        for x in wallet.storage.get("channels", []):
            c = Channel(x, sweep_address=self.sweep_address, lnworker=self)
            self.channels[c.channel_id] = c
        # timestamps of opening and closing transactions
        self.channel_timestamps = self.storage.get('lightning_channel_timestamps', {})
        self.pending_payments = defaultdict(asyncio.Future)  # futures resolved when a payment settles/fails -- presumably keyed by payment hash
2019-01-26 17:57:00 +01:00
2019-07-05 14:42:09 +02:00
@ignore_exceptions
@log_exceptions
async def sync_with_local_watchtower ( self ) :
watchtower = self . network . local_watchtower
if watchtower :
while True :
2019-09-19 18:17:03 +02:00
with self . lock :
channels = list ( self . channels . values ( ) )
for chan in channels :
2019-07-29 11:16:17 +02:00
await self . sync_channel_with_watchtower ( chan , watchtower . sweepstore )
2019-07-05 14:42:09 +02:00
await asyncio . sleep ( 5 )
    @ignore_exceptions
    @log_exceptions
    async def sync_with_remote_watchtower(self):
        """Forever push revoked-state sweep txns to a user-configured remote
        watchtower over JSON-RPC; polls every 5s, skips silently when no
        'watchtower_url' is set or the tower is unreachable.
        """
        import aiohttp
        from jsonrpcclient.clients.aiohttp_client import AiohttpClient
        class myAiohttpClient(AiohttpClient):
            # unwrap the jsonrpcclient response wrapper to the bare result
            async def request(self, *args, **kwargs):
                r = await super().request(*args, **kwargs)
                return r.data.result
        while True:
            await asyncio.sleep(5)
            watchtower_url = self.config.get('watchtower_url')
            if not watchtower_url:
                continue
            with self.lock:
                channels = list(self.channels.values())
            try:
                async with make_aiohttp_session(proxy=self.network.proxy) as session:
                    watchtower = myAiohttpClient(session, watchtower_url)
                    for chan in channels:
                        await self.sync_channel_with_watchtower(chan, watchtower)
            except aiohttp.client_exceptions.ClientConnectorError:
                self.logger.info(f'could not contact remote watchtower {watchtower_url}')
2019-07-05 14:42:09 +02:00
2019-08-03 17:34:11 +02:00
async def sync_channel_with_watchtower ( self , chan : Channel , watchtower ) :
2019-07-05 14:42:09 +02:00
outpoint = chan . funding_outpoint . to_str ( )
addr = chan . get_funding_address ( )
2019-08-03 17:34:11 +02:00
current_ctn = chan . get_oldest_unrevoked_ctn ( REMOTE )
2019-07-29 11:16:17 +02:00
watchtower_ctn = await watchtower . get_ctn ( outpoint , addr )
2019-07-05 14:42:09 +02:00
for ctn in range ( watchtower_ctn + 1 , current_ctn ) :
sweeptxs = chan . create_sweeptxs ( ctn )
for tx in sweeptxs :
2019-07-29 11:16:17 +02:00
await watchtower . add_sweep_tx ( outpoint , ctn , tx . prevout ( 0 ) , str ( tx ) )
2019-07-05 14:42:09 +02:00
2019-01-26 17:57:00 +01:00
    def start_network(self, network: 'Network'):
        """Attach to the Network: start the LNWatcher, register callbacks,
        and spawn the long-running lightning coroutines."""
        self.lnwatcher = LNWatcher(network)
        self.lnwatcher.start_network(network)
        self.network = network
        self.network.register_callback(self.on_network_update, ['wallet_updated', 'network_updated', 'verified', 'fee'])  # thread safe
        self.network.register_callback(self.on_channel_open, ['channel_open'])
        self.network.register_callback(self.on_channel_closed, ['channel_closed'])
        # watch all known funding outpoints
        for chan_id, chan in self.channels.items():
            self.lnwatcher.add_channel(chan.funding_outpoint.to_str(), chan.get_funding_address())

        super().start_network(network)
        for coro in [
                self.maybe_listen(),
                self.on_network_update('network_updated'),  # shortcut (don't block) if funding tx locked and verified
                self.lnwatcher.on_network_update('network_updated'),  # ping watcher to check our channels
                self.reestablish_peers_and_channels(),
                self.sync_with_local_watchtower(),
                self.sync_with_remote_watchtower(),
        ]:
            # FIXME: exceptions in those coroutines will cancel network.main_taskgroup
            asyncio.run_coroutine_threadsafe(self.network.main_taskgroup.spawn(coro), self.network.asyncio_loop)
2018-11-27 00:40:55 +01:00
2019-05-13 22:33:56 +02:00
def peer_closed ( self , peer ) :
for chan in self . channels_for_peer ( peer . pubkey ) . values ( ) :
2019-05-19 11:55:55 +02:00
chan . set_state ( ' DISCONNECTED ' )
2019-05-13 22:33:56 +02:00
self . network . trigger_callback ( ' channel ' , chan )
self . peers . pop ( peer . pubkey )
2019-02-26 16:31:09 +01:00
def payment_completed ( self , chan : Channel , direction : Direction ,
2019-02-26 21:28:11 +01:00
htlc : UpdateAddHtlc ) :
2018-11-19 18:09:43 +01:00
chan_id = chan . channel_id
2019-02-26 21:28:11 +01:00
preimage = self . get_preimage ( htlc . payment_hash )
2019-02-26 16:31:09 +01:00
timestamp = int ( time . time ( ) )
2019-02-13 15:46:35 +01:00
self . network . trigger_callback ( ' ln_payment_completed ' , timestamp , direction , htlc , preimage , chan_id )
2019-01-29 19:01:04 +01:00
def get_payments ( self ) :
2019-02-27 11:42:09 +01:00
# return one item per payment_hash
2019-01-29 19:01:04 +01:00
# note: with AMP we will have several channels per payment
2019-02-27 11:42:09 +01:00
out = defaultdict ( list )
2019-09-19 18:17:03 +02:00
with self . lock :
channels = list ( self . channels . values ( ) )
for chan in channels :
2019-02-27 11:42:09 +01:00
d = chan . get_payments ( )
for k , v in d . items ( ) :
out [ k ] . append ( v )
2019-01-29 19:01:04 +01:00
return out
2018-11-07 17:44:49 +01:00
2019-09-20 17:15:49 +02:00
def parse_bech32_invoice ( self , invoice ) :
lnaddr = lndecode ( invoice , expected_hrp = constants . net . SEGWIT_HRP )
amount = int ( lnaddr . amount * COIN ) if lnaddr . amount else None
return {
' type ' : PR_TYPE_LN ,
' invoice ' : invoice ,
' amount ' : amount ,
' message ' : lnaddr . get_description ( ) ,
' time ' : lnaddr . date ,
' exp ' : lnaddr . get_expiry ( ) ,
' pubkey ' : bh2u ( lnaddr . pubkey . serialize ( ) ) ,
' rhash ' : lnaddr . paymenthash . hex ( ) ,
}
2019-08-11 14:47:06 +02:00
def get_unsettled_payments ( self ) :
out = [ ]
for payment_hash , plist in self . get_payments ( ) . items ( ) :
if len ( plist ) != 1 :
continue
chan_id , htlc , _direction , status = plist [ 0 ]
if _direction != SENT :
continue
if status == ' settled ' :
continue
amount = htlc . amount_msat / / 1000
item = {
' is_lightning ' : True ,
' status ' : status ,
' key ' : payment_hash ,
' amount ' : amount ,
' timestamp ' : htlc . timestamp ,
' label ' : self . wallet . get_label ( payment_hash )
}
out . append ( item )
return out
2019-01-31 16:41:43 +01:00
    def get_history(self):
        """Build the lightning history: settled payments, forwardings and
        channel open/close events, sorted by timestamp, with a running
        balance in the 'balance_msat' field of each item."""
        out = []
        for key, plist in self.get_payments().items():
            # only settled HTLCs contribute to history
            plist = list(filter(lambda x: x[3] == 'settled', plist))
            if len(plist) == 0:
                continue
            elif len(plist) == 1:
                chan_id, htlc, _direction, status = plist[0]
                direction = 'sent' if _direction == SENT else 'received'
                # amount is signed by direction (int(_direction) is +/-1 -- TODO confirm)
                amount_msat = int(_direction) * htlc.amount_msat
                timestamp = htlc.timestamp
                label = self.wallet.get_label(key)
                if _direction == SENT:
                    try:
                        inv = self.get_payment_info(bfh(key))
                        # fee = total sent minus the invoiced amount
                        fee_msat = - inv.amount*1000 - amount_msat if inv.amount else None
                    except UnknownPaymentHash:
                        fee_msat = None
                else:
                    fee_msat = None
            else:
                # assume forwarding
                direction = 'forwarding'
                amount_msat = sum([int(_direction) * htlc.amount_msat for chan_id, htlc, _direction, status in plist])
                status = ''
                label = _('Forwarding')
                timestamp = min([htlc.timestamp for chan_id, htlc, _direction, status in plist])
                fee_msat = None # fixme

            item = {
                'type': 'payment',
                'label': label,
                'timestamp': timestamp or 0,
                'date': timestamp_to_datetime(timestamp),
                'direction': direction,
                'status': status,
                'amount_msat': amount_msat,
                'fee_msat': fee_msat,
                'payment_hash': key,
            }
            out.append(item)
        # add funding events
        with self.lock:
            channels = list(self.channels.values())
        for chan in channels:
            item = self.channel_timestamps.get(chan.channel_id.hex())
            if item is None:
                continue
            funding_txid, funding_height, funding_timestamp, closing_txid, closing_height, closing_timestamp = item
            item = {
                'channel_id': bh2u(chan.channel_id),
                'type': 'channel_opening',
                'label': _('Open channel'),
                'txid': funding_txid,
                # balance at ctn 0 == the amount we funded the channel with
                'amount_msat': chan.balance(LOCAL, ctn=0),
                'direction': 'received',
                'timestamp': funding_timestamp,
                'fee_msat': None,
            }
            out.append(item)
            if not chan.is_closed():
                continue
            item = {
                'channel_id': bh2u(chan.channel_id),
                'txid': closing_txid,
                'label': _('Close channel'),
                'type': 'channel_closure',
                'amount_msat': -chan.balance_minus_outgoing_htlcs(LOCAL),
                'direction': 'sent',
                'timestamp': closing_timestamp,
                'fee_msat': None,
            }
            out.append(item)
        # sort by timestamp; items without one sort last
        out.sort(key=lambda x: (x.get('timestamp') or float("inf")))
        balance_msat = 0
        for item in out:
            balance_msat += item['amount_msat']
            item['balance_msat'] = balance_msat
        return out
2018-10-05 15:37:47 +02:00
def get_and_inc_counter_for_channel_keys ( self ) :
with self . lock :
2019-02-13 15:46:35 +01:00
ctr = self . storage . get ( ' lightning_channel_key_der_ctr ' , - 1 )
2018-10-05 15:37:47 +02:00
ctr + = 1
2019-02-13 15:46:35 +01:00
self . storage . put ( ' lightning_channel_key_der_ctr ' , ctr )
self . storage . write ( )
2018-10-05 15:37:47 +02:00
return ctr
2018-07-14 19:39:28 +02:00
def suggest_peer ( self ) :
2019-06-17 08:29:17 +02:00
r = [ ]
2018-07-14 19:39:28 +02:00
for node_id , peer in self . peers . items ( ) :
2019-01-30 17:50:10 +01:00
if not peer . initialized . is_set ( ) :
2018-07-14 19:39:28 +02:00
continue
2019-01-30 19:40:20 +01:00
if not all ( [ chan . is_closed ( ) for chan in peer . channels . values ( ) ] ) :
2018-10-17 11:56:34 +02:00
continue
2019-06-17 08:29:17 +02:00
r . append ( node_id )
return random . choice ( r ) if r else None
2018-07-14 19:39:28 +02:00
2018-05-31 17:16:01 +02:00
def channels_for_peer ( self , node_id ) :
assert type ( node_id ) is bytes
2018-07-30 13:51:03 +02:00
with self . lock :
return { x : y for ( x , y ) in self . channels . items ( ) if y . node_id == node_id }
2018-05-31 17:16:01 +02:00
2019-05-19 13:24:29 +02:00
def save_channel ( self , chan ) :
assert type ( chan ) is Channel
if chan . config [ REMOTE ] . next_per_commitment_point == chan . config [ REMOTE ] . current_per_commitment_point :
2018-06-20 15:46:22 +02:00
raise Exception ( " Tried to save channel with next_point == current_point, this should not happen " )
2018-07-30 13:51:03 +02:00
with self . lock :
2019-05-19 13:24:29 +02:00
self . channels [ chan . channel_id ] = chan
self . save_channels ( )
self . network . trigger_callback ( ' channel ' , chan )
def save_channels ( self ) :
with self . lock :
2018-07-30 13:51:03 +02:00
dumped = [ x . serialize ( ) for x in self . channels . values ( ) ]
2019-02-13 15:46:35 +01:00
self . storage . put ( " channels " , dumped )
self . storage . write ( )
2018-05-28 10:43:50 +02:00
2018-05-29 18:12:48 +02:00
    def save_short_chan_id(self, chan):
        """
        Checks if Funding TX has been mined. If it has, save the short channel ID in chan;
        if it's also deep enough, also save to disk.
        Returns tuple (mined_deep_enough, num_confirmations).
        """
        # NOTE(review): the docstring above promises a return value, but no
        # branch returns anything -- confirm against callers.
        conf = self.lnwatcher.get_tx_height(chan.funding_outpoint.txid).conf
        if conf > 0:
            block_height, tx_pos = self.lnwatcher.get_txpos(chan.funding_outpoint.txid)
            assert tx_pos >= 0
            chan.short_channel_id_predicted = ShortChannelID.from_components(
                block_height, tx_pos, chan.funding_outpoint.output_index)
            # chained comparison: deep enough AND the required depth is positive
            if conf >= chan.constraints.funding_txn_minimum_depth > 0:
                chan.short_channel_id = chan.short_channel_id_predicted
                self.logger.info(f"save_short_channel_id: {chan.short_channel_id}")
                self.save_channel(chan)
                self.on_channels_updated()
            else:
                self.logger.info(f"funding tx is still not at sufficient depth. actual depth: {conf}")
2018-05-29 18:12:48 +02:00
2018-12-04 20:50:24 +01:00
def channel_by_txo ( self , txo ) :
2018-10-08 18:33:23 +02:00
with self . lock :
channels = list ( self . channels . values ( ) )
for chan in channels :
if chan . funding_outpoint . to_str ( ) == txo :
2018-12-04 20:50:24 +01:00
return chan
2019-01-31 16:41:43 +01:00
    def on_channel_open(self, event, funding_outpoint, funding_txid, funding_height):
        """Network callback: the funding tx of one of our channels was seen."""
        chan = self.channel_by_txo(funding_outpoint)
        if not chan:
            return
        #self.logger.debug(f'on_channel_open {funding_outpoint}')
        # record opening tx data; the three closing fields stay None until close
        self.channel_timestamps[bh2u(chan.channel_id)] = funding_txid, funding_height.height, funding_height.timestamp, None, None, None
        self.storage.put('lightning_channel_timestamps', self.channel_timestamps)
        chan.set_funding_txo_spentness(False)
        # send event to GUI
        self.network.trigger_callback('channel', chan)
2018-06-22 12:17:11 +02:00
2018-12-04 20:50:24 +01:00
@log_exceptions
async def on_channel_closed(self, event, funding_outpoint, spenders, funding_txid, funding_height, closing_txid, closing_height, closing_tx):
    """lnwatcher callback: mark the channel closed, persist timestamps,
    and try to sweep every output of the closing tx that is still ours."""
    chan = self.channel_by_txo(funding_outpoint)
    if not chan:
        return
    #self.logger.debug(f'on_channel_closed {funding_outpoint}')
    key = bh2u(chan.channel_id)
    self.channel_timestamps[key] = funding_txid, funding_height.height, funding_height.timestamp, closing_txid, closing_height.height, closing_height.timestamp
    self.storage.put('lightning_channel_timestamps', self.channel_timestamps)
    chan.set_funding_txo_spentness(True)
    chan.set_state('CLOSED')
    self.on_channels_updated()
    self.network.trigger_callback('channel', chan)
    # remove from channel_db
    if chan.short_channel_id is not None:
        self.channel_db.remove_channel(chan.short_channel_id)
    # detect who closed and set sweep_info
    sweep_info_dict = chan.sweep_ctx(closing_tx)
    self.logger.info(f'sweep_info_dict length: {len(sweep_info_dict)}')
    # create and broadcast transaction
    for prevout, sweep_info in sweep_info_dict.items():
        name = sweep_info.name
        spender = spenders.get(prevout)
        if spender is None:
            # output not spent yet: sweep it ourselves
            self.logger.info(f'trying to redeem {name}: {prevout}')
            await self.try_redeem(prevout, sweep_info)
            continue
        raw_tx = await self.network.get_transaction(spender)
        spender_tx = Transaction(raw_tx)
        e_htlc_tx = chan.sweep_htlc(closing_tx, spender_tx)
        if not e_htlc_tx:
            self.logger.info(f'outpoint already spent {name}: {prevout}')
            continue
        # second-stage HTLC tx: redeem its output unless that was swept too
        if spenders.get(spender_tx.outputs()[0]):
            self.logger.info(f'htlc is already spent {name}: {prevout}')
        else:
            self.logger.info(f'trying to redeem htlc {name}: {prevout}')
            await self.try_redeem(spender + ':0', e_htlc_tx)
2019-06-24 11:13:18 +02:00
@log_exceptions
async def try_redeem(self, prevout: str, sweep_info: 'SweepInfo') -> None:
    """Generate the sweep tx for `prevout` and broadcast it, or queue it as a
    future (local) tx if it is still time-locked by CLTV/CSV.

    Fixes: previously, when gen_tx() returned None (dust) the code logged but
    then fell through and crashed on `tx.txid()`; also `remaining` could be
    referenced unbound if neither time-lock branch ran.
    """
    name = sweep_info.name
    prev_txid, prev_index = prevout.split(':')
    broadcast = True
    remaining = 0  # blocks left until the tx becomes valid
    if sweep_info.cltv_expiry:
        local_height = self.network.get_local_height()
        remaining = sweep_info.cltv_expiry - local_height
        if remaining > 0:
            self.logger.info('waiting for {}: CLTV ({} > {}), prevout {}'
                             .format(name, local_height, sweep_info.cltv_expiry, prevout))
            broadcast = False
    if sweep_info.csv_delay:
        prev_height = self.lnwatcher.get_tx_height(prev_txid)
        remaining = sweep_info.csv_delay - prev_height.conf
        if remaining > 0:
            self.logger.info('waiting for {}: CSV ({} >= {}), prevout: {}'
                             .format(name, prev_height.conf, sweep_info.csv_delay, prevout))
            broadcast = False
    tx = sweep_info.gen_tx()
    if tx is None:
        self.logger.info(f'{name} could not claim output: {prevout}, dust')
        return  # fix: was falling through to tx.txid() and raising AttributeError
    self.wallet.set_label(tx.txid(), name)
    if broadcast:
        try:
            await self.network.broadcast_transaction(tx)
        except Exception as e:
            self.logger.info(f'could NOT publish {name} for prevout: {prevout}, {str(e)}')
        else:
            self.logger.info(f'success: broadcasting {name} for prevout: {prevout}')
    else:
        # it's OK to add local transaction, the fee will be recomputed
        try:
            self.wallet.add_future_tx(tx, remaining)
            self.logger.info(f'adding future tx: {name}. prevout: {prevout}')
        except Exception as e:
            self.logger.info(f'could not add future tx: {name}. prevout: {prevout} {str(e)}')
2018-12-04 20:50:24 +01:00
2019-08-14 21:38:02 +02:00
def should_channel_be_closed_due_to_expiring_htlcs(self, chan: Channel) -> bool:
    """Return True if expiring/expired HTLCs on `chan` are worth force-closing over."""
    local_height = self.network.get_local_height()
    htlcs_we_could_reclaim = {}  # type: Dict[Tuple[Direction, int], UpdateAddHtlc]
    # If there is a received HTLC for which we already released the preimage
    # but the remote did not revoke yet, and the CLTV of this HTLC is dangerously close
    # to the present, then unilaterally close channel
    recv_htlc_deadline = lnutil.NBLOCK_DEADLINE_BEFORE_EXPIRY_FOR_RECEIVED_HTLCS
    for sub, direction, ctn in ((LOCAL, RECEIVED, chan.get_latest_ctn(LOCAL)),
                                (REMOTE, SENT, chan.get_oldest_unrevoked_ctn(LOCAL)),
                                (REMOTE, SENT, chan.get_latest_ctn(LOCAL)),):
        for htlc_id, htlc in chan.hm.htlcs_by_direction(subject=sub, direction=direction, ctn=ctn).items():
            if not chan.hm.was_htlc_preimage_released(htlc_id=htlc_id, htlc_sender=REMOTE):
                continue
            if htlc.cltv_expiry - recv_htlc_deadline > local_height:
                continue
            htlcs_we_could_reclaim[(RECEIVED, htlc_id)] = htlc
    # If there is an offered HTLC which has already expired (+ some grace period after), we
    # will unilaterally close the channel and time out the HTLC
    offered_htlc_deadline = lnutil.NBLOCK_DEADLINE_AFTER_EXPIRY_FOR_OFFERED_HTLCS
    for sub, direction, ctn in ((LOCAL, SENT, chan.get_latest_ctn(LOCAL)),
                                (REMOTE, RECEIVED, chan.get_oldest_unrevoked_ctn(LOCAL)),
                                (REMOTE, RECEIVED, chan.get_latest_ctn(LOCAL)),):
        for htlc_id, htlc in chan.hm.htlcs_by_direction(subject=sub, direction=direction, ctn=ctn).items():
            if htlc.cltv_expiry + offered_htlc_deadline > local_height:
                continue
            htlcs_we_could_reclaim[(SENT, htlc_id)] = htlc
    total_value_sat = sum(htlc.amount_msat // 1000 for htlc in htlcs_we_could_reclaim.values())
    num_htlcs = len(htlcs_we_could_reclaim)
    min_value_worth_closing_channel_over_sat = max(num_htlcs * 10 * chan.config[REMOTE].dust_limit_sat,
                                                   500_000)
    return total_value_sat > min_value_worth_closing_channel_over_sat
2018-12-04 20:50:24 +01:00
2019-08-30 09:54:32 +02:00
@ignore_exceptions
@log_exceptions
async def on_network_update(self, event, *args):
    """Network callback: advance the state machine of each open channel."""
    # TODO
    # Race discovered in save_channel (assertion failing):
    # since short_channel_id could be changed while saving.
    with self.lock:
        channels = list(self.channels.values())
    if event in ('verified', 'wallet_updated'):
        # only react to events coming from our own lnwatcher
        if args[0] != self.lnwatcher:
            return
    for chan in channels:
        if chan.is_closed():
            continue
        if chan.get_state() != 'CLOSED' and self.should_channel_be_closed_due_to_expiring_htlcs(chan):
            self.logger.info(f"force-closing due to expiring htlcs")
            await self.force_close_channel(chan.channel_id)
            continue
        if chan.short_channel_id is None:
            self.save_short_chan_id(chan)
        if chan.get_state() == "OPENING" and chan.short_channel_id:
            peer = self.peers[chan.node_id]
            peer.send_funding_locked(chan)
        elif chan.get_state() == "OPEN":
            peer = self.peers.get(chan.node_id)
            if peer is None:
                self.logger.info("peer not found for {}".format(bh2u(chan.node_id)))
                # NOTE(review): this aborts processing of the remaining
                # channels as well — possibly `continue` was intended; confirm.
                return
            if event == 'fee':
                await peer.bitcoin_fee_update(chan)
            conf = self.lnwatcher.get_tx_height(chan.funding_outpoint.txid).conf
            peer.on_network_update(chan, conf)
        elif chan.force_closed and chan.get_state() != 'CLOSED':
            txid = chan.force_close_tx().txid()
            height = self.lnwatcher.get_tx_height(txid).height
            self.logger.info(f"force closing tx {txid}, height {height}")
            if height == TX_HEIGHT_LOCAL:
                self.logger.info('REBROADCASTING CLOSING TX')
                await self.force_close_channel(chan.channel_id)
2018-05-28 11:55:20 +02:00
2019-08-15 13:17:16 +02:00
@log_exceptions
async def _open_channel_coroutine(self, connect_str, local_amount_sat, push_sat, password):
    """Connect to the peer, run the funding flow, register and return the new channel."""
    peer = await self.add_peer(connect_str)
    # peer might just have been connected to
    await asyncio.wait_for(peer.initialized.wait(), LN_P2P_NETWORK_TIMEOUT)
    chan = await peer.channel_establishment_flow(
        password,
        funding_sat=local_amount_sat + push_sat,
        push_msat=push_sat * 1000,
        temp_channel_id=os.urandom(32))
    self.save_channel(chan)
    # watch the funding outpoint for spends
    self.lnwatcher.add_channel(chan.funding_outpoint.to_str(), chan.get_funding_address())
    self.on_channels_updated()
    return chan
2018-05-31 12:38:02 +02:00
def on_channels_updated(self):
    """Notify listeners (GUI) that our channel list changed."""
    self.network.trigger_callback('channels_updated', self.wallet)
2018-05-28 11:55:20 +02:00
2019-08-15 13:17:16 +02:00
@log_exceptions
async def add_peer(self, connect_str: str) -> Peer:
    """Return the Peer for `connect_str`, connecting to it first if needed."""
    node_id, rest = extract_nodeid(connect_str)
    peer = self.peers.get(node_id)
    if peer:
        return peer
    if rest is not None:
        host, port = split_host_port(rest)
    else:
        # no host given in the connect string: consult the channel db
        addrs = self.channel_db.get_node_addresses(node_id)
        if not addrs:
            raise ConnStringFormatError(_('Don\'t know any addresses for node:') + ' ' + bh2u(node_id))
        host, port, timestamp = self.choose_preferred_address(addrs)
    try:
        socket.getaddrinfo(host, int(port))
    except socket.gaierror:
        raise ConnStringFormatError(_('Hostname does not resolve (getaddrinfo failed)'))
    # add peer
    peer = await self._add_peer(host, port, node_id)
    return peer
def open_channel(self, connect_str, local_amt_sat, push_amt_sat, password=None, timeout=20):
    """Blocking wrapper around _open_channel_coroutine; may be called from other threads."""
    coro = self._open_channel_coroutine(connect_str, local_amt_sat, push_amt_sat, password)
    fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
    try:
        return fut.result(timeout=timeout)
    except concurrent.futures.TimeoutError:
        raise Exception(_("open_channel timed out"))
2018-05-28 18:22:45 +02:00
2019-08-11 14:47:06 +02:00
def pay(self, invoice, amount_sat=None, attempts=1):
    """Pay a lightning invoice; blocking.

    Can be called from other threads.
    Returns whether the payment succeeded (fix: the result was previously
    assigned to a local and never returned).
    """
    coro = self._pay(invoice, amount_sat, attempts)
    fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
    return fut.result()
2019-05-30 21:46:30 +02:00
2019-09-06 18:09:05 +02:00
def get_channel_by_short_id(self, short_channel_id: ShortChannelID) -> Optional[Channel]:
    """Return our channel with the given short channel id, or None.

    Fix: return annotation was `Channel` although the method can return None.
    """
    with self.lock:
        for chan in self.channels.values():
            if chan.short_channel_id == short_channel_id:
                return chan
    return None
2019-08-15 13:17:16 +02:00
@log_exceptions
async def _pay(self, invoice, amount_sat=None, attempts=1):
    """Attempt to pay `invoice`, retrying over different routes up to
    `attempts` times. Returns whether the payment succeeded.

    Fix: `success` was unbound (NameError at the final trigger_callback)
    when attempts == 0; it is now initialized before the loop.
    """
    lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
    payment_hash = lnaddr.paymenthash
    key = payment_hash.hex()
    amount = int(lnaddr.amount * COIN) if lnaddr.amount else None
    status = self.get_payment_status(payment_hash)
    if status == PR_PAID:
        raise PaymentFailure(_("This invoice has been paid already"))
    if status == PR_INFLIGHT:
        raise PaymentFailure(_("A payment was already initiated for this invoice"))
    info = PaymentInfo(lnaddr.paymenthash, amount, SENT, PR_UNPAID)
    self.save_payment_info(info)
    self._check_invoice(invoice, amount_sat)
    self.wallet.set_label(key, lnaddr.get_description())
    log = self.logs[key]
    success = False  # fix: previously unbound if the loop body never ran
    for i in range(attempts):
        try:
            route = await self._create_route_from_invoice(decoded_invoice=lnaddr)
        except NoPathFound:
            success = False
            break
        self.network.trigger_callback('invoice_status', key, PR_INFLIGHT)
        success, preimage, failure_log = await self._pay_to_route(route, lnaddr)
        if success:
            log.append((route, True, preimage))
            break
        else:
            log.append((route, False, failure_log))
    self.network.trigger_callback('invoice_status', key, PR_PAID if success else PR_FAILED)
    return success
2018-11-02 19:16:42 +01:00
2019-10-04 18:06:53 +02:00
async def _pay_to_route(self, route, lnaddr):
    """Send the HTLC along `route` and wait for it to settle or fail.

    Returns (success, preimage, failure_log)."""
    short_channel_id = route[0].short_channel_id
    chan = self.get_channel_by_short_id(short_channel_id)
    if not chan:
        # our first hop is not one of our channels: make pathfinding forget it
        self.channel_db.remove_channel(short_channel_id)
        raise Exception(f"PathFinder returned path with short_channel_id "
                        f"{short_channel_id} that is not in channel list")
    self.set_payment_status(lnaddr.paymenthash, PR_INFLIGHT)
    peer = self.peers[route[0].node_id]
    htlc = await peer.pay(route, chan, int(lnaddr.amount * COIN * 1000), lnaddr.paymenthash, lnaddr.get_min_final_cltv_expiry())
    self.network.trigger_callback('htlc_added', htlc, lnaddr, SENT)
    success, preimage, reason = await self.await_payment(lnaddr.paymenthash)
    if success:
        return success, preimage, None
    # decode the onion error and decide whether to blacklist a channel
    failure_msg, sender_idx = chan.decode_onion_error(reason, route, htlc.htlc_id)
    blacklist = self.handle_error_code_from_failed_htlc(failure_msg, sender_idx, route, peer)
    if blacklist:
        # blacklist channel after reporter node
        # TODO this should depend on the error (even more granularity)
        # also, we need finer blacklisting (directed edges; nodes)
        try:
            short_chan_id = route[sender_idx + 1].short_channel_id
        except IndexError:
            self.logger.info("payment destination reported error")
        else:
            self.network.path_finder.add_to_blacklist(short_chan_id)
    failure_log = (sender_idx, failure_msg, blacklist)
    return success, preimage, failure_log
2018-05-28 10:43:50 +02:00
2019-10-12 18:36:25 +02:00
def handle_error_code_from_failed_htlc(self, failure_msg, sender_idx, route, peer):
    """Process the channel_update embedded in an onion failure message and
    apply it to our channel db. Returns whether the reporting channel
    should be blacklisted.

    Fixes: unknown failure codes previously fell through to `return blacklist`
    with `blacklist` unbound (NameError); they now blacklist the channel.
    The bare `except:` is narrowed to `except Exception:`.
    """
    code, data = failure_msg.code, failure_msg.data
    self.logger.info(f"UPDATE_FAIL_HTLC {repr(code)} {data}")
    self.logger.info(f"error reported by {bh2u(route[sender_idx].node_id)}")
    # byte offset of the embedded channel_update, per failure code
    failure_codes = {
        OnionFailureCode.TEMPORARY_CHANNEL_FAILURE: 0,
        OnionFailureCode.AMOUNT_BELOW_MINIMUM: 8,
        OnionFailureCode.FEE_INSUFFICIENT: 8,
        OnionFailureCode.INCORRECT_CLTV_EXPIRY: 4,
        OnionFailureCode.EXPIRY_TOO_SOON: 0,
        OnionFailureCode.CHANNEL_DISABLED: 2,
    }
    if code not in failure_codes:
        # fix: no channel_update to learn from — blacklist instead of NameError
        return True
    offset = failure_codes[code]
    channel_update_len = int.from_bytes(data[offset:offset+2], byteorder="big")
    channel_update_as_received = data[offset+2: offset+2+channel_update_len]
    channel_update_typed = (258).to_bytes(length=2, byteorder="big") + channel_update_as_received
    # note: some nodes put channel updates in error msgs with the leading msg_type already there.
    # we try decoding both ways here.
    try:
        message_type, payload = decode_msg(channel_update_typed)
        payload['raw'] = channel_update_typed
    except Exception:  # FIXME: too broad
        message_type, payload = decode_msg(channel_update_as_received)
        payload['raw'] = channel_update_as_received
    categorized_chan_upds = self.channel_db.add_channel_updates([payload])
    blacklist = False
    if categorized_chan_upds.good:
        self.logger.info("applied channel update on our db")
        peer.maybe_save_remote_update(payload)
    elif categorized_chan_upds.orphaned:
        # maybe it is a private channel (and data in invoice was outdated)
        self.logger.info("maybe channel update is for private channel?")
        start_node_id = route[sender_idx].node_id
        self.channel_db.add_channel_update_for_private_channel(payload, start_node_id)
    elif categorized_chan_upds.expired:
        blacklist = True
    elif categorized_chan_upds.deprecated:
        self.logger.info(f'channel update is not more recent.')
        blacklist = True
    else:
        blacklist = True
    return blacklist
2019-10-12 18:22:19 +02:00
2018-11-02 19:16:42 +01:00
@staticmethod
def _check_invoice(invoice, amount_sat=None):
    """Decode `invoice`, validate it (expiry, amount, CLTV), and return the LnAddr.

    If `amount_sat` is given, it overrides the amount encoded in the invoice.
    """
    addr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
    if addr.is_expired():
        raise InvoiceError(_("This invoice has expired"))
    if amount_sat:
        addr.amount = Decimal(amount_sat) / COIN
    if addr.amount is None:
        raise InvoiceError(_("Missing amount"))
    # cap how long we can be asked to lock funds in an HTLC (~60 days)
    if addr.get_min_final_cltv_expiry() > 60 * 144:
        raise InvoiceError("{}\n{}".format(
            _("Invoice wants us to risk locking funds for unreasonably long."),
            f"min_final_cltv_expiry: {addr.get_min_final_cltv_expiry()}"))
    return addr
2019-02-01 20:59:59 +01:00
async def _create_route_from_invoice(self, decoded_invoice) -> List[RouteEdge]:
    """Build a payment route for the invoice, preferring its 'r' routing hints;
    fall back to plain pathfinding. Raises NoPathFound on failure."""
    amount_msat = int(decoded_invoice.amount * COIN * 1000)
    invoice_pubkey = decoded_invoice.pubkey.serialize()
    route = None  # type: Optional[List[RouteEdge]]
    # collect the 'r' tags (private routing hints), stripping the tag type
    r_tags = [tag[1] for tag in decoded_invoice.tags if tag[0] == 'r']
    # if there are multiple hints, we will use the first one that works,
    # from a random permutation
    random.shuffle(r_tags)
    with self.lock:
        channels = list(self.channels.values())
    for private_route in r_tags:
        if not private_route:
            continue
        if len(private_route) > NUM_MAX_EDGES_IN_PAYMENT_PATH:
            continue
        border_node_pubkey = private_route[0][0]
        path = self.network.path_finder.find_path_for_payment(self.node_keypair.pubkey, border_node_pubkey, amount_msat, channels)
        if not path:
            continue
        route = self.network.path_finder.create_route_from_path(path, self.node_keypair.pubkey)
        # we need to shift the node pubkey by one towards the destination:
        private_route_nodes = [edge[0] for edge in private_route][1:] + [invoice_pubkey]
        private_route_rest = [edge[1:] for edge in private_route]
        prev_node_id = border_node_pubkey
        for node_pubkey, edge_rest in zip(private_route_nodes, private_route_rest):
            short_channel_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta = edge_rest
            short_channel_id = ShortChannelID(short_channel_id)
            # if we have a routing policy for this edge in the db, that takes precedence,
            # as it is likely from a previous failure
            channel_policy = self.channel_db.get_routing_policy_for_channel(prev_node_id, short_channel_id)
            if channel_policy:
                fee_base_msat = channel_policy.fee_base_msat
                fee_proportional_millionths = channel_policy.fee_proportional_millionths
                cltv_expiry_delta = channel_policy.cltv_expiry_delta
            route.append(RouteEdge(node_pubkey, short_channel_id, fee_base_msat, fee_proportional_millionths,
                                   cltv_expiry_delta))
            prev_node_id = node_pubkey
        # test sanity
        if not is_route_sane_to_use(route, amount_msat, decoded_invoice.get_min_final_cltv_expiry()):
            self.logger.info(f"rejecting insane route {route}")
            route = None
            continue
        break
    # if could not find route using any hint; try without hint now
    if route is None:
        path = self.network.path_finder.find_path_for_payment(self.node_keypair.pubkey, invoice_pubkey, amount_msat, channels)
        if not path:
            raise NoPathFound()
        route = self.network.path_finder.create_route_from_path(path, self.node_keypair.pubkey)
        if not is_route_sane_to_use(route, amount_msat, decoded_invoice.get_min_final_cltv_expiry()):
            self.logger.info(f"rejecting insane route {route}")
            raise NoPathFound()
    return route
2019-09-20 17:15:49 +02:00
def add_request(self, amount_sat, message, expiry):
    """Create a lightning payment request; blocking wrapper for other threads."""
    coro = self._add_request_coro(amount_sat, message, expiry)
    fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
    try:
        return fut.result(timeout=5)
    except concurrent.futures.TimeoutError:
        raise Exception(_("add invoice timed out"))
2019-06-18 13:49:31 +02:00
2019-08-15 13:17:16 +02:00
@log_exceptions
async def _add_request_coro(self, amount_sat, message, expiry):
    """Create a BOLT-11 invoice, persist its preimage and payment info,
    register the request with the wallet, and return the rhash (hex) key."""
    timestamp = int(time.time())
    routing_hints = await self._calc_routing_hints_for_invoice(amount_sat)
    if not routing_hints:
        self.logger.info("Warning. No routing hints added to invoice. "
                         "Other clients will likely not be able to send to us.")
    payment_preimage = os.urandom(32)
    payment_hash = sha256(payment_preimage)
    info = PaymentInfo(payment_hash, amount_sat, RECEIVED, PR_UNPAID)
    amount_btc = amount_sat / Decimal(COIN) if amount_sat else None
    lnaddr = LnAddr(payment_hash, amount_btc,
                    tags=[('d', message),
                          ('c', MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE),
                          ('x', expiry)]
                         + routing_hints,
                    date=timestamp)
    invoice = lnencode(lnaddr, self.node_keypair.privkey)
    key = bh2u(lnaddr.paymenthash)
    req = {
        'type': PR_TYPE_LN,
        'amount': amount_sat,
        'time': lnaddr.date,
        'exp': expiry,
        'message': message,
        'rhash': key,
        'invoice': invoice
    }
    self.save_preimage(payment_hash, payment_preimage)
    self.save_payment_info(info)
    self.wallet.add_payment_request(req)
    self.wallet.set_label(key, message)
    return key
2019-02-13 15:46:35 +01:00
2019-03-07 17:51:35 +01:00
def save_preimage(self, payment_hash: bytes, preimage: bytes):
    """Persist `preimage` keyed by its payment hash (both stored as hex)."""
    assert sha256(preimage) == payment_hash
    key = bh2u(payment_hash)
    self.preimages[key] = bh2u(preimage)
    self.storage.put('lightning_preimages', self.preimages)
    self.storage.write()
2019-03-07 17:51:35 +01:00
def get_preimage(self, payment_hash: bytes) -> bytes:
    """Return the stored preimage for `payment_hash` (hex-keyed on-disk map)."""
    preimage_hex = self.preimages.get(bh2u(payment_hash))
    return bfh(preimage_hex)
2019-01-29 19:01:04 +01:00
2019-10-09 20:16:11 +02:00
def get_payment_info(self, payment_hash: bytes) -> 'PaymentInfo':
    """Return the stored PaymentInfo for `payment_hash`.

    Raises UnknownPaymentHash if we have no record of it.
    Fix: the return annotation said `bytes`, but a PaymentInfo is returned.
    """
    key = payment_hash.hex()
    with self.lock:
        if key not in self.payments:
            raise UnknownPaymentHash(payment_hash)
        amount, direction, status = self.payments[key]
        return PaymentInfo(payment_hash, amount, direction, status)
2019-08-11 14:47:06 +02:00
2019-10-09 20:16:11 +02:00
def save_payment_info(self, info):
    """Persist a PaymentInfo record to storage, keyed by the payment hash (hex).

    The in-memory dict is mutated under the lock because the asyncio thread
    and the GUI thread both touch `self.payments`.
    """
    key = info.payment_hash.hex()
    assert info.status in [PR_PAID, PR_UNPAID, PR_INFLIGHT]
    with self.lock:
        self.payments[key] = info.amount, info.direction, info.status
    self.storage.put('lightning_payments', self.payments)
    self.storage.write()
2018-05-28 09:39:05 +02:00
2019-10-09 20:16:11 +02:00
def get_payment_status(self, payment_hash):
    """Return the PR_* status for `payment_hash`; unknown hashes count as unpaid."""
    try:
        return self.get_payment_info(payment_hash).status
    except UnknownPaymentHash:
        return PR_UNPAID
async def await_payment(self, payment_hash):
    """Wait for the in-flight payment to resolve; returns (success, preimage, reason)
    and removes the pending future afterwards."""
    result = await self.pending_payments[payment_hash]
    self.pending_payments.pop(payment_hash)
    return result
2018-10-18 13:17:29 +02:00
2019-10-09 20:16:11 +02:00
def set_payment_status(self, payment_hash: bytes, status):
    """Update and persist the status of a known payment; silently ignore
    unknown hashes (e.g. HTLCs we are merely forwarding)."""
    try:
        info = self.get_payment_info(payment_hash)
    except UnknownPaymentHash:
        # if we are forwarding
        return
    self.save_payment_info(info._replace(status=status))
2019-10-11 10:11:41 +02:00
2019-10-11 13:37:54 +02:00
def payment_failed(self, payment_hash: bytes, reason):
    """Mark the payment unpaid again and wake its waiter with the failure reason."""
    self.set_payment_status(payment_hash, PR_UNPAID)
    self.pending_payments[payment_hash].set_result((False, None, reason))
def payment_sent(self, payment_hash: bytes):
    """Mark an outgoing payment as paid and hand the preimage to its waiter."""
    self.set_payment_status(payment_hash, PR_PAID)
    preimage = self.get_preimage(payment_hash)
    self.pending_payments[payment_hash].set_result((True, preimage, None))
2019-10-11 10:11:41 +02:00
def payment_received(self, payment_hash: bytes):
    """Mark an incoming payment as paid and notify the GUI."""
    self.set_payment_status(payment_hash, PR_PAID)
    self.network.trigger_callback('request_status', payment_hash.hex(), PR_PAID)
2019-06-10 14:05:02 +02:00
2019-06-18 13:49:31 +02:00
async def _calc_routing_hints_for_invoice(self, amount_sat):
    """calculate routing hints (BOLT-11 'r' field)"""
    routing_hints = []
    with self.lock:
        channels = list(self.channels.values())
    # note: currently we add *all* our channels; but this might be a privacy leak?
    for chan in channels:
        # only consider open channels
        if chan.get_state() != "OPEN":
            continue
        # check channel has sufficient balance
        # FIXME because of on-chain fees of ctx, this check is insufficient
        if amount_sat and chan.balance(REMOTE) // 1000 < amount_sat:
            continue
        chan_id = chan.short_channel_id
        assert isinstance(chan_id, bytes), chan_id
        channel_info = self.channel_db.get_channel_info(chan_id)
        # note: as a fallback, if we don't have a channel update for the
        # incoming direction of our private channel, we fill the invoice with garbage.
        # the sender should still be able to pay us, but will incur an extra round trip
        # (they will get the channel update from the onion error)
        # at least, that's the theory. https://github.com/lightningnetwork/lnd/issues/2066
        fee_base_msat = fee_proportional_millionths = 0
        cltv_expiry_delta = 1  # lnd won't even try with zero
        missing_info = True
        if channel_info:
            policy = self.channel_db.get_policy_for_node(channel_info.short_channel_id, chan.node_id)
            if policy:
                fee_base_msat = policy.fee_base_msat
                fee_proportional_millionths = policy.fee_proportional_millionths
                cltv_expiry_delta = policy.cltv_expiry_delta
                missing_info = False
        if missing_info:
            self.logger.info(f"Warning. Missing channel update for our channel {chan_id}; "
                             f"filling invoice with incorrect data.")
        routing_hints.append(('r', [(chan.node_id,
                                     chan_id,
                                     fee_base_msat,
                                     fee_proportional_millionths,
                                     cltv_expiry_delta)]))
    return routing_hints
2019-10-09 20:16:11 +02:00
def delete_payment(self, payment_hash_hex: str):
    """Remove a payment from self.payments and persist the change.

    Silently does nothing if the payment hash is unknown.  The lock is
    held while mutating self.payments because other threads (e.g. the
    GUI) iterate over it concurrently.
    """
    with self.lock:
        try:
            self.payments.pop(payment_hash_hex)
        except KeyError:
            return
    self.storage.put('lightning_payments', self.payments)
    self.storage.write()
2018-06-27 21:54:57 +02:00
2019-01-30 19:16:04 +01:00
def get_balance(self):
    """Return the sum of local balances of all non-closed channels, in sat (Decimal)."""
    with self.lock:
        total_msat = sum(
            0 if chan.is_closed() else chan.balance(LOCAL)
            for chan in self.channels.values()
        )
        return Decimal(total_msat) / 1000
2019-01-30 19:16:04 +01:00
2018-05-30 13:42:25 +02:00
def list_channels(self):
    """Yield one JSON-serializable status dict per channel."""
    with self.lock:
        # we output the funding_outpoint instead of the channel_id because
        # lnd uses channel_point (funding outpoint) to identify channels
        for channel_id, chan in self.channels.items():
            short_id = chan.short_channel_id
            yield {
                'local_htlcs': json.loads(encoder.encode(chan.hm.log[LOCAL])),
                'remote_htlcs': json.loads(encoder.encode(chan.hm.log[REMOTE])),
                'channel_id': format_short_channel_id(short_id) if short_id else None,
                'full_channel_id': bh2u(chan.channel_id),
                'channel_point': chan.funding_outpoint.to_str(),
                'state': chan.get_state(),
                'remote_pubkey': bh2u(chan.node_id),
                'local_balance': chan.balance(LOCAL) // 1000,
                'remote_balance': chan.balance(REMOTE) // 1000,
            }
2018-06-20 15:46:22 +02:00
2018-10-02 21:13:17 +02:00
async def close_channel(self, chan_id):
    """Cooperatively close the given channel via its peer; returns the peer's result."""
    chan = self.channels[chan_id]
    return await self.peers[chan.node_id].close_channel(chan_id)
2018-10-24 17:36:07 +02:00
async def force_close_channel(self, chan_id):
    """Unilaterally close a channel by broadcasting its force-close tx.

    The channel is marked force-closed and persisted *before* the
    broadcast attempt.  Returns the txid on success, None if the
    broadcast failed.
    """
    chan = self.channels[chan_id]
    tx = chan.force_close_tx()
    chan.set_force_closed()
    self.save_channel(chan)
    self.on_channels_updated()
    try:
        await self.network.broadcast_transaction(tx)
    except Exception as e:
        self.logger.info(f'could NOT publish {tx.txid()}, {str(e)}')
        return
    return tx.txid()
2018-07-13 17:05:04 +02:00
2019-05-19 13:24:29 +02:00
def remove_channel(self, chan_id):
    """Forget a closed channel, persist, and notify the GUI.

    TODO: assert that closing tx is deep-mined and htlcs are swept
    """
    chan = self.channels[chan_id]
    assert chan.is_closed()
    with self.lock:
        del self.channels[chan_id]
    self.save_channels()
    for event in ('channels_updated', 'wallet_updated'):
        self.network.trigger_callback(event, self.wallet)
2019-05-19 13:24:29 +02:00
2019-06-08 17:46:53 +02:00
async def reestablish_peer_for_given_channel(self, chan):
    """Try to (re)connect to the remote node of the given channel.

    Prefers the last address that worked; otherwise picks a random
    known address for the node.  An address is skipped if it was
    tried less than PEER_RETRY_INTERVAL_FOR_CHANNELS seconds ago.
    """
    now = time.time()
    # try last good address first
    last_good = self.channel_db.get_last_good_address(chan.node_id)
    if last_good:
        if self._last_tried_peer.get(last_good, 0) + PEER_RETRY_INTERVAL_FOR_CHANNELS < now:
            await self._add_peer(last_good.host, last_good.port, last_good.pubkey)
            return
    # fall back to a random address announced for this node_id
    addresses = self.channel_db.get_node_addresses(chan.node_id)
    if not addresses:
        return
    host, port, _timestamp = random.choice(list(addresses))
    candidate = LNPeerAddr(host, port, chan.node_id)
    if self._last_tried_peer.get(candidate, 0) + PEER_RETRY_INTERVAL_FOR_CHANNELS < now:
        await self._add_peer(host, port, chan.node_id)
2018-07-30 13:51:03 +02:00
2019-06-08 17:46:53 +02:00
async def reestablish_peers_and_channels(self):
    """Main maintenance loop: reconnect peers and reestablish channels.

    Runs forever, ticking once per second.  Does nothing until both the
    wallet and the lnwatcher are synchronized with the chain.  Outside
    of regtest it also warns when a channel's feerate has fallen well
    below the current recommendation.
    """
    while True:
        await asyncio.sleep(1)
        # wait until on-chain state is synchronized
        if not (self.wallet.is_up_to_date() and self.lnwatcher.is_up_to_date()):
            continue
        with self.lock:
            chans = list(self.channels.values())
        for chan in chans:
            if chan.is_closed():
                continue
            if constants.net is not constants.BitcoinRegtest:
                chan_feerate = chan.get_latest_feerate(LOCAL)
                # warn if channel feerate dropped below half of the recommended one
                if chan_feerate / self.current_feerate_per_kw() < 0.5:
                    self.logger.warning(f"fee level for channel {bh2u(chan.channel_id)} is {chan_feerate} sat/kiloweight, "
                                        f"current recommended feerate is {self.current_feerate_per_kw()} sat/kiloweight, consider force closing!")
            if not chan.should_try_to_reestablish_peer():
                continue
            peer = self.peers.get(chan.node_id, None)
            if peer:
                await peer.group.spawn(peer.reestablish_channel(chan))
            else:
                await self.network.main_taskgroup.spawn(
                    self.reestablish_peer_for_given_channel(chan))
2018-07-30 13:51:03 +02:00
2019-01-25 20:16:11 +01:00
def current_feerate_per_kw(self):
    """Return the feerate (in sat per kiloweight) to use for commitment txs.

    Uses the network's ETA-based fee estimate, falling back to a static
    feerate when no estimate is available; regtest uses a hardcoded value.
    """
    from .simple_config import FEE_LN_ETA_TARGET, FEERATE_FALLBACK_STATIC_FEE, FEERATE_REGTEST_HARDCODED
    if constants.net is constants.BitcoinRegtest:
        return FEERATE_REGTEST_HARDCODED // 4
    per_kvbyte = self.network.config.eta_target_to_fee(FEE_LN_ETA_TARGET)
    if per_kvbyte is None:
        per_kvbyte = FEERATE_FALLBACK_STATIC_FEE
    # kvbyte -> kw is //4; floor at 253 sat/kw (presumably the minimum
    # relay feerate expressed per kw — confirm against simple_config)
    return max(253, per_kvbyte // 4)