2018-10-25 19:34:31 +02:00
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
2018-04-16 10:24:03 +02:00
import asyncio
2018-04-30 23:34:33 +02:00
import os
2018-04-23 15:11:56 +02:00
from decimal import Decimal
2018-07-13 17:05:04 +02:00
import random
2018-07-27 20:59:04 +02:00
import time
2018-10-22 15:35:57 +02:00
from typing import Optional , Sequence , Tuple , List , Dict , TYPE_CHECKING
2018-07-30 13:51:03 +02:00
import threading
2018-09-27 16:43:33 +02:00
import socket
2018-10-25 00:22:42 +02:00
import json
2019-01-31 16:41:43 +01:00
import operator
2018-11-07 17:44:49 +01:00
from datetime import datetime , timezone
2018-11-19 18:09:43 +01:00
from functools import partial
2018-07-27 20:59:04 +02:00
import dns . resolver
import dns . exception
2018-05-28 09:39:05 +02:00
2018-05-28 18:22:45 +02:00
from . import constants
2018-10-05 15:37:47 +02:00
from . import keystore
2019-01-29 19:01:04 +01:00
from . util import PR_UNPAID , PR_EXPIRED , PR_PAID , PR_UNKNOWN , PR_INFLIGHT
2018-10-05 15:37:47 +02:00
from . keystore import BIP32_KeyStore
2018-10-25 23:30:36 +02:00
from . bitcoin import COIN
2018-12-12 18:02:55 +01:00
from . transaction import Transaction
2018-10-25 23:30:36 +02:00
from . crypto import sha256
from . bip32 import bip32_root
2018-10-12 19:40:12 +02:00
from . util import bh2u , bfh , PrintError , InvoiceError , resolve_dns_srv , is_ip_address , log_exceptions
2019-01-31 16:41:43 +01:00
from . util import timestamp_to_datetime
2019-02-01 20:21:59 +01:00
from . lntransport import LNTransport , LNResponderTransport
2018-10-15 11:05:53 +02:00
from . lnbase import Peer
2018-06-29 12:33:16 +02:00
from . lnaddr import lnencode , LnAddr , lndecode
2018-06-28 15:50:45 +02:00
from . ecc import der_sig_from_sig_string
2019-01-21 21:27:27 +01:00
from . lnchan import Channel , ChannelJsonEncoder
2018-09-27 16:43:33 +02:00
from . lnutil import ( Outpoint , calc_short_channel_id , LNPeerAddr ,
get_compressed_pubkey_from_bech32 , extract_nodeid ,
2018-10-05 15:37:47 +02:00
PaymentFailure , split_host_port , ConnStringFormatError ,
2018-10-18 22:56:40 +02:00
generate_keypair , LnKeyFamily , LOCAL , REMOTE ,
2018-10-19 21:47:51 +02:00
UnknownPaymentHash , MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE ,
2019-01-21 21:27:27 +01:00
NUM_MAX_EDGES_IN_PAYMENT_PATH , SENT , RECEIVED , HTLCOwner ,
UpdateAddHtlc , Direction )
2018-07-17 21:27:59 +02:00
from . i18n import _
2018-10-19 21:47:51 +02:00
from . lnrouter import RouteEdge , is_route_sane_to_use
2018-11-05 17:23:49 +01:00
from . address_synchronizer import TX_HEIGHT_LOCAL
2018-05-28 09:39:05 +02:00
2018-10-22 15:35:57 +02:00
if TYPE_CHECKING :
from . network import Network
from . wallet import Abstract_Wallet
2018-07-27 20:59:04 +02:00
# how many connected peers the main loop tries to maintain
NUM_PEERS_TARGET = 4
PEER_RETRY_INTERVAL = 600  # seconds
PEER_RETRY_INTERVAL_FOR_CHANNELS = 30  # seconds
# delay before save_gossip_timestamp persists how far gossip has synced
GRAPH_DOWNLOAD_SECONDS = 600

# hard-coded bootstrap peers (host, port, node pubkey), used when no better
# peer source is available
FALLBACK_NODE_LIST_TESTNET = (
    LNPeerAddr('ecdsa.net', 9735, bfh('038370f0e7a03eded3e1d41dc081084a87f0afa1c5b22090b4f3abb391eb15d8ff')),
    LNPeerAddr('180.181.208.42', 9735, bfh('038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9')),
)

FALLBACK_NODE_LIST_MAINNET = (
    LNPeerAddr('104.198.32.198', 9735, bfh('02f6725f9c1c40333b67faea92fd211c183050f28df32cac3f9d69685fe9665432')),  # Blockstream
    LNPeerAddr('13.80.67.162', 9735, bfh('02c0ac82c33971de096d87ce5ed9b022c2de678f08002dc37fdb1b6886d12234b5')),  # Stampery
)

# shared JSON encoder used when serializing channels
encoder = ChannelJsonEncoder()
2018-05-29 18:12:48 +02:00
class LNWorker ( PrintError ) :
2018-05-28 09:39:05 +02:00
2019-01-26 17:57:00 +01:00
    def __init__(self, wallet: 'Abstract_Wallet'):
        """Load lightning state (keys, invoices, channels) from wallet storage.

        Note: network-dependent setup happens later, in start_network().
        """
        self.wallet = wallet
        # RHASH (hex) -> (preimage, invoice, is_received, timestamp)
        # timestamp is None until the payment completes (see payment_completed)
        self.invoices = self.wallet.storage.get('lightning_invoices', {})  # type: Dict[str, Tuple[str,str,bool,int]]
        self.sweep_address = wallet.get_receiving_address()
        # guards self.channels and storage counters across threads
        self.lock = threading.RLock()
        self.ln_keystore = self._read_ln_keystore()
        self.node_keypair = generate_keypair(self.ln_keystore, LnKeyFamily.NODE_KEY, 0)
        self.peers = {}  # type: Dict[bytes, Peer]  # pubkey -> Peer
        self.channels = {}  # type: Dict[bytes, Channel]
        # restore channels persisted by save_channel()
        for x in wallet.storage.get("channels", []):
            c = Channel(x, sweep_address=self.sweep_address, payment_completed=self.payment_completed)
            c.get_preimage_and_invoice = self.get_invoice
            self.channels[c.channel_id] = c
            c.set_remote_commitment()
            c.set_local_commitment(c.current_commitment(LOCAL))
        # timestamps of opening and closing transactions
        self.channel_timestamps = self.wallet.storage.get('lightning_channel_timestamps', {})
2019-01-26 17:57:00 +01:00
    def start_network(self, network: 'Network'):
        """Attach this worker to a Network: watch funding outpoints, register
        event callbacks, and spawn the main loop on the network's event loop.
        """
        self.network = network
        self.config = network.config
        self.channel_db = self.network.channel_db
        for chan_id, chan in self.channels.items():
            self.network.lnwatcher.watch_channel(chan.get_funding_address(), chan.funding_outpoint.to_str())
            chan.lnwatcher = network.lnwatcher
        self._last_tried_peer = {}  # LNPeerAddr -> unix timestamp
        self._add_peers_from_config()
        # wait until we see confirmations
        self.network.register_callback(self.on_network_update, ['wallet_updated', 'network_updated', 'verified', 'fee'])  # thread safe
        self.network.register_callback(self.on_channel_open, ['channel_open'])
        self.network.register_callback(self.on_channel_closed, ['channel_closed'])
        asyncio.run_coroutine_threadsafe(self.network.main_taskgroup.spawn(self.main_loop()), self.network.asyncio_loop)
        # set by get_first_timestamp on its first call
        self.first_timestamp_requested = None
def get_first_timestamp ( self ) :
first_request = False
if self . first_timestamp_requested is None :
self . first_timestamp_requested = time . time ( )
first_request = True
first_timestamp = self . wallet . storage . get ( ' lightning_gossip_until ' , 0 )
if first_timestamp == 0 :
self . print_error ( ' requesting whole channel graph ' )
else :
self . print_error ( ' requesting channel graph since ' , datetime . fromtimestamp ( first_timestamp ) . ctime ( ) )
if first_request :
asyncio . run_coroutine_threadsafe ( self . save_gossip_timestamp ( ) , self . network . asyncio_loop )
return first_timestamp
    @log_exceptions
    async def save_gossip_timestamp(self):
        """Periodically persist the gossip sync position.

        Every GRAPH_DOWNLOAD_SECONDS, records "a day ago" as the point up to
        which gossip is assumed synced, so the next startup only requests
        recent gossip. Runs forever; cancelled with the event loop.
        """
        while True:
            await asyncio.sleep(GRAPH_DOWNLOAD_SECONDS)
            yesterday = int(time.time()) - 24*60*60  # now minus a day
            self.wallet.storage.put('lightning_gossip_until', yesterday)
            self.wallet.storage.write()
            self.print_error('saved lightning gossip timestamp')
2018-05-28 09:39:05 +02:00
2019-01-29 19:01:04 +01:00
    def payment_completed(self, chan, direction, htlc, _preimage):
        """Callback from a Channel when an HTLC is settled.

        Marks the corresponding invoice as paid (records completion timestamp),
        stores the learned preimage for outgoing payments, persists, and
        notifies the GUI.
        """
        chan_id = chan.channel_id
        key = bh2u(htlc.payment_hash)
        if key not in self.invoices:
            # HTLC does not correspond to one of our saved invoices; ignore
            return
        preimage, invoice, is_received, timestamp = self.invoices.get(key)
        if direction == SENT:
            # for payments we sent, we only learn the preimage on settlement
            preimage = bh2u(_preimage)
        now = time.time()
        # non-None timestamp marks the invoice as PAID (see get_invoice_status)
        self.invoices[key] = preimage, invoice, is_received, now
        self.wallet.storage.put('lightning_invoices', self.invoices)
        self.wallet.storage.write()
        self.network.trigger_callback('ln_payment_completed', now, direction, htlc, preimage, chan_id)
def get_invoice_status ( self , payment_hash ) :
if payment_hash not in self . invoices :
2019-01-28 11:14:30 +01:00
return PR_UNKNOWN
2019-01-30 09:26:27 +01:00
preimage , _addr , is_received , timestamp = self . invoices . get ( payment_hash )
2019-01-29 19:01:04 +01:00
if timestamp is None :
return PR_UNPAID
return PR_PAID
2018-11-07 17:44:49 +01:00
2019-01-29 19:01:04 +01:00
def get_payments ( self ) :
# note: with AMP we will have several channels per payment
out = { }
for chan in self . channels . values ( ) :
out . update ( chan . get_payments ( ) )
return out
2018-11-07 17:44:49 +01:00
2019-01-31 16:41:43 +01:00
def get_history ( self ) :
out = [ ]
for chan_id , htlc , direction , status in self . get_payments ( ) . values ( ) :
key = bh2u ( htlc . payment_hash )
timestamp = self . invoices [ key ] [ 3 ] if key in self . invoices else None
item = {
' type ' : ' payment ' ,
' timestamp ' : timestamp or 0 ,
' date ' : timestamp_to_datetime ( timestamp ) ,
' direction ' : ' sent ' if direction == SENT else ' received ' ,
' status ' : status ,
' amount_msat ' : htlc . amount_msat ,
' payment_hash ' : bh2u ( htlc . payment_hash ) ,
' channel_id ' : bh2u ( chan_id ) ,
' htlc_id ' : htlc . htlc_id ,
' cltv_expiry ' : htlc . cltv_expiry ,
}
out . append ( item )
# add funding events
for chan in self . channels . values ( ) :
funding_txid , funding_height , funding_timestamp , closing_txid , closing_height , closing_timestamp = self . channel_timestamps . get ( bh2u ( chan . channel_id ) )
item = {
' channel_id ' : bh2u ( chan . channel_id ) ,
' type ' : ' channel_opening ' ,
' label ' : _ ( ' Channel opening ' ) ,
' txid ' : funding_txid ,
' amount_msat ' : chan . balance ( LOCAL , ctn = 0 ) ,
' direction ' : ' received ' ,
' timestamp ' : funding_timestamp ,
}
out . append ( item )
if not chan . is_closed ( ) :
continue
item = {
' channel_id ' : bh2u ( chan . channel_id ) ,
' txid ' : closing_txid ,
' label ' : _ ( ' Channel closure ' ) ,
' type ' : ' channel_closure ' ,
' amount_msat ' : chan . balance ( LOCAL ) ,
' direction ' : ' sent ' ,
' timestamp ' : closing_timestamp ,
}
out . append ( item )
out . sort ( key = operator . itemgetter ( ' timestamp ' ) )
balance_msat = 0
for item in out :
balance_msat + = item [ ' amount_msat ' ] * ( 1 if item [ ' direction ' ] == ' received ' else - 1 )
item [ ' balance_msat ' ] = balance_msat
return out
2018-10-05 15:37:47 +02:00
    def _read_ln_keystore(self) -> BIP32_KeyStore:
        """Load the lightning BIP32 keystore from storage, creating and
        persisting a fresh random one on first use.
        """
        xprv = self.wallet.storage.get('lightning_privkey2')
        if xprv is None:
            # TODO derive this deterministically from wallet.keystore at keystore generation time
            # probably along a hardened path ( lnd-equivalent would be m/1017'/coinType'/ )
            seed = os.urandom(32)
            xprv, xpub = bip32_root(seed, xtype='standard')
            self.wallet.storage.put('lightning_privkey2', xprv)
        return keystore.from_xprv(xprv)
def get_and_inc_counter_for_channel_keys ( self ) :
with self . lock :
ctr = self . wallet . storage . get ( ' lightning_channel_key_der_ctr ' , - 1 )
ctr + = 1
self . wallet . storage . put ( ' lightning_channel_key_der_ctr ' , ctr )
self . wallet . storage . write ( )
return ctr
2018-07-27 20:59:04 +02:00
def _add_peers_from_config ( self ) :
peer_list = self . config . get ( ' lightning_peers ' , [ ] )
for host , port , pubkey in peer_list :
2018-10-16 16:30:18 +02:00
asyncio . run_coroutine_threadsafe (
self . add_peer ( host , int ( port ) , bfh ( pubkey ) ) ,
self . network . asyncio_loop )
2018-07-27 20:59:04 +02:00
2018-07-14 19:39:28 +02:00
def suggest_peer ( self ) :
for node_id , peer in self . peers . items ( ) :
2019-01-30 17:50:10 +01:00
if not peer . initialized . is_set ( ) :
2018-07-14 19:39:28 +02:00
continue
2019-01-30 19:40:20 +01:00
if not all ( [ chan . is_closed ( ) for chan in peer . channels . values ( ) ] ) :
2018-10-17 11:56:34 +02:00
continue
2018-07-14 19:39:28 +02:00
return node_id
2018-05-31 17:16:01 +02:00
def channels_for_peer ( self , node_id ) :
assert type ( node_id ) is bytes
2018-07-30 13:51:03 +02:00
with self . lock :
return { x : y for ( x , y ) in self . channels . items ( ) if y . node_id == node_id }
2018-05-31 17:16:01 +02:00
2018-10-16 16:30:18 +02:00
    async def add_peer(self, host, port, node_id):
        """Connect to a lightning peer and start its message loop.

        Returns the existing Peer if we are already connected to node_id.
        Raises on transport/handshake failure.
        """
        if node_id in self.peers:
            return self.peers[node_id]
        port = int(port)
        peer_addr = LNPeerAddr(host, port, node_id)
        transport = LNTransport(self.node_keypair.privkey, peer_addr)
        # noise handshake must complete before we treat the peer as reachable
        await transport.handshake()
        self.channel_db.add_recent_peer(peer_addr)
        self._last_tried_peer[peer_addr] = time.time()
        self.print_error("adding peer", peer_addr)
        peer = Peer(self, node_id, transport, request_initial_sync=self.config.get("request_initial_sync", True))
        await self.network.main_taskgroup.spawn(peer.main_loop())
        self.peers[node_id] = peer
        self.network.trigger_callback('ln_status')
        return peer
2018-05-28 09:39:05 +02:00
2018-05-28 10:43:50 +02:00
    def save_channel(self, openchannel):
        """Persist a channel (and all others) to wallet storage and notify the GUI."""
        assert type(openchannel) is Channel
        # sanity check: the remote commitment points must have been rotated
        if openchannel.config[REMOTE].next_per_commitment_point == openchannel.config[REMOTE].current_per_commitment_point:
            raise Exception("Tried to save channel with next_point == current_point, this should not happen")
        with self.lock:
            self.channels[openchannel.channel_id] = openchannel
            # all channels are re-serialized together under one storage key
            dumped = [x.serialize() for x in self.channels.values()]
        self.wallet.storage.put("channels", dumped)
        self.wallet.storage.write()
        self.network.trigger_callback('channel', openchannel)
2018-05-29 18:12:48 +02:00
    def save_short_chan_id(self, chan):
        """
        Checks if Funding TX has been mined. If it has, save the short channel ID in chan;
        if it's also deep enough, also save to disk.
        Returns tuple (mined_deep_enough, num_confirmations).
        """
        assert chan.get_state() in ["OPEN", "OPENING"]
        lnwatcher = self.network.lnwatcher
        conf = lnwatcher.get_tx_height(chan.funding_outpoint.txid).conf
        if conf > 0:
            block_height, tx_pos = lnwatcher.get_txpos(chan.funding_outpoint.txid)
            assert tx_pos >= 0
            chan.short_channel_id_predicted = calc_short_channel_id(block_height, tx_pos, chan.funding_outpoint.output_index)
            # only commit the short channel id once the negotiated minimum
            # depth (which must itself be positive) is reached
            if conf >= chan.constraints.funding_txn_minimum_depth > 0:
                chan.short_channel_id = chan.short_channel_id_predicted
                self.save_channel(chan)
                self.on_channels_updated()
                return True, conf
        return False, conf
2018-05-29 18:12:48 +02:00
2018-12-04 20:50:24 +01:00
def channel_by_txo ( self , txo ) :
2018-10-08 18:33:23 +02:00
with self . lock :
channels = list ( self . channels . values ( ) )
for chan in channels :
if chan . funding_outpoint . to_str ( ) == txo :
2018-12-04 20:50:24 +01:00
return chan
2019-01-31 16:41:43 +01:00
    def on_channel_open(self, event, funding_outpoint, funding_txid, funding_height):
        """Network callback: the funding tx of one of our channels appeared on-chain."""
        chan = self.channel_by_txo(funding_outpoint)
        if not chan:
            # not one of our channels
            return
        self.print_error('on_channel_open', funding_outpoint)
        # record the opening tx; closing fields stay None until closure
        self.channel_timestamps[bh2u(chan.channel_id)] = funding_txid, funding_height.height, funding_height.timestamp, None, None, None
        self.wallet.storage.put('lightning_channel_timestamps', self.channel_timestamps)
        chan.set_funding_txo_spentness(False)
        # send event to GUI
        self.network.trigger_callback('channel', chan)
2018-06-22 12:17:11 +02:00
2018-12-04 20:50:24 +01:00
    @log_exceptions
    async def on_channel_closed(self, event, funding_outpoint, spenders, funding_txid, funding_height, closing_txid, closing_height):
        """Network callback: a channel funding outpoint was spent (channel closed).

        Records close timestamps, marks the channel CLOSED, removes it from the
        gossip db, determines which side force-closed by matching the closing
        txid against our commitment txs, then broadcasts any sweep txs whose
        CLTV/CSV locks have matured.
        """
        chan = self.channel_by_txo(funding_outpoint)
        if not chan:
            return
        self.print_error('on_channel_closed', funding_outpoint)
        self.channel_timestamps[bh2u(chan.channel_id)] = funding_txid, funding_height.height, funding_height.timestamp, closing_txid, closing_height.height, closing_height.timestamp
        self.wallet.storage.put('lightning_channel_timestamps', self.channel_timestamps)
        chan.set_funding_txo_spentness(True)
        # FORCE_CLOSING is our own in-progress state; don't overwrite it
        if chan.get_state() != 'FORCE_CLOSING':
            chan.set_state("CLOSED")
            self.on_channels_updated()
        self.network.trigger_callback('channel', chan)
        # remove from channel_db
        self.channel_db.remove_channel(chan.short_channel_id)
        # detect who closed
        if closing_txid == chan.local_commitment.txid():
            self.print_error('we force closed', funding_outpoint)
            encumbered_sweeptxs = chan.local_sweeptxs
        elif closing_txid == chan.remote_commitment.txid():
            self.print_error('they force closed', funding_outpoint)
            encumbered_sweeptxs = chan.remote_sweeptxs
        else:
            # cooperative close or unknown commitment; nothing for us to sweep here
            self.print_error('not sure who closed', funding_outpoint, closing_txid)
            return
        # sweep
        for prevout, spender in spenders.items():
            e_tx = encumbered_sweeptxs.get(prevout)
            if e_tx is None:
                # no sweep tx prepared for this output
                continue
            if spender is not None:
                self.print_error('outpoint already spent', prevout)
                continue
            prev_txid, prev_index = prevout.split(':')
            broadcast = True
            # CLTV: absolute locktime must have passed
            if e_tx.cltv_expiry:
                local_height = self.network.get_local_height()
                if local_height > e_tx.cltv_expiry:
                    self.print_error(e_tx.name, 'CLTV ({} > {}) fulfilled'.format(local_height, e_tx.cltv_expiry))
                else:
                    self.print_error(e_tx.name, 'waiting for {}: CLTV ({} > {}), funding outpoint {} and tx {}'
                                     .format(e_tx.name, local_height, e_tx.cltv_expiry, funding_outpoint[:8], prev_txid[:8]))
                    broadcast = False
            # CSV: relative locktime, counted in confirmations of the spent tx
            if e_tx.csv_delay:
                num_conf = self.network.lnwatcher.get_tx_height(prev_txid).conf
                if num_conf < e_tx.csv_delay:
                    self.print_error(e_tx.name, 'waiting for {}: CSV ({} >= {}), funding outpoint {} and tx {}'
                                     .format(e_tx.name, num_conf, e_tx.csv_delay, funding_outpoint[:8], prev_txid[:8]))
                    broadcast = False
            if broadcast:
                if not await self.network.lnwatcher.broadcast_or_log(funding_outpoint, e_tx):
                    self.print_error(e_tx.name, f'could not publish encumbered tx: {str(e_tx)}, prevout: {prevout}')
2018-12-04 20:50:24 +01:00
2018-10-12 19:40:12 +02:00
    @log_exceptions
    async def on_network_update(self, event, *args):
        """Network callback dispatched for wallet/chain/fee events.

        Advances channel state machines: announces OPENING channels once
        funding is deep enough, forwards confirmations / fee updates to peers
        for OPEN channels, and rebroadcasts our force-close tx if it is still
        local-only.
        """
        # TODO
        # Race discovered in save_channel (assertion failing):
        # since short_channel_id could be changed while saving.
        with self.lock:
            channels = list(self.channels.values())
        lnwatcher = self.network.lnwatcher
        if event in ('verified', 'wallet_updated'):
            # these events carry the originating synchronizer; only react to our lnwatcher
            if args[0] != lnwatcher:
                return
        for chan in channels:
            if chan.get_state() == "OPENING":
                res, depth = self.save_short_chan_id(chan)
                if not res:
                    self.print_error("network update but funding tx is still not at sufficient depth. actual depth: " + str(depth))
                    continue
                # this results in the channel being marked OPEN
                peer = self.peers[chan.node_id]
                peer.funding_locked(chan)
            elif chan.get_state() == "OPEN":
                peer = self.peers.get(chan.node_id)
                if peer is None:
                    self.print_error("peer not found for {}".format(bh2u(chan.node_id)))
                    return
                if event == 'fee':
                    await peer.bitcoin_fee_update(chan)
                conf = lnwatcher.get_tx_height(chan.funding_outpoint.txid).conf
                peer.on_network_update(chan, conf)
            elif chan.get_state() == 'FORCE_CLOSING':
                txid = chan.force_close_tx().txid()
                height = lnwatcher.get_tx_height(txid).height
                self.print_error("force closing tx", txid, "height", height)
                # TX_HEIGHT_LOCAL means the closing tx never made it to the network
                if height == TX_HEIGHT_LOCAL:
                    self.print_error('REBROADCASTING CLOSING TX')
                    await self.force_close_channel(chan.channel_id)
2018-05-28 11:55:20 +02:00
2018-09-27 16:43:33 +02:00
    async def _open_channel_coroutine(self, peer, local_amount_sat, push_sat, password):
        """Run the channel-establishment flow with *peer*; persist and watch
        the resulting channel. Returns the new Channel.
        """
        # peer might just have been connected to
        await asyncio.wait_for(peer.initialized.wait(), 5)
        chan = await peer.channel_establishment_flow(
            password,
            # we fund the full capacity; push_sat of it is given to the remote
            funding_sat=local_amount_sat + push_sat,
            push_msat=push_sat * 1000,
            temp_channel_id=os.urandom(32))
        self.save_channel(chan)
        self.network.lnwatcher.watch_channel(chan.get_funding_address(), chan.funding_outpoint.to_str())
        self.on_channels_updated()
        return chan
2018-05-31 12:38:02 +02:00
    def on_channels_updated(self):
        """Notify listeners (GUI) that the channel list changed."""
        self.network.trigger_callback('channels')
2018-05-28 11:55:20 +02:00
2018-09-27 16:43:33 +02:00
@staticmethod
def choose_preferred_address ( addr_list : List [ Tuple [ str , int ] ] ) - > Tuple [ str , int ] :
2018-10-03 19:56:07 +02:00
# choose first one that is an IP
2018-09-27 16:43:33 +02:00
for host , port in addr_list :
if is_ip_address ( host ) :
return host , port
2018-10-03 19:56:07 +02:00
# otherwise choose one at random
2018-09-27 16:43:33 +02:00
# TODO maybe filter out onion if not on tor?
2018-10-03 19:56:07 +02:00
return random . choice ( addr_list )
2018-09-27 16:43:33 +02:00
2018-10-11 18:10:31 +02:00
    def open_channel(self, connect_contents, local_amt_sat, push_amt_sat, password=None, timeout=5):
        """Open a channel with a node (blocking; called from GUI/CLI thread).

        connect_contents is 'pubkey@host:port' or just a pubkey (address then
        looked up in the channel db). Returns the funding outpoint string.
        Raises ConnStringFormatError on unresolvable nodes/hosts.
        """
        node_id, rest = extract_nodeid(connect_contents)
        peer = self.peers.get(node_id)
        if not peer:
            # find an address: explicit host:port wins, else gossip info
            all_nodes = self.network.channel_db.nodes
            node_info = all_nodes.get(node_id, None)
            if rest is not None:
                host, port = split_host_port(rest)
            elif node_info and len(node_info.addresses) > 0:
                host, port = self.choose_preferred_address(node_info.addresses)
            else:
                raise ConnStringFormatError(_('Unknown node:') + ' ' + bh2u(node_id))
            try:
                socket.getaddrinfo(host, int(port))
            except socket.gaierror:
                raise ConnStringFormatError(_('Hostname does not resolve (getaddrinfo failed)'))
            # connect on the event loop; block this thread until done
            peer_future = asyncio.run_coroutine_threadsafe(self.add_peer(host, port, node_id),
                                                           self.network.asyncio_loop)
            peer = peer_future.result(timeout)
        coro = self._open_channel_coroutine(peer, local_amt_sat, push_amt_sat, password)
        f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
        chan = f.result(timeout)
        return chan.funding_outpoint.to_str()
2018-05-28 18:22:45 +02:00
2018-07-17 21:27:59 +02:00
    def pay(self, invoice, amount_sat=None):
        """
        This is not merged with _pay so that we can run the test with
        one thread only.
        """
        addr, peer, coro = self._pay(invoice, amount_sat)
        # run the payment coroutine on the network's event loop
        fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
        return addr, peer, fut
    def _pay(self, invoice, amount_sat=None):
        """Validate the invoice, find a route, and return
        (decoded address, first-hop peer, payment coroutine).
        """
        addr = self._check_invoice(invoice, amount_sat)
        route = self._create_route_from_invoice(decoded_invoice=addr)
        peer = self.peers[route[0].node_id]
        # sanity check: the route's first hop must be one of our channels
        for chan in self.channels.values():
            if chan.short_channel_id == route[0].short_channel_id:
                chan_id = chan.channel_id
                break
        else:
            assert False, 'Found route with short channel ID we don\'t have: ' + repr(route[0].short_channel_id)
        return addr, peer, self._pay_to_route(route, addr)
    async def _pay_to_route(self, route, addr):
        """Send the HTLC for invoice *addr* through our first-hop channel on *route*."""
        short_channel_id = route[0].short_channel_id
        with self.lock:
            channels = list(self.channels.values())
        for chan in channels:
            if chan.short_channel_id == short_channel_id:
                break
        else:
            raise Exception("PathFinder returned path with short_channel_id {} that is not in channel list".format(bh2u(short_channel_id)))
        peer = self.peers[route[0].node_id]
        # amount: BTC -> msat (COIN sats per BTC, 1000 msat per sat)
        htlc = await peer.pay(route, chan, int(addr.amount * COIN * 1000), addr.paymenthash, addr.get_min_final_cltv_expiry())
        self.network.trigger_callback('htlc_added', htlc, addr, SENT)
2018-05-28 10:43:50 +02:00
2018-11-02 19:16:42 +01:00
@staticmethod
def _check_invoice ( invoice , amount_sat = None ) :
addr = lndecode ( invoice , expected_hrp = constants . net . SEGWIT_HRP )
if amount_sat :
addr . amount = Decimal ( amount_sat ) / COIN
if addr . amount is None :
raise InvoiceError ( _ ( " Missing amount " ) )
if addr . get_min_final_cltv_expiry ( ) > 60 * 144 :
raise InvoiceError ( " {} \n {} " . format (
_ ( " Invoice wants us to risk locking funds for unreasonably long. " ) ,
f " min_final_cltv_expiry: { addr . get_min_final_cltv_expiry ( ) } " ) )
return addr
    def _create_route_from_invoice(self, decoded_invoice) -> List[RouteEdge]:
        """Compute a payment route for a decoded invoice.

        Tries the invoice's 'r' routing hints in random order: path-find to the
        hint's border node and append the hinted private hops. If no hint
        produces a sane route, path-find directly to the invoice pubkey.
        Raises PaymentFailure when no usable route exists.
        """
        amount_msat = int(decoded_invoice.amount * COIN * 1000)
        invoice_pubkey = decoded_invoice.pubkey.serialize()
        # use 'r' field from invoice
        route = None  # type: List[RouteEdge]
        # only want 'r' tags
        r_tags = list(filter(lambda x: x[0] == 'r', decoded_invoice.tags))
        # strip the tag type, it's implicitly 'r' now
        r_tags = list(map(lambda x: x[1], r_tags))
        # if there are multiple hints, we will use the first one that works,
        # from a random permutation
        random.shuffle(r_tags)
        with self.lock:
            channels = list(self.channels.values())
        for private_route in r_tags:
            if len(private_route) == 0: continue
            if len(private_route) > NUM_MAX_EDGES_IN_PAYMENT_PATH: continue
            border_node_pubkey = private_route[0][0]
            path = self.network.path_finder.find_path_for_payment(self.node_keypair.pubkey, border_node_pubkey, amount_msat, channels)
            if not path: continue
            route = self.network.path_finder.create_route_from_path(path, self.node_keypair.pubkey)
            # we need to shift the node pubkey by one towards the destination:
            private_route_nodes = [edge[0] for edge in private_route][1:] + [invoice_pubkey]
            private_route_rest = [edge[1:] for edge in private_route]
            prev_node_id = border_node_pubkey
            for node_pubkey, edge_rest in zip(private_route_nodes, private_route_rest):
                short_channel_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta = edge_rest
                # if we have a routing policy for this edge in the db, that takes precedence,
                # as it is likely from a previous failure
                channel_policy = self.channel_db.get_routing_policy_for_channel(prev_node_id, short_channel_id)
                if channel_policy:
                    fee_base_msat = channel_policy.fee_base_msat
                    fee_proportional_millionths = channel_policy.fee_proportional_millionths
                    cltv_expiry_delta = channel_policy.cltv_expiry_delta
                route.append(RouteEdge(node_pubkey, short_channel_id, fee_base_msat, fee_proportional_millionths,
                                       cltv_expiry_delta))
                prev_node_id = node_pubkey
            # test sanity
            if not is_route_sane_to_use(route, amount_msat, decoded_invoice.get_min_final_cltv_expiry()):
                self.print_error(f"rejecting insane route {route}")
                route = None
                continue
            break
        # if could not find route using any hint; try without hint now
        if route is None:
            path = self.network.path_finder.find_path_for_payment(self.node_keypair.pubkey, invoice_pubkey, amount_msat, channels)
            if not path:
                raise PaymentFailure(_("No path found"))
            route = self.network.path_finder.create_route_from_path(path, self.node_keypair.pubkey)
            if not is_route_sane_to_use(route, amount_msat, decoded_invoice.get_min_final_cltv_expiry()):
                self.print_error(f"rejecting insane route {route}")
                raise PaymentFailure(_("No path found"))
        return route
2018-06-06 20:30:29 +02:00
    def add_invoice(self, amount_sat, message):
        """Create, persist and return a new BOLT11 payment request for receiving
        amount_sat with description *message*.
        """
        payment_preimage = os.urandom(32)
        RHASH = sha256(payment_preimage)
        amount_btc = amount_sat / Decimal(COIN) if amount_sat else None
        routing_hints = self._calc_routing_hints_for_invoice(amount_sat)
        if not routing_hints:
            self.print_error("Warning. No routing hints added to invoice. "
                             "Other clients will likely not be able to send to us.")
        pay_req = lnencode(LnAddr(RHASH, amount_btc,
                                  tags=[('d', message),
                                        ('c', MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE)]
                                       + routing_hints),
                           self.node_keypair.privkey)
        self.save_invoice(bh2u(payment_preimage), pay_req, RECEIVED)
        return pay_req
def save_invoice(self, preimage, invoice, direction):
    """Store an invoice keyed by its hex payment hash and flush wallet storage."""
    decoded = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
    is_received = (direction == RECEIVED)
    # entry layout: (preimage_hex, bolt11_string, is_received, timestamp)
    self.invoices[bh2u(decoded.paymenthash)] = (preimage, invoice, is_received, None)
    self.wallet.storage.put('lightning_invoices', self.invoices)
    self.wallet.storage.write()
def get_invoice(self, payment_hash: bytes) -> Tuple[bytes, LnAddr]:
    """Look up the invoice for `payment_hash`.

    Returns (preimage, decoded LnAddr).
    Raises UnknownPaymentHash if we have no invoice with that hash.
    """
    try:
        preimage_hex, bolt11, is_received, timestamp = self.invoices[bh2u(payment_hash)]
        preimage = bfh(preimage_hex)
        # sanity check: the stored preimage must hash to the requested payment hash
        assert sha256(preimage) == payment_hash
        return preimage, lndecode(bolt11, expected_hrp=constants.net.SEGWIT_HRP)
    except KeyError as e:
        raise UnknownPaymentHash(payment_hash) from e
def _calc_routing_hints_for_invoice(self, amount_sat):
    """calculate routing hints (BOLT-11 'r' field)"""
    hints = []
    with self.lock:
        channels = list(self.channels.values())
    # note: currently we add *all* our channels; but this might be a privacy leak?
    for chan in channels:
        # only advertise channels that are open
        if chan.get_state() != "OPEN":
            continue
        # ... and that can plausibly receive the requested amount
        # FIXME because of on-chain fees of ctx, this check is insufficient
        if amount_sat and chan.balance(REMOTE) // 1000 < amount_sat:
            continue
        chan_id = chan.short_channel_id
        assert type(chan_id) is bytes, chan_id
        channel_info = self.channel_db.get_channel_info(chan_id)
        # note: as a fallback, if we don't have a channel update for the
        # incoming direction of our private channel, we fill the invoice with garbage.
        # the sender should still be able to pay us, but will incur an extra round trip
        # (they will get the channel update from the onion error)
        # at least, that's the theory. https://github.com/lightningnetwork/lnd/issues/2066
        fee_base_msat = fee_proportional_millionths = 0
        cltv_expiry_delta = 1  # lnd won't even try with zero
        policy = channel_info.get_policy_for_node(chan.node_id) if channel_info else None
        if policy is not None:
            fee_base_msat = policy.fee_base_msat
            fee_proportional_millionths = policy.fee_proportional_millionths
            cltv_expiry_delta = policy.cltv_expiry_delta
        else:
            self.print_error(f"Warning. Missing channel update for our channel {bh2u(chan_id)}; "
                             f"filling invoice with incorrect data.")
        hints.append(('r', [(chan.node_id,
                             chan_id,
                             fee_base_msat,
                             fee_proportional_millionths,
                             cltv_expiry_delta)]))
    return hints
def delete_invoice(self, payment_hash_hex: str):
    """Remove the invoice with the given hex payment hash; no-op if unknown.

    FIXME we will now LOSE the preimage!! is this feature a good idea?
    maybe instead of deleting, we could have a feature to "hide" invoices (e.g. for GUI)
    """
    if payment_hash_hex not in self.invoices:
        return
    del self.invoices[payment_hash_hex]
    self.wallet.storage.put('lightning_invoices', self.invoices)
    self.wallet.storage.write()
def get_balance(self):
    """Return our total local balance over all non-closed channels, in sat (Decimal)."""
    with self.lock:
        total_msat = sum(chan.balance(LOCAL)
                         for chan in self.channels.values()
                         if not chan.is_closed())
        return Decimal(total_msat) / 1000
def list_channels(self):
    """Yield a JSON-serializable summary dict for each of our channels."""
    def summarize(chan):
        # we output the funding_outpoint instead of the channel_id because
        # lnd uses channel_point (funding outpoint) to identify channels
        return {
            'local_htlcs': json.loads(encoder.encode(chan.hm.log[LOCAL])),
            'remote_htlcs': json.loads(encoder.encode(chan.hm.log[REMOTE])),
            'channel_id': bh2u(chan.short_channel_id) if chan.short_channel_id else None,
            'full_channel_id': bh2u(chan.channel_id),
            'channel_point': chan.funding_outpoint.to_str(),
            'state': chan.get_state(),
            'remote_pubkey': bh2u(chan.node_id),
            'local_balance': chan.balance(LOCAL) // 1000,
            'remote_balance': chan.balance(REMOTE) // 1000,
        }
    with self.lock:
        for chan in self.channels.values():
            yield summarize(chan)
async def close_channel(self, chan_id):
    """Cooperatively close the given channel via its remote peer."""
    remote_node_id = self.channels[chan_id].node_id
    peer = self.peers[remote_node_id]
    return await peer.close_channel(chan_id)
async def force_close_channel(self, chan_id):
    """Unilaterally close a channel by broadcasting our latest commitment tx.

    Returns the txid of the broadcast transaction.
    """
    chan = self.channels[chan_id]
    closing_tx = chan.force_close_tx()
    # mark and persist the new state *before* broadcasting
    chan.set_state('FORCE_CLOSING')
    self.save_channel(chan)
    self.on_channels_updated()
    await self.network.broadcast_transaction(closing_tx)
    return closing_tx.txid()
def _get_next_peers_to_try(self) -> Sequence[LNPeerAddr]:
    """Suggest new peer(s) to connect to.

    Preference order: recently-seen peers, then a random node from the channel
    graph, then the hard-coded fallback lists. (DNS seeding is currently
    disabled, see TODO below.)
    """
    now = time.time()
    # expire stale retry timestamps; due to this, below we can just test
    # membership in _last_tried_peer
    for peer in list(self._last_tried_peer):
        if now >= self._last_tried_peer[peer] + PEER_RETRY_INTERVAL:
            del self._last_tried_peer[peer]
    # first try from recent peers
    for peer in self.channel_db.get_recent_peers():
        if peer.pubkey in self.peers or peer in self._last_tried_peer:
            continue
        return [peer]
    # try random peer from graph
    all_nodes = self.channel_db.nodes
    if all_nodes:
        #self.print_error('trying to get ln peers from channel db')
        node_ids = list(all_nodes)
        for _ in range(min(200, len(all_nodes))):
            node_id = random.choice(node_ids)
            node = all_nodes.get(node_id)
            if node is None:
                continue
            if not node.addresses:
                continue
            host, port = self.choose_preferred_address(node.addresses)
            candidate = LNPeerAddr(host, port, node_id)
            if candidate.pubkey in self.peers or candidate in self._last_tried_peer:
                continue
            self.print_error('taking random ln peer from our channel db')
            return [candidate]
    # TODO remove this. For some reason the dns seeds seem to ignore the realm byte
    # and only return mainnet nodes. so for the time being dns seeding is disabled:
    if constants.net in (constants.BitcoinTestnet,):
        return [random.choice(FALLBACK_NODE_LIST_TESTNET)]
    elif constants.net in (constants.BitcoinMainnet,):
        return [random.choice(FALLBACK_NODE_LIST_MAINNET)]
    else:
        return []
    # try peers from dns seed.
    # return several peers to reduce the number of dns queries.
    if not constants.net.LN_DNS_SEEDS:
        return []
    dns_seed = random.choice(constants.net.LN_DNS_SEEDS)
    self.print_error('asking dns seed "{}" for ln peers'.format(dns_seed))
    try:
        # note: this might block for several seconds
        # this will include bech32-encoded-pubkeys and ports
        srv_answers = resolve_dns_srv('r{}.{}'.format(
            constants.net.LN_REALM_BYTE, dns_seed))
    except dns.exception.DNSException:
        return []
    random.shuffle(srv_answers)
    srv_answers = srv_answers[:2 * NUM_PEERS_TARGET]
    # we now have pubkeys and ports but host is still needed
    peers = []
    for srv_ans in srv_answers:
        try:
            # note: this might block for several seconds
            answers = dns.resolver.query(srv_ans['host'])
        except dns.exception.DNSException:
            continue
        try:
            ln_host = str(answers[0])
            port = int(srv_ans['port'])
            bech32_pubkey = srv_ans['host'].split('.')[0]
            pubkey = get_compressed_pubkey_from_bech32(bech32_pubkey)
            peers.append(LNPeerAddr(ln_host, port, pubkey))
        except Exception as e:
            self.print_error('error with parsing peer from dns seed: {}'.format(e))
            continue
    self.print_error('got {} ln peers from dns seed'.format(len(peers)))
    return peers
async def reestablish_peers_and_channels(self):
    """Periodic maintenance over our channels.

    Warns about channels whose negotiated feerate has fallen far below the
    current estimate, and tries to (re)connect and re-establish every channel
    whose peer is not currently connected.
    """
    async def reconnect_peer_for_channel(chan, now):
        # try last good address first
        peer = self.channel_db.get_last_good_address(chan.node_id)
        if peer:
            if self._last_tried_peer.get(peer, 0) + PEER_RETRY_INTERVAL_FOR_CHANNELS < now:
                await self.add_peer(peer.host, peer.port, peer.pubkey)
                return
        # fall back to a random known address for this node
        node_info = self.channel_db.nodes.get(chan.node_id, None)
        if not node_info:
            return
        addresses = node_info.addresses
        if not addresses:
            return
        host, port = random.choice(addresses)
        peer = LNPeerAddr(host, port, chan.node_id)
        if self._last_tried_peer.get(peer, 0) + PEER_RETRY_INTERVAL_FOR_CHANNELS < now:
            await self.add_peer(host, port, chan.node_id)

    with self.lock:
        channels = list(self.channels.values())
    now = time.time()
    for chan in channels:
        if constants.net is not constants.BitcoinRegtest:
            # warn if the channel feerate has dropped well below current estimates
            ratio = chan.constraints.feerate / self.current_feerate_per_kw()
            if ratio < 0.5:
                self.print_error(f"WARNING: fee level for channel {bh2u(chan.channel_id)} is {chan.constraints.feerate} sat/kiloweight, current recommended feerate is {self.current_feerate_per_kw()} sat/kiloweight, consider force closing!")
        if not chan.should_try_to_reestablish_peer():
            continue
        peer = self.peers.get(chan.node_id, None)
        if peer is None:
            await reconnect_peer_for_channel(chan, now)
        else:
            coro = peer.reestablish_channel(chan)
            asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
def current_feerate_per_kw(self):
    """Return the current recommended feerate in sat/kiloweight."""
    from .simple_config import FEE_LN_ETA_TARGET, FEERATE_FALLBACK_STATIC_FEE, FEERATE_REGTEST_HARDCODED
    if constants.net is constants.BitcoinRegtest:
        return FEERATE_REGTEST_HARDCODED // 4
    estimate_per_kvbyte = self.network.config.eta_target_to_fee(FEE_LN_ETA_TARGET)
    if estimate_per_kvbyte is None:
        estimate_per_kvbyte = FEERATE_FALLBACK_STATIC_FEE
    # sat/kvB -> sat/kw (divide by 4), floored at the 253 sat/kw relay minimum
    return max(253, estimate_per_kvbyte // 4)
async def main_loop ( self ) :
2018-09-24 16:38:59 +02:00
await self . on_network_update ( ' network_updated ' ) # shortcut (don't block) if funding tx locked and verified
await self . network . lnwatcher . on_network_update ( ' network_updated ' ) # ping watcher to check our channels
2018-10-16 17:45:28 +02:00
listen_addr = self . config . get ( ' lightning_listen ' )
if listen_addr :
2019-01-31 19:43:42 +01:00
addr , port = listen_addr . rsplit ( ' : ' , 2 )
if addr [ 0 ] == ' [ ' :
2018-10-16 17:45:28 +02:00
# ipv6
2019-01-31 19:43:42 +01:00
addr = addr [ 1 : - 1 ]
2018-10-16 17:45:28 +02:00
async def cb ( reader , writer ) :
2019-02-01 20:21:59 +01:00
transport = LNResponderTransport ( self . node_keypair . privkey , reader , writer )
2019-02-01 15:27:50 +01:00
try :
2019-02-01 20:21:59 +01:00
node_id = await transport . handshake ( )
2019-02-01 15:27:50 +01:00
except :
self . print_error ( ' handshake failure from incoming connection ' )
return
2019-02-01 20:21:59 +01:00
peer = Peer ( self , node_id , transport , request_initial_sync = self . config . get ( " request_initial_sync " , True ) )
2018-10-16 17:45:28 +02:00
self . peers [ node_id ] = peer
await self . network . main_taskgroup . spawn ( peer . main_loop ( ) )
self . network . trigger_callback ( ' ln_status ' )
2019-01-31 19:43:42 +01:00
await asyncio . start_server ( cb , addr , int ( port ) )
2018-07-13 17:05:04 +02:00
while True :
await asyncio . sleep ( 1 )
2018-07-30 13:51:03 +02:00
now = time . time ( )
2018-10-16 16:30:18 +02:00
await self . reestablish_peers_and_channels ( )
2018-07-27 20:59:04 +02:00
if len ( self . peers ) > = NUM_PEERS_TARGET :
2018-07-13 17:05:04 +02:00
continue
2018-07-27 20:59:04 +02:00
peers = self . _get_next_peers_to_try ( )
for peer in peers :
2018-07-30 13:51:03 +02:00
last_tried = self . _last_tried_peer . get ( peer , 0 )
if last_tried + PEER_RETRY_INTERVAL < now :
2018-10-16 16:30:18 +02:00
await self . add_peer ( peer . host , peer . port , peer . pubkey )