2018-10-25 19:34:31 +02:00
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
2018-04-16 10:24:03 +02:00
import asyncio
2018-04-30 23:34:33 +02:00
import os
2018-04-23 15:11:56 +02:00
from decimal import Decimal
2018-07-13 17:05:04 +02:00
import random
2018-07-27 20:59:04 +02:00
import time
2018-10-22 15:35:57 +02:00
from typing import Optional , Sequence , Tuple , List , Dict , TYPE_CHECKING
2018-07-30 13:51:03 +02:00
import threading
2018-09-27 16:43:33 +02:00
import socket
2018-10-25 00:22:42 +02:00
import json
2018-11-07 17:44:49 +01:00
from datetime import datetime , timezone
2018-11-19 18:09:43 +01:00
from functools import partial
2019-02-27 11:42:09 +01:00
from collections import defaultdict
2018-07-27 20:59:04 +02:00
import dns . resolver
import dns . exception
2018-05-28 09:39:05 +02:00
2018-05-28 18:22:45 +02:00
from . import constants
2018-10-05 15:37:47 +02:00
from . import keystore
2019-01-29 19:01:04 +01:00
from . util import PR_UNPAID , PR_EXPIRED , PR_PAID , PR_UNKNOWN , PR_INFLIGHT
2018-10-05 15:37:47 +02:00
from . keystore import BIP32_KeyStore
2018-10-25 23:30:36 +02:00
from . bitcoin import COIN
2018-12-12 18:02:55 +01:00
from . transaction import Transaction
2018-10-25 23:30:36 +02:00
from . crypto import sha256
2019-03-06 16:13:28 +01:00
from . bip32 import BIP32Node
2019-05-02 17:59:11 +02:00
from . util import bh2u , bfh , InvoiceError , resolve_dns_srv , is_ip_address , log_exceptions
2019-01-31 16:41:43 +01:00
from . util import timestamp_to_datetime
2019-05-02 17:59:11 +02:00
from . logging import Logger
2019-02-01 20:21:59 +01:00
from . lntransport import LNTransport , LNResponderTransport
2019-02-09 10:29:33 +01:00
from . lnpeer import Peer
2018-06-29 12:33:16 +02:00
from . lnaddr import lnencode , LnAddr , lndecode
2018-06-28 15:50:45 +02:00
from . ecc import der_sig_from_sig_string
2019-02-09 10:29:33 +01:00
from . lnchannel import Channel , ChannelJsonEncoder
2018-09-27 16:43:33 +02:00
from . lnutil import ( Outpoint , calc_short_channel_id , LNPeerAddr ,
get_compressed_pubkey_from_bech32 , extract_nodeid ,
2018-10-05 15:37:47 +02:00
PaymentFailure , split_host_port , ConnStringFormatError ,
2018-10-18 22:56:40 +02:00
generate_keypair , LnKeyFamily , LOCAL , REMOTE ,
2018-10-19 21:47:51 +02:00
UnknownPaymentHash , MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE ,
2019-01-21 21:27:27 +01:00
NUM_MAX_EDGES_IN_PAYMENT_PATH , SENT , RECEIVED , HTLCOwner ,
2019-04-26 12:48:02 +02:00
UpdateAddHtlc , Direction , LnLocalFeatures )
2018-07-17 21:27:59 +02:00
from . i18n import _
2018-10-19 21:47:51 +02:00
from . lnrouter import RouteEdge , is_route_sane_to_use
2018-11-05 17:23:49 +01:00
from . address_synchronizer import TX_HEIGHT_LOCAL
2018-05-28 09:39:05 +02:00
2018-10-22 15:35:57 +02:00
if TYPE_CHECKING :
from . network import Network
from . wallet import Abstract_Wallet
2018-07-27 20:59:04 +02:00
# Target number of simultaneously-connected LN peers; main_loop tops up to this.
NUM_PEERS_TARGET = 4
# Minimum delay before re-trying a peer address that was already tried.
PEER_RETRY_INTERVAL = 600  # seconds
PEER_RETRY_INTERVAL_FOR_CHANNELS = 30  # seconds
GRAPH_DOWNLOAD_SECONDS = 600

# Hard-coded fallback peers, used when neither recently-seen peers nor the
# channel graph yield a candidate (DNS seeding is currently disabled in
# _get_next_peers_to_try).
FALLBACK_NODE_LIST_TESTNET = (
    LNPeerAddr('ecdsa.net', 9735, bfh('038370f0e7a03eded3e1d41dc081084a87f0afa1c5b22090b4f3abb391eb15d8ff')),
    LNPeerAddr('148.251.87.112', 9735, bfh('021a8bd8d8f1f2e208992a2eb755cdc74d44e66b6a0c924d3a3cce949123b9ce40')),  # janus test server
    LNPeerAddr('122.199.61.90', 9735, bfh('038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9')),  # popular node https://1ml.com/testnet/node/038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9
)

# NOTE(review): pubkeys below are raw compressed public keys (bytes).
# 52.168.166.221 and 34.236.113.58 each appear twice, which doubles their
# weight under random.choice -- presumably unintentional; verify.
FALLBACK_NODE_LIST_MAINNET = [
    LNPeerAddr(host='52.168.166.221', port=9735, pubkey=b'\x02\x148+\xdc\xe7u\r\xfc\xb8\x12m\xf8\xe2\xb1-\xe3\x856\x90-\xc3j\xbc\xeb\xda\xee\xfd\xec\xa1\xdf\x82\x84'),
    LNPeerAddr(host='35.230.100.60', port=9735, pubkey=b'\x02?^5\x82qk\xed\x96\xf6\xf2l\xfc\xd8\x03~\x07GM{GC\xaf\xdc\x8b\x07\xe6\x92\xdfcFM~'),
    LNPeerAddr(host='40.69.71.114', port=9735, pubkey=b'\x02\x83\x03\x18,\x98\x85\xda\x93\xb3\xb2\\\x96!\xd2,\xf3Du\xe6<\x129B\xe4\x02\xabS\x0c\x05V\xe6u'),
    LNPeerAddr(host='62.210.110.5', port=9735, pubkey=b'\x02v\xe0\x9a&u\x92\xe7E\x1a\x93\x9c\x93,\xf6\x85\xf0uM\xe3\x82\xa3\xca\x85\xd2\xfb:\x86ML6Z\xd5'),
    LNPeerAddr(host='34.236.113.58', port=9735, pubkey=b'\x02\xfaP\xc7.\xe1\xe2\xeb_\x1bm\x9c02\x08\x0cL\x86Cs\xc4\x1d\xfa)f\xaa4\xee\xe1\x05\x1f\x97'),
    LNPeerAddr(host='52.168.166.221', port=9735, pubkey=b'\x02\x148+\xdc\xe7u\r\xfc\xb8\x12m\xf8\xe2\xb1-\xe3\x856\x90-\xc3j\xbc\xeb\xda\xee\xfd\xec\xa1\xdf\x82\x84'),
    LNPeerAddr(host='34.236.113.58', port=9735, pubkey=b'\x02\xfaP\xc7.\xe1\xe2\xeb_\x1bm\x9c02\x08\x0cL\x86Cs\xc4\x1d\xfa)f\xaa4\xee\xe1\x05\x1f\x97'),
]

# Shared JSON encoder for serializing channel objects.
encoder = ChannelJsonEncoder()
2019-05-02 17:59:11 +02:00
class LNWorker(Logger):
    """Base class for Lightning workers.

    Owns the LN node keypair, the set of connected peers, and a main loop
    that keeps roughly NUM_PEERS_TARGET outgoing connections alive.
    Note: self.network / self.config are only set in start_network().
    """

    def __init__(self, xprv):
        Logger.__init__(self)
        # derive the LN node key from the given extended private key
        self.node_keypair = generate_keypair(keystore.from_xprv(xprv), LnKeyFamily.NODE_KEY, 0)
        self.peers = {}  # type: Dict[bytes, Peer]  # pubkey -> Peer
        self.localfeatures = LnLocalFeatures(0)

    async def maybe_listen(self):
        """Start an LN listening server if 'lightning_listen' is configured.

        The config value is '<host>:<port>'; host may be a bracketed IPv6
        literal, e.g. '[::1]:9735'.
        """
        listen_addr = self.config.get('lightning_listen')
        if listen_addr:
            # FIX: maxsplit must be 1 (was 2). With rsplit(':', 2), an IPv6
            # literal like '[::1]:9735' was split at its two rightmost colons
            # into three fields, raising ValueError on unpacking.
            addr, port = listen_addr.rsplit(':', 1)
            if addr[0] == '[':
                # ipv6: strip the surrounding brackets
                addr = addr[1:-1]
            async def cb(reader, writer):
                transport = LNResponderTransport(self.node_keypair.privkey, reader, writer)
                try:
                    node_id = await transport.handshake()
                except Exception:
                    # was a bare `except:`; that would also swallow
                    # BaseExceptions such as SystemExit
                    self.logger.info('handshake failure from incoming connection')
                    return
                peer = Peer(self, node_id, transport)
                self.peers[node_id] = peer
                await self.network.main_taskgroup.spawn(peer.main_loop())
                self.network.trigger_callback('ln_status')
            await asyncio.start_server(cb, addr, int(port))

    @log_exceptions
    async def main_loop(self):
        """Periodically top up the peer set to NUM_PEERS_TARGET."""
        while True:
            await asyncio.sleep(1)
            now = time.time()
            if len(self.peers) >= NUM_PEERS_TARGET:
                continue
            peers = self._get_next_peers_to_try()
            for peer in peers:
                last_tried = self._last_tried_peer.get(peer, 0)
                if last_tried + PEER_RETRY_INTERVAL < now:
                    await self.add_peer(peer.host, peer.port, peer.pubkey)

    async def add_peer(self, host, port, node_id):
        """Connect to (host, port, node_id) unless already connected.

        Returns the (possibly pre-existing) Peer object.
        """
        if node_id in self.peers:
            return self.peers[node_id]
        port = int(port)
        peer_addr = LNPeerAddr(host, port, node_id)
        transport = LNTransport(self.node_keypair.privkey, peer_addr)
        self._last_tried_peer[peer_addr] = time.time()
        self.logger.info(f"adding peer {peer_addr}")
        peer = Peer(self, node_id, transport)
        await self.network.main_taskgroup.spawn(peer.main_loop())
        self.peers[node_id] = peer
        self.network.trigger_callback('ln_status')
        return peer

    def start_network(self, network: 'Network'):
        """Attach to the Network object and start the peer-maintenance loop."""
        self.network = network
        self.config = network.config
        self.channel_db = self.network.channel_db
        self._last_tried_peer = {}  # LNPeerAddr -> unix timestamp
        self._add_peers_from_config()
        asyncio.run_coroutine_threadsafe(self.network.main_taskgroup.spawn(self.main_loop()), self.network.asyncio_loop)

    def _add_peers_from_config(self):
        """Schedule connections to peers listed in the 'lightning_peers' config."""
        peer_list = self.config.get('lightning_peers', [])
        for host, port, pubkey in peer_list:
            asyncio.run_coroutine_threadsafe(
                self.add_peer(host, int(port), bfh(pubkey)),
                self.network.asyncio_loop)

    def _get_next_peers_to_try(self) -> Sequence[LNPeerAddr]:
        """Pick candidate peer addresses, preferring recent peers, then the
        channel graph, then the hard-coded fallback lists."""
        now = time.time()
        recent_peers = self.channel_db.get_recent_peers()
        # maintenance for last tried times
        # due to this, below we can just test membership in _last_tried_peer
        for peer in list(self._last_tried_peer):
            if now >= self._last_tried_peer[peer] + PEER_RETRY_INTERVAL:
                del self._last_tried_peer[peer]
        # first try from recent peers
        for peer in recent_peers:
            if peer.pubkey in self.peers:
                continue
            if peer in self._last_tried_peer:
                continue
            return [peer]
        # try random peer from graph
        unconnected_nodes = self.channel_db.get_200_randomly_sorted_nodes_not_in(self.peers.keys())
        if unconnected_nodes:
            for node in unconnected_nodes:
                addrs = self.channel_db.get_node_addresses(node)
                if not addrs:
                    continue
                host, port = self.choose_preferred_address(addrs)
                peer = LNPeerAddr(host, port, bytes.fromhex(node.node_id))
                if peer in self._last_tried_peer:
                    continue
                #self.logger.info('taking random ln peer from our channel db')
                return [peer]
        # TODO remove this. For some reason the dns seeds seem to ignore the realm byte
        # and only return mainnet nodes. so for the time being dns seeding is disabled:
        if constants.net in (constants.BitcoinTestnet,):
            return [random.choice(FALLBACK_NODE_LIST_TESTNET)]
        elif constants.net in (constants.BitcoinMainnet,):
            return [random.choice(FALLBACK_NODE_LIST_MAINNET)]
        else:
            return []
        # NOTE: everything below is deliberately unreachable while dns seeding
        # is disabled (see TODO above).
        # try peers from dns seed.
        # return several peers to reduce the number of dns queries.
        if not constants.net.LN_DNS_SEEDS:
            return []
        dns_seed = random.choice(constants.net.LN_DNS_SEEDS)
        self.logger.info('asking dns seed "{}" for ln peers'.format(dns_seed))
        try:
            # note: this might block for several seconds
            # this will include bech32-encoded-pubkeys and ports
            srv_answers = resolve_dns_srv('r{}.{}'.format(
                constants.net.LN_REALM_BYTE, dns_seed))
        except dns.exception.DNSException as e:
            return []
        random.shuffle(srv_answers)
        num_peers = 2 * NUM_PEERS_TARGET
        srv_answers = srv_answers[:num_peers]
        # we now have pubkeys and ports but host is still needed
        peers = []
        for srv_ans in srv_answers:
            try:
                # note: this might block for several seconds
                answers = dns.resolver.query(srv_ans['host'])
            except dns.exception.DNSException:
                continue
            try:
                ln_host = str(answers[0])
                port = int(srv_ans['port'])
                bech32_pubkey = srv_ans['host'].split('.')[0]
                pubkey = get_compressed_pubkey_from_bech32(bech32_pubkey)
                peers.append(LNPeerAddr(ln_host, port, pubkey))
            except Exception as e:
                self.logger.info('error with parsing peer from dns seed: {}'.format(e))
                continue
        self.logger.info('got {} ln peers from dns seed'.format(len(peers)))
        return peers

    @staticmethod
    def choose_preferred_address(addr_list: List[Tuple[str, int]]) -> Tuple[str, int]:
        """Pick one (host, port) from a node's known addresses, preferring IPs."""
        assert len(addr_list) >= 1
        # choose first one that is an IP
        for addr_in_db in addr_list:
            host = addr_in_db.host
            port = addr_in_db.port
            if is_ip_address(host):
                return host, port
        # otherwise choose one at random
        # TODO maybe filter out onion if not on tor?
        choice = random.choice(addr_list)
        return choice.host, choice.port
2019-04-26 12:48:02 +02:00
class LNGossip(LNWorker):
    """Gossip-only worker: connects with a throwaway node key and keeps the
    channel database current via the gossip_queries feature."""

    # height of first channel announcements
    first_block = 497000
    # channel policies older than two weeks are considered stale
    max_age = 14 * 24 * 3600

    def __init__(self, network):
        # an ephemeral node key suffices: this worker never opens channels
        ephemeral_seed = os.urandom(32)
        root = BIP32Node.from_rootseed(ephemeral_seed, xtype='standard')
        super().__init__(root.to_xprv())
        self.localfeatures |= LnLocalFeatures.GOSSIP_QUERIES_OPT | LnLocalFeatures.GOSSIP_QUERIES_REQ
        self.unknown_ids = set()

    def start_network(self, network: 'Network'):
        super().start_network(network)
        asyncio.run_coroutine_threadsafe(
            self.network.main_taskgroup.spawn(self.maintain_db()),
            self.network.asyncio_loop)

    def refresh_gui(self):
        # refresh gui
        known = self.channel_db.num_channels
        unknown = len(self.unknown_ids)
        self.logger.info(f'Channels: {known} of {known + unknown}')
        self.network.trigger_callback('ln_status')

    async def maintain_db(self):
        """Prune orphaned channels once, then periodically drop stale policies."""
        n = self.channel_db.get_orphaned_channels()
        if n:
            self.logger.info(f'Deleting {n} orphaned channels')
            self.channel_db.prune_orphaned_channels()
            self.refresh_gui()
        while True:
            n = self.channel_db.get_old_policies(self.max_age)
            if n:
                self.logger.info(f'Deleting {n} old channels')
                self.channel_db.prune_old_policies(self.max_age)
                self.refresh_gui()
            await asyncio.sleep(5)

    def add_new_ids(self, ids):
        """Queue any of `ids` that the channel DB does not yet know about."""
        already_known = self.channel_db.compare_channels(ids)
        self.unknown_ids |= set(ids) - set(already_known)

    def get_ids_to_query(self):
        """Pop and return up to 500 queued unknown channel ids."""
        N = 500
        pending = list(self.unknown_ids)
        batch, remainder = pending[:N], pending[N:]
        self.unknown_ids = set(remainder)
        return batch

    def peer_closed(self, peer):
        """Forget a disconnected peer."""
        self.peers.pop(peer.pubkey)
2019-05-13 14:30:02 +02:00
2019-04-26 12:48:02 +02:00
class LNWallet ( LNWorker ) :
2019-01-26 17:57:00 +01:00
def __init__(self, wallet: 'Abstract_Wallet'):
    """Lightning worker bound to an on-chain wallet.

    Loads (or lazily creates) the LN master key, restores invoices,
    preimages and channels from wallet storage.
    """
    Logger.__init__(self)
    self.wallet = wallet
    self.storage = wallet.storage
    xprv = self.storage.get('lightning_privkey2')
    if xprv is None:
        # TODO derive this deterministically from wallet.keystore at keystore generation time
        # probably along a hardened path ( lnd-equivalent would be m/1017'/coinType'/ )
        seed = os.urandom(32)
        node = BIP32Node.from_rootseed(seed, xtype='standard')
        xprv = node.to_xprv()
        self.storage.put('lightning_privkey2', xprv)
    super().__init__(xprv)
    self.ln_keystore = keystore.from_xprv(xprv)
    #self.localfeatures |= LnLocalFeatures.OPTION_DATA_LOSS_PROTECT_REQ
    #self.localfeatures |= LnLocalFeatures.OPTION_DATA_LOSS_PROTECT_OPT
    self.invoices = self.storage.get('lightning_invoices', {})        # RHASH -> (invoice, direction, is_paid)
    self.preimages = self.storage.get('lightning_preimages', {})      # RHASH -> preimage
    # address to which sweep transactions send their outputs
    self.sweep_address = wallet.get_receiving_address()
    # guards self.channels (and the channel-key counter in storage)
    self.lock = threading.RLock()
    self.channels = {}  # type: Dict[bytes, Channel]
    for x in wallet.storage.get("channels", []):
        c = Channel(x, sweep_address=self.sweep_address, lnworker=self)
        self.channels[c.channel_id] = c
        c.set_remote_commitment()
        c.set_local_commitment(c.current_commitment(LOCAL))
    # timestamps of opening and closing transactions
    self.channel_timestamps = self.storage.get('lightning_channel_timestamps', {})
2019-01-26 17:57:00 +01:00
def start_network(self, network: 'Network'):
    """Register network callbacks, hand channels to the watcher, then start
    the base-class peer loop plus wallet-specific startup coroutines."""
    self.network = network
    self.network.register_callback(self.on_network_update, ['wallet_updated', 'network_updated', 'verified', 'fee'])  # thread safe
    self.network.register_callback(self.on_channel_open, ['channel_open'])
    self.network.register_callback(self.on_channel_closed, ['channel_closed'])
    for chan_id, chan in self.channels.items():
        self.network.lnwatcher.add_channel(chan.funding_outpoint.to_str(), chan.get_funding_address())
        chan.lnwatcher = network.lnwatcher
    super().start_network(network)
    for coro in [
            self.maybe_listen(),
            self.on_network_update('network_updated'),  # shortcut (don't block) if funding tx locked and verified
            self.network.lnwatcher.on_network_update('network_updated'),  # ping watcher to check our channels
            self.reestablish_peers_and_channels()
    ]:
        asyncio.run_coroutine_threadsafe(self.network.main_taskgroup.spawn(coro), self.network.asyncio_loop)
2018-11-27 00:40:55 +01:00
2019-05-13 22:33:56 +02:00
def peer_closed(self, peer):
    """Mark this peer's channels as disconnected and drop the peer."""
    affected_channels = self.channels_for_peer(peer.pubkey).values()
    for chan in affected_channels:
        # a channel we are force-closing stays in that state
        if chan.get_state() != 'FORCE_CLOSING':
            chan.set_state('DISCONNECTED')
        self.network.trigger_callback('channel', chan)
    self.peers.pop(peer.pubkey)
2019-02-26 16:31:09 +01:00
def payment_completed(self, chan: Channel, direction: Direction,
                      htlc: UpdateAddHtlc):
    """Notify listeners that an HTLC settled on `chan`."""
    self.network.trigger_callback(
        'ln_payment_completed',
        int(time.time()),                       # settlement timestamp
        direction,
        htlc,
        self.get_preimage(htlc.payment_hash),   # preimage proving payment
        chan.channel_id)
2019-01-29 19:01:04 +01:00
2019-03-07 17:51:35 +01:00
def get_invoice_status(self, key):
    """Return the PR_* status of the invoice stored under `key` (RHASH)."""
    try:
        _invoice, _direction, is_paid = self.invoices[key]
    except KeyError:
        return PR_UNKNOWN
    return PR_PAID if is_paid else PR_UNPAID
2018-11-07 17:44:49 +01:00
2019-01-29 19:01:04 +01:00
def get_payments(self):
    """Aggregate payments across all channels, keyed by payment_hash.

    Note: with AMP a single payment may span several channels, hence the
    list value per key.
    """
    by_hash = defaultdict(list)
    for chan in self.channels.values():
        for payment_hash, payment in chan.get_payments().items():
            by_hash[payment_hash].append(payment)
    return by_hash
2018-11-07 17:44:49 +01:00
2019-01-31 16:41:43 +01:00
def get_history(self):
    """Build the LN history list shown in the GUI.

    Returns a list of dicts: one 'payment' item per payment_hash, plus
    'channel_opening'/'channel_closure' items per channel, sorted by
    timestamp, with a running 'balance_msat' added to each item.
    """
    out = []
    for payment_hash, plist in self.get_payments().items():
        if len(plist) == 1:
            # single-channel payment: sign of amount follows direction
            chan_id, htlc, _direction, status = plist[0]
            direction = 'sent' if _direction == SENT else 'received'
            amount_msat = int(_direction) * htlc.amount_msat
            timestamp = htlc.timestamp
            label = self.wallet.get_label(payment_hash)
        else:
            # assume forwarding
            direction = 'forwarding'
            amount_msat = sum([int(_direction) * htlc.amount_msat for chan_id, htlc, _direction, status in plist])
            status = ''
            label = _('Forwarding')
            timestamp = min([htlc.timestamp for chan_id, htlc, _direction, status in plist])
        item = {
            'type': 'payment',
            'label': label,
            'timestamp': timestamp or 0,
            'date': timestamp_to_datetime(timestamp),
            'direction': direction,
            'status': status,
            'amount_msat': amount_msat,
            'payment_hash': payment_hash
        }
        out.append(item)
    # add funding events
    for chan in self.channels.values():
        # NOTE(review): .get() returns None for a channel missing from
        # channel_timestamps, which would raise TypeError on unpacking --
        # presumably every channel has an entry by this point; verify.
        funding_txid, funding_height, funding_timestamp, closing_txid, closing_height, closing_timestamp = self.channel_timestamps.get(bh2u(chan.channel_id))
        item = {
            'channel_id': bh2u(chan.channel_id),
            'type': 'channel_opening',
            'label': _('Open channel'),
            'txid': funding_txid,
            # ctn=0: balance at channel opening
            'amount_msat': chan.balance(LOCAL, ctn=0),
            'direction': 'received',
            'timestamp': funding_timestamp,
        }
        out.append(item)
        if not chan.is_closed():
            continue
        item = {
            'channel_id': bh2u(chan.channel_id),
            'txid': closing_txid,
            'label': _('Close channel'),
            'type': 'channel_closure',
            'amount_msat': -chan.balance_minus_outgoing_htlcs(LOCAL),
            'direction': 'sent',
            'timestamp': closing_timestamp,
        }
        out.append(item)
    # sort by timestamp; items without one sort last
    out.sort(key=lambda x: (x.get('timestamp') or float("inf")))
    # annotate each item with the running balance
    balance_msat = 0
    for item in out:
        balance_msat += item['amount_msat']
        item['balance_msat'] = balance_msat
    return out
2018-10-05 15:37:47 +02:00
def get_and_inc_counter_for_channel_keys(self):
    """Atomically bump and persist the channel-key derivation counter.

    Returns the new counter value (0 on first use).
    """
    with self.lock:
        counter = self.storage.get('lightning_channel_key_der_ctr', -1) + 1
        self.storage.put('lightning_channel_key_der_ctr', counter)
        self.storage.write()
        return counter
2018-07-14 19:39:28 +02:00
def suggest_peer(self):
    """Return the node_id of an initialized peer with which all channels
    are closed (i.e. a good candidate for a new channel), or None."""
    for node_id, peer in self.peers.items():
        if not peer.initialized.is_set():
            continue
        if any(not chan.is_closed() for chan in peer.channels.values()):
            continue
        return node_id
    return None
2018-05-31 17:16:01 +02:00
def channels_for_peer(self, node_id):
    """Return {channel_id: Channel} for all channels with the given node."""
    assert type(node_id) is bytes
    with self.lock:
        return {chan_id: chan
                for chan_id, chan in self.channels.items()
                if chan.node_id == node_id}
2018-05-31 17:16:01 +02:00
2018-05-28 10:43:50 +02:00
def save_channel(self, openchannel):
    """Persist `openchannel` (and all other channels) to wallet storage and
    notify listeners.

    Raises if the remote commitment points have not advanced, which would
    indicate a state-machine error.
    """
    assert type(openchannel) is Channel
    if openchannel.config[REMOTE].next_per_commitment_point == openchannel.config[REMOTE].current_per_commitment_point:
        raise Exception("Tried to save channel with next_point == current_point, this should not happen")
    with self.lock:
        self.channels[openchannel.channel_id] = openchannel
        dumped = [x.serialize() for x in self.channels.values()]
    self.storage.put("channels", dumped)
    self.storage.write()
    self.network.trigger_callback('channel', openchannel)
2018-05-28 10:43:50 +02:00
2018-05-29 18:12:48 +02:00
def save_short_chan_id(self, chan):
    """
    Checks if Funding TX has been mined. If it has, save the predicted short
    channel ID on chan; once the funding tx is deep enough
    (constraints.funding_txn_minimum_depth), promote it to the real
    short_channel_id and persist the channel.

    (Note: despite an earlier docstring claim, this returns nothing.)
    """
    lnwatcher = self.network.lnwatcher
    conf = lnwatcher.get_tx_height(chan.funding_outpoint.txid).conf
    if conf > 0:
        block_height, tx_pos = lnwatcher.get_txpos(chan.funding_outpoint.txid)
        assert tx_pos >= 0
        chan.short_channel_id_predicted = calc_short_channel_id(block_height, tx_pos, chan.funding_outpoint.output_index)
        # chained comparison: also guards against a zero minimum depth
        if conf >= chan.constraints.funding_txn_minimum_depth > 0:
            chan.short_channel_id = chan.short_channel_id_predicted
            self.save_channel(chan)
            self.on_channels_updated()
    else:
        self.logger.info(f"funding tx is still not at sufficient depth. actual depth: {conf}")
2018-05-29 18:12:48 +02:00
2018-12-04 20:50:24 +01:00
def channel_by_txo(self, txo):
    """Return the channel whose funding outpoint string equals `txo`, else None."""
    with self.lock:
        snapshot = list(self.channels.values())
    return next((chan for chan in snapshot
                 if chan.funding_outpoint.to_str() == txo), None)
2019-01-31 16:41:43 +01:00
def on_channel_open(self, event, funding_outpoint, funding_txid, funding_height):
    """Record funding info when one of our funding outpoints appears on-chain."""
    chan = self.channel_by_txo(funding_outpoint)
    if chan is None:
        return
    self.logger.info(f'on_channel_open {funding_outpoint}')
    # (funding txid/height/timestamp, closing txid/height/timestamp) -- the
    # closing half is unknown while the channel is open
    timestamps_entry = (funding_txid, funding_height.height, funding_height.timestamp,
                        None, None, None)
    self.channel_timestamps[bh2u(chan.channel_id)] = timestamps_entry
    self.storage.put('lightning_channel_timestamps', self.channel_timestamps)
    chan.set_funding_txo_spentness(False)
    # send event to GUI
    self.network.trigger_callback('channel', chan)
2018-06-22 12:17:11 +02:00
2018-12-04 20:50:24 +01:00
@log_exceptions
async def on_channel_closed(self, event, funding_outpoint, spenders, funding_txid, funding_height, closing_txid, closing_height):
    """Handle one of our funding outpoints being spent: record the close,
    detect who closed, and broadcast (or schedule) our sweep transactions.

    `spenders` maps outpoint strings of the closing tx to the txid that
    spends them (or None if unspent).
    """
    chan = self.channel_by_txo(funding_outpoint)
    if not chan:
        return
    self.logger.info(f'on_channel_closed {funding_outpoint}')
    self.channel_timestamps[bh2u(chan.channel_id)] = funding_txid, funding_height.height, funding_height.timestamp, closing_txid, closing_height.height, closing_height.timestamp
    self.storage.put('lightning_channel_timestamps', self.channel_timestamps)
    chan.set_funding_txo_spentness(True)
    # a force-close we initiated keeps its FORCE_CLOSING state
    if chan.get_state() != 'FORCE_CLOSING':
        chan.set_state("CLOSED")
        self.on_channels_updated()
    self.network.trigger_callback('channel', chan)
    # remove from channel_db
    if chan.short_channel_id is not None:
        self.channel_db.remove_channel(chan.short_channel_id)
    # detect who closed: compare the closing txid against both parties'
    # latest commitment transactions, and pick the matching sweep set
    if closing_txid == chan.local_commitment.txid():
        self.logger.info(f'we force closed {funding_outpoint}')
        encumbered_sweeptxs = chan.local_sweeptxs
    elif closing_txid == chan.remote_commitment.txid():
        self.logger.info(f'they force closed {funding_outpoint}')
        encumbered_sweeptxs = chan.remote_sweeptxs
    else:
        self.logger.info(f'not sure who closed {funding_outpoint} {closing_txid}')
        return
    # sweep each output of the closing tx that we have a sweep tx for
    for prevout, spender in spenders.items():
        e_tx = encumbered_sweeptxs.get(prevout)
        if e_tx is None:
            continue
        if spender is not None:
            self.logger.info(f'outpoint already spent {prevout}')
            continue
        prev_txid, prev_index = prevout.split(':')
        # a sweep is held back while its CLTV/CSV timelock has not expired
        broadcast = True
        if e_tx.cltv_expiry:
            local_height = self.network.get_local_height()
            remaining = e_tx.cltv_expiry - local_height
            if remaining > 0:
                self.logger.info('waiting for {}: CLTV ({} > {}), funding outpoint {} and tx {}'
                                 .format(e_tx.name, local_height, e_tx.cltv_expiry, funding_outpoint[:8], prev_txid[:8]))
                broadcast = False
        if e_tx.csv_delay:
            prev_height = self.network.lnwatcher.get_tx_height(prev_txid)
            remaining = e_tx.csv_delay - prev_height.conf
            if remaining > 0:
                self.logger.info('waiting for {}: CSV ({} >= {}), funding outpoint {} and tx {}'
                                 .format(e_tx.name, prev_height.conf, e_tx.csv_delay, funding_outpoint[:8], prev_txid[:8]))
                broadcast = False
    if broadcast:
            if not await self.network.lnwatcher.broadcast_or_log(funding_outpoint, e_tx):
                self.logger.info(f'{e_tx.name} could not publish encumbered tx: {str(e_tx)}, prevout: {prevout}')
        else:
            # `remaining` is always bound here: broadcast is only False when
            # one of the timelock branches above computed it
            self.wallet.add_future_tx(e_tx, remaining)
2018-12-04 20:50:24 +01:00
2019-03-01 19:32:49 +01:00
def is_dangerous(self, chan):
    """Return True if `chan` has an unfulfilled HTLC that is about to expire.

    An HTLC is considered dangerous when its value is significant
    (> 10x the remote dust limit) and fewer than 3 blocks remain before
    its CLTV expiry; callers force-close the channel in that case.
    """
    # hoisted: both values are invariant across the loop (were recomputed
    # per-HTLC before)
    dust_limit = chan.config[REMOTE].dust_limit_sat * 1000
    local_height = self.network.get_local_height()
    for x in chan.get_unfulfilled_htlcs():
        delay = x.cltv_expiry - local_height
        if x.amount_msat > 10 * dust_limit and delay < 3:
            self.logger.info('htlc is dangerous')
            return True
        self.logger.info(f'htlc is not dangerous. delay {delay}')
    return False
2018-12-04 20:50:24 +01:00
2018-10-12 19:40:12 +02:00
@log_exceptions
async def on_network_update(self, event, *args):
    """Drive per-channel state transitions on network events
    ('wallet_updated', 'network_updated', 'verified', 'fee').
    """
    # TODO
    # Race discovered in save_channel (assertion failing):
    # since short_channel_id could be changed while saving.
    with self.lock:
        channels = list(self.channels.values())
    lnwatcher = self.network.lnwatcher
    if event in ('verified', 'wallet_updated'):
        # these events carry a wallet argument; only react to our lnwatcher's
        if args[0] != lnwatcher:
            return
    for chan in channels:
        # force-close channels whose HTLCs are close to expiring
        if chan.get_state() in ["OPEN", "DISCONNECTED"] and self.is_dangerous(chan):
            await self.force_close_channel(chan.channel_id)
            continue
        if chan.short_channel_id is None:
            self.save_short_chan_id(chan)
        if chan.get_state() == "OPENING" and chan.short_channel_id:
            # funding tx deep enough: tell the peer the channel is usable
            peer = self.peers[chan.node_id]
            peer.send_funding_locked(chan)
        elif chan.get_state() == "OPEN":
            peer = self.peers.get(chan.node_id)
            if peer is None:
                self.logger.info("peer not found for {}".format(bh2u(chan.node_id)))
                return
            if event == 'fee':
                await peer.bitcoin_fee_update(chan)
            conf = lnwatcher.get_tx_height(chan.funding_outpoint.txid).conf
            peer.on_network_update(chan, conf)
        elif chan.get_state() == 'FORCE_CLOSING':
            txid = chan.force_close_tx().txid()
            height = lnwatcher.get_tx_height(txid).height
            self.logger.info(f"force closing tx {txid}, height {height}")
            # TX_HEIGHT_LOCAL means the closing tx never made it on-chain
            if height == TX_HEIGHT_LOCAL:
                self.logger.info('REBROADCASTING CLOSING TX')
                await self.force_close_channel(chan.channel_id)
2018-05-28 11:55:20 +02:00
2018-09-27 16:43:33 +02:00
async def _open_channel_coroutine(self, peer, local_amount_sat, push_sat, password):
    """Run the channel-establishment flow with *peer* and return the new channel.

    The peer might just have been connected to, so wait (up to 5 s) for its
    init handshake to finish first.  The channel is saved, registered with
    the LN watcher, and a 'channels' callback is fired before returning.
    """
    await asyncio.wait_for(peer.initialized.wait(), 5)
    funding_sat = local_amount_sat + push_sat
    new_chan = await peer.channel_establishment_flow(
        password,
        funding_sat=funding_sat,
        push_msat=1000 * push_sat,
        temp_channel_id=os.urandom(32))
    self.save_channel(new_chan)
    funding_outpoint_str = new_chan.funding_outpoint.to_str()
    self.network.lnwatcher.add_channel(funding_outpoint_str, new_chan.get_funding_address())
    self.on_channels_updated()
    return new_chan
2018-05-31 12:38:02 +02:00
def on_channels_updated(self):
    """Fire the 'channels' network callback so listeners (GUI) refresh."""
    self.network.trigger_callback('channels')
2018-05-28 11:55:20 +02:00
2019-03-19 13:24:47 +01:00
def open_channel(self, connect_contents, local_amt_sat, push_amt_sat, password=None, timeout=20):
    """Open a new channel. Blocking; intended to be called from the GUI/CLI thread.

    connect_contents: connection string, either "node_id" or "node_id@host:port".
    local_amt_sat:    our contribution to the funding output, in satoshis.
    push_amt_sat:     amount pushed to the remote side at opening, in satoshis.
    password:         wallet password (forwarded to the establishment flow).
    timeout:          seconds to wait for the peer connection and funding flow.

    Returns the funding outpoint as a "txid:index" string.
    Raises ConnStringFormatError if no usable address can be determined or the
    hostname does not resolve.
    """
    node_id, rest = extract_nodeid(connect_contents)
    peer = self.peers.get(node_id)
    if not peer:
        if rest is not None:
            host, port = split_host_port(rest)
        else:
            # No explicit address given: look the node up in the channel db.
            # Note: use self.channel_db directly (consistent with the rest of
            # this class) and only when we actually need the lookup.
            node_info = self.channel_db.nodes_get(node_id)
            if not node_info:
                raise ConnStringFormatError(_('Unknown node:') + ' ' + bh2u(node_id))
            addrs = self.channel_db.get_node_addresses(node_info)
            if not addrs:
                raise ConnStringFormatError(_('Don\'t know any addresses for node:') + ' ' + bh2u(node_id))
            host, port = self.choose_preferred_address(addrs)
        try:
            socket.getaddrinfo(host, int(port))
        except socket.gaierror:
            raise ConnStringFormatError(_('Hostname does not resolve (getaddrinfo failed)'))
        # connect on the event-loop thread; block here until connected
        peer_future = asyncio.run_coroutine_threadsafe(self.add_peer(host, port, node_id),
                                                       self.network.asyncio_loop)
        peer = peer_future.result(timeout)
    coro = self._open_channel_coroutine(peer, local_amt_sat, push_amt_sat, password)
    f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
    chan = f.result(timeout)
    return chan.funding_outpoint.to_str()
2018-05-28 18:22:45 +02:00
2018-07-17 21:27:59 +02:00
def pay(self, invoice, amount_sat=None):
    """
    Kept separate from _pay so that the tests can run with one thread only.
    Returns (lnaddr, peer, future-of-the-payment).
    """
    # _pay does decoding/route-finding; the coroutine it returns performs
    # the actual HTLC send on the network's event loop.
    lnaddr, target_peer, payment_coro = self.network.run_from_another_thread(
        self._pay(invoice, amount_sat))
    fut = asyncio.run_coroutine_threadsafe(payment_coro, self.network.asyncio_loop)
    return lnaddr, target_peer, fut
2019-02-02 22:47:45 +01:00
def get_channel_by_short_id(self, short_channel_id):
    """Return our channel with the given short channel id, or None."""
    with self.lock:
        matches = (c for c in self.channels.values()
                   if c.short_channel_id == short_channel_id)
        return next(matches, None)
2019-02-01 20:59:59 +01:00
async def _pay(self, invoice, amount_sat=None, same_thread=False):
    """Decode and record *invoice*, find a route, and return
    (lnaddr, first-hop peer, coroutine-that-performs-the-payment)."""
    lnaddr = self._check_invoice(invoice, amount_sat)
    self.save_invoice(lnaddr.paymenthash, invoice, SENT, is_paid=False)
    self.wallet.set_label(bh2u(lnaddr.paymenthash), lnaddr.get_description())
    route = await self._create_route_from_invoice(decoded_invoice=lnaddr)
    first_hop = route[0]
    sending_peer = self.peers[first_hop.node_id]
    # sanity: the first hop of the route must be one of our own channels
    assert self.get_channel_by_short_id(first_hop.short_channel_id), \
        'Found route with short channel ID we don\'t have: ' + repr(first_hop.short_channel_id)
    return lnaddr, sending_peer, self._pay_to_route(route, lnaddr, invoice)
2018-11-02 19:16:42 +01:00
2019-02-08 20:32:06 +01:00
async def _pay_to_route(self, route, addr, pay_req):
    """Send the HTLC for *addr* along *route*; first hop must be our channel."""
    first_scid = route[0].short_channel_id
    chan = self.get_channel_by_short_id(first_scid)
    if not chan:
        raise Exception("PathFinder returned path with short_channel_id {} that is not in channel list".format(bh2u(first_scid)))
    amount_msat = int(addr.amount * COIN * 1000)
    sending_peer = self.peers[route[0].node_id]
    htlc = await sending_peer.pay(route, chan, amount_msat,
                                  addr.paymenthash, addr.get_min_final_cltv_expiry())
    self.network.trigger_callback('htlc_added', htlc, addr, SENT)
2018-05-28 10:43:50 +02:00
2018-11-02 19:16:42 +01:00
@staticmethod
def _check_invoice(invoice, amount_sat=None):
    """Decode a BOLT-11 invoice, apply an optional amount override (sat),
    and validate it.  Raises InvoiceError on a missing amount or an
    unreasonably large min_final_cltv_expiry."""
    lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
    if amount_sat:
        lnaddr.amount = Decimal(amount_sat) / COIN
    if lnaddr.amount is None:
        raise InvoiceError(_("Missing amount"))
    min_cltv = lnaddr.get_min_final_cltv_expiry()
    if min_cltv > 60 * 144:  # ~60 days worth of blocks
        raise InvoiceError("{}\n{}".format(
            _("Invoice wants us to risk locking funds for unreasonably long."),
            f"min_final_cltv_expiry: {min_cltv}"))
    return lnaddr
2019-02-01 20:59:59 +01:00
async def _create_route_from_invoice(self, decoded_invoice) -> List[RouteEdge]:
    """Find a payment route for *decoded_invoice*.

    First tries each 'r' routing hint from the invoice (in random order):
    route to the hint's border node via the public graph, then append the
    private hops described by the hint.  If no hint yields a sane route,
    fall back to routing directly to the invoice pubkey.
    Raises PaymentFailure if no usable route is found.
    """
    amount_msat = int(decoded_invoice.amount * COIN * 1000)
    invoice_pubkey = decoded_invoice.pubkey.serialize()
    # use 'r' field from invoice
    route = None  # type: List[RouteEdge]
    # only want 'r' tags
    r_tags = list(filter(lambda x: x[0] == 'r', decoded_invoice.tags))
    # strip the tag type, it's implicitly 'r' now
    r_tags = list(map(lambda x: x[1], r_tags))
    # if there are multiple hints, we will use the first one that works,
    # from a random permutation
    random.shuffle(r_tags)
    with self.lock:
        channels = list(self.channels.values())
    for private_route in r_tags:
        if len(private_route) == 0: continue
        if len(private_route) > NUM_MAX_EDGES_IN_PAYMENT_PATH: continue
        border_node_pubkey = private_route[0][0]
        # public-graph path from us to the hint's entry ("border") node
        path = self.network.path_finder.find_path_for_payment(self.node_keypair.pubkey, border_node_pubkey, amount_msat, channels)
        if not path: continue
        route = self.network.path_finder.create_route_from_path(path, self.node_keypair.pubkey)
        # we need to shift the node pubkey by one towards the destination:
        private_route_nodes = [edge[0] for edge in private_route][1:] + [invoice_pubkey]
        private_route_rest = [edge[1:] for edge in private_route]
        prev_node_id = border_node_pubkey
        for node_pubkey, edge_rest in zip(private_route_nodes, private_route_rest):
            short_channel_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta = edge_rest
            # if we have a routing policy for this edge in the db, that takes precedence,
            # as it is likely from a previous failure
            channel_policy = self.channel_db.get_routing_policy_for_channel(prev_node_id, short_channel_id)
            if channel_policy:
                fee_base_msat = channel_policy.fee_base_msat
                fee_proportional_millionths = channel_policy.fee_proportional_millionths
                cltv_expiry_delta = channel_policy.cltv_expiry_delta
            route.append(RouteEdge(node_pubkey, short_channel_id, fee_base_msat, fee_proportional_millionths,
                                   cltv_expiry_delta))
            prev_node_id = node_pubkey
        # test sanity (fees / cltv within reason for this amount)
        if not is_route_sane_to_use(route, amount_msat, decoded_invoice.get_min_final_cltv_expiry()):
            self.logger.info(f"rejecting insane route {route}")
            route = None
            continue
        break
    # if could not find route using any hint; try without hint now
    if route is None:
        path = self.network.path_finder.find_path_for_payment(self.node_keypair.pubkey, invoice_pubkey, amount_msat, channels)
        if not path:
            raise PaymentFailure(_("No path found"))
        route = self.network.path_finder.create_route_from_path(path, self.node_keypair.pubkey)
        if not is_route_sane_to_use(route, amount_msat, decoded_invoice.get_min_final_cltv_expiry()):
            self.logger.info(f"rejecting insane route {route}")
            raise PaymentFailure(_("No path found"))
    return route
2018-06-06 20:30:29 +02:00
def add_invoice(self, amount_sat, message):
    """Create a fresh BOLT-11 invoice for *amount_sat* (may be falsy for
    an amountless invoice) with description *message*.  The preimage and
    the invoice are persisted; the encoded invoice string is returned."""
    preimage = os.urandom(32)
    payment_hash = sha256(preimage)
    amount_btc = amount_sat / Decimal(COIN) if amount_sat else None
    routing_hints = self._calc_routing_hints_for_invoice(amount_sat)
    if not routing_hints:
        self.logger.info("Warning. No routing hints added to invoice. "
                         "Other clients will likely not be able to send to us.")
    tags = [('d', message),
            ('c', MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE)] + routing_hints
    lnaddr = LnAddr(payment_hash, amount_btc, tags=tags)
    invoice = lnencode(lnaddr, self.node_keypair.privkey)
    self.save_invoice(payment_hash, invoice, RECEIVED, is_paid=False)
    self.save_preimage(payment_hash, preimage)
    self.wallet.set_label(bh2u(payment_hash), message)
    return invoice
2019-03-07 17:51:35 +01:00
def save_preimage(self, payment_hash: bytes, preimage: bytes):
    """Persist *preimage* under its (hex-encoded) payment hash."""
    assert sha256(preimage) == payment_hash
    self.preimages[bh2u(payment_hash)] = bh2u(preimage)
    self.storage.put('lightning_preimages', self.preimages)
    self.storage.write()
2019-03-07 17:51:35 +01:00
def get_preimage(self, payment_hash: bytes) -> bytes:
    """Return the stored preimage for *payment_hash*.

    Raises UnknownPaymentHash if we never stored one for this hash.
    """
    key = bh2u(payment_hash)
    try:
        preimage_hex = self.preimages[key]
    except KeyError as e:
        raise UnknownPaymentHash(payment_hash) from e
    preimage = bfh(preimage_hex)
    assert sha256(preimage) == payment_hash
    return preimage
2019-01-29 19:01:04 +01:00
2019-03-07 17:51:35 +01:00
def save_invoice(self, payment_hash: bytes, invoice, direction, *, is_paid=False):
    """Persist (invoice, direction, is_paid) keyed by the hex payment hash."""
    self.invoices[bh2u(payment_hash)] = (invoice, direction, is_paid)
    self.storage.put('lightning_invoices', self.invoices)
    self.storage.write()
2018-05-28 09:39:05 +02:00
2019-03-07 17:51:35 +01:00
def set_paid(self, payment_hash):
    """Mark a known invoice as paid; silently ignore unknown hashes
    (those occur when we are merely forwarding a payment)."""
    entry = self.invoices.get(bh2u(payment_hash))
    if entry is None:
        # if we are forwarding
        return
    invoice, direction, _is_paid = entry
    self.save_invoice(payment_hash, invoice, direction, is_paid=True)
2019-02-13 15:46:35 +01:00
def get_invoice(self, payment_hash: bytes) -> LnAddr:
    """Decode and return the stored invoice for *payment_hash*.

    Raises UnknownPaymentHash if we have no invoice for this hash.
    """
    try:
        stored_invoice = self.invoices[bh2u(payment_hash)][0]
    except KeyError as e:
        raise UnknownPaymentHash(payment_hash) from e
    return lndecode(stored_invoice, expected_hrp=constants.net.SEGWIT_HRP)
2018-10-18 13:17:29 +02:00
2018-10-08 20:31:15 +02:00
def _calc_routing_hints_for_invoice(self, amount_sat):
    """calculate routing hints (BOLT-11 'r' field)"""
    self.channel_db.load_data()
    routing_hints = []
    with self.lock:
        our_channels = list(self.channels.values())
    # note: currently we add *all* our channels; but this might be a privacy leak?
    for chan in our_channels:
        # only open channels are usable as hints
        if chan.get_state() != "OPEN":
            continue
        # channel must have enough inbound capacity for the requested amount
        # FIXME because of on-chain fees of ctx, this check is insufficient
        if amount_sat and chan.balance(REMOTE) // 1000 < amount_sat:
            continue
        chan_id = chan.short_channel_id
        assert type(chan_id) is bytes, chan_id
        # Fallback values used when we lack a channel_update for the incoming
        # direction of our private channel: the invoice is filled with garbage,
        # but the sender should still be able to pay us at the cost of one
        # extra round trip (they get the real update from the onion error).
        # at least, that's the theory. https://github.com/lightningnetwork/lnd/issues/2066
        fee_base_msat = fee_proportional_millionths = 0
        cltv_expiry_delta = 1  # lnd won't even try with zero
        channel_info = self.channel_db.get_channel_info(chan_id)
        policy = (self.channel_db.get_policy_for_node(channel_info, chan.node_id)
                  if channel_info else None)
        if policy:
            fee_base_msat = policy.fee_base_msat
            fee_proportional_millionths = policy.fee_proportional_millionths
            cltv_expiry_delta = policy.cltv_expiry_delta
        else:
            self.logger.info(f"Warning. Missing channel update for our channel {bh2u(chan_id)}; "
                             f"filling invoice with incorrect data.")
        routing_hints.append(('r', [(chan.node_id,
                                     chan_id,
                                     fee_base_msat,
                                     fee_proportional_millionths,
                                     cltv_expiry_delta)]))
    return routing_hints
2018-10-23 18:54:23 +02:00
def delete_invoice(self, payment_hash_hex: str):
    """Remove the invoice with this hex payment hash; persist only when
    something was actually deleted.  Unknown hashes are a no-op."""
    if payment_hash_hex not in self.invoices:
        return
    del self.invoices[payment_hash_hex]
    self.storage.put('lightning_invoices', self.invoices)
    self.storage.write()
2018-06-27 21:54:57 +02:00
2019-01-30 19:16:04 +01:00
def get_balance(self):
    """Sum of local balances (msat) over all non-closed channels,
    returned as a Decimal number of satoshis."""
    with self.lock:
        total_msat = sum(c.balance(LOCAL)
                         for c in self.channels.values() if not c.is_closed())
        return Decimal(total_msat) / 1000
2019-01-30 19:16:04 +01:00
2018-05-30 13:42:25 +02:00
def list_channels(self):
    """Yield one JSON-serializable summary dict per channel.

    We expose the funding outpoint as 'channel_point' because lnd uses
    channel_point (funding outpoint) to identify channels.
    """
    with self.lock:
        for chan in self.channels.values():
            yield {
                'local_htlcs': json.loads(encoder.encode(chan.hm.log[LOCAL])),
                'remote_htlcs': json.loads(encoder.encode(chan.hm.log[REMOTE])),
                'channel_id': bh2u(chan.short_channel_id) if chan.short_channel_id else None,
                'full_channel_id': bh2u(chan.channel_id),
                'channel_point': chan.funding_outpoint.to_str(),
                'state': chan.get_state(),
                'remote_pubkey': bh2u(chan.node_id),
                'local_balance': chan.balance(LOCAL) // 1000,
                'remote_balance': chan.balance(REMOTE) // 1000,
            }
2018-06-20 15:46:22 +02:00
2018-10-02 21:13:17 +02:00
async def close_channel(self, chan_id):
    """Cooperatively close the channel through its peer."""
    chan = self.channels[chan_id]
    owning_peer = self.peers[chan.node_id]
    return await owning_peer.close_channel(chan_id)
2018-10-24 17:36:07 +02:00
async def force_close_channel(self, chan_id):
    """Unilaterally close the channel by broadcasting our commitment tx.

    Marks the channel FORCE_CLOSING and persists it before broadcasting.
    Returns the txid of the closing transaction.
    """
    chan = self.channels[chan_id]
    closing_tx = chan.force_close_tx()
    chan.set_state('FORCE_CLOSING')
    self.save_channel(chan)
    self.on_channels_updated()
    await self.network.broadcast_transaction(closing_tx)
    return closing_tx.txid()
2018-07-13 17:05:04 +02:00
2018-10-16 16:30:18 +02:00
async def reestablish_peers_and_channels(self):
    """Daemon loop: once per second, try to reconnect peers for all
    non-closed channels and warn about channels whose funding feerate has
    fallen far below the current recommendation."""

    async def reestablish_peer_for_given_channel():
        # NOTE: deliberately closes over `chan` and `now` from the
        # enclosing for-loop below; only call it from there.
        # try last good address first
        peer = self.channel_db.get_last_good_address(chan.node_id)
        if peer:
            last_tried = self._last_tried_peer.get(peer, 0)
            if last_tried + PEER_RETRY_INTERVAL_FOR_CHANNELS < now:
                await self.add_peer(peer.host, peer.port, peer.pubkey)
                return
        # try random address for node_id
        node_info = self.channel_db.nodes_get(chan.node_id)
        if not node_info: return
        addresses = self.channel_db.get_node_addresses(node_info)
        if not addresses: return
        adr_obj = random.choice(addresses)
        host, port = adr_obj.host, adr_obj.port
        peer = LNPeerAddr(host, port, chan.node_id)
        last_tried = self._last_tried_peer.get(peer, 0)
        if last_tried + PEER_RETRY_INTERVAL_FOR_CHANNELS < now:
            await self.add_peer(host, port, chan.node_id)

    while True:
        await asyncio.sleep(1)
        with self.lock:
            channels = list(self.channels.values())
        now = time.time()
        for chan in channels:
            if chan.is_closed():
                continue
            # warn if the channel feerate dropped to <50% of the current
            # recommendation (skipped on regtest, where fees are hardcoded)
            if constants.net is not constants.BitcoinRegtest:
                ratio = chan.constraints.feerate / self.current_feerate_per_kw()
                if ratio < 0.5:
                    self.logger.warning(f"fee level for channel {bh2u(chan.channel_id)} is {chan.constraints.feerate} sat/kiloweight, "
                                        f"current recommended feerate is {self.current_feerate_per_kw()} sat/kiloweight, consider force closing!")
            if not chan.should_try_to_reestablish_peer():
                continue
            peer = self.peers.get(chan.node_id, None)
            coro = peer.reestablish_channel(chan) if peer else reestablish_peer_for_given_channel()
            await self.network.main_taskgroup.spawn(coro)
2018-07-30 13:51:03 +02:00
2019-01-25 20:16:11 +01:00
def current_feerate_per_kw(self):
    """Recommended feerate for channel transactions, in sat/kiloweight.

    Uses the ETA-based fee estimate (falling back to the static default),
    converted from sat/kvB; never returns less than the 253 relay floor.
    On regtest a hardcoded rate is used instead.
    """
    from .simple_config import FEE_LN_ETA_TARGET, FEERATE_FALLBACK_STATIC_FEE, FEERATE_REGTEST_HARDCODED
    if constants.net is constants.BitcoinRegtest:
        return FEERATE_REGTEST_HARDCODED // 4
    per_kvbyte = self.network.config.eta_target_to_fee(FEE_LN_ETA_TARGET)
    if per_kvbyte is None:
        per_kvbyte = FEERATE_FALLBACK_STATIC_FEE
    return max(253, per_kvbyte // 4)