Files
pallectrum/electrum/lnwatcher.py

300 lines
12 KiB
Python
Raw Normal View History

2018-10-25 19:34:31 +02:00
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
from typing import NamedTuple, Iterable, TYPE_CHECKING
2018-09-12 16:17:10 +02:00
import os
import queue
import threading
import concurrent
2018-09-12 16:17:10 +02:00
from collections import defaultdict
2018-10-12 14:53:22 +02:00
import asyncio
2018-10-30 20:18:26 +01:00
from enum import IntEnum, auto
from typing import NamedTuple, Dict
2018-10-12 14:53:22 +02:00
import jsonrpclib
2019-03-06 09:56:22 +01:00
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime, Boolean
from sqlalchemy.orm.query import Query
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import not_, or_
from .sql_db import SqlDB, sql
from .util import bh2u, bfh, log_exceptions, ignore_exceptions
from . import wallet
2018-09-12 16:17:10 +02:00
from .storage import WalletStorage
from .address_synchronizer import AddressSynchronizer, TX_HEIGHT_LOCAL, TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED
2018-12-20 18:09:55 +01:00
from .transaction import Transaction
if TYPE_CHECKING:
from .network import Network
class ListenerItem(NamedTuple):
    """Per-outpoint hooks used by tests, stored in LNWatcher.tx_progress."""
    # this is triggered when the lnwatcher is all done with the outpoint used as index in LNWatcher.tx_progress
    all_done : asyncio.Event
    # txs we broadcast are put on this queue so that the test can wait for them to get mined
    tx_queue : asyncio.Queue
2018-10-30 20:18:26 +01:00
class TxMinedDepth(IntEnum):
    """ IntEnum because we call min() in get_deepest_tx_mined_depth_for_txids """
    # NOTE: declaration order matters — auto() assigns ascending values, so
    # DEEP is the smallest and min() over depths picks the most-confirmed one.
    DEEP = auto()
    SHALLOW = auto()
    MEMPOOL = auto()
    FREE = auto()
2018-06-22 10:57:11 +02:00
# declarative base shared by the SweepTx and ChannelInfo ORM models below
Base = declarative_base()
class SweepTx(Base):
    """ORM row: one pre-signed sweep tx for a channel, keyed by
    (funding_outpoint, index)."""
    __tablename__ = 'sweep_txs'
    funding_outpoint = Column(String(34), primary_key=True)  # "txid:n" of the channel funding output
    index = Column(Integer(), primary_key=True)  # per-channel insertion order (see SweepStore.add_sweep_tx)
    prev_txid = Column(String(32))  # txid of the outpoint this tx sweeps
    tx = Column(String())  # raw tx: written as bytes via bfh(), read back via bh2u()
class ChannelInfo(Base):
    """ORM row: maps a channel funding outpoint to its watched address."""
    __tablename__ = 'channel_info'
    outpoint = Column(String(34), primary_key=True)  # "txid:n" funding outpoint
    address = Column(String(32))  # on-chain address associated with the channel
2019-03-06 09:56:22 +01:00
class SweepStore(SqlDB):
    """Persistent store of watched channels and their pre-signed sweep
    (justice) transactions, keyed by the channel funding outpoint.

    All public methods run on the SqlDB thread via the @sql decorator.
    """

    def __init__(self, path, network):
        super().__init__(network, path, Base)

    @sql
    def get_sweep_tx(self, funding_outpoint, prev_txid):
        """Return all stored sweep Transactions for (funding_outpoint, prev_txid)."""
        return [Transaction(bh2u(r.tx))
                for r in self.DBSession.query(SweepTx).filter(
                    SweepTx.funding_outpoint == funding_outpoint,
                    SweepTx.prev_txid == prev_txid).all()]

    @sql
    def get_tx_by_index(self, funding_outpoint, index):
        """Return (prev_txid, tx_hex) of the index-th sweep tx of a channel.

        Raises AttributeError if no row matches (r is None).
        """
        r = self.DBSession.query(SweepTx).filter(
            SweepTx.funding_outpoint == funding_outpoint,
            SweepTx.index == index).one_or_none()
        return r.prev_txid, bh2u(r.tx)

    @sql
    def list_sweep_tx(self):
        """Return the set of funding outpoints that have sweep txs stored."""
        return set(r.funding_outpoint for r in self.DBSession.query(SweepTx).all())

    @sql
    def add_sweep_tx(self, funding_outpoint, prev_txid, tx):
        """Append a sweep tx; its index is the current per-channel row count.

        BUG FIX: the filter previously compared the local variable with
        itself (funding_outpoint==funding_outpoint), which is always true
        and counted rows of *all* channels, producing wrong indices.
        Compare against the column instead.
        """
        n = self.DBSession.query(SweepTx).filter(
            SweepTx.funding_outpoint == funding_outpoint).count()
        self.DBSession.add(SweepTx(funding_outpoint=funding_outpoint, index=n,
                                   prev_txid=prev_txid, tx=bfh(tx)))
        self.DBSession.commit()

    @sql
    def get_num_tx(self, funding_outpoint):
        """Number of sweep txs stored for this channel.

        BUG FIX: same self-comparison filter as add_sweep_tx — it used to
        return the global row count regardless of the channel.
        """
        return self.DBSession.query(SweepTx).filter(
            SweepTx.funding_outpoint == funding_outpoint).count()

    @sql
    def remove_sweep_tx(self, funding_outpoint):
        """Delete all sweep txs of a channel."""
        r = self.DBSession.query(SweepTx).filter(
            SweepTx.funding_outpoint == funding_outpoint).all()
        for x in r:
            self.DBSession.delete(x)
        self.DBSession.commit()

    @sql
    def add_channel(self, outpoint, address):
        """Register a channel (funding outpoint -> address)."""
        self.DBSession.add(ChannelInfo(address=address, outpoint=outpoint))
        self.DBSession.commit()

    @sql
    def remove_channel(self, outpoint):
        """Delete the channel row; raises if the outpoint is unknown (v is None)."""
        v = self.DBSession.query(ChannelInfo).filter(
            ChannelInfo.outpoint == outpoint).one_or_none()
        self.DBSession.delete(v)
        self.DBSession.commit()

    @sql
    def has_channel(self, outpoint):
        """True if the outpoint is already registered."""
        return bool(self.DBSession.query(ChannelInfo).filter(
            ChannelInfo.outpoint == outpoint).one_or_none())

    @sql
    def get_address(self, outpoint):
        """Return the channel address for outpoint, or None if unknown."""
        r = self.DBSession.query(ChannelInfo).filter(
            ChannelInfo.outpoint == outpoint).one_or_none()
        return r.address if r else None

    @sql
    def list_channel_info(self):
        """Return [(address, outpoint)] for every registered channel."""
        return [(r.address, r.outpoint) for r in self.DBSession.query(ChannelInfo).all()]
class LNWatcher(AddressSynchronizer):
    """Watches channel funding outpoints on-chain and publishes stored
    sweep txs when a breach/close is detected."""
    # single-letter tag used by the logging machinery for this class
    verbosity_filter = 'W'

    def __init__(self, network: 'Network'):
        # the watcher keeps its own wallet file so it can reuse the
        # AddressSynchronizer machinery to track channel addresses
        path = os.path.join(network.config.path, "watchtower_wallet")
        storage = WalletStorage(path)
        AddressSynchronizer.__init__(self, storage)
        self.config = network.config
        self.start_network(network)
        self.lock = threading.RLock()
        # persistent store of channels and their pre-signed sweep txs
        self.sweepstore = SweepStore(os.path.join(network.config.path, "watchtower_db"), network)
        self.network.register_callback(self.on_network_update,
                                       ['network_updated', 'blockchain_updated', 'verified', 'wallet_updated'])
        self.set_remote_watchtower()
        # this maps funding_outpoints to ListenerItems, which have an event for when the watcher is done,
        # and a queue for seeing which txs are being published
        self.tx_progress = {}  # type: Dict[str, ListenerItem]
        # status gets populated when we run
        self.channel_status = {}
def get_channel_status(self, outpoint):
return self.channel_status.get(outpoint, 'unknown')
2018-10-12 14:53:22 +02:00
2019-01-30 17:24:43 +01:00
def set_remote_watchtower(self):
watchtower_url = self.config.get('watchtower_url')
2019-05-22 12:30:45 +02:00
try:
self.watchtower = jsonrpclib.Server(watchtower_url) if watchtower_url else None
except:
self.watchtower = None
2019-01-30 17:24:43 +01:00
self.watchtower_queue = asyncio.Queue()
2019-03-12 18:33:36 +01:00
    def get_num_tx(self, outpoint):
        # number of sweep txs stored locally for this channel outpoint
        return self.sweepstore.get_num_tx(outpoint)
2018-10-12 14:53:22 +02:00
2018-10-12 19:40:12 +02:00
    @ignore_exceptions
    @log_exceptions
    async def watchtower_task(self):
        """Forever: push missing sweep txs for queued outpoints to the
        remote watchtower; retry unreachable watchtowers after 5s."""
        self.logger.info('watchtower task started')
        # initial check: re-sync every known channel on startup
        for address, outpoint in self.sweepstore.list_channel_info():
            await self.watchtower_queue.put(outpoint)
        while True:
            outpoint = await self.watchtower_queue.get()
            if self.watchtower is None:
                # no remote watchtower configured; drop the request
                continue
            # synchronize with remote
            try:
                local_n = self.sweepstore.get_num_tx(outpoint)
                # NOTE(review): these jsonrpclib calls are synchronous and
                # block the event loop while the RPC is in flight
                n = self.watchtower.get_num_tx(outpoint)
                if n == 0:
                    # remote has never seen this channel; register it first
                    address = self.sweepstore.get_address(outpoint)
                    self.watchtower.add_channel(outpoint, address)
                self.logger.info("sending %d transactions to watchtower"%(local_n - n))
                # send only the txs the remote does not have yet
                for index in range(n, local_n):
                    prev_txid, tx = self.sweepstore.get_tx_by_index(outpoint, index)
                    self.watchtower.add_sweep_tx(outpoint, prev_txid, tx)
            except ConnectionRefusedError:
                self.logger.info('could not reach watchtower, will retry in 5s')
                await asyncio.sleep(5)
                # re-queue the outpoint so the sync is retried
                await self.watchtower_queue.put(outpoint)
2019-03-12 18:33:36 +01:00
def add_channel(self, outpoint, address):
2018-11-22 12:22:48 +01:00
self.add_address(address)
2018-09-12 16:17:10 +02:00
with self.lock:
2019-03-12 18:33:36 +01:00
if not self.sweepstore.has_channel(outpoint):
self.sweepstore.add_channel(outpoint, address)
2018-09-12 16:17:10 +02:00
def unwatch_channel(self, address, funding_outpoint):
self.logger.info(f'unwatching {funding_outpoint}')
self.sweepstore.remove_sweep_tx(funding_outpoint)
2019-03-20 09:53:08 +01:00
self.sweepstore.remove_channel(funding_outpoint)
if funding_outpoint in self.tx_progress:
self.tx_progress[funding_outpoint].all_done.set()
2018-10-12 19:40:12 +02:00
    @log_exceptions
    async def on_network_update(self, event, *args):
        """Network callback: once our history is up to date, re-check the
        on-chain situation of every registered channel."""
        if event in ('verified', 'wallet_updated'):
            # these events carry the wallet as args[0]; ignore other wallets
            if args[0] != self:
                return
        if not self.synchronizer:
            self.logger.info("synchronizer not set yet")
            return
        if not self.synchronizer.is_up_to_date():
            # wait until history is fully synced before judging channels
            return
        for address, outpoint in self.sweepstore.list_channel_info():
            await self.check_onchain_situation(address, outpoint)
    async def check_onchain_situation(self, address, funding_outpoint):
        """Classify a channel as open/closed from the spend status of its
        funding outpoint, fire the matching callback, attempt breach
        remedy on close, and unwatch once nothing is left to monitor."""
        keep_watching, spenders = self.inspect_tx_candidate(funding_outpoint, 0)
        funding_txid = funding_outpoint.split(':')[0]
        funding_height = self.get_tx_height(funding_txid)
        closing_txid = spenders.get(funding_outpoint)
        if closing_txid is None:
            # funding outpoint unspent -> channel still open
            self.network.trigger_callback('channel_open', funding_outpoint, funding_txid, funding_height)
        else:
            closing_height = self.get_tx_height(closing_txid)
            self.network.trigger_callback('channel_closed', funding_outpoint, spenders, funding_txid, funding_height, closing_txid, closing_height)
            # channel was closed: try to publish our stored sweep txs
            await self.do_breach_remedy(funding_outpoint, spenders)
        if not keep_watching:
            self.unwatch_channel(address, funding_outpoint)
        else:
            #self.logger.info(f'we will keep_watching {funding_outpoint}')
            pass
    def inspect_tx_candidate(self, outpoint, n):
        """Recursively follow the spend chain starting at `outpoint`.

        Returns (keep_watching, result) where result maps each visited
        outpoint to the txid that spends it (None if unspent), and
        keep_watching is True while anything in the chain is unspent or
        not yet buried DEEP. Also updates self.channel_status as a side
        effect and starts tracking newly-seen output addresses.
        """
        # FIXME: instead of stopping recursion at n == 2,
        # we should detect which outputs are HTLCs
        prev_txid, index = outpoint.split(':')
        txid = self.db.get_spent_outpoint(prev_txid, int(index))
        result = {outpoint:txid}
        if txid is None:
            self.channel_status[outpoint] = 'open'
            #self.logger.info('keep watching because outpoint is unspent')
            return True, result
        keep_watching = (self.get_tx_mined_depth(txid) != TxMinedDepth.DEEP)
        if keep_watching:
            self.channel_status[outpoint] = 'closed (%d)' % self.get_tx_height(txid).conf
            #self.logger.info('keep watching because spending tx is not deep')
        else:
            self.channel_status[outpoint] = 'closed (deep)'
        tx = self.db.get_transaction(txid)
        for i, o in enumerate(tx.outputs()):
            if o.address not in self.get_addresses():
                # unknown output address: start tracking it so we can see
                # its future spends; keep watching until then
                self.add_address(o.address)
                keep_watching = True
            elif n < 2:
                # recurse into outputs we already track, bounded in depth
                k, r = self.inspect_tx_candidate(txid+':%d'%i, n+1)
                keep_watching |= k
                result.update(r)
        return keep_watching, result
async def do_breach_remedy(self, funding_outpoint, spenders):
for prevout, spender in spenders.items():
if spender is not None:
continue
prev_txid, prev_n = prevout.split(':')
sweep_txns = self.sweepstore.get_sweep_tx(funding_outpoint, prev_txid)
for tx in sweep_txns:
if not await self.broadcast_or_log(funding_outpoint, tx):
self.logger.info(f'{tx.name} could not publish tx: {str(tx)}, prev_txid: {prev_txid}')
2018-09-12 16:17:10 +02:00
    async def broadcast_or_log(self, funding_outpoint, tx):
        """Broadcast tx if it is still local-only; return its txid on success.

        NOTE(review): returns None (falsy) both when broadcasting fails
        and when the tx already has a non-local height — callers that
        treat the return value as a success flag will mis-log the
        already-broadcast case. Confirm before relying on it.
        """
        height = self.get_tx_height(tx.txid()).height
        if height != TX_HEIGHT_LOCAL:
            # already broadcast (or mined); nothing to do
            return
        try:
            txid = await self.network.broadcast_transaction(tx)
        except Exception as e:
            self.logger.info(f'broadcast: {tx.name}: failure: {repr(e)}')
        else:
            self.logger.info(f'broadcast: {tx.name}: success. txid: {txid}')
            if funding_outpoint in self.tx_progress:
                # let test listeners observe which txs got published
                await self.tx_progress[funding_outpoint].tx_queue.put(tx)
            return txid
2019-03-12 18:33:36 +01:00
    def add_sweep_tx(self, funding_outpoint: str, prev_txid: str, tx: str):
        """Persist a sweep tx locally and, if a remote watchtower is
        configured, queue the channel for synchronization."""
        self.sweepstore.add_sweep_tx(funding_outpoint, prev_txid, tx)
        if self.watchtower:
            self.watchtower_queue.put_nowait(funding_outpoint)
2018-09-12 16:17:10 +02:00
2018-10-30 20:18:26 +01:00
def get_tx_mined_depth(self, txid: str):
if not txid:
return TxMinedDepth.FREE
tx_mined_depth = self.get_tx_height(txid)
2018-10-30 20:18:26 +01:00
height, conf = tx_mined_depth.height, tx_mined_depth.conf
if conf > 100:
2018-10-30 20:18:26 +01:00
return TxMinedDepth.DEEP
elif conf > 0:
2018-10-30 20:18:26 +01:00
return TxMinedDepth.SHALLOW
elif height in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT):
2018-10-30 20:18:26 +01:00
return TxMinedDepth.MEMPOOL
elif height == TX_HEIGHT_LOCAL:
2018-10-30 20:18:26 +01:00
return TxMinedDepth.FREE
elif height > 0 and conf == 0:
# unverified but claimed to be mined
2018-10-30 20:18:26 +01:00
return TxMinedDepth.MEMPOOL
else:
raise NotImplementedError()