2014-04-30 10:32:40 +02:00
|
|
|
#!/usr/bin/env python
|
|
|
|
|
#
|
|
|
|
|
# Electrum - lightweight Bitcoin client
|
|
|
|
|
# Copyright (C) 2014 Thomas Voegtlin
|
|
|
|
|
#
|
2016-02-23 11:36:42 +01:00
|
|
|
# Permission is hereby granted, free of charge, to any person
|
|
|
|
|
# obtaining a copy of this software and associated documentation files
|
|
|
|
|
# (the "Software"), to deal in the Software without restriction,
|
|
|
|
|
# including without limitation the rights to use, copy, modify, merge,
|
|
|
|
|
# publish, distribute, sublicense, and/or sell copies of the Software,
|
|
|
|
|
# and to permit persons to whom the Software is furnished to do so,
|
|
|
|
|
# subject to the following conditions:
|
2014-04-30 10:32:40 +02:00
|
|
|
#
|
2016-02-23 11:36:42 +01:00
|
|
|
# The above copyright notice and this permission notice shall be
|
|
|
|
|
# included in all copies or substantial portions of the Software.
|
2014-04-30 10:32:40 +02:00
|
|
|
#
|
2016-02-23 11:36:42 +01:00
|
|
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
|
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
|
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
|
|
|
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
|
|
|
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
|
|
|
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
|
|
|
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
|
# SOFTWARE.
|
2018-07-27 12:29:04 +02:00
|
|
|
import asyncio
|
2016-05-30 16:11:49 +02:00
|
|
|
import hashlib
|
2021-02-24 12:32:54 +01:00
|
|
|
from typing import Dict, List, TYPE_CHECKING, Tuple, Set
|
2018-10-03 17:13:46 +02:00
|
|
|
from collections import defaultdict
|
2019-05-15 19:56:16 +02:00
|
|
|
import logging
|
2014-07-24 23:14:47 +02:00
|
|
|
|
2022-02-08 12:34:49 +01:00
|
|
|
from aiorpcx import run_in_thread, RPCError
|
2018-09-08 22:44:14 +02:00
|
|
|
|
2020-04-14 16:12:47 +02:00
|
|
|
from . import util
|
2019-12-16 21:15:20 +01:00
|
|
|
from .transaction import Transaction, PartialTransaction
|
2023-02-17 11:35:03 +00:00
|
|
|
from .util import make_aiohttp_session, NetworkJobOnDefaultServer, random_shuffled_copy, OldTaskGroup
|
2018-11-02 20:14:59 +01:00
|
|
|
from .bitcoin import address_to_scripthash, is_address
|
2019-04-26 18:52:26 +02:00
|
|
|
from .logging import Logger
|
2021-02-24 12:32:54 +01:00
|
|
|
from .interface import GracefulDisconnect, NetworkTimeout
|
2018-09-08 22:44:14 +02:00
|
|
|
|
2018-10-22 16:41:25 +02:00
|
|
|
if TYPE_CHECKING:
|
|
|
|
|
from .network import Network
|
|
|
|
|
from .address_synchronizer import AddressSynchronizer
|
|
|
|
|
|
2014-04-30 10:32:40 +02:00
|
|
|
|
2019-02-15 17:22:24 +01:00
|
|
|
class SynchronizerFailure(Exception):
    """Raised when the synchronizer hits an unrecoverable protocol error
    (e.g. a received transaction whose txid does not match, or a history
    that stays inconsistent with the server's announced status)."""
|
|
|
|
|
|
|
|
|
|
|
2018-07-27 12:29:04 +02:00
|
|
|
def history_status(h):
    """Compute the Electrum-protocol status hash of an address history.

    *h* is an iterable of (tx_hash, height) pairs.  Returns None for an
    empty/falsy history; otherwise the hex-encoded sha256 digest of the
    concatenated "<tx_hash>:<height>:" entries.
    """
    if not h:
        return None
    concatenated = ''.join('%s:%d:' % (tx_hash, height) for tx_hash, height in h)
    return hashlib.sha256(concatenated.encode('ascii')).digest().hex()
|
2018-07-27 12:29:04 +02:00
|
|
|
|
|
|
|
|
|
2018-10-03 17:13:46 +02:00
|
|
|
class SynchronizerBase(NetworkJobOnDefaultServer):
    """Subscribe over the network to a set of addresses, and monitor their statuses.
    Every time a status changes, run a coroutine provided by the subclass.
    """
    def __init__(self, network: 'Network'):
        self.asyncio_loop = network.asyncio_loop
        NetworkJobOnDefaultServer.__init__(self, network)

    def _reset(self):
        """Clear all per-server state; called by the base class when we (re)connect."""
        super()._reset()
        # addresses passed to add() whose subscription has not been spawned yet
        self._adding_addrs = set()
        # addresses whose subscribe request was spawned but no status received yet
        self.requested_addrs = set()
        # addresses whose latest status notification is still being handled
        self._handling_addr_statuses = set()
        # scripthash -> address, to map server notifications back to our addresses
        self.scripthash_to_address = {}
        self._processed_some_notifications = False  # so that we don't miss them
        # Queues
        self.status_queue = asyncio.Queue()

    async def _run_tasks(self, *, taskgroup):
        """Run handle_status() and main() inside *taskgroup*; unsubscribe on exit."""
        await super()._run_tasks(taskgroup=taskgroup)
        try:
            async with taskgroup as group:
                await group.spawn(self.handle_status())
                await group.spawn(self.main())
        finally:
            # we are being cancelled now
            self.session.unsubscribe(self.status_queue)

    def add(self, addr):
        """Register *addr* for synchronization.  Only records the address here;
        the actual (async) subscription is spawned later by the subclass's main loop.
        """
        if not is_address(addr): raise ValueError(f"invalid bitcoin address {addr}")
        self._adding_addrs.add(addr)  # this lets is_up_to_date already know about addr

    async def _add_address(self, addr: str):
        """Spawn a status subscription for *addr* unless one is already in flight."""
        try:
            if not is_address(addr): raise ValueError(f"invalid bitcoin address {addr}")
            if addr in self.requested_addrs: return
            self.requested_addrs.add(addr)
            await self.taskgroup.spawn(self._subscribe_to_address, addr)
        finally:
            self._adding_addrs.discard(addr)  # ok for addr not to be present

    async def _on_address_status(self, addr, status):
        """Handle the change of the status of an address.
        Should remove addr from self._handling_addr_statuses when done.
        """
        raise NotImplementedError()  # implemented by subclasses

    async def _subscribe_to_address(self, addr):
        """Subscribe to status notifications for *addr*; updates arrive on status_queue."""
        h = address_to_scripthash(addr)
        self.scripthash_to_address[h] = addr
        self._requests_sent += 1
        try:
            async with self._network_request_semaphore:
                await self.session.subscribe('blockchain.scripthash.subscribe', [h], self.status_queue)
        except RPCError as e:
            if e.message == 'history too large':  # no unique error code
                raise GracefulDisconnect(e, log_level=logging.ERROR) from e
            raise
        self._requests_answered += 1

    async def handle_status(self):
        """Forever dispatch (scripthash, status) notifications to _on_address_status."""
        while True:
            h, status = await self.status_queue.get()
            addr = self.scripthash_to_address[h]
            # mark the addr busy *before* clearing requested_addrs, so that
            # is_up_to_date never observes a gap between the two sets
            self._handling_addr_statuses.add(addr)
            self.requested_addrs.discard(addr)  # ok for addr not to be present
            await self.taskgroup.spawn(self._on_address_status, addr, status)
            self._processed_some_notifications = True

    async def main(self):
        raise NotImplementedError()  # implemented by subclasses
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Synchronizer(SynchronizerBase):
    '''The synchronizer keeps the wallet up-to-date with its set of
    addresses and their transactions. It subscribes over the network
    to wallet addresses, gets the wallet to generate new addresses
    when necessary, requests the transaction history of any addresses
    we don't have the full history of, and requests binary transaction
    data of any transactions the wallet doesn't have.
    '''
    def __init__(self, adb: 'AddressSynchronizer'):
        self.adb = adb
        SynchronizerBase.__init__(self, adb.network)

    def _reset(self):
        """Clear request-tracking state; called by the base class on (re)connect."""
        super()._reset()
        # set to True by main() once the initial bootstrap pass is finished
        self._init_done = False
        # tx_hash -> tx_height, for txs requested from the server but not yet received
        self.requested_tx = {}
        # (addr, status) pairs for which a history request is in flight
        self.requested_histories = set()
        # addr -> watchdog task that raises SynchronizerFailure if the fetched
        # history stays inconsistent with the announced status for too long
        self._stale_histories = dict()  # type: Dict[str, asyncio.Task]

    def diagnostic_name(self):
        return self.adb.diagnostic_name()

    def is_up_to_date(self):
        """True iff bootstrap is done and no address/history/tx request is pending."""
        return (self._init_done
                and not self._adding_addrs
                and not self.requested_addrs
                and not self._handling_addr_statuses
                and not self.requested_histories
                and not self.requested_tx
                and not self._stale_histories
                and self.status_queue.empty())

    async def _on_address_status(self, addr, status):
        """Fetch, verify and store the history of *addr* after a status notification."""
        try:
            history = self.adb.db.get_addr_history(addr)
            # nothing to do if the stored history already hashes to the announced status
            if history_status(history) == status:
                return
            # No point in requesting history twice for the same announced status.
            # However if we got announced a new status, we should request history again:
            if (addr, status) in self.requested_histories:
                return
            # request address history
            self.requested_histories.add((addr, status))
            # cancel any stale-history watchdog for this address (no-op if absent)
            self._stale_histories.pop(addr, asyncio.Future()).cancel()
        finally:
            self._handling_addr_statuses.discard(addr)
        h = address_to_scripthash(addr)
        self._requests_sent += 1
        async with self._network_request_semaphore:
            result = await self.interface.get_history_for_scripthash(h)
        self._requests_answered += 1
        self.logger.info(f"receiving history {addr} {len(result)}")
        hist = list(map(lambda item: (item['tx_hash'], item['height']), result))
        # tx_fees
        tx_fees = [(item['tx_hash'], item.get('fee')) for item in result]
        tx_fees = dict(filter(lambda x:x[1] is not None, tx_fees))
        # Check that the status corresponds to what was announced
        if history_status(hist) != status:
            # could happen naturally if history changed between getting status and history (race)
            self.logger.info(f"error: status mismatch: {addr}. we'll wait a bit for status update.")
            # The server is supposed to send a new status notification, which will trigger a new
            # get_history. We shall wait a bit for this to happen, otherwise we disconnect.
            async def disconnect_if_still_stale():
                timeout = self.network.get_network_timeout_seconds(NetworkTimeout.Generic)
                await asyncio.sleep(timeout)
                raise SynchronizerFailure(f"timeout reached waiting for addr {addr}: history still stale")
            self._stale_histories[addr] = await self.taskgroup.spawn(disconnect_if_still_stale)
        else:
            self._stale_histories.pop(addr, asyncio.Future()).cancel()
            # Store received history
            self.adb.receive_history_callback(addr, hist, tx_fees)
            # Request transactions we don't have
            await self._request_missing_txs(hist)

        # Remove request; this allows up_to_date to be True
        self.requested_histories.discard((addr, status))

    async def _request_missing_txs(self, hist, *, allow_server_not_finding_tx=False):
        """Download, in parallel, every tx in *hist* the wallet lacks or has only partially."""
        # "hist" is a list of [tx_hash, tx_height] lists
        transaction_hashes = []
        for tx_hash, tx_height in hist:
            if tx_hash in self.requested_tx:
                continue
            tx = self.adb.db.get_transaction(tx_hash)
            if tx and not isinstance(tx, PartialTransaction):
                continue  # already have complete tx
            transaction_hashes.append(tx_hash)
            self.requested_tx[tx_hash] = tx_height

        if not transaction_hashes: return
        async with OldTaskGroup() as group:
            for tx_hash in transaction_hashes:
                await group.spawn(self._get_transaction(tx_hash, allow_server_not_finding_tx=allow_server_not_finding_tx))

    async def _get_transaction(self, tx_hash, *, allow_server_not_finding_tx=False):
        """Fetch a single tx by hash, verify its txid, and hand it to the wallet.

        Raises SynchronizerFailure if the received tx does not hash to *tx_hash*.
        """
        self._requests_sent += 1
        try:
            async with self._network_request_semaphore:
                raw_tx = await self.interface.get_transaction(tx_hash)
        except RPCError as e:
            # most likely, "No such mempool or blockchain transaction"
            if allow_server_not_finding_tx:
                self.requested_tx.pop(tx_hash)
                return
            else:
                raise
        finally:
            self._requests_answered += 1
        tx = Transaction(raw_tx)
        if tx_hash != tx.txid():
            raise SynchronizerFailure(f"received tx does not match expected txid ({tx_hash} != {tx.txid()})")
        tx_height = self.requested_tx.pop(tx_hash)
        self.adb.receive_tx_callback(tx, tx_height)
        self.logger.info(f"received tx {tx_hash} height: {tx_height} bytes: {len(raw_tx)}")

    async def main(self):
        """Bootstrap from the wallet DB, then poll for up-to-date status changes."""
        self.adb.up_to_date_changed()
        # request missing txns, if any
        for addr in random_shuffled_copy(self.adb.db.get_history()):
            history = self.adb.db.get_addr_history(addr)
            # Old electrum servers returned ['*'] when all history for the address
            # was pruned. This no longer happens but may remain in old wallets.
            if history == ['*']: continue
            await self._request_missing_txs(history, allow_server_not_finding_tx=True)
        # add addresses to bootstrap
        for addr in random_shuffled_copy(self.adb.get_addresses()):
            await self._add_address(addr)
        # main loop
        self._init_done = True
        prev_uptodate = False
        while True:
            await asyncio.sleep(0.1)
            for addr in self._adding_addrs.copy():  # copy set to ensure iterator stability
                await self._add_address(addr)
            up_to_date = self.adb.is_up_to_date()
            # see if status changed
            if (up_to_date != prev_uptodate
                    or up_to_date and self._processed_some_notifications):
                self._processed_some_notifications = False
                self.adb.up_to_date_changed()
            prev_uptodate = up_to_date
|
2018-10-03 17:13:46 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
class Notifier(SynchronizerBase):
    """Watch addresses over the network and push status changes via HTTP.

    Every time the status of a watched address changes, an HTTP POST with the
    address and its new status is sent to each URL registered for it.
    """

    def __init__(self, network):
        SynchronizerBase.__init__(self, network)
        # address -> list of URLs to notify about it
        self.watched_addresses = defaultdict(list)  # type: Dict[str, List[str]]
        # (address, url) pairs queued by start_watching_addr, consumed by main()
        self._start_watching_queue = asyncio.Queue()  # type: asyncio.Queue[Tuple[str, str]]

    async def main(self):
        """Resubscribe to already-watched addresses, then serve watch requests forever."""
        # resend existing subscriptions if we were restarted
        for known_addr in self.watched_addresses:
            await self._add_address(known_addr)
        # main loop: register each queued (address, url) pair and subscribe
        while True:
            addr, url = await self._start_watching_queue.get()
            self.watched_addresses[addr].append(url)
            await self._add_address(addr)

    async def start_watching_addr(self, addr: str, url: str):
        """Queue *addr* to be watched; status changes will be POSTed to *url*."""
        item = (addr, url)
        await self._start_watching_queue.put(item)

    async def stop_watching_addr(self, addr: str):
        """Drop all notification URLs for *addr* (no-op if it was not watched)."""
        self.watched_addresses.pop(addr, None)
        # TODO blockchain.scripthash.unsubscribe

    async def _on_address_status(self, addr, status):
        """POST the new status of *addr* to every URL watching it, best-effort."""
        # .get() avoids the defaultdict inserting an empty list for unknown addrs
        urls = self.watched_addresses.get(addr)
        if urls is None:
            return
        self.logger.info(f'new status for addr {addr}')
        headers = {'content-type': 'application/json'}
        payload = {'address': addr, 'status': status}
        for url in urls:
            try:
                async with make_aiohttp_session(proxy=self.network.proxy, headers=headers) as session:
                    async with session.post(url, json=payload, headers=headers) as resp:
                        await resp.text()
            except Exception as e:
                # best-effort: log and keep notifying the remaining URLs
                self.logger.info(repr(e))
            else:
                self.logger.info(f'Got Response for {addr}')
|