2016-02-23 11:36:42 +01:00
|
|
|
# Electrum - Lightweight Bitcoin Client
|
|
|
|
|
# Copyright (c) 2011-2016 Thomas Voegtlin
|
|
|
|
|
#
|
|
|
|
|
# Permission is hereby granted, free of charge, to any person
|
|
|
|
|
# obtaining a copy of this software and associated documentation files
|
|
|
|
|
# (the "Software"), to deal in the Software without restriction,
|
|
|
|
|
# including without limitation the rights to use, copy, modify, merge,
|
|
|
|
|
# publish, distribute, sublicense, and/or sell copies of the Software,
|
|
|
|
|
# and to permit persons to whom the Software is furnished to do so,
|
|
|
|
|
# subject to the following conditions:
|
|
|
|
|
#
|
|
|
|
|
# The above copyright notice and this permission notice shall be
|
|
|
|
|
# included in all copies or substantial portions of the Software.
|
|
|
|
|
#
|
|
|
|
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
|
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
|
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
|
|
|
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
|
|
|
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
|
|
|
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
|
|
|
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
|
# SOFTWARE.
|
2018-03-15 17:01:54 +01:00
|
|
|
import asyncio
|
2015-01-29 11:32:58 +01:00
|
|
|
import time
|
|
|
|
|
import os
|
|
|
|
|
import random
|
2017-11-12 22:54:04 -06:00
|
|
|
import re
|
|
|
|
|
from collections import defaultdict
|
2017-05-29 09:03:39 +02:00
|
|
|
import threading
|
2015-04-02 10:12:51 +02:00
|
|
|
import json
|
2025-03-03 13:34:05 +01:00
|
|
|
from typing import (
|
|
|
|
|
NamedTuple, Optional, Sequence, List, Dict, Tuple, TYPE_CHECKING, Iterable, Set, Any, TypeVar,
|
|
|
|
|
Callable
|
|
|
|
|
)
|
2021-02-27 12:27:26 +01:00
|
|
|
import copy
|
2021-03-12 17:53:13 +01:00
|
|
|
import functools
|
2023-04-14 15:55:03 +02:00
|
|
|
from enum import IntEnum
|
2025-01-10 13:26:39 +00:00
|
|
|
from contextlib import nullcontext
|
2015-01-29 11:32:58 +01:00
|
|
|
|
2019-01-18 19:59:12 +01:00
|
|
|
import aiorpcx
|
2023-11-28 14:11:48 +01:00
|
|
|
from aiorpcx import ignore_after, NetAddress
|
2018-12-07 19:19:40 +01:00
|
|
|
from aiohttp import ClientResponse
|
2018-07-01 23:53:55 +02:00
|
|
|
|
2017-01-22 21:25:24 +03:00
|
|
|
from . import util
|
2025-04-23 16:09:31 +02:00
|
|
|
from .util import (
|
|
|
|
|
log_exceptions, ignore_exceptions, OldTaskGroup, make_aiohttp_session, send_exception_to_crash_reporter, MyEncoder,
|
|
|
|
|
NetworkRetryManager, error_text_str_to_safe_str, detect_tor_socks_proxy
|
|
|
|
|
)
|
|
|
|
|
from .bitcoin import DummyAddress, DummyAddressUsedInTxException
|
2018-03-04 22:10:59 +01:00
|
|
|
from . import constants
|
2017-01-22 21:25:24 +03:00
|
|
|
from . import blockchain
|
2020-01-01 07:21:08 +01:00
|
|
|
from . import dns_hacks
|
2020-01-09 19:22:58 +01:00
|
|
|
from .transaction import Transaction
|
2025-04-23 16:09:31 +02:00
|
|
|
from .blockchain import Blockchain
|
|
|
|
|
from .interface import (
|
|
|
|
|
Interface, PREFERRED_NETWORK_PROTOCOL, RequestTimedOut, NetworkTimeout, BUCKET_NAME_OF_ONION_SERVERS,
|
|
|
|
|
NetworkException, RequestCorrupted, ServerAddr
|
|
|
|
|
)
|
2018-08-29 18:41:51 +02:00
|
|
|
from .version import PROTOCOL_VERSION
|
2019-01-18 19:59:12 +01:00
|
|
|
from .i18n import _
|
2019-04-26 18:52:26 +02:00
|
|
|
from .logging import get_logger, Logger
|
2025-02-24 12:20:44 +01:00
|
|
|
from .fee_policy import FeeHistogram, FeeTimeEstimates, FEE_ETA_TARGETS
|
|
|
|
|
|
2019-04-26 18:52:26 +02:00
|
|
|
|
2019-08-16 22:03:20 +02:00
|
|
|
if TYPE_CHECKING:
|
2022-06-28 18:55:51 +02:00
|
|
|
from collections.abc import Coroutine
|
2019-08-16 22:03:20 +02:00
|
|
|
from .channel_db import ChannelDB
|
2021-04-26 08:43:50 +02:00
|
|
|
from .lnrouter import LNPathFinder
|
2019-09-09 19:38:35 +02:00
|
|
|
from .lnworker import LNGossip
|
2020-01-09 17:50:05 +01:00
|
|
|
from .daemon import Daemon
|
2023-05-31 16:30:00 +02:00
|
|
|
from .simple_config import SimpleConfig
|
2019-08-16 22:03:20 +02:00
|
|
|
|
2019-04-26 18:52:26 +02:00
|
|
|
|
|
|
|
|
_logger = get_logger(__name__)


# Target number of servers to maintain simultaneous connections with.
NUM_TARGET_CONNECTED_SERVERS = 10
# NOTE(review): exact role of "sticky" servers is defined elsewhere in this file -- presumably
# servers we prefer to keep reconnecting to; confirm against usage.
NUM_STICKY_SERVERS = 4
# Cap for the persisted list of recently-used servers (see Network._recent_servers).
NUM_RECENT_SERVERS = 20


T = TypeVar('T')
|
|
|
|
|
|
2013-09-12 08:41:27 +02:00
|
|
|
|
2023-04-14 15:55:03 +02:00
|
|
|
class ConnectionState(IntEnum):
    """Connection status of the Network instance (see Network._set_status)."""
    DISCONNECTED = 0
    CONNECTING = 1
    CONNECTED = 2
|
|
|
|
|
|
|
|
|
|
|
2018-11-02 20:14:59 +01:00
|
|
|
def parse_servers(result: Sequence[Tuple[str, str, List[str]]]) -> Dict[str, dict]:
    """Convert servers list (from protocol method "server.peers.subscribe") into dict format.

    Also validate values, such as IP addresses and ports.
    """
    servers = {}
    for item in result:
        host = item[1]
        ports = {}
        version = None
        pruning_level = '-'
        features = item[2] if len(item) > 2 else []
        for feature in features:
            if re.match(r"[st]\d*", feature):
                # 's'/'t' feature: SSL/TCP port (empty means "use default port")
                protocol = feature[0]
                port = feature[1:] or constants.net.DEFAULT_PORTS[protocol]
                ServerAddr(host, port, protocol=protocol)  # validate; raises if invalid
                ports[protocol] = port
            elif re.match("v(.?)+", feature):
                # 'v' feature: advertised protocol version
                version = feature[1:]
            elif re.match(r"p\d*", feature):
                # 'p' feature: pruning level (empty means '0')
                pruning_level = feature[1:] or '0'
        if ports:
            ports['pruning'] = pruning_level
            ports['version'] = version
            servers[host] = ports
    return servers
|
|
|
|
|
|
2018-06-01 14:03:22 +02:00
|
|
|
|
2017-09-07 09:41:21 +02:00
|
|
|
def filter_version(servers):
    """Return the subset of *servers* whose advertised protocol version is recent enough.

    An entry is kept iff its 'version' value parses and compares >= our PROTOCOL_VERSION;
    entries with a missing or unparseable version are dropped.
    """
    def is_recent(version):
        try:
            return util.versiontuple(version) >= util.versiontuple(PROTOCOL_VERSION)
        except Exception:
            # version is server-controlled and may be None or malformed; treat as too old
            return False
    return {k: v for k, v in servers.items() if is_recent(v.get('version'))}
|
|
|
|
|
|
|
|
|
|
|
2018-07-31 20:25:53 +02:00
|
|
|
def filter_noonion(servers):
    """Return *servers* with all Tor hidden-service (.onion) hosts removed."""
    clearnet = {}
    for host, portmap in servers.items():
        if host.endswith('.onion'):
            continue
        clearnet[host] = portmap
    return clearnet
|
|
|
|
|
|
|
|
|
|
|
2020-04-16 19:56:30 +02:00
|
|
|
def filter_protocol(hostmap, *, allowed_protocols: Iterable[str] = None) -> Sequence[ServerAddr]:
    """Filters the hostmap for those implementing protocol."""
    if allowed_protocols is None:
        allowed_protocols = {PREFERRED_NETWORK_PROTOCOL}
    # flatten (host, protocol) pairs into ServerAddr objects, skipping missing/empty ports
    return [
        ServerAddr(host, portmap[protocol], protocol=protocol)
        for host, portmap in hostmap.items()
        for protocol in allowed_protocols
        if portmap.get(protocol)
    ]
|
|
|
|
|
|
2018-06-01 14:03:22 +02:00
|
|
|
|
2020-04-16 19:56:30 +02:00
|
|
|
def pick_random_server(hostmap=None, *, allowed_protocols: Iterable[str],
                       exclude_set: Set[ServerAddr] = None) -> Optional[ServerAddr]:
    """Pick one eligible server uniformly at random, or None if none remain after exclusions."""
    if hostmap is None:
        hostmap = constants.net.DEFAULT_SERVERS
    if exclude_set is None:
        exclude_set = set()
    candidates = set(filter_protocol(hostmap, allowed_protocols=allowed_protocols)) - exclude_set
    if not candidates:
        return None
    return random.choice(list(candidates))
|
2013-09-18 16:55:19 +00:00
|
|
|
|
2018-06-01 14:03:22 +02:00
|
|
|
|
2025-03-05 09:45:11 +01:00
|
|
|
def is_valid_port(ps: str):
    """Return whether *ps* is a decimal string naming a valid TCP port (1..65535).

    Non-numeric strings return False rather than raising.
    """
    try:
        port = int(ps)
    except ValueError:
        return False
    # 65535 is the highest valid port number; the previous check used
    # `< 65535`, which wrongly rejected it (off-by-one).
    return 0 < port <= 65535
|
2018-09-10 00:59:53 +02:00
|
|
|
|
2013-09-08 17:23:01 +02:00
|
|
|
|
2025-03-05 09:45:11 +01:00
|
|
|
def is_valid_host(ph: str):
    """Return whether *ph* parses as a host, per aiorpcx.NetAddress."""
    try:
        NetAddress(ph, '1')  # dummy port; we only care about the host part
        return True
    except ValueError:
        return False
|
2015-03-12 12:56:06 +01:00
|
|
|
|
2017-01-30 12:36:56 +03:00
|
|
|
|
2025-03-03 13:34:05 +01:00
|
|
|
class ProxySettings:
    """Mutable value object holding SOCKS proxy settings, plus (de)serialization helpers.

    Equality compares all fields (including password); __str__ omits the password.
    """
    # supported proxy protocols
    MODES = ['socks4', 'socks5']

    # class-level handle to an in-flight Tor autodetection future;
    # ensures only one probe runs at a time (see probe_tor)
    probe_fut = None

    def __init__(self):
        # defaults represent "no proxy configured"
        self.enabled = False
        self.mode = 'socks5'
        self.host = ''
        self.port = ''  # note: kept as str, not int
        self.user = None  # type: Optional[str]
        self.password = None  # type: Optional[str]

    def set_defaults(self):
        """Reset all fields to their default values."""
        self.__init__() # call __init__ for default values

    def serialize_proxy_cfgstr(self):
        """Serialize to the "mode:host:port" config-string format (credentials excluded)."""
        return ':'.join([self.mode, self.host, self.port])

    def deserialize_proxy_cfgstr(self, s: Optional[str], user: str = None, password: str = None) -> None:
        """Populate fields from a "[mode:]host:port" config string.

        ``s`` of None (or the literal string 'none', any case) resets to defaults.
        Also migrates the legacy "host:port:user:pass" format. An invalid
        host/port does not raise; instead ``self.enabled`` is forced to False.
        """
        if s is None or (isinstance(s, str) and s.lower() == 'none'):
            self.set_defaults()
            self.user = user
            self.password = password
            return

        if not isinstance(s, str):
            raise ValueError('proxy config not a string')

        args = s.split(':')
        if args[0] in ProxySettings.MODES:
            self.mode = args[0]
            args = args[1:]

        # detect migrate from old settings
        if len(args) == 4 and is_valid_host(args[0]) and is_valid_port(args[1]): # host:port:user:pass,
            self.host = args[0]
            self.port = args[1]
            self.user = args[2]
            self.password = args[3]
        else:
            # re-join all but the last piece, so hosts that themselves contain ':'
            # survive the split above
            self.host = ':'.join(args[:-1])
            self.port = args[-1]
            self.user = user
            self.password = password

        if not is_valid_host(self.host) or not is_valid_port(self.port):
            self.enabled = False

    def to_dict(self):
        """Serialize all fields (including credentials) to a plain dict."""
        return {
            'enabled': self.enabled,
            'mode': self.mode,
            'host': self.host,
            'port': self.port,
            'user': self.user,
            'password': self.password
        }

    @classmethod
    def from_config(cls, config: 'SimpleConfig') -> 'ProxySettings':
        """Build a ProxySettings from the NETWORK_PROXY* keys of *config*."""
        proxy = ProxySettings()
        proxy.deserialize_proxy_cfgstr(
            config.NETWORK_PROXY, config.NETWORK_PROXY_USER, config.NETWORK_PROXY_PASSWORD
        )
        proxy.enabled = config.NETWORK_PROXY_ENABLED
        return proxy

    @classmethod
    def from_dict(cls, d: dict) -> 'ProxySettings':
        """Build a ProxySettings from a dict (inverse of to_dict); missing keys keep defaults."""
        proxy = ProxySettings()
        proxy.enabled = d.get('enabled', proxy.enabled)
        proxy.mode = d.get('mode', proxy.mode)
        proxy.host = d.get('host', proxy.host)
        proxy.port = d.get('port', proxy.port)
        proxy.user = d.get('user', proxy.user)
        proxy.password = d.get('password', proxy.password)
        return proxy

    @classmethod
    def probe_tor(cls, on_finished: Callable[[str | None, int | None], None]):
        """Asynchronously detect a local Tor SOCKS proxy and report via *on_finished*.

        On success the callback receives (host, port); on failure it receives
        ('', -1). At most one probe runs at a time (guarded by cls.probe_fut).
        The callback runs on the asyncio event loop thread.
        """
        async def detect_task(finished: Callable[[str | None, int | None], None]):
            try:
                net_addr = await detect_tor_socks_proxy()
                if net_addr is None:
                    finished('', -1)
                else:
                    host = net_addr[0]
                    port = net_addr[1]
                    finished(host, port)
            finally:
                # always clear the guard so a later probe can run
                cls.probe_fut = None

        if cls.probe_fut: # one probe at a time
            return
        cls.probe_fut = asyncio.run_coroutine_threadsafe(detect_task(on_finished), util.get_asyncio_loop())

    def __eq__(self, other):
        # note: no matching __hash__ is defined, so instances are unhashable
        return self.enabled == other.enabled \
            and self.mode == other.mode \
            and self.host == other.host \
            and self.port == other.port \
            and self.user == other.user \
            and self.password == other.password

    def __str__(self):
        # password deliberately omitted
        return f'{self.enabled=} {self.mode=} {self.host=} {self.port=} {self.user=}'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class NetworkParameters(NamedTuple):
    """Immutable snapshot of the user-facing network settings."""
    server: ServerAddr       # the main server
    proxy: ProxySettings
    auto_connect: bool
    oneserver: bool = False  # presumably restricts connections to the single main server -- confirm against usage
|
2014-07-24 23:14:47 +02:00
|
|
|
|
2017-01-30 12:36:56 +03:00
|
|
|
|
2019-07-29 13:27:37 +02:00
|
|
|
class BestEffortRequestFailed(NetworkException):
    """Raised when a best-effort network request could not be completed."""
    pass
|
2019-01-18 19:59:12 +01:00
|
|
|
|
|
|
|
|
|
2019-07-29 13:27:37 +02:00
|
|
|
class TxBroadcastError(NetworkException):
    """Base class for errors while broadcasting a transaction."""

    def get_message_for_gui(self):
        """Return a user-displayable description of the failure. Subclasses must implement."""
        raise NotImplementedError()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TxBroadcastHashMismatch(TxBroadcastError):
    """The broadcast appeared to succeed but the server echoed back an unexpected txid."""

    def get_message_for_gui(self):
        what = _("The server returned an unexpected transaction ID when broadcasting the transaction.")
        advice = _("Consider trying to connect to a different server, or updating Electrum.")
        return f"{what}\n{advice}\n\n{str(self)}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TxBroadcastServerReturnedError(TxBroadcastError):
    """The server explicitly rejected the transaction broadcast with an error."""

    def get_message_for_gui(self):
        what = _("The server returned an error when broadcasting the transaction.")
        advice = _("Consider trying to connect to a different server, or updating Electrum.")
        return f"{what}\n{advice}\n\n{str(self)}"
|
|
|
|
|
|
|
|
|
|
|
2019-02-04 14:51:04 +01:00
|
|
|
class TxBroadcastUnknownError(TxBroadcastError):
    """Broadcasting the transaction failed for an unknown reason."""

    def get_message_for_gui(self):
        what = _("Unknown error when broadcasting the transaction.")
        advice = _("Consider trying to connect to a different server, or updating Electrum.")
        return f"{what}\n{advice}"
|
|
|
|
|
|
|
|
|
|
|
2019-07-29 13:27:37 +02:00
|
|
|
class UntrustedServerReturnedError(NetworkException):
    """Wraps an exception received from a remote (untrusted) server.

    The wrapped exception text is server-controlled, so str()/repr() of this
    error deliberately hide it; use get_untrusted_message() for logging only.
    """

    def __init__(self, *, original_exception):
        self.original_exception = original_exception

    def get_message_for_gui(self) -> str:
        return str(self)

    def get_untrusted_message(self) -> str:
        """Full text including the server-supplied message -- for logs, never for the GUI."""
        safe_repr = error_text_str_to_safe_str(repr(self.original_exception))
        return f"<UntrustedServerReturnedError [DO NOT TRUST THIS MESSAGE] original_exception: {safe_repr}>"

    def __str__(self):
        # We should not show the untrusted text from self.original_exception,
        # to avoid accidentally showing it in the GUI.
        return _("The server returned an error.")

    def __repr__(self):
        # likewise, keep the untrusted text out of the repr
        return f"<UntrustedServerReturnedError {str(self)!r}>"
|
2019-02-12 17:02:15 +01:00
|
|
|
|
|
|
|
|
|
2019-09-10 18:01:10 +02:00
|
|
|
# The one and only Network instance (Network.__init__ asserts singleton-ness).
_INSTANCE = None
|
2018-09-07 11:34:56 +02:00
|
|
|
|
2018-09-09 23:08:44 +02:00
|
|
|
|
2020-04-15 17:17:11 +02:00
|
|
|
class Network(Logger, NetworkRetryManager[ServerAddr]):
    """The Network class manages a set of connections to remote electrum
    servers, each connected socket is handled by an Interface() object.
    """

    LOGGING_SHORTCUT = 'n'

    # --- instance attribute type declarations ---
    taskgroup: Optional[OldTaskGroup]
    interface: Optional[Interface]          # the main interface we communicate with
    interfaces: Dict[ServerAddr, Interface]
    _connecting_ifaces: Set[ServerAddr]
    _closing_ifaces: Set[ServerAddr]
    default_server: ServerAddr
    _recent_servers: List[ServerAddr]

    # lightning gossip-related objects; populated by start_gossip, cleared by stop_gossip
    channel_db: Optional['ChannelDB'] = None
    lngossip: Optional['LNGossip'] = None
    path_finder: Optional['LNPathFinder'] = None
|
2021-03-09 17:52:36 +01:00
|
|
|
|
2023-05-31 16:30:00 +02:00
|
|
|
    def __init__(self, config: 'SimpleConfig', *, daemon: 'Daemon' = None):
        """Create the singleton Network instance. Requires a running asyncio event loop."""
        global _INSTANCE
        assert _INSTANCE is None, "Network is a singleton!"
        _INSTANCE = self

        Logger.__init__(self)
        NetworkRetryManager.__init__(
            self,
            max_retry_delay_normal=600,
            init_retry_delay_normal=15,
            max_retry_delay_urgent=10,
            init_retry_delay_urgent=1,
        )

        self.asyncio_loop = util.get_asyncio_loop()
        assert self.asyncio_loop.is_running(), "event loop not running"

        self.config = config
        self.daemon = daemon

        # load headers/chain state from disk before anything can use self._blockchain
        blockchain.read_blockchains(self.config)
        blockchain.init_headers_file_for_best_chain()
        self.logger.info(f"blockchains {list(map(lambda b: b.forkpoint, blockchain.blockchains.values()))}")
        self._blockchain_preferred_block = self.config.BLOCKCHAIN_PREFERRED_BLOCK  # type: Dict[str, Any]
        if self._blockchain_preferred_block is None:
            self._set_preferred_chain(None)
        self._blockchain = blockchain.get_best_chain()

        self._allowed_protocols = {PREFERRED_NETWORK_PROTOCOL}

        self.proxy = ProxySettings()
        self.is_proxy_tor = None  # type: Optional[bool]  # tri-state. None means unknown.
        self._init_parameters_from_config()

        self.taskgroup = None

        # locks
        self.restart_lock = asyncio.Lock()
        self.bhi_lock = asyncio.Lock()
        self.recent_servers_lock = threading.RLock()  # <- re-entrant
        self.interfaces_lock = threading.Lock()  # for mutating/iterating self.interfaces

        self.server_peers = {}  # returned by interface (servers that the main interface knows about)
        self._recent_servers = self._read_recent_servers()  # note: needs self.recent_servers_lock

        self.banner = ''
        self.donation_address = ''
        self.relay_fee = None  # type: Optional[int]

        # directory for cached server certificates
        dir_path = os.path.join(self.config.path, 'certs')
        util.make_dir(dir_path)

        # the main server we are currently communicating with
        self.interface = None
        self.default_server_changed_event = asyncio.Event()
        # Set of servers we have an ongoing connection with.
        # For any ServerAddr, at most one corresponding Interface object
        # can exist at any given time. Depending on the state of that Interface,
        # the ServerAddr can be found in one of the following sets.
        # Note: during a transition, the ServerAddr can appear in two sets momentarily.
        self._connecting_ifaces = set()
        self.interfaces = {}  # these are the ifaces in "initialised and usable" state
        self._closing_ifaces = set()

        # Dump network messages (all interfaces). Set at runtime from the console.
        self.debug = False

        self._set_status(ConnectionState.DISCONNECTED)
        self._has_ever_managed_to_connect_to_server = False
        self._was_started = False

        # fee-related state, refreshed from the server
        self.mempool_fees = FeeHistogram()
        self.fee_estimates = FeeTimeEstimates()
        self.last_time_fee_estimates_requested = 0  # zero ensures immediate fees
|
|
|
|
|
|
2020-10-14 19:30:10 +02:00
|
|
|
def has_internet_connection(self) -> bool:
|
|
|
|
|
"""Our guess whether the device has Internet-connectivity."""
|
|
|
|
|
return self._has_ever_managed_to_connect_to_server
|
|
|
|
|
|
2020-10-08 06:36:02 +02:00
|
|
|
def has_channel_db(self):
|
2020-03-06 11:23:26 +01:00
|
|
|
return self.channel_db is not None
|
|
|
|
|
|
2020-11-11 11:03:31 +01:00
|
|
|
    def start_gossip(self):
        """Start the lightning gossip machinery (ChannelDB, LNPathFinder, LNGossip).

        No-op if gossip is disabled in the config, or if it is already running.
        """
        # local imports to avoid import cycles / cost at module load
        from . import lnrouter
        from . import channel_db
        from . import lnworker
        if not self.config.LIGHTNING_USE_GOSSIP:
            return
        if self.lngossip is None:
            self.channel_db = channel_db.ChannelDB(self)
            self.path_finder = lnrouter.LNPathFinder(self.channel_db)
            self.channel_db.load_data()
            self.lngossip = lnworker.LNGossip(self.config)
            self.lngossip.start_network(self)
|
2020-10-02 07:39:36 +02:00
|
|
|
|
2021-03-09 17:52:36 +01:00
|
|
|
async def stop_gossip(self, *, full_shutdown: bool = False):
    """Stop Lightning gossip and tear down the channel DB / path finder.

    :param full_shutdown: if True, block until the channel DB has fully stopped.
    """
    if self.lngossip:
        await self.lngossip.stop()
        self.lngossip = None
        self.channel_db.stop()
        if full_shutdown:
            # wait for the DB's background work to finish before dropping it
            await self.channel_db.stopped_event.wait()
        self.channel_db = None
        self.path_finder = None
|
2018-06-26 12:10:03 +02:00
|
|
|
|
asyncio: stop using get_event_loop(). introduce ~singleton loop.
asyncio.get_event_loop() became deprecated in python3.10. (see https://github.com/python/cpython/issues/83710)
```
.../electrum/electrum/daemon.py:470: DeprecationWarning: There is no current event loop
self.asyncio_loop = asyncio.get_event_loop()
.../electrum/electrum/network.py:276: DeprecationWarning: There is no current event loop
self.asyncio_loop = asyncio.get_event_loop()
```
Also, according to that thread, "set_event_loop() [... is] not deprecated by oversight".
So, we stop using get_event_loop() and set_event_loop() in our own code.
Note that libraries we use (such as the stdlib for python <3.10), might call get_event_loop,
which then relies on us having called set_event_loop e.g. for the GUI thread. To work around
this, a custom event loop policy providing a get_event_loop implementation is used.
Previously, we have been using a single asyncio event loop, created with
util.create_and_start_event_loop, and code in many places got a reference to this loop
using asyncio.get_event_loop().
Now, we still use a single asyncio event loop, but it is now stored as a global in
util._asyncio_event_loop (access with util.get_asyncio_loop()).
I believe these changes also fix https://github.com/spesmilo/electrum/issues/5376
2022-04-29 18:24:49 +02:00
|
|
|
@classmethod
def run_from_another_thread(cls, coro: 'Coroutine[Any, Any, T]', *, timeout=None) -> T:
    """Run *coro* on the singleton asyncio loop and block until it completes.

    Must be called from a thread that is NOT the asyncio thread.

    :param timeout: seconds to wait for the result (None = wait forever);
                    raises concurrent.futures.TimeoutError on expiry.
    :return: the coroutine's result.
    """
    loop = util.get_asyncio_loop()
    # calling this from the loop's own thread would deadlock on fut.result()
    assert util.get_running_loop() != loop, 'must not be called from asyncio thread'
    fut = asyncio.run_coroutine_threadsafe(coro, loop)
    return fut.result(timeout)
|
2015-03-12 12:56:06 +01:00
|
|
|
|
2018-09-07 11:34:56 +02:00
|
|
|
@staticmethod
def get_instance() -> Optional["Network"]:
    """Return the process-wide singleton Network instance.

    May return None: when running with the --offline flag there is no network.
    """
    instance = _INSTANCE
    return instance
|
2018-09-07 11:34:56 +02:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
def with_recent_servers_lock(func):
    """Decorator: run *func* while holding ``self.recent_servers_lock``.

    Fix: apply ``functools.wraps`` so the wrapper preserves the wrapped
    function's name, docstring and other metadata (the original wrapper
    clobbered them, hurting debugging and introspection).
    """
    @functools.wraps(func)
    def func_wrapper(self, *args, **kwargs):
        with self.recent_servers_lock:
            return func(self, *args, **kwargs)
    return func_wrapper
|
|
|
|
|
|
2020-04-14 16:56:17 +02:00
|
|
|
def _read_recent_servers(self) -> List[ServerAddr]:
    """Load the persisted list of recently-used servers from disk.

    Returns an empty list when there is no config dir, or on any IO/parse error.
    """
    config_dir = self.config.path
    if not config_dir:
        return []
    recent_path = os.path.join(config_dir, "recent_servers")
    try:
        with open(recent_path, "r", encoding='utf-8') as f:
            raw = f.read()
            parsed = json.loads(raw)
        return [ServerAddr.from_str(item) for item in parsed]
    except Exception:
        # best-effort: a missing/corrupt file just means "no recent servers"
        return []
|
|
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_recent_servers_lock
def _save_recent_servers(self):
    """Persist self._recent_servers to the config dir (best-effort).

    IO errors are deliberately swallowed: failing to save recent servers
    must never crash the client.
    """
    if not self.config.path:
        return
    path = os.path.join(self.config.path, "recent_servers")
    # MyEncoder serializes ServerAddr objects; see its definition elsewhere in the project
    s = json.dumps(self._recent_servers, indent=4, sort_keys=True, cls=MyEncoder)
    try:
        with open(path, "w", encoding='utf-8') as f:
            f.write(s)
    except Exception:
        pass
|
|
|
|
|
|
2020-06-21 08:20:56 +02:00
|
|
|
async def _server_is_lagging(self) -> bool:
|
2015-05-25 17:45:01 +09:00
|
|
|
sh = self.get_server_height()
|
|
|
|
|
if not sh:
|
2019-04-26 18:52:26 +02:00
|
|
|
self.logger.info('no height for main interface')
|
2016-02-15 15:58:08 +01:00
|
|
|
return True
|
2015-05-25 17:45:01 +09:00
|
|
|
lh = self.get_local_height()
|
|
|
|
|
result = (lh - sh) > 1
|
|
|
|
|
if result:
|
2019-04-26 18:52:26 +02:00
|
|
|
self.logger.info(f'{self.default_server} is lagging ({sh} vs {lh})')
|
2015-05-25 17:45:01 +09:00
|
|
|
return result
|
2014-07-25 16:32:19 +02:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
def _set_status(self, status):
    """Set the connection state and notify GUI listeners via the 'status' callback."""
    self.connection_status = status
    util.trigger_callback('status')
|
2014-07-24 23:14:47 +02:00
|
|
|
|
2013-10-04 13:51:46 +02:00
|
|
|
def is_connected(self):
    """True iff we have a main interface that is connected and ready."""
    # snapshot self.interface once: it can be swapped by another thread
    iface = self.interface
    if iface is None:
        return False
    return iface.is_connected_and_ready()
|
2015-06-03 00:03:33 +09:00
|
|
|
|
2015-08-30 21:18:10 +09:00
|
|
|
def is_connecting(self):
    """True while we are (re)establishing a connection to the main server."""
    return ConnectionState.CONNECTING == self.connection_status
|
|
|
|
|
|
|
|
|
|
def get_connection_status_for_GUI(self):
    """Human-readable (translated) label for the current connection state."""
    labels = {
        ConnectionState.DISCONNECTED: _('Disconnected'),
        ConnectionState.CONNECTING: _('Connecting'),
        ConnectionState.CONNECTED: _('Connected'),
    }
    return labels[self.connection_status]
|
2015-08-30 21:18:10 +09:00
|
|
|
|
2020-10-16 19:30:42 +02:00
|
|
|
async def _request_server_info(self, interface: 'Interface'):
    """Once *interface* is ready, fetch assorted server metadata concurrently.

    Spawns parallel sub-tasks for the banner, donation address, peer list,
    relay fee, and fee estimates; fires GUI callbacks as results arrive.
    """
    await interface.ready
    session = interface.session

    async def get_banner():
        self.banner = await interface.get_server_banner()
        util.trigger_callback('banner', self.banner)

    async def get_donation_address():
        self.donation_address = await interface.get_donation_address()

    async def get_server_peers():
        server_peers = await session.send_request('server.peers.subscribe')
        random.shuffle(server_peers)
        # cap how many peers we accept, so a malicious server can't flood us
        max_accepted_peers = len(constants.net.DEFAULT_SERVERS) + NUM_RECENT_SERVERS
        server_peers = server_peers[:max_accepted_peers]
        # note that 'parse_servers' also validates the data (which is untrusted input!)
        self.server_peers = parse_servers(server_peers)
        util.trigger_callback('servers', self.get_servers())

    async def get_relay_fee():
        self.relay_fee = await interface.get_relay_fee()

    # run all requests concurrently; the group waits for all of them
    async with OldTaskGroup() as group:
        await group.spawn(get_banner)
        await group.spawn(get_donation_address)
        await group.spawn(get_server_peers)
        await group.spawn(get_relay_fee)
        await group.spawn(self._request_fee_estimates(interface))
|
2018-08-31 16:46:49 +02:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def _request_fee_estimates(self, interface):
    """Fetch the mempool fee histogram from *interface* and publish it."""
    # record the request time first, for throttling (see is_fee_estimates_update_required)
    self.requested_fee_estimates()
    histogram = await interface.get_fee_histogram()
    self.mempool_fees.set_data(histogram)
    self.logger.info(f'fee_histogram {len(histogram)}')
    util.trigger_callback('fee_histogram', self.mempool_fees)
|
|
|
|
|
|
|
|
|
|
def is_fee_estimates_update_required(self):
    """Whether fee estimates are stale enough to be re-requested.

    Returns True if the last request was made more than a minute ago.
    """
    elapsed = time.time() - self.last_time_fee_estimates_requested
    return elapsed > 60
|
|
|
|
|
|
|
|
|
|
def has_fee_etas(self):
    """True iff ETA-based (confirmation-target) fee estimates have been received."""
    estimates = self.fee_estimates
    return estimates.has_data()
|
|
|
|
|
|
|
|
|
|
def has_fee_mempool(self) -> bool:
    """True iff a mempool fee histogram has been received from a server."""
    histogram = self.mempool_fees
    return histogram.has_data()
|
|
|
|
|
|
|
|
|
|
def requested_fee_estimates(self):
    """Record 'now' as the time fee estimates were last requested (for throttling)."""
    self.last_time_fee_estimates_requested = time.time()
|
2013-09-08 17:23:01 +02:00
|
|
|
|
2018-09-10 00:59:53 +02:00
|
|
|
def get_parameters(self) -> NetworkParameters:
    """Snapshot the current user-facing network settings as a NetworkParameters tuple."""
    return NetworkParameters(server=self.default_server,
                             proxy=self.proxy,
                             auto_connect=self.auto_connect,
                             oneserver=self.oneserver)
|
2014-07-25 09:11:56 +02:00
|
|
|
|
2023-03-29 21:48:46 +00:00
|
|
|
def _init_parameters_from_config(self) -> None:
    """(Re)load network parameters from the config.

    Note: order matters — DNS must be configured before any resolution
    happens, and the default server / proxy / oneserver flags are applied
    in that sequence.
    """
    dns_hacks.configure_dns_resolver()
    self.auto_connect = self.config.NETWORK_AUTO_CONNECT
    self._set_default_server()
    self._set_proxy(ProxySettings.from_config(self.config))
    self._maybe_set_oneserver()
|
|
|
|
|
|
2016-02-15 16:17:07 +01:00
|
|
|
def get_donation_address(self):
    """Return the main server's donation address, or None when not connected."""
    if not self.is_connected():
        return None
    return self.donation_address
|
|
|
|
|
|
2020-04-14 16:56:17 +02:00
|
|
|
def get_interfaces(self) -> List[ServerAddr]:
    """The list of servers for the connected interfaces."""
    with self.interfaces_lock:
        servers = list(self.interfaces.keys())
    return servers
|
2013-09-08 17:23:01 +02:00
|
|
|
|
2023-03-17 10:15:07 +01:00
|
|
|
def get_status(self):
    """Translated one-line summary of how many servers we are connected to."""
    num_nodes = len(self.get_interfaces())
    if num_nodes == 0:
        return _("Not connected")
    if num_nodes == 1:
        return _("Connected to {0} node.").format(num_nodes)
    return _("Connected to {0} nodes.").format(num_nodes)
|
|
|
|
|
|
2020-03-12 14:38:35 +01:00
|
|
|
def get_fee_estimates(self):
    """Return {confirmation_target: fee} estimates.

    With auto_connect, aggregate across all connected interfaces by taking
    the median per target (robust against a single lying server); otherwise
    trust only the main interface.
    """
    from statistics import median
    if self.auto_connect:
        with self.interfaces_lock:
            out = {}
            # NOTE(review): the last FEE_ETA_TARGETS entry is deliberately
            # skipped here — presumably handled elsewhere; confirm against callers
            for n in FEE_ETA_TARGETS[0:-1]:
                try:
                    # filter(None, ...) drops interfaces with no estimate for this target
                    out[n] = int(median(filter(None, [i.fee_estimates_eta.get(n) for i in self.interfaces.values()])))
                except Exception:
                    # median() raises on an empty sequence; just skip this target
                    continue
            return out
    else:
        if not self.interface:
            return {}
        return self.interface.fee_estimates_eta
|
|
|
|
|
|
2021-04-15 19:00:46 +02:00
|
|
|
def update_fee_estimates(self, *, fee_est: Optional[Dict[int, int]] = None):
    """Store new fee estimates and notify listeners via the 'fee' callback.

    :param fee_est: {confirmation_target: fee}; if None, aggregated estimates
                    are computed via get_fee_estimates().
    """
    if fee_est is None:
        fee_est = self.get_fee_estimates()
    for nblock_target, fee in fee_est.items():
        self.fee_estimates.set_data(nblock_target, fee)
    # only log when the estimates actually changed, to avoid log spam
    if not hasattr(self, "_prev_fee_est") or self._prev_fee_est != fee_est:
        self._prev_fee_est = copy.copy(fee_est)
        self.logger.info(f'fee_estimates {fee_est}')
    util.trigger_callback('fee', self.fee_estimates)
|
|
|
|
|
|
2020-03-12 14:38:35 +01:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_recent_servers_lock
def get_servers(self):
    """Return a hostmap {host: {protocol: port_str}} of all servers we know about.

    Sources, merged in priority order: peers announced by the main server,
    hardcoded defaults, recently-used servers, and user bookmarks.

    Fix: the recent-servers and bookmarks loops duplicated the same
    merge-into-hostmap logic verbatim; extracted into a local helper.
    """
    def _merge_server(hostmap, server) -> None:
        # merge one ServerAddr into the hostmap without clobbering other protocols
        port = str(server.port)
        if server.host in hostmap:
            hostmap[server.host].update({server.protocol: port})
        else:
            hostmap[server.host] = {server.protocol: port}

    # note: order of sources when adding servers here is crucial!
    # don't let "server_peers" overwrite anything,
    # otherwise main server can eclipse the client
    out = dict()
    # add servers received from main interface
    server_peers = self.server_peers
    if server_peers:
        out.update(filter_version(server_peers.copy()))
    # hardcoded servers
    out.update(constants.net.DEFAULT_SERVERS)
    # add recent servers
    for server in self._recent_servers:
        _merge_server(out, server)
    # add bookmarks
    bookmarks = self.config.NETWORK_BOOKMARKED_SERVERS or []
    for server_str in bookmarks:
        try:
            server = ServerAddr.from_str(server_str)
        except ValueError:
            # skip malformed bookmark entries
            continue
        _merge_server(out, server)
    # potentially filter out some
    if self.config.NETWORK_NOONION:
        out = filter_noonion(out)
    return out
|
2013-09-12 08:41:27 +02:00
|
|
|
|
2020-04-14 18:28:41 +02:00
|
|
|
def _get_next_server_to_try(self) -> Optional[ServerAddr]:
    """Pick the next server address to attempt a connection to, or None.

    Skips servers we are already connected to (or connecting/closing), and
    servers still in their retry back-off window.
    """
    now = time.time()
    with self.interfaces_lock:
        connected_servers = set(self.interfaces) | self._connecting_ifaces | self._closing_ifaces
    # First try from recent servers. (which are persisted)
    # As these are servers we successfully connected to recently, they are
    # most likely to work. This also makes servers "sticky".
    # Note: with sticky servers, it is more difficult for an attacker to eclipse the client,
    # however if they succeed, the eclipsing would persist. To try to balance this,
    # we only give priority to recent_servers up to NUM_STICKY_SERVERS.
    with self.recent_servers_lock:
        recent_servers = list(self._recent_servers)
    recent_servers = [s for s in recent_servers if s.protocol in self._allowed_protocols]
    if len(connected_servers & set(recent_servers)) < NUM_STICKY_SERVERS:
        for server in recent_servers:
            if server in connected_servers:
                continue
            if not self._can_retry_addr(server, now=now):
                continue
            return server
    # try all servers we know about, pick one at random
    hostmap = self.get_servers()
    servers = list(set(filter_protocol(hostmap, allowed_protocols=self._allowed_protocols)) - connected_servers)
    random.shuffle(servers)
    for server in servers:
        if not self._can_retry_addr(server, now=now):
            continue
        return server
    return None
|
2013-09-10 17:52:43 +02:00
|
|
|
|
2023-03-29 21:48:46 +00:00
|
|
|
def _set_default_server(self) -> None:
    """Set self.default_server from the config, sanitizing the value.

    A malformed config string falls back to an unreachable placeholder
    (localhost:1:s); an empty config picks a random allowed server.
    """
    # Server for addresses and transactions
    server = self.config.NETWORK_SERVER
    # Sanitize default server
    if server:
        try:
            self.default_server = ServerAddr.from_str(server)
        except Exception:
            self.logger.warning(f'failed to parse server-string ({server!r}); falling back to localhost:1:s.')
            self.default_server = ServerAddr.from_str("localhost:1:s")
    else:
        self.default_server = pick_random_server(allowed_protocols=self._allowed_protocols)
    assert isinstance(self.default_server, ServerAddr), f"invalid type for default_server: {self.default_server!r}"
|
|
|
|
|
|
2025-03-03 13:34:05 +01:00
|
|
|
def _set_proxy(self, proxy: ProxySettings):
    """Apply new proxy settings; no-op if unchanged.

    Also restarts the asynchronous Tor-detection probe and notifies the GUI.
    """
    if self.proxy == proxy:
        return

    self.logger.info(f'setting proxy {proxy}')
    self.proxy = proxy

    # reset is_proxy_tor to unknown, and re-detect it:
    self.is_proxy_tor = None
    self._detect_if_proxy_is_tor()

    util.trigger_callback('proxy_set', self.proxy)
|
2023-01-13 09:47:18 +01:00
|
|
|
|
2024-04-25 22:48:56 +00:00
|
|
|
def _detect_if_proxy_is_tor(self) -> None:
    """Schedule an async probe to determine whether the configured proxy is Tor.

    The probe runs on the asyncio loop; its result is discarded if the proxy
    changed in the meantime.
    """
    async def tor_probe_task(p):
        assert p is not None
        is_tor = await util.is_tor_socks_port(p.host, int(p.port))
        if self.proxy == p:  # is this the proxy we probed?
            if self.is_proxy_tor != is_tor:
                self.logger.info(f'Proxy is {"" if is_tor else "not "}TOR')
                self.is_proxy_tor = is_tor
                util.trigger_callback('tor_probed', is_tor)

    # snapshot, as self.proxy may change while the probe is in flight
    proxy = self.proxy
    if proxy and proxy.enabled and proxy.mode == 'socks5':
        asyncio.run_coroutine_threadsafe(tor_probe_task(proxy), self.asyncio_loop)
|
2023-01-13 09:47:18 +01:00
|
|
|
|
2018-10-12 18:29:59 +02:00
|
|
|
@log_exceptions
async def set_parameters(self, net_params: NetworkParameters):
    """Apply new user-facing network settings, restarting/switching as needed.

    Writes the sanitized settings to the config; aborts if the config refused
    any of them (e.g. key is set read-only on the command line). Then either
    restarts the whole network (proxy/oneserver changed), switches the main
    interface (server changed), or just re-checks for lag.
    """
    proxy = net_params.proxy
    proxy_str = proxy.serialize_proxy_cfgstr()
    proxy_enabled = proxy.enabled
    proxy_user = proxy.user
    proxy_pass = proxy.password
    server = net_params.server
    # sanitize parameters
    try:
        if proxy:
            # proxy_modes.index(proxy['mode']) + 1
            ProxySettings.MODES.index(proxy.mode) + 1
            # int(proxy['port'])
            int(proxy.port)
    except Exception:
        # invalid proxy settings: disable the proxy rather than failing
        proxy.enabled = False
        # return
    self.config.NETWORK_AUTO_CONNECT = net_params.auto_connect
    self.config.NETWORK_ONESERVER = net_params.oneserver
    self.config.NETWORK_PROXY_ENABLED = proxy_enabled
    self.config.NETWORK_PROXY = proxy_str
    self.config.NETWORK_PROXY_USER = proxy_user
    self.config.NETWORK_PROXY_PASSWORD = proxy_pass
    self.config.NETWORK_SERVER = str(server)
    # abort if changes were not allowed by config
    if self.config.NETWORK_SERVER != str(server) \
            or self.config.NETWORK_PROXY_ENABLED != proxy_enabled \
            or self.config.NETWORK_PROXY != proxy_str \
            or self.config.NETWORK_PROXY_USER != proxy_user \
            or self.config.NETWORK_PROXY_PASSWORD != proxy_pass \
            or self.config.NETWORK_ONESERVER != net_params.oneserver:
        return

    # detect what changed BEFORE re-initializing from config
    proxy_changed = self.proxy != proxy
    oneserver_changed = self.oneserver != net_params.oneserver
    default_server_changed = self.default_server != server
    self._init_parameters_from_config()
    if not self._was_started:
        return

    async with self.restart_lock:
        if proxy_changed or oneserver_changed:
            # Restart the network
            await self.stop(full_shutdown=False)
            await self._start()
        elif default_server_changed:
            await self.switch_to_interface(server)
        else:
            await self.switch_lagging_interface()
        util.trigger_callback('network_updated')
|
2013-10-05 10:01:33 +02:00
|
|
|
|
2020-05-01 03:27:31 +02:00
|
|
|
def _maybe_set_oneserver(self) -> None:
|
2023-05-24 17:41:44 +00:00
|
|
|
oneserver = self.config.NETWORK_ONESERVER
|
2020-05-01 03:27:31 +02:00
|
|
|
self.oneserver = oneserver
|
2019-04-12 22:24:36 +02:00
|
|
|
self.num_server = NUM_TARGET_CONNECTED_SERVERS if not oneserver else 0
|
2018-11-03 17:19:51 +01:00
|
|
|
|
2024-02-03 11:30:10 +00:00
|
|
|
def is_server_bookmarked(self, server: ServerAddr) -> bool:
    """Whether *server* is in the user's bookmarked-servers list."""
    bookmarked = self.config.NETWORK_BOOKMARKED_SERVERS or []
    return str(server) in bookmarked
|
|
|
|
|
|
|
|
|
|
def set_server_bookmark(self, server: ServerAddr, *, add: bool) -> None:
    """Add or remove *server* from the bookmarked-servers list in the config.

    Idempotent: adding an existing bookmark or removing a missing one is a no-op.
    """
    key = str(server)
    with self.config.lock:
        bookmarks = self.config.NETWORK_BOOKMARKED_SERVERS or []
        if add and key not in bookmarks:
            bookmarks.append(key)
        elif not add and key in bookmarks:
            bookmarks.remove(key)
        self.config.NETWORK_BOOKMARKED_SERVERS = bookmarks
|
|
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def _switch_to_random_interface(self):
    '''Switch to a random connected server other than the current one'''
    servers = self.get_interfaces()  # Those in connected state
    if self.default_server in servers:
        servers.remove(self.default_server)
    if servers:
        await self.switch_to_interface(random.choice(servers))
|
2013-10-05 10:01:33 +02:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def switch_lagging_interface(self):
    """If auto_connect and lagging, switch interface (only within fork)."""
    if self.auto_connect and await self._server_is_lagging():
        # switch to one that has the correct header (not height)
        best_header = self.blockchain().header_at_tip()
        with self.interfaces_lock: interfaces = list(self.interfaces.values())
        # candidates: interfaces already at our best-known tip header
        filtered = list(filter(lambda iface: iface.tip_header == best_header, interfaces))
        if filtered:
            chosen_iface = random.choice(filtered)
            await self.switch_to_interface(chosen_iface.server)
|
|
|
|
|
|
2020-06-21 11:31:54 +02:00
|
|
|
async def switch_unwanted_fork_interface(self) -> None:
    """If auto_connect, maybe switch to another fork/chain."""
    if not self.auto_connect or not self.interface:
        return
    with self.interfaces_lock: interfaces = list(self.interfaces.values())
    pref_height = self._blockchain_preferred_block['height']
    pref_hash = self._blockchain_preferred_block['hash']
    # shortcut for common case
    if pref_height == 0:
        return
    # maybe try switching chains; starting with most desirable first
    matching_chains = blockchain.get_chains_that_contain_header(pref_height, pref_hash)
    chains_to_try = list(matching_chains) + [blockchain.get_best_chain()]
    for rank, chain in enumerate(chains_to_try):
        # check if main interface is already on this fork
        if self.interface.blockchain == chain:
            return
        # switch to another random interface that is on this fork, if any
        filtered = [iface for iface in interfaces
                    if iface.blockchain == chain]
        if filtered:
            self.logger.info(f"switching to (more) preferred fork (rank {rank})")
            chosen_iface = random.choice(filtered)
            await self.switch_to_interface(chosen_iface.server)
            return
    self.logger.info("tried to switch to (more) preferred fork but no interfaces are on any")
|
2018-09-25 16:38:26 +02:00
|
|
|
|
2020-04-14 16:56:17 +02:00
|
|
|
async def switch_to_interface(self, server: ServerAddr):
    """Switch to server as our main interface. If no connection exists,
    queue interface to be started. The actual switch will
    happen when the interface becomes ready.
    """
    self.default_server = server
    old_interface = self.interface
    old_server = old_interface.server if old_interface else None

    # Stop any current interface in order to terminate subscriptions,
    # and to cancel tasks in interface.taskgroup.
    if old_server and old_server != server:
        # don't wait for old_interface to close as that might be slow:
        await self.taskgroup.spawn(self._close_interface(old_interface))

    if server not in self.interfaces:
        # no usable connection yet: spawn one; the switch completes later
        self.interface = None
        await self.taskgroup.spawn(self._run_new_interface(server))
        return

    i = self.interfaces[server]
    if old_interface != i:
        # interface might exist but be mid-teardown; bail out in that case
        if not i.is_connected_and_ready():
            return
        self.logger.info(f"switching to {server}")
        blockchain_updated = i.blockchain != self.blockchain()
        self.interface = i
        try:
            await i.taskgroup.spawn(self._request_server_info(i))
        except RuntimeError as e:  # see #7677
            if len(e.args) >= 1 and e.args[0] == 'task group terminated':
                self.logger.warning(f"tried to switch to {server} but interface.taskgroup is already dead.")
                self.interface = None
                return
            raise
        util.trigger_callback('default_server_changed')
        # pulse the event so waiters wake up exactly once per switch
        self.default_server_changed_event.set()
        self.default_server_changed_event.clear()
        self._set_status(ConnectionState.CONNECTED)
        util.trigger_callback('network_updated')
        if blockchain_updated:
            util.trigger_callback('blockchain_updated')
|
2013-10-02 12:13:07 +02:00
|
|
|
|
2021-01-21 03:58:42 +01:00
|
|
|
async def _close_interface(self, interface: Optional[Interface]):
|
|
|
|
|
if not interface:
|
|
|
|
|
return
|
|
|
|
|
if interface.server in self._closing_ifaces:
|
|
|
|
|
return
|
|
|
|
|
self._closing_ifaces.add(interface.server)
|
|
|
|
|
with self.interfaces_lock:
|
|
|
|
|
if self.interfaces.get(interface.server) == interface:
|
|
|
|
|
self.interfaces.pop(interface.server)
|
|
|
|
|
if interface == self.interface:
|
|
|
|
|
self.interface = None
|
|
|
|
|
try:
|
2020-12-13 18:08:36 +01:00
|
|
|
# this can take some time if server/connection is slow:
|
2018-09-25 16:38:26 +02:00
|
|
|
await interface.close()
|
2021-01-21 03:58:42 +01:00
|
|
|
await interface.got_disconnected.wait()
|
|
|
|
|
finally:
|
|
|
|
|
self._closing_ifaces.discard(interface.server)
|
2013-10-09 10:04:32 +02:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_recent_servers_lock
|
2020-04-15 17:17:11 +02:00
|
|
|
def _add_recent_server(self, server: ServerAddr) -> None:
|
|
|
|
|
self._on_connection_successfully_established(server)
|
2013-10-04 14:30:23 +02:00
|
|
|
# list is ordered
|
2020-04-14 18:28:41 +02:00
|
|
|
if server in self._recent_servers:
|
|
|
|
|
self._recent_servers.remove(server)
|
|
|
|
|
self._recent_servers.insert(0, server)
|
|
|
|
|
self._recent_servers = self._recent_servers[:NUM_RECENT_SERVERS]
|
2018-09-25 16:38:26 +02:00
|
|
|
self._save_recent_servers()
|
2013-09-10 19:59:58 +02:00
|
|
|
|
2018-12-10 08:03:42 +01:00
|
|
|
async def connection_down(self, interface: Interface):
|
2015-06-03 00:03:33 +09:00
|
|
|
'''A connection to server either went down, or was never made.
|
|
|
|
|
We distinguish by whether it is in self.interfaces.'''
|
2018-12-10 08:03:42 +01:00
|
|
|
if not interface: return
|
2021-01-21 03:58:42 +01:00
|
|
|
if interface.server == self.default_server:
|
2023-04-14 15:55:03 +02:00
|
|
|
self._set_status(ConnectionState.DISCONNECTED)
|
2018-12-10 08:03:42 +01:00
|
|
|
await self._close_interface(interface)
|
2020-04-14 16:12:47 +02:00
|
|
|
util.trigger_callback('network_updated')
|
2017-05-29 09:03:39 +02:00
|
|
|
|
2018-12-20 16:49:17 +01:00
|
|
|
def get_network_timeout_seconds(self, request_type=NetworkTimeout.Generic) -> int:
|
2023-05-24 17:41:44 +00:00
|
|
|
if self.config.NETWORK_TIMEOUT:
|
|
|
|
|
return self.config.NETWORK_TIMEOUT
|
2018-12-20 16:49:17 +01:00
|
|
|
if self.oneserver and not self.auto_connect:
|
|
|
|
|
return request_type.MOST_RELAXED
|
2025-03-03 13:34:05 +01:00
|
|
|
if self.proxy and self.proxy.enabled:
|
2018-12-20 16:49:17 +01:00
|
|
|
return request_type.RELAXED
|
|
|
|
|
return request_type.NORMAL
|
|
|
|
|
|
2020-04-14 18:44:45 +02:00
|
|
|
    @ignore_exceptions  # do not kill outer taskgroup
    @log_exceptions
    async def _run_new_interface(self, server: ServerAddr):
        """Try to establish a new Interface to *server* and register it.

        No-op if we are already connected/connecting to (or closing) that server.
        On success the interface is added to self.interfaces; if *server* is the
        default server we also switch the main interface to it.
        """
        # de-duplicate: skip servers we already track in any lifecycle stage
        if (server in self.interfaces
                or server in self._connecting_ifaces
                or server in self._closing_ifaces):
            return
        self._connecting_ifaces.add(server)
        if server == self.default_server:
            self.logger.info(f"connecting to {server} as new interface")
            self._set_status(ConnectionState.CONNECTING)
        self._trying_addr_now(server)

        interface = Interface(network=self, server=server)
        # note: using longer timeouts here as DNS can sometimes be slow!
        timeout = self.get_network_timeout_seconds(NetworkTimeout.Generic)
        try:
            # wait until the handshake/verification completes (interface.ready)
            await util.wait_for2(interface.ready, timeout)
        except BaseException as e:
            # intentionally broad: any failure (incl. cancellation) must close the iface
            self.logger.info(f"couldn't launch iface {server} -- {repr(e)}")
            await interface.close()
            return
        else:
            with self.interfaces_lock:
                # the _connecting_ifaces guard above should make collisions impossible
                assert server not in self.interfaces
                self.interfaces[server] = interface
        finally:
            # runs on success, failure and early return alike
            self._connecting_ifaces.discard(server)

        if server == self.default_server:
            await self.switch_to_interface(server)

        self._has_ever_managed_to_connect_to_server = True
        self._add_recent_server(server)
        util.trigger_callback('network_updated')
        # When the proxy settings were set, the proxy (if any) might have been unreachable,
        # resulting in a false-negative for Tor-detection. Given we just connected to a server, re-test now.
        self._detect_if_proxy_is_tor()
|
2015-06-03 00:03:33 +09:00
|
|
|
|
2020-04-14 18:28:41 +02:00
|
|
|
def check_interface_against_healthy_spread_of_connected_servers(self, iface_to_check: Interface) -> bool:
|
2019-04-12 22:32:36 +02:00
|
|
|
# main interface is exempt. this makes switching servers easier
|
|
|
|
|
if iface_to_check.is_main_server():
|
|
|
|
|
return True
|
2019-04-15 10:49:09 +02:00
|
|
|
if not iface_to_check.bucket_based_on_ipaddress():
|
|
|
|
|
return True
|
2019-04-12 22:32:36 +02:00
|
|
|
# bucket connected interfaces
|
|
|
|
|
with self.interfaces_lock:
|
|
|
|
|
interfaces = list(self.interfaces.values())
|
|
|
|
|
if iface_to_check in interfaces:
|
|
|
|
|
interfaces.remove(iface_to_check)
|
|
|
|
|
buckets = defaultdict(list)
|
|
|
|
|
for iface in interfaces:
|
|
|
|
|
buckets[iface.bucket_based_on_ipaddress()].append(iface)
|
|
|
|
|
# check proposed server against buckets
|
|
|
|
|
onion_servers = buckets[BUCKET_NAME_OF_ONION_SERVERS]
|
|
|
|
|
if iface_to_check.is_tor():
|
|
|
|
|
# keep number of onion servers below half of all connected servers
|
|
|
|
|
if len(onion_servers) > NUM_TARGET_CONNECTED_SERVERS // 2:
|
|
|
|
|
return False
|
|
|
|
|
else:
|
|
|
|
|
bucket = iface_to_check.bucket_based_on_ipaddress()
|
|
|
|
|
if len(buckets[bucket]) > 0:
|
|
|
|
|
return False
|
|
|
|
|
return True
|
|
|
|
|
|
2018-09-27 18:01:25 +02:00
|
|
|
    def best_effort_reliable(func):
        """Decorator for Network request coroutines: retry the wrapped call
        across interface disconnections, up to 10 attempts.

        Each attempt races the request against the main interface going down;
        on timeout/corruption the interface is closed and the request retried
        on whatever main interface comes up next. Raises BestEffortRequestFailed
        after exhausting all attempts.
        """
        @functools.wraps(func)
        async def make_reliable_wrapper(self: 'Network', *args, **kwargs):
            for i in range(10):
                iface = self.interface
                # retry until there is a main interface
                if not iface:
                    # wake up either when a new main server is set, or after 1s, then re-check
                    async with ignore_after(1):
                        await self.default_server_changed_event.wait()
                    continue  # try again
                assert iface.ready.done(), "interface not ready yet"
                # try actual request
                try:
                    # race the request against the interface disconnecting;
                    # wait=any means whichever finishes first cancels the other
                    async with OldTaskGroup(wait=any) as group:
                        task = await group.spawn(func(self, *args, **kwargs))
                        await group.spawn(iface.got_disconnected.wait())
                except RequestTimedOut:
                    await iface.close()
                    await iface.got_disconnected.wait()
                    continue  # try again
                except RequestCorrupted as e:
                    # TODO ban server?
                    iface.logger.exception(f"RequestCorrupted: {e}")
                    await iface.close()
                    await iface.got_disconnected.wait()
                    continue  # try again
                # if got_disconnected won the race, the request task was cancelled
                # and we fall through to retry on a new interface
                if task.done() and not task.cancelled():
                    return task.result()
                # otherwise; try again
            raise BestEffortRequestFailed('cannot establish a connection... gave up.')
        return make_reliable_wrapper
|
|
|
|
|
|
2019-02-12 17:02:15 +01:00
|
|
|
def catch_server_exceptions(func):
|
2023-05-16 15:10:12 +00:00
|
|
|
"""Decorator that wraps server errors in UntrustedServerReturnedError,
|
|
|
|
|
to avoid showing untrusted arbitrary text to users.
|
|
|
|
|
"""
|
2021-03-12 17:53:13 +01:00
|
|
|
@functools.wraps(func)
|
2019-02-12 17:02:15 +01:00
|
|
|
async def wrapper(self, *args, **kwargs):
|
|
|
|
|
try:
|
2019-02-12 19:23:58 +01:00
|
|
|
return await func(self, *args, **kwargs)
|
2019-02-12 17:02:15 +01:00
|
|
|
except aiorpcx.jsonrpc.CodeMessageError as e:
|
2023-05-16 15:10:12 +00:00
|
|
|
wrapped_exc = UntrustedServerReturnedError(original_exception=e)
|
|
|
|
|
# log (sanitized) untrusted error text now, to ease debugging
|
qt: do not show UntrustedServerReturnedError when sweeping
We should not show the untrusted text in the GUI...
With this change, we still log the text, but otherwise it should avoid
unintentionally showing it anywhere, as the original exception is
masked away.
related: https://github.com/spesmilo/electrum/issues/8599#issuecomment-1706775508
Example traceback (and the exc is then shown in main_window.on_error):
```
10.77 | D | n/network | got error from server for Network.listunspent_for_scripthash: <UntrustedServerReturnedError [DO NOT TRUST THIS MESSAGE] original_exception: "RPCError(0, 'heyheyhey')">
10.78 | E | gui.qt.main_window.[test_segwit_2] | on_error
Traceback (most recent call last):
File "...\electrum\electrum\network.py", line 898, in wrapper
return await func(self, *args, **kwargs)
File "...\electrum\electrum\network.py", line 1149, in listunspent_for_scripthash
return await self.interface.listunspent_for_scripthash(sh)
File "...\electrum\electrum\interface.py", line 1027, in listunspent_for_scripthash
raise aiorpcx.jsonrpc.RPCError(0, "heyheyhey")
aiorpcx.jsonrpc.RPCError: (0, 'heyheyhey')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "...\electrum\electrum\gui\qt\util.py", line 925, in run
result = task.task()
File "...\electrum\electrum\gui\qt\main_window.py", line 2505, in <lambda>
task = lambda: self.network.run_from_another_thread(
File "...\electrum\electrum\network.py", line 383, in run_from_another_thread
return fut.result(timeout)
File "...\Python310\lib\concurrent\futures\_base.py", line 458, in result
return self.__get_result()
File "...\Python310\lib\concurrent\futures\_base.py", line 403, in __get_result
raise self._exception
File "...\electrum\electrum\wallet.py", line 151, in sweep_preparations
async with OldTaskGroup() as group:
File "...\aiorpcX\aiorpcx\curio.py", line 304, in __aexit__
await self.join()
File "...\electrum\electrum\util.py", line 1316, in join
task.result()
File "...\electrum\electrum\wallet.py", line 142, in find_utxos_for_privkey
await _append_utxos_to_inputs(
File "...\electrum\electrum\wallet.py", line 129, in _append_utxos_to_inputs
u = await network.listunspent_for_scripthash(scripthash)
File "...\electrum\electrum\network.py", line 872, in make_reliable_wrapper
async with OldTaskGroup(wait=any) as group:
File "...\aiorpcX\aiorpcx\curio.py", line 304, in __aexit__
await self.join()
File "...\electrum\electrum\util.py", line 1327, in join
self.completed.result()
File "...\electrum\electrum\network.py", line 903, in wrapper
raise wrapped_exc from e
electrum.network.UntrustedServerReturnedError: The server returned an error.
```
2023-09-05 15:47:33 +00:00
|
|
|
self.logger.debug(f"got error from server for {func.__qualname__}: {wrapped_exc.get_untrusted_message()!r}")
|
2023-05-16 15:10:12 +00:00
|
|
|
raise wrapped_exc from e
|
2019-02-12 17:02:15 +01:00
|
|
|
return wrapper
|
|
|
|
|
|
2018-09-27 18:01:25 +02:00
|
|
|
@best_effort_reliable
|
2019-02-12 17:02:15 +01:00
|
|
|
@catch_server_exceptions
|
2018-09-27 18:01:25 +02:00
|
|
|
async def get_merkle_for_transaction(self, tx_hash: str, tx_height: int) -> dict:
|
network: fix bug in best_effort_reliable
self.interface might get set to None after decorator checks it but before func gets scheduled:
125.04 | E | asyncio | Task exception was never retrieved
future: <Task finished name='Task-408' coro=<Transaction.add_info_from_network.<locals>.add_info_to_txin() done, defined at ...\electrum\electrum\transaction.py:976> exception=AttributeError("'NoneType' object has no attribute 'get_transaction'")>
Traceback (most recent call last):
File "...\electrum\electrum\transaction.py", line 980, in add_info_to_txin
await txin.add_info_from_network(network=network, ignore_network_issues=ignore_network_issues)
File "...\electrum\electrum\transaction.py", line 375, in add_info_from_network
self.utxo = await fetch_from_network(txid=self.prevout.txid.hex())
File "...\electrum\electrum\transaction.py", line 362, in fetch_from_network
raw_tx = await network.get_transaction(txid, timeout=10)
File "...\electrum\electrum\network.py", line 866, in make_reliable_wrapper
async with OldTaskGroup(wait=any) as group:
File "...\aiorpcX\aiorpcx\curio.py", line 304, in __aexit__
await self.join()
File "...\electrum\electrum\util.py", line 1410, in join
self.completed.result()
File "...\electrum\electrum\network.py", line 889, in wrapper
return await func(self, *args, **kwargs)
File "...\electrum\electrum\network.py", line 1114, in get_transaction
return await self.interface.get_transaction(tx_hash=tx_hash, timeout=timeout)
AttributeError: 'NoneType' object has no attribute 'get_transaction'
2023-03-11 18:32:38 +00:00
|
|
|
if self.interface is None: # handled by best_effort_reliable
|
|
|
|
|
raise RequestTimedOut()
|
2020-07-02 15:31:35 +02:00
|
|
|
return await self.interface.get_merkle_for_transaction(tx_hash=tx_hash, tx_height=tx_height)
|
2018-08-14 17:50:20 +02:00
|
|
|
|
2018-09-27 18:01:25 +02:00
|
|
|
@best_effort_reliable
|
2019-10-23 17:09:41 +02:00
|
|
|
async def broadcast_transaction(self, tx: 'Transaction', *, timeout=None) -> None:
|
2024-06-05 19:00:51 +00:00
|
|
|
"""caller should handle TxBroadcastError"""
|
network: fix bug in best_effort_reliable
self.interface might get set to None after decorator checks it but before func gets scheduled:
125.04 | E | asyncio | Task exception was never retrieved
future: <Task finished name='Task-408' coro=<Transaction.add_info_from_network.<locals>.add_info_to_txin() done, defined at ...\electrum\electrum\transaction.py:976> exception=AttributeError("'NoneType' object has no attribute 'get_transaction'")>
Traceback (most recent call last):
File "...\electrum\electrum\transaction.py", line 980, in add_info_to_txin
await txin.add_info_from_network(network=network, ignore_network_issues=ignore_network_issues)
File "...\electrum\electrum\transaction.py", line 375, in add_info_from_network
self.utxo = await fetch_from_network(txid=self.prevout.txid.hex())
File "...\electrum\electrum\transaction.py", line 362, in fetch_from_network
raw_tx = await network.get_transaction(txid, timeout=10)
File "...\electrum\electrum\network.py", line 866, in make_reliable_wrapper
async with OldTaskGroup(wait=any) as group:
File "...\aiorpcX\aiorpcx\curio.py", line 304, in __aexit__
await self.join()
File "...\electrum\electrum\util.py", line 1410, in join
self.completed.result()
File "...\electrum\electrum\network.py", line 889, in wrapper
return await func(self, *args, **kwargs)
File "...\electrum\electrum\network.py", line 1114, in get_transaction
return await self.interface.get_transaction(tx_hash=tx_hash, timeout=timeout)
AttributeError: 'NoneType' object has no attribute 'get_transaction'
2023-03-11 18:32:38 +00:00
|
|
|
if self.interface is None: # handled by best_effort_reliable
|
|
|
|
|
raise RequestTimedOut()
|
2018-12-20 16:49:17 +01:00
|
|
|
if timeout is None:
|
|
|
|
|
timeout = self.get_network_timeout_seconds(NetworkTimeout.Urgent)
|
2023-09-16 04:36:08 +00:00
|
|
|
if any(DummyAddress.is_dummy_address(txout.address) for txout in tx.outputs()):
|
|
|
|
|
raise DummyAddressUsedInTxException("tried to broadcast tx with dummy address!")
|
2019-01-18 19:59:12 +01:00
|
|
|
try:
|
2019-10-23 17:09:41 +02:00
|
|
|
out = await self.interface.session.send_request('blockchain.transaction.broadcast', [tx.serialize()], timeout=timeout)
|
2019-01-18 19:59:12 +01:00
|
|
|
# note: both 'out' and exception messages are untrusted input from the server
|
2019-02-04 14:51:04 +01:00
|
|
|
except (RequestTimedOut, asyncio.CancelledError, asyncio.TimeoutError):
|
|
|
|
|
raise # pass-through
|
|
|
|
|
except aiorpcx.jsonrpc.CodeMessageError as e:
|
2025-05-16 15:49:54 +00:00
|
|
|
self.logger.info(f"broadcast_transaction error [DO NOT TRUST THIS MESSAGE]: {error_text_str_to_safe_str(repr(e))}. tx={str(tx)}")
|
2019-01-18 19:59:12 +01:00
|
|
|
raise TxBroadcastServerReturnedError(self.sanitize_tx_broadcast_response(e.message)) from e
|
2019-02-04 14:51:04 +01:00
|
|
|
except BaseException as e: # intentional BaseException for sanity!
|
2025-05-16 15:49:54 +00:00
|
|
|
self.logger.info(f"broadcast_transaction error2 [DO NOT TRUST THIS MESSAGE]: {error_text_str_to_safe_str(repr(e))}. tx={str(tx)}")
|
2019-02-04 14:51:04 +01:00
|
|
|
send_exception_to_crash_reporter(e)
|
|
|
|
|
raise TxBroadcastUnknownError() from e
|
2018-09-07 17:07:15 +02:00
|
|
|
if out != tx.txid():
|
2023-04-06 13:53:40 +00:00
|
|
|
self.logger.info(f"unexpected txid for broadcast_transaction [DO NOT TRUST THIS MESSAGE]: "
|
2025-05-16 15:49:54 +00:00
|
|
|
f"{error_text_str_to_safe_str(out)} != {tx.txid()}. tx={str(tx)}")
|
2019-01-18 19:59:12 +01:00
|
|
|
raise TxBroadcastHashMismatch(_("Server returned unexpected transaction ID."))
|
|
|
|
|
|
2022-06-10 16:00:30 +02:00
|
|
|
async def try_broadcasting(self, tx, name) -> bool:
|
2020-02-16 12:59:09 +01:00
|
|
|
try:
|
|
|
|
|
await self.broadcast_transaction(tx)
|
|
|
|
|
except Exception as e:
|
|
|
|
|
self.logger.info(f'error: could not broadcast {name} {tx.txid()}, {str(e)}')
|
2022-06-10 16:00:30 +02:00
|
|
|
return False
|
2020-02-16 12:59:09 +01:00
|
|
|
else:
|
|
|
|
|
self.logger.info(f'success: broadcasting {name} {tx.txid()}')
|
2022-06-10 16:00:30 +02:00
|
|
|
return True
|
2020-02-16 12:59:09 +01:00
|
|
|
|
2019-01-18 19:59:12 +01:00
|
|
|
@staticmethod
|
|
|
|
|
def sanitize_tx_broadcast_response(server_msg) -> str:
|
|
|
|
|
# Unfortunately, bitcoind and hence the Electrum protocol doesn't return a useful error code.
|
|
|
|
|
# So, we use substring matching to grok the error message.
|
|
|
|
|
# server_msg is untrusted input so it should not be shown to the user. see #4968
|
|
|
|
|
server_msg = str(server_msg)
|
|
|
|
|
server_msg = server_msg.replace("\n", r"\n")
|
2021-09-08 18:18:08 +02:00
|
|
|
|
2021-02-21 04:48:36 +01:00
|
|
|
# https://github.com/bitcoin/bitcoin/blob/5bb64acd9d3ced6e6f95df282a1a0f8b98522cb0/src/script/script_error.cpp
|
2019-01-18 19:59:12 +01:00
|
|
|
script_error_messages = {
|
|
|
|
|
r"Script evaluated without error but finished with a false/empty top stack element",
|
|
|
|
|
r"Script failed an OP_VERIFY operation",
|
|
|
|
|
r"Script failed an OP_EQUALVERIFY operation",
|
|
|
|
|
r"Script failed an OP_CHECKMULTISIGVERIFY operation",
|
|
|
|
|
r"Script failed an OP_CHECKSIGVERIFY operation",
|
|
|
|
|
r"Script failed an OP_NUMEQUALVERIFY operation",
|
|
|
|
|
r"Script is too big",
|
|
|
|
|
r"Push value size limit exceeded",
|
|
|
|
|
r"Operation limit exceeded",
|
|
|
|
|
r"Stack size limit exceeded",
|
|
|
|
|
r"Signature count negative or greater than pubkey count",
|
|
|
|
|
r"Pubkey count negative or limit exceeded",
|
|
|
|
|
r"Opcode missing or not understood",
|
|
|
|
|
r"Attempted to use a disabled opcode",
|
|
|
|
|
r"Operation not valid with the current stack size",
|
|
|
|
|
r"Operation not valid with the current altstack size",
|
|
|
|
|
r"OP_RETURN was encountered",
|
|
|
|
|
r"Invalid OP_IF construction",
|
|
|
|
|
r"Negative locktime",
|
|
|
|
|
r"Locktime requirement not satisfied",
|
|
|
|
|
r"Signature hash type missing or not understood",
|
|
|
|
|
r"Non-canonical DER signature",
|
|
|
|
|
r"Data push larger than necessary",
|
2020-03-31 07:08:31 +02:00
|
|
|
r"Only push operators allowed in signatures",
|
2019-01-18 19:59:12 +01:00
|
|
|
r"Non-canonical signature: S value is unnecessarily high",
|
|
|
|
|
r"Dummy CHECKMULTISIG argument must be zero",
|
|
|
|
|
r"OP_IF/NOTIF argument must be minimal",
|
|
|
|
|
r"Signature must be zero for failed CHECK(MULTI)SIG operation",
|
|
|
|
|
r"NOPx reserved for soft-fork upgrades",
|
|
|
|
|
r"Witness version reserved for soft-fork upgrades",
|
2021-02-21 04:48:36 +01:00
|
|
|
r"Taproot version reserved for soft-fork upgrades",
|
|
|
|
|
r"OP_SUCCESSx reserved for soft-fork upgrades",
|
|
|
|
|
r"Public key version reserved for soft-fork upgrades",
|
2019-01-18 19:59:12 +01:00
|
|
|
r"Public key is neither compressed or uncompressed",
|
2021-02-21 04:48:36 +01:00
|
|
|
r"Stack size must be exactly one after execution",
|
2019-01-18 19:59:12 +01:00
|
|
|
r"Extra items left on stack after execution",
|
|
|
|
|
r"Witness program has incorrect length",
|
|
|
|
|
r"Witness program was passed an empty witness",
|
|
|
|
|
r"Witness program hash mismatch",
|
|
|
|
|
r"Witness requires empty scriptSig",
|
|
|
|
|
r"Witness requires only-redeemscript scriptSig",
|
|
|
|
|
r"Witness provided for non-witness script",
|
|
|
|
|
r"Using non-compressed keys in segwit",
|
2021-02-21 04:48:36 +01:00
|
|
|
r"Invalid Schnorr signature size",
|
|
|
|
|
r"Invalid Schnorr signature hash type",
|
|
|
|
|
r"Invalid Schnorr signature",
|
|
|
|
|
r"Invalid Taproot control block size",
|
|
|
|
|
r"Too much signature validation relative to witness weight",
|
|
|
|
|
r"OP_CHECKMULTISIG(VERIFY) is not available in tapscript",
|
|
|
|
|
r"OP_IF/NOTIF argument must be minimal in tapscript",
|
2019-01-18 19:59:12 +01:00
|
|
|
r"Using OP_CODESEPARATOR in non-witness script",
|
|
|
|
|
r"Signature is found in scriptCode",
|
|
|
|
|
}
|
|
|
|
|
for substring in script_error_messages:
|
|
|
|
|
if substring in server_msg:
|
|
|
|
|
return substring
|
2021-02-21 04:48:36 +01:00
|
|
|
# https://github.com/bitcoin/bitcoin/blob/5bb64acd9d3ced6e6f95df282a1a0f8b98522cb0/src/validation.cpp
|
2019-01-18 19:59:12 +01:00
|
|
|
# grep "REJECT_"
|
2021-02-21 04:48:36 +01:00
|
|
|
# grep "TxValidationResult"
|
2021-09-08 18:18:08 +02:00
|
|
|
# should come after script_error.cpp (due to e.g. "non-mandatory-script-verify-flag")
|
2019-01-18 19:59:12 +01:00
|
|
|
validation_error_messages = {
|
2021-02-21 04:48:36 +01:00
|
|
|
r"coinbase": None,
|
|
|
|
|
r"tx-size-small": None,
|
|
|
|
|
r"non-final": None,
|
|
|
|
|
r"txn-already-in-mempool": None,
|
|
|
|
|
r"txn-mempool-conflict": None,
|
|
|
|
|
r"txn-already-known": None,
|
|
|
|
|
r"non-BIP68-final": None,
|
|
|
|
|
r"bad-txns-nonstandard-inputs": None,
|
|
|
|
|
r"bad-witness-nonstandard": None,
|
|
|
|
|
r"bad-txns-too-many-sigops": None,
|
|
|
|
|
r"mempool min fee not met":
|
|
|
|
|
("mempool min fee not met\n" +
|
|
|
|
|
_("Your transaction is paying a fee that is so low that the bitcoin node cannot "
|
|
|
|
|
"fit it into its mempool. The mempool is already full of hundreds of megabytes "
|
|
|
|
|
"of transactions that all pay higher fees. Try to increase the fee.")),
|
|
|
|
|
r"min relay fee not met": None,
|
|
|
|
|
r"absurdly-high-fee": None,
|
|
|
|
|
r"max-fee-exceeded": None,
|
|
|
|
|
r"too-long-mempool-chain": None,
|
|
|
|
|
r"bad-txns-spends-conflicting-tx": None,
|
|
|
|
|
r"insufficient fee": ("insufficient fee\n" +
|
|
|
|
|
_("Your transaction is trying to replace another one in the mempool but it "
|
|
|
|
|
"does not meet the rules to do so. Try to increase the fee.")),
|
|
|
|
|
r"too many potential replacements": None,
|
|
|
|
|
r"replacement-adds-unconfirmed": None,
|
|
|
|
|
r"mempool full": None,
|
|
|
|
|
r"non-mandatory-script-verify-flag": None,
|
|
|
|
|
r"mandatory-script-verify-flag-failed": None,
|
|
|
|
|
r"Transaction check failed": None,
|
2019-01-18 19:59:12 +01:00
|
|
|
}
|
|
|
|
|
for substring in validation_error_messages:
|
|
|
|
|
if substring in server_msg:
|
2021-02-21 04:48:36 +01:00
|
|
|
msg = validation_error_messages[substring]
|
|
|
|
|
return msg if msg else substring
|
|
|
|
|
# https://github.com/bitcoin/bitcoin/blob/5bb64acd9d3ced6e6f95df282a1a0f8b98522cb0/src/rpc/rawtransaction.cpp
|
|
|
|
|
# https://github.com/bitcoin/bitcoin/blob/5bb64acd9d3ced6e6f95df282a1a0f8b98522cb0/src/util/error.cpp
|
2025-05-16 15:48:12 +00:00
|
|
|
# https://github.com/bitcoin/bitcoin/blob/3f83c744ac28b700090e15b5dda2260724a56f49/src/common/messages.cpp#L126
|
2019-01-18 19:59:12 +01:00
|
|
|
# grep "RPC_TRANSACTION"
|
|
|
|
|
# grep "RPC_DESERIALIZATION_ERROR"
|
2025-05-16 15:48:12 +00:00
|
|
|
# grep "TransactionError"
|
2019-01-18 19:59:12 +01:00
|
|
|
rawtransaction_error_messages = {
|
2021-02-21 04:48:36 +01:00
|
|
|
r"Missing inputs": None,
|
|
|
|
|
r"Inputs missing or spent": None,
|
|
|
|
|
r"transaction already in block chain": None,
|
|
|
|
|
r"Transaction already in block chain": None,
|
2025-05-16 15:48:12 +00:00
|
|
|
r"Transaction outputs already in utxo set": None,
|
2021-02-21 04:48:36 +01:00
|
|
|
r"TX decode failed": None,
|
|
|
|
|
r"Peer-to-peer functionality missing or disabled": None,
|
|
|
|
|
r"Transaction rejected by AcceptToMemoryPool": None,
|
|
|
|
|
r"AcceptToMemoryPool failed": None,
|
2025-05-16 15:48:12 +00:00
|
|
|
r"Transaction rejected by mempool": None,
|
|
|
|
|
r"Mempool internal error": None,
|
2021-02-21 04:48:36 +01:00
|
|
|
r"Fee exceeds maximum configured by user": None,
|
2025-05-16 15:48:12 +00:00
|
|
|
r"Unspendable output exceeds maximum configured by user": None,
|
|
|
|
|
r"Transaction rejected due to invalid package": None,
|
2019-01-18 19:59:12 +01:00
|
|
|
}
|
|
|
|
|
for substring in rawtransaction_error_messages:
|
|
|
|
|
if substring in server_msg:
|
2021-02-21 04:48:36 +01:00
|
|
|
msg = rawtransaction_error_messages[substring]
|
|
|
|
|
return msg if msg else substring
|
|
|
|
|
# https://github.com/bitcoin/bitcoin/blob/5bb64acd9d3ced6e6f95df282a1a0f8b98522cb0/src/consensus/tx_verify.cpp
|
|
|
|
|
# https://github.com/bitcoin/bitcoin/blob/c7ad94428ab6f54661d7a5441e1fdd0ebf034903/src/consensus/tx_check.cpp
|
2019-01-18 19:59:12 +01:00
|
|
|
# grep "REJECT_"
|
2021-02-21 04:48:36 +01:00
|
|
|
# grep "TxValidationResult"
|
2019-01-18 19:59:12 +01:00
|
|
|
tx_verify_error_messages = {
|
2021-02-21 04:48:36 +01:00
|
|
|
r"bad-txns-vin-empty": None,
|
|
|
|
|
r"bad-txns-vout-empty": None,
|
|
|
|
|
r"bad-txns-oversize": None,
|
|
|
|
|
r"bad-txns-vout-negative": None,
|
|
|
|
|
r"bad-txns-vout-toolarge": None,
|
|
|
|
|
r"bad-txns-txouttotal-toolarge": None,
|
|
|
|
|
r"bad-txns-inputs-duplicate": None,
|
|
|
|
|
r"bad-cb-length": None,
|
|
|
|
|
r"bad-txns-prevout-null": None,
|
|
|
|
|
r"bad-txns-inputs-missingorspent":
|
|
|
|
|
("bad-txns-inputs-missingorspent\n" +
|
|
|
|
|
_("You might have a local transaction in your wallet that this transaction "
|
|
|
|
|
"builds on top. You need to either broadcast or remove the local tx.")),
|
|
|
|
|
r"bad-txns-premature-spend-of-coinbase": None,
|
|
|
|
|
r"bad-txns-inputvalues-outofrange": None,
|
|
|
|
|
r"bad-txns-in-belowout": None,
|
|
|
|
|
r"bad-txns-fee-outofrange": None,
|
2019-01-18 19:59:12 +01:00
|
|
|
}
|
|
|
|
|
for substring in tx_verify_error_messages:
|
|
|
|
|
if substring in server_msg:
|
2021-02-21 04:48:36 +01:00
|
|
|
msg = tx_verify_error_messages[substring]
|
|
|
|
|
return msg if msg else substring
|
2021-09-08 18:18:08 +02:00
|
|
|
# https://github.com/bitcoin/bitcoin/blob/5bb64acd9d3ced6e6f95df282a1a0f8b98522cb0/src/policy/policy.cpp
|
|
|
|
|
# grep "reason ="
|
|
|
|
|
# should come after validation.cpp (due to "tx-size" vs "tx-size-small")
|
|
|
|
|
# should come after script_error.cpp (due to e.g. "version")
|
|
|
|
|
policy_error_messages = {
|
|
|
|
|
r"version": _("Transaction uses non-standard version."),
|
|
|
|
|
r"tx-size": _("The transaction was rejected because it is too large (in bytes)."),
|
|
|
|
|
r"scriptsig-size": None,
|
|
|
|
|
r"scriptsig-not-pushonly": None,
|
|
|
|
|
r"scriptpubkey":
|
|
|
|
|
("scriptpubkey\n" +
|
|
|
|
|
_("Some of the outputs pay to a non-standard script.")),
|
|
|
|
|
r"bare-multisig": None,
|
|
|
|
|
r"dust":
|
|
|
|
|
(_("Transaction could not be broadcast due to dust outputs.\n"
|
|
|
|
|
"Some of the outputs are too small in value, probably lower than 1000 satoshis.\n"
|
|
|
|
|
"Check the units, make sure you haven't confused e.g. mBTC and BTC.")),
|
|
|
|
|
r"multi-op-return": _("The transaction was rejected because it contains multiple OP_RETURN outputs."),
|
|
|
|
|
}
|
|
|
|
|
for substring in policy_error_messages:
|
|
|
|
|
if substring in server_msg:
|
|
|
|
|
msg = policy_error_messages[substring]
|
|
|
|
|
return msg if msg else substring
|
2019-01-18 19:59:12 +01:00
|
|
|
# otherwise:
|
|
|
|
|
return _("Unknown error")
|
2018-08-14 17:50:20 +02:00
|
|
|
|
2018-09-27 18:01:25 +02:00
|
|
|
@best_effort_reliable
|
2019-02-12 17:02:15 +01:00
|
|
|
@catch_server_exceptions
|
|
|
|
|
async def request_chunk(self, height: int, tip=None, *, can_return_early=False):
|
network: fix bug in best_effort_reliable
self.interface might get set to None after decorator checks it but before func gets scheduled:
125.04 | E | asyncio | Task exception was never retrieved
future: <Task finished name='Task-408' coro=<Transaction.add_info_from_network.<locals>.add_info_to_txin() done, defined at ...\electrum\electrum\transaction.py:976> exception=AttributeError("'NoneType' object has no attribute 'get_transaction'")>
Traceback (most recent call last):
File "...\electrum\electrum\transaction.py", line 980, in add_info_to_txin
await txin.add_info_from_network(network=network, ignore_network_issues=ignore_network_issues)
File "...\electrum\electrum\transaction.py", line 375, in add_info_from_network
self.utxo = await fetch_from_network(txid=self.prevout.txid.hex())
File "...\electrum\electrum\transaction.py", line 362, in fetch_from_network
raw_tx = await network.get_transaction(txid, timeout=10)
File "...\electrum\electrum\network.py", line 866, in make_reliable_wrapper
async with OldTaskGroup(wait=any) as group:
File "...\aiorpcX\aiorpcx\curio.py", line 304, in __aexit__
await self.join()
File "...\electrum\electrum\util.py", line 1410, in join
self.completed.result()
File "...\electrum\electrum\network.py", line 889, in wrapper
return await func(self, *args, **kwargs)
File "...\electrum\electrum\network.py", line 1114, in get_transaction
return await self.interface.get_transaction(tx_hash=tx_hash, timeout=timeout)
AttributeError: 'NoneType' object has no attribute 'get_transaction'
2023-03-11 18:32:38 +00:00
|
|
|
if self.interface is None: # handled by best_effort_reliable
|
|
|
|
|
raise RequestTimedOut()
|
2018-09-16 06:09:14 +02:00
|
|
|
return await self.interface.request_chunk(height, tip=tip, can_return_early=can_return_early)
|
2018-08-14 17:50:20 +02:00
|
|
|
|
2018-09-27 18:01:25 +02:00
|
|
|
@best_effort_reliable
|
2019-02-12 17:02:15 +01:00
|
|
|
@catch_server_exceptions
|
2018-10-30 19:07:37 +01:00
|
|
|
async def get_transaction(self, tx_hash: str, *, timeout=None) -> str:
|
network: fix bug in best_effort_reliable
self.interface might get set to None after decorator checks it but before func gets scheduled:
125.04 | E | asyncio | Task exception was never retrieved
future: <Task finished name='Task-408' coro=<Transaction.add_info_from_network.<locals>.add_info_to_txin() done, defined at ...\electrum\electrum\transaction.py:976> exception=AttributeError("'NoneType' object has no attribute 'get_transaction'")>
Traceback (most recent call last):
File "...\electrum\electrum\transaction.py", line 980, in add_info_to_txin
await txin.add_info_from_network(network=network, ignore_network_issues=ignore_network_issues)
File "...\electrum\electrum\transaction.py", line 375, in add_info_from_network
self.utxo = await fetch_from_network(txid=self.prevout.txid.hex())
File "...\electrum\electrum\transaction.py", line 362, in fetch_from_network
raw_tx = await network.get_transaction(txid, timeout=10)
File "...\electrum\electrum\network.py", line 866, in make_reliable_wrapper
async with OldTaskGroup(wait=any) as group:
File "...\aiorpcX\aiorpcx\curio.py", line 304, in __aexit__
await self.join()
File "...\electrum\electrum\util.py", line 1410, in join
self.completed.result()
File "...\electrum\electrum\network.py", line 889, in wrapper
return await func(self, *args, **kwargs)
File "...\electrum\electrum\network.py", line 1114, in get_transaction
return await self.interface.get_transaction(tx_hash=tx_hash, timeout=timeout)
AttributeError: 'NoneType' object has no attribute 'get_transaction'
2023-03-11 18:32:38 +00:00
|
|
|
if self.interface is None: # handled by best_effort_reliable
|
|
|
|
|
raise RequestTimedOut()
|
2020-07-02 15:31:35 +02:00
|
|
|
return await self.interface.get_transaction(tx_hash=tx_hash, timeout=timeout)
|
2018-09-27 18:01:25 +02:00
|
|
|
|
|
|
|
|
@best_effort_reliable
|
2019-02-12 17:02:15 +01:00
|
|
|
@catch_server_exceptions
|
2018-09-27 18:01:25 +02:00
|
|
|
async def get_history_for_scripthash(self, sh: str) -> List[dict]:
|
network: fix bug in best_effort_reliable
self.interface might get set to None after decorator checks it but before func gets scheduled:
125.04 | E | asyncio | Task exception was never retrieved
future: <Task finished name='Task-408' coro=<Transaction.add_info_from_network.<locals>.add_info_to_txin() done, defined at ...\electrum\electrum\transaction.py:976> exception=AttributeError("'NoneType' object has no attribute 'get_transaction'")>
Traceback (most recent call last):
File "...\electrum\electrum\transaction.py", line 980, in add_info_to_txin
await txin.add_info_from_network(network=network, ignore_network_issues=ignore_network_issues)
File "...\electrum\electrum\transaction.py", line 375, in add_info_from_network
self.utxo = await fetch_from_network(txid=self.prevout.txid.hex())
File "...\electrum\electrum\transaction.py", line 362, in fetch_from_network
raw_tx = await network.get_transaction(txid, timeout=10)
File "...\electrum\electrum\network.py", line 866, in make_reliable_wrapper
async with OldTaskGroup(wait=any) as group:
File "...\aiorpcX\aiorpcx\curio.py", line 304, in __aexit__
await self.join()
File "...\electrum\electrum\util.py", line 1410, in join
self.completed.result()
File "...\electrum\electrum\network.py", line 889, in wrapper
return await func(self, *args, **kwargs)
File "...\electrum\electrum\network.py", line 1114, in get_transaction
return await self.interface.get_transaction(tx_hash=tx_hash, timeout=timeout)
AttributeError: 'NoneType' object has no attribute 'get_transaction'
2023-03-11 18:32:38 +00:00
|
|
|
if self.interface is None: # handled by best_effort_reliable
|
|
|
|
|
raise RequestTimedOut()
|
2020-07-02 15:31:35 +02:00
|
|
|
return await self.interface.get_history_for_scripthash(sh)
|
2018-09-27 18:01:25 +02:00
|
|
|
|
|
|
|
|
@best_effort_reliable
|
2019-02-12 17:02:15 +01:00
|
|
|
@catch_server_exceptions
|
2018-09-27 18:01:25 +02:00
|
|
|
async def listunspent_for_scripthash(self, sh: str) -> List[dict]:
|
network: fix bug in best_effort_reliable
self.interface might get set to None after decorator checks it but before func gets scheduled:
125.04 | E | asyncio | Task exception was never retrieved
future: <Task finished name='Task-408' coro=<Transaction.add_info_from_network.<locals>.add_info_to_txin() done, defined at ...\electrum\electrum\transaction.py:976> exception=AttributeError("'NoneType' object has no attribute 'get_transaction'")>
Traceback (most recent call last):
File "...\electrum\electrum\transaction.py", line 980, in add_info_to_txin
await txin.add_info_from_network(network=network, ignore_network_issues=ignore_network_issues)
File "...\electrum\electrum\transaction.py", line 375, in add_info_from_network
self.utxo = await fetch_from_network(txid=self.prevout.txid.hex())
File "...\electrum\electrum\transaction.py", line 362, in fetch_from_network
raw_tx = await network.get_transaction(txid, timeout=10)
File "...\electrum\electrum\network.py", line 866, in make_reliable_wrapper
async with OldTaskGroup(wait=any) as group:
File "...\aiorpcX\aiorpcx\curio.py", line 304, in __aexit__
await self.join()
File "...\electrum\electrum\util.py", line 1410, in join
self.completed.result()
File "...\electrum\electrum\network.py", line 889, in wrapper
return await func(self, *args, **kwargs)
File "...\electrum\electrum\network.py", line 1114, in get_transaction
return await self.interface.get_transaction(tx_hash=tx_hash, timeout=timeout)
AttributeError: 'NoneType' object has no attribute 'get_transaction'
2023-03-11 18:32:38 +00:00
|
|
|
if self.interface is None: # handled by best_effort_reliable
|
|
|
|
|
raise RequestTimedOut()
|
2020-07-02 15:31:35 +02:00
|
|
|
return await self.interface.listunspent_for_scripthash(sh)
|
2018-09-27 18:01:25 +02:00
|
|
|
|
|
|
|
|
@best_effort_reliable
|
2019-02-12 17:02:15 +01:00
|
|
|
@catch_server_exceptions
|
2018-09-27 18:01:25 +02:00
|
|
|
async def get_balance_for_scripthash(self, sh: str) -> dict:
|
network: fix bug in best_effort_reliable
self.interface might get set to None after decorator checks it but before func gets scheduled:
125.04 | E | asyncio | Task exception was never retrieved
future: <Task finished name='Task-408' coro=<Transaction.add_info_from_network.<locals>.add_info_to_txin() done, defined at ...\electrum\electrum\transaction.py:976> exception=AttributeError("'NoneType' object has no attribute 'get_transaction'")>
Traceback (most recent call last):
File "...\electrum\electrum\transaction.py", line 980, in add_info_to_txin
await txin.add_info_from_network(network=network, ignore_network_issues=ignore_network_issues)
File "...\electrum\electrum\transaction.py", line 375, in add_info_from_network
self.utxo = await fetch_from_network(txid=self.prevout.txid.hex())
File "...\electrum\electrum\transaction.py", line 362, in fetch_from_network
raw_tx = await network.get_transaction(txid, timeout=10)
File "...\electrum\electrum\network.py", line 866, in make_reliable_wrapper
async with OldTaskGroup(wait=any) as group:
File "...\aiorpcX\aiorpcx\curio.py", line 304, in __aexit__
await self.join()
File "...\electrum\electrum\util.py", line 1410, in join
self.completed.result()
File "...\electrum\electrum\network.py", line 889, in wrapper
return await func(self, *args, **kwargs)
File "...\electrum\electrum\network.py", line 1114, in get_transaction
return await self.interface.get_transaction(tx_hash=tx_hash, timeout=timeout)
AttributeError: 'NoneType' object has no attribute 'get_transaction'
2023-03-11 18:32:38 +00:00
|
|
|
if self.interface is None: # handled by best_effort_reliable
|
|
|
|
|
raise RequestTimedOut()
|
2020-07-02 15:31:35 +02:00
|
|
|
return await self.interface.get_balance_for_scripthash(sh)
|
2018-09-27 18:01:25 +02:00
|
|
|
|
2018-07-23 20:44:08 +02:00
|
|
|
@best_effort_reliable
|
2020-07-02 15:31:35 +02:00
|
|
|
@catch_server_exceptions
|
2018-07-23 20:44:08 +02:00
|
|
|
async def get_txid_from_txpos(self, tx_height, tx_pos, merkle):
|
network: fix bug in best_effort_reliable
self.interface might get set to None after decorator checks it but before func gets scheduled:
125.04 | E | asyncio | Task exception was never retrieved
future: <Task finished name='Task-408' coro=<Transaction.add_info_from_network.<locals>.add_info_to_txin() done, defined at ...\electrum\electrum\transaction.py:976> exception=AttributeError("'NoneType' object has no attribute 'get_transaction'")>
Traceback (most recent call last):
File "...\electrum\electrum\transaction.py", line 980, in add_info_to_txin
await txin.add_info_from_network(network=network, ignore_network_issues=ignore_network_issues)
File "...\electrum\electrum\transaction.py", line 375, in add_info_from_network
self.utxo = await fetch_from_network(txid=self.prevout.txid.hex())
File "...\electrum\electrum\transaction.py", line 362, in fetch_from_network
raw_tx = await network.get_transaction(txid, timeout=10)
File "...\electrum\electrum\network.py", line 866, in make_reliable_wrapper
async with OldTaskGroup(wait=any) as group:
File "...\aiorpcX\aiorpcx\curio.py", line 304, in __aexit__
await self.join()
File "...\electrum\electrum\util.py", line 1410, in join
self.completed.result()
File "...\electrum\electrum\network.py", line 889, in wrapper
return await func(self, *args, **kwargs)
File "...\electrum\electrum\network.py", line 1114, in get_transaction
return await self.interface.get_transaction(tx_hash=tx_hash, timeout=timeout)
AttributeError: 'NoneType' object has no attribute 'get_transaction'
2023-03-11 18:32:38 +00:00
|
|
|
if self.interface is None: # handled by best_effort_reliable
|
|
|
|
|
raise RequestTimedOut()
|
2020-07-02 15:31:35 +02:00
|
|
|
return await self.interface.get_txid_from_txpos(tx_height, tx_pos, merkle)
|
2018-07-23 20:44:08 +02:00
|
|
|
|
2018-09-27 18:01:25 +02:00
|
|
|
    def blockchain(self) -> Blockchain:
        # Return the blockchain (header-chain branch) the main interface is on.
        # Caches the last seen value in self._blockchain so that a sensible
        # chain is still returned while there is no main interface (or the
        # interface has not been assigned a blockchain yet).
        interface = self.interface
        if interface and interface.blockchain is not None:
            self._blockchain = interface.blockchain
        return self._blockchain
|
2014-03-10 20:53:05 +01:00
|
|
|
|
2017-07-18 21:37:04 +02:00
|
|
|
def get_blockchains(self):
|
2018-09-19 17:56:42 +02:00
|
|
|
out = {} # blockchain_id -> list(interfaces)
|
2018-09-16 18:26:40 +02:00
|
|
|
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
|
2018-09-25 16:38:26 +02:00
|
|
|
with self.interfaces_lock: interfaces_values = list(self.interfaces.values())
|
2018-09-19 17:56:42 +02:00
|
|
|
for chain_id, bc in blockchain_items:
|
2018-09-25 16:38:26 +02:00
|
|
|
r = list(filter(lambda i: i.blockchain==bc, interfaces_values))
|
2017-07-18 21:37:04 +02:00
|
|
|
if r:
|
2018-09-19 17:56:42 +02:00
|
|
|
out[chain_id] = r
|
2017-07-18 21:37:04 +02:00
|
|
|
return out
|
|
|
|
|
|
2020-06-21 11:31:54 +02:00
|
|
|
def _set_preferred_chain(self, chain: Optional[Blockchain]):
|
|
|
|
|
if chain:
|
|
|
|
|
height = chain.get_max_forkpoint()
|
|
|
|
|
header_hash = chain.get_hash(height)
|
|
|
|
|
else:
|
|
|
|
|
height = 0
|
|
|
|
|
header_hash = constants.net.GENESIS
|
2018-10-11 19:42:38 +02:00
|
|
|
self._blockchain_preferred_block = {
|
|
|
|
|
'height': height,
|
|
|
|
|
'hash': header_hash,
|
|
|
|
|
}
|
2023-05-24 17:41:44 +00:00
|
|
|
self.config.BLOCKCHAIN_PREFERRED_BLOCK = self._blockchain_preferred_block
|
2017-07-04 11:35:04 +02:00
|
|
|
|
2018-11-20 18:57:16 +01:00
|
|
|
    async def follow_chain_given_id(self, chain_id: str) -> None:
        """Make the network follow the blockchain branch identified by *chain_id*.

        Saves the chain as the preferred one, then switches the main server to
        a random connected interface already on that chain (no-op if none is).
        Raises if *chain_id* is unknown.
        """
        bc = blockchain.blockchains.get(chain_id)
        if not bc:
            raise Exception('blockchain {} not found'.format(chain_id))
        self._set_preferred_chain(bc)
        # select server on this chain
        with self.interfaces_lock: interfaces = list(self.interfaces.values())
        interfaces_on_selected_chain = list(filter(lambda iface: iface.blockchain == bc, interfaces))
        if len(interfaces_on_selected_chain) == 0: return
        chosen_iface = random.choice(interfaces_on_selected_chain)  # type: Interface
        # switch to server (and save to config)
        net_params = self.get_parameters()
        net_params = net_params._replace(server=chosen_iface.server)
        await self.set_parameters(net_params)
|
|
|
|
|
|
2020-04-14 16:56:17 +02:00
|
|
|
    async def follow_chain_given_server(self, server: ServerAddr) -> None:
        """Switch the main server to *server* and prefer the chain it is on.

        Silently returns if *server* does not correspond to a currently
        connected interface.
        """
        # note that server_str should correspond to a connected interface
        iface = self.interfaces.get(server)
        if iface is None:
            return
        self._set_preferred_chain(iface.blockchain)
        # switch to server (and save to config)
        net_params = self.get_parameters()
        net_params = net_params._replace(server=server)
        await self.set_parameters(net_params)
|
2017-07-10 13:51:13 +02:00
|
|
|
|
2020-06-21 08:20:56 +02:00
|
|
|
def get_server_height(self) -> int:
|
|
|
|
|
"""Length of header chain, as claimed by main interface."""
|
|
|
|
|
interface = self.interface
|
|
|
|
|
return interface.tip if interface else 0
|
|
|
|
|
|
2023-05-03 13:49:44 +00:00
|
|
|
def get_local_height(self) -> int:
|
2020-06-21 08:20:56 +02:00
|
|
|
"""Length of header chain, POW-verified.
|
|
|
|
|
In case of a chain split, this is for the branch the main interface is on,
|
|
|
|
|
but it is the tip of that branch (even if main interface is behind).
|
|
|
|
|
"""
|
2017-05-29 09:03:39 +02:00
|
|
|
return self.blockchain().height()
|
2015-08-30 21:18:10 +09:00
|
|
|
|
2017-12-12 11:10:50 +01:00
|
|
|
def export_checkpoints(self, path):
|
2018-09-25 16:38:26 +02:00
|
|
|
"""Run manually to generate blockchain checkpoints.
|
|
|
|
|
Kept for console use only.
|
|
|
|
|
"""
|
2017-12-05 18:03:07 +01:00
|
|
|
cp = self.blockchain().get_checkpoints()
|
2018-03-23 21:47:51 +01:00
|
|
|
with open(path, 'w', encoding='utf-8') as f:
|
2017-12-05 18:03:07 +01:00
|
|
|
f.write(json.dumps(cp, indent=4))
|
|
|
|
|
|
2018-10-12 19:03:36 +02:00
|
|
|
    async def _start(self):
        """Start the network: create the taskgroup, connect to the default
        server, and schedule the maintenance loop plus any externally
        registered jobs on the asyncio loop.
        """
        assert not self.taskgroup
        self.taskgroup = taskgroup = OldTaskGroup()
        assert not self.interface and not self.interfaces
        assert not self._connecting_ifaces
        assert not self._closing_ifaces
        self.logger.info('starting network')
        self._clear_addr_retry_times()
        self._init_parameters_from_config()
        await self.taskgroup.spawn(self._run_new_interface(self.default_server))

        async def main():
            self.logger.info(f"starting taskgroup ({hex(id(taskgroup))}).")
            try:
                # note: if a task finishes with CancelledError, that
                # will NOT raise, and the group will keep the other tasks running
                async with taskgroup as group:
                    await group.spawn(self._maintain_sessions())
                    [await group.spawn(job) for job in self._jobs]
            except Exception as e:
                # the taskgroup dying is unexpected; log the full traceback
                self.logger.exception(f"taskgroup died ({hex(id(taskgroup))}).")
            finally:
                self.logger.info(f"taskgroup stopped ({hex(id(taskgroup))}).")
        # run main() on the network's asyncio loop; _start itself may be
        # running on a different loop/thread
        asyncio.run_coroutine_threadsafe(main(), self.asyncio_loop)

        util.trigger_callback('network_updated')
|
2018-09-25 16:38:26 +02:00
|
|
|
|
2020-01-09 17:50:05 +01:00
|
|
|
    def start(self, jobs: Iterable = None):
        """Schedule starting the network, along with the given job co-routines.

        Note: the jobs will *restart* every time the network restarts, e.g. on proxy
        setting changes.
        """
        self._was_started = True
        self._jobs = jobs or []
        # hand the actual startup to the asyncio loop; start() itself is
        # safe to call from a non-asyncio thread
        asyncio.run_coroutine_threadsafe(self._start(), self.asyncio_loop)
|
|
|
|
|
|
2018-10-26 22:43:33 +02:00
|
|
|
    @log_exceptions
    async def stop(self, *, full_shutdown: bool = True):
        """Stop the network: cancel the taskgroup and drop all interfaces.

        With full_shutdown=False (e.g. a restart due to proxy changes) the
        teardown is bounded by a short timeout and gossip is left running.
        """
        if not self._was_started:
            self.logger.info("not stopping network as it was never started")
            return
        self.logger.info("stopping network")
        # timeout: if full_shutdown, it is up to the caller to time us out,
        # otherwise if e.g. restarting due to proxy changes, we time out fast
        async with (nullcontext() if full_shutdown else ignore_after(1)):
            async with OldTaskGroup() as group:
                await group.spawn(self.taskgroup.cancel_remaining())
                if full_shutdown:
                    await group.spawn(self.stop_gossip(full_shutdown=full_shutdown))
        # reset connection state so a subsequent _start() passes its asserts
        self.taskgroup = None
        self.interface = None
        self.interfaces = {}
        self._connecting_ifaces.clear()
        self._closing_ifaces.clear()
        if not full_shutdown:
            util.trigger_callback('network_updated')
|
2018-08-15 19:01:28 +02:00
|
|
|
|
2018-10-01 18:16:37 +02:00
|
|
|
    async def _ensure_there_is_a_main_interface(self):
        """If there is no main interface, try to establish one.

        With auto_connect, pick a random connected server; otherwise (or if
        that failed) retry the configured default server.
        """
        if self.interface:
            return
        # if auto_connect is set, try a different server
        if self.auto_connect and not self.is_connecting():
            await self._switch_to_random_interface()
        # if auto_connect is not set, or still no main interface, retry current
        if not self.interface and not self.is_connecting():
            if self._can_retry_addr(self.default_server, urgent=True):
                await self.switch_to_interface(self.default_server)
|
|
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
    async def _maintain_sessions(self):
        """Forever-loop that keeps the set of server connections healthy:
        opens new interfaces up to self.num_server, drops interfaces that
        crowd a single network bucket, and keeps a main interface alive.
        """
        async def maybe_start_new_interfaces():
            # count connecting/closing interfaces too, so we don't overshoot
            num_existing_ifaces = len(self.interfaces) + len(self._connecting_ifaces) + len(self._closing_ifaces)
            for i in range(self.num_server - num_existing_ifaces):
                # FIXME this should try to honour "healthy spread of connected servers"
                server = self._get_next_server_to_try()
                if server:
                    await self.taskgroup.spawn(self._run_new_interface(server))
        async def maintain_healthy_spread_of_connected_servers():
            with self.interfaces_lock: interfaces = list(self.interfaces.values())
            # shuffle so the same interface is not always the one evicted
            random.shuffle(interfaces)
            for iface in interfaces:
                if not self.check_interface_against_healthy_spread_of_connected_servers(iface):
                    self.logger.info(f"disconnecting from {iface.server}. too many connected "
                                     f"servers already in bucket {iface.bucket_based_on_ipaddress()}")
                    await self._close_interface(iface)
        async def maintain_main_interface():
            await self._ensure_there_is_a_main_interface()
            if self.is_connected():
                if self.is_fee_estimates_update_required():
                    await self.interface.taskgroup.spawn(self._request_fee_estimates, self.interface)

        while True:
            await maybe_start_new_interfaces()
            await maintain_healthy_spread_of_connected_servers()
            await maintain_main_interface()
            await asyncio.sleep(0.1)
|
2018-12-07 19:19:40 +01:00
|
|
|
|
2019-02-27 21:48:33 +01:00
|
|
|
    @classmethod
    async def async_send_http_on_proxy(
            cls, method: str, url: str, *,
            params: dict = None,
            body: bytes = None,
            json: dict = None,  # NOTE: shadows the stdlib json module inside this function
            headers=None,
            on_finish=None,
            timeout=None,
    ):
        """Send an HTTP GET/POST through the network's proxy (if any).

        *on_finish* receives the aiohttp ClientResponse and produces the
        return value; by default the response text (raising on HTTP error).
        For POST, exactly one of *body* (raw bytes) or *json* must be given.
        """
        async def default_on_finish(resp: ClientResponse):
            resp.raise_for_status()
            return await resp.text()
        if headers is None:
            headers = {}
        if on_finish is None:
            on_finish = default_on_finish
        # use the live Network instance's proxy settings, if a network exists
        network = cls.get_instance()
        proxy = network.proxy if network else None
        async with make_aiohttp_session(proxy, timeout=timeout) as session:
            if method == 'get':
                async with session.get(url, params=params, headers=headers) as resp:
                    return await on_finish(resp)
            elif method == 'post':
                assert body is not None or json is not None, 'body or json must be supplied if method is post'
                if body is not None:
                    async with session.post(url, data=body, headers=headers) as resp:
                        return await on_finish(resp)
                elif json is not None:
                    async with session.post(url, json=json, headers=headers) as resp:
                        return await on_finish(resp)
            else:
                raise Exception(f"unexpected {method=!r}")
|
2018-12-07 19:19:40 +01:00
|
|
|
|
2019-02-27 21:48:33 +01:00
|
|
|
    @classmethod
    def send_http_on_proxy(cls, method, url, **kwargs):
        """Blocking wrapper around async_send_http_on_proxy.

        Must be called from a non-asyncio thread; schedules the request on
        the shared asyncio loop and waits for the result.
        """
        loop = util.get_asyncio_loop()
        assert util.get_running_loop() != loop, 'must not be called from asyncio thread'
        coro = asyncio.run_coroutine_threadsafe(cls.async_send_http_on_proxy(method, url, **kwargs), loop)
        # note: _send_http_on_proxy has its own timeout, so no timeout here:
        return coro.result()
|
2019-02-05 20:33:50 +01:00
|
|
|
|
|
|
|
|
    # methods used in scripts
    async def get_peers(self):
        """Return the peer list advertised by the main server.

        Blocks (polling once per second) until a main interface is connected.
        """
        while not self.is_connected():
            await asyncio.sleep(1)
        session = self.interface.session
        return parse_servers(await session.send_request('server.peers.subscribe'))
|
|
|
|
|
|
2022-01-18 19:50:46 +01:00
|
|
|
async def send_multiple_requests(
|
|
|
|
|
self,
|
|
|
|
|
servers: Sequence[ServerAddr],
|
|
|
|
|
method: str,
|
|
|
|
|
params: Sequence,
|
|
|
|
|
*,
|
|
|
|
|
timeout: int = None,
|
|
|
|
|
):
|
|
|
|
|
if timeout is None:
|
|
|
|
|
timeout = self.get_network_timeout_seconds(NetworkTimeout.Urgent)
|
2019-02-05 20:33:50 +01:00
|
|
|
responses = dict()
|
2020-04-14 16:56:17 +02:00
|
|
|
async def get_response(server: ServerAddr):
|
2024-10-14 13:32:22 +02:00
|
|
|
interface = Interface(network=self, server=server)
|
2019-02-15 10:47:21 +01:00
|
|
|
try:
|
2023-08-04 17:59:47 +00:00
|
|
|
await util.wait_for2(interface.ready, timeout)
|
2019-02-15 10:47:21 +01:00
|
|
|
except BaseException as e:
|
|
|
|
|
await interface.close()
|
|
|
|
|
return
|
2019-02-05 20:33:50 +01:00
|
|
|
try:
|
2019-02-15 10:47:21 +01:00
|
|
|
res = await interface.session.send_request(method, params, timeout=10)
|
2019-02-05 20:33:50 +01:00
|
|
|
except Exception as e:
|
|
|
|
|
res = e
|
2019-02-15 10:47:21 +01:00
|
|
|
responses[interface.server] = res
|
2022-02-08 12:34:49 +01:00
|
|
|
async with OldTaskGroup() as group:
|
2019-02-07 13:30:14 +01:00
|
|
|
for server in servers:
|
2019-02-15 10:47:21 +01:00
|
|
|
await group.spawn(get_response(server))
|
2019-02-05 20:33:50 +01:00
|
|
|
return responses
|
2022-01-18 19:50:46 +01:00
|
|
|
|
|
|
|
|
async def prune_offline_servers(self, hostmap):
|
|
|
|
|
peers = filter_protocol(hostmap, allowed_protocols=("t", "s",))
|
|
|
|
|
timeout = self.get_network_timeout_seconds(NetworkTimeout.Generic)
|
|
|
|
|
replies = await self.send_multiple_requests(peers, 'blockchain.headers.subscribe', [], timeout=timeout)
|
|
|
|
|
servers_replied = {serveraddr.host for serveraddr in replies.keys()}
|
|
|
|
|
servers_dict = {k: v for k, v in hostmap.items()
|
|
|
|
|
if k in servers_replied}
|
|
|
|
|
return servers_dict
|