2016-02-23 11:36:42 +01:00
|
|
|
# Electrum - Lightweight Bitcoin Client
|
|
|
|
|
# Copyright (c) 2011-2016 Thomas Voegtlin
|
|
|
|
|
#
|
|
|
|
|
# Permission is hereby granted, free of charge, to any person
|
|
|
|
|
# obtaining a copy of this software and associated documentation files
|
|
|
|
|
# (the "Software"), to deal in the Software without restriction,
|
|
|
|
|
# including without limitation the rights to use, copy, modify, merge,
|
|
|
|
|
# publish, distribute, sublicense, and/or sell copies of the Software,
|
|
|
|
|
# and to permit persons to whom the Software is furnished to do so,
|
|
|
|
|
# subject to the following conditions:
|
|
|
|
|
#
|
|
|
|
|
# The above copyright notice and this permission notice shall be
|
|
|
|
|
# included in all copies or substantial portions of the Software.
|
|
|
|
|
#
|
|
|
|
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
|
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
|
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
|
|
|
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
|
|
|
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
|
|
|
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
|
|
|
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
|
# SOFTWARE.
|
2015-01-29 11:32:58 +01:00
|
|
|
import time
|
2017-09-04 14:43:31 +02:00
|
|
|
import queue
|
2015-01-29 11:32:58 +01:00
|
|
|
import os
|
|
|
|
|
import random
|
2017-11-12 22:54:04 -06:00
|
|
|
import re
|
|
|
|
|
from collections import defaultdict
|
2017-05-29 09:03:39 +02:00
|
|
|
import threading
|
2015-03-12 12:56:06 +01:00
|
|
|
import socket
|
2015-04-02 10:12:51 +02:00
|
|
|
import json
|
2018-07-01 23:53:55 +02:00
|
|
|
import sys
|
2018-07-02 00:59:28 +02:00
|
|
|
import ipaddress
|
2018-09-08 00:25:38 +02:00
|
|
|
import asyncio
|
2018-09-25 16:38:26 +02:00
|
|
|
from typing import NamedTuple, Optional, Sequence, List
|
|
|
|
|
import traceback
|
2015-01-29 11:32:58 +01:00
|
|
|
|
2018-07-01 23:53:55 +02:00
|
|
|
import dns
|
|
|
|
|
import dns.resolver
|
2018-09-08 15:36:16 +02:00
|
|
|
from aiorpcx import TaskGroup
|
2018-07-01 23:53:55 +02:00
|
|
|
|
2017-01-22 21:25:24 +03:00
|
|
|
from . import util
|
2018-09-25 16:38:26 +02:00
|
|
|
from .util import PrintError, print_error, aiosafe, bfh, SilentTaskGroup
|
2018-06-19 17:36:01 +01:00
|
|
|
from .bitcoin import COIN
|
2018-03-04 22:10:59 +01:00
|
|
|
from . import constants
|
2017-01-22 21:25:24 +03:00
|
|
|
from . import blockchain
|
2018-09-25 16:38:26 +02:00
|
|
|
from .blockchain import Blockchain, HEADER_SIZE
|
2018-09-27 18:01:25 +02:00
|
|
|
from .interface import Interface, serialize_server, deserialize_server, RequestTimedOut
|
2018-08-29 18:41:51 +02:00
|
|
|
from .version import PROTOCOL_VERSION
|
2018-09-10 00:59:53 +02:00
|
|
|
from .simple_config import SimpleConfig
|
2017-01-07 16:58:23 +01:00
|
|
|
|
2015-04-02 15:12:01 +02:00
|
|
|
# Seconds to wait before retrying the whole node list once it is exhausted.
NODES_RETRY_INTERVAL = 60
# Seconds to wait before retrying a connection to a single server.
SERVER_RETRY_INTERVAL = 10
|
2014-07-30 10:43:15 +02:00
|
|
|
|
2013-09-12 08:41:27 +02:00
|
|
|
|
2014-02-11 09:48:02 +01:00
|
|
|
def parse_servers(result):
    """ parse servers list into dict format"""
    servers = {}
    for item in result:
        host = item[1]
        out = {}
        version = None
        pruning_level = '-'
        if len(item) > 2:
            # item[2] is a list of feature tokens, e.g. 's50002', 'v1.2', 'p100'
            for token in item[2]:
                if re.match(r"[st]\d*", token):
                    # TCP ('t') or SSL ('s') port announcement
                    protocol, port = token[0], token[1:]
                    if port == '':
                        port = constants.net.DEFAULT_PORTS[protocol]
                    out[protocol] = port
                elif re.match("v(.?)+", token):
                    version = token[1:]
                elif re.match(r"p\d*", token):
                    pruning_level = token[1:]
                if pruning_level == '':
                    pruning_level = '0'
        if out:
            out['pruning'] = pruning_level
            out['version'] = version
            servers[host] = out
    return servers
|
|
|
|
|
|
2018-06-01 14:03:22 +02:00
|
|
|
|
2017-09-07 09:41:21 +02:00
|
|
|
def filter_version(servers):
    """Keep only servers whose advertised protocol version is at least ours."""
    def is_recent(version):
        try:
            return util.versiontuple(version) >= util.versiontuple(PROTOCOL_VERSION)
        except Exception as e:
            # unparseable/missing version -> treat as too old
            return False
    recent = {}
    for server, info in servers.items():
        if is_recent(info.get('version')):
            recent[server] = info
    return recent
|
|
|
|
|
|
|
|
|
|
|
2018-07-31 20:25:53 +02:00
|
|
|
def filter_noonion(servers):
    """Drop Tor hidden-service (.onion) hosts from a server map."""
    result = {}
    for hostname, info in servers.items():
        if hostname.endswith('.onion'):
            continue
        result[hostname] = info
    return result
|
|
|
|
|
|
|
|
|
|
|
2018-06-01 14:03:22 +02:00
|
|
|
def filter_protocol(hostmap, protocol='s'):
    '''Filters the hostmap for those implementing protocol.
    The result is a list in serialized form.'''
    eligible = []
    for host, portmap in hostmap.items():
        port = portmap.get(protocol)
        if not port:
            continue
        eligible.append(serialize_server(host, port, protocol))
    return eligible
|
|
|
|
|
|
2018-06-01 14:03:22 +02:00
|
|
|
|
2017-01-07 16:58:23 +01:00
|
|
|
def pick_random_server(hostmap=None, protocol='s', exclude_set=None):
    """Return a random eligible serialized server string, or None.

    hostmap defaults to the hardcoded server list; exclude_set lists
    serialized servers that must not be chosen.  The previous default,
    a mutable ``set()`` literal, is replaced with the None sentinel so
    no shared object leaks across calls (backward-compatible).
    """
    if hostmap is None:
        hostmap = constants.net.DEFAULT_SERVERS
    if exclude_set is None:
        exclude_set = set()
    eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
    return random.choice(eligible) if eligible else None
|
2013-09-18 16:55:19 +00:00
|
|
|
|
2018-06-01 14:03:22 +02:00
|
|
|
|
2018-09-10 00:59:53 +02:00
|
|
|
class NetworkParameters(NamedTuple):
    """User-selected network settings: main server, proxy, auto-connect flag."""
    host: str
    port: str
    protocol: str
    proxy: Optional[dict]
    auto_connect: bool
|
|
|
|
|
|
2013-09-08 17:23:01 +02:00
|
|
|
|
2018-09-06 16:18:45 +02:00
|
|
|
# Proxy types understood by serialize_proxy/deserialize_proxy.
proxy_modes = ['socks4', 'socks5']
|
2015-03-12 12:56:06 +01:00
|
|
|
|
2017-01-30 12:36:56 +03:00
|
|
|
|
2015-03-12 12:56:06 +01:00
|
|
|
def serialize_proxy(p):
    """Encode a proxy dict as 'mode:host:port:user:password'; None if not a dict."""
    if not isinstance(p, dict):
        return None
    fields = [p.get('mode'), p.get('host'), p.get('port'),
              p.get('user', ''), p.get('password', '')]
    return ':'.join(fields)
|
2015-03-12 12:56:06 +01:00
|
|
|
|
2017-01-30 12:36:56 +03:00
|
|
|
|
2018-09-10 00:59:53 +02:00
|
|
|
def deserialize_proxy(s: str) -> Optional[dict]:
    """Parse 'mode:host:port:user:password' (later fields optional) into a dict.

    Returns None for non-strings and for the literal string 'none'
    (case-insensitive). Missing fields get socks5/localhost defaults.
    """
    if not isinstance(s, str):
        return None
    if s.lower() == 'none':
        return None
    proxy = { "mode":"socks5", "host":"localhost" }
    # FIXME raw IPv6 address fails here
    args = s.split(':')
    idx = 0
    # optional leading mode token
    if proxy_modes.count(args[idx]) == 1:
        proxy["mode"] = args[idx]
        idx += 1
    if len(args) > idx:
        proxy["host"] = args[idx]
        idx += 1
    if len(args) > idx:
        proxy["port"] = args[idx]
        idx += 1
    else:
        proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
    if len(args) > idx:
        proxy["user"] = args[idx]
        idx += 1
    if len(args) > idx:
        proxy["password"] = args[idx]
    return proxy
|
2014-07-24 23:14:47 +02:00
|
|
|
|
2017-01-30 12:36:56 +03:00
|
|
|
|
2018-09-07 11:34:56 +02:00
|
|
|
# Process-wide Network singleton; set by Network.__init__ and read
# back via Network.get_instance().
INSTANCE = None
|
|
|
|
|
|
2018-09-09 23:08:44 +02:00
|
|
|
|
2018-08-15 19:01:28 +02:00
|
|
|
class Network(PrintError):
|
2015-06-03 00:03:33 +09:00
|
|
|
"""The Network class manages a set of connections to remote electrum
|
|
|
|
|
servers, each connected socket is handled by an Interface() object.
|
2015-05-22 10:36:45 +09:00
|
|
|
"""
|
2018-07-18 13:31:41 +02:00
|
|
|
verbosity_filter = 'n'
|
2013-09-08 17:23:01 +02:00
|
|
|
|
2015-12-03 11:18:10 +01:00
|
|
|
    def __init__(self, config=None):
        """Initialise network state and start the event-loop thread.

        config may be a dict (wrapped in a SimpleConfig) or a SimpleConfig.
        Registers this instance as the module-level singleton INSTANCE.
        """
        global INSTANCE
        INSTANCE = self
        if config is None:
            config = {}  # Do not use mutables as default values!
        self.config = SimpleConfig(config) if isinstance(config, dict) else config
        # 'oneserver' disables the redundant background connections entirely
        self.num_server = 10 if not self.config.get('oneserver') else 0
        blockchain.blockchains = blockchain.read_blockchains(self.config)
        self.print_error("blockchains", list(blockchain.blockchains.keys()))
        self.blockchain_index = config.get('blockchain_index', 0)
        if self.blockchain_index not in blockchain.blockchains.keys():
            self.blockchain_index = 0
        # Server for addresses and transactions
        self.default_server = self.config.get('server', None)
        # Sanitize default server
        if self.default_server:
            try:
                deserialize_server(self.default_server)
            except:
                self.print_error('Warning: failed to parse server-string; falling back to random.')
                self.default_server = None
        if not self.default_server:
            self.default_server = pick_random_server()

        self.main_taskgroup = None
        self._jobs = []

        # locks
        self.restart_lock = asyncio.Lock()
        self.bhi_lock = asyncio.Lock()
        self.callback_lock = threading.Lock()
        self.recent_servers_lock = threading.RLock()  # <- re-entrant
        self.interfaces_lock = threading.Lock()  # for mutating/iterating self.interfaces

        self.server_peers = {}  # returned by interface (servers that the main interface knows about)
        self.recent_servers = self._read_recent_servers()  # note: needs self.recent_servers_lock

        self.banner = ''
        self.donation_address = ''
        self.relay_fee = None
        # callbacks set by the GUI
        self.callbacks = defaultdict(list)  # note: needs self.callback_lock

        dir_path = os.path.join(self.config.path, 'certs')
        util.make_dir(dir_path)

        # retry times
        self.server_retry_time = time.time()
        self.nodes_retry_time = time.time()
        # kick off the network. interface is the main server we are currently
        # communicating with. interfaces is the set of servers we are connecting
        # to or have an ongoing connection with
        self.interface = None  # type: Interface
        self.interfaces = {}
        self.auto_connect = self.config.get('auto_connect', True)
        self.connecting = set()
        self.server_queue = None
        self.proxy = None

        # The network runs its own asyncio loop on a dedicated thread;
        # completing self._run_forever shuts the loop down.
        self.asyncio_loop = asyncio.get_event_loop()
        #self.asyncio_loop.set_debug(1)
        self._run_forever = asyncio.Future()
        self._thread = threading.Thread(target=self.asyncio_loop.run_until_complete,
                                        args=(self._run_forever,),
                                        name='Network')
        self._thread.start()
|
|
|
|
|
|
|
|
|
|
def run_from_another_thread(self, coro):
|
|
|
|
|
assert self._thread != threading.current_thread(), 'must not be called from network thread'
|
|
|
|
|
fut = asyncio.run_coroutine_threadsafe(coro, self.asyncio_loop)
|
|
|
|
|
return fut.result()
|
2015-03-12 12:56:06 +01:00
|
|
|
|
2018-09-07 11:34:56 +02:00
|
|
|
@staticmethod
|
|
|
|
|
def get_instance():
|
|
|
|
|
return INSTANCE
|
|
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
def with_recent_servers_lock(func):
|
|
|
|
|
def func_wrapper(self, *args, **kwargs):
|
|
|
|
|
with self.recent_servers_lock:
|
|
|
|
|
return func(self, *args, **kwargs)
|
|
|
|
|
return func_wrapper
|
|
|
|
|
|
2015-11-13 22:42:21 +09:00
|
|
|
def register_callback(self, callback, events):
|
2018-06-21 21:06:56 +02:00
|
|
|
with self.callback_lock:
|
2015-11-13 22:42:21 +09:00
|
|
|
for event in events:
|
|
|
|
|
self.callbacks[event].append(callback)
|
|
|
|
|
|
|
|
|
|
def unregister_callback(self, callback):
|
2018-06-21 21:06:56 +02:00
|
|
|
with self.callback_lock:
|
2015-11-13 22:42:21 +09:00
|
|
|
for callbacks in self.callbacks.values():
|
|
|
|
|
if callback in callbacks:
|
|
|
|
|
callbacks.remove(callback)
|
2015-08-30 21:18:10 +09:00
|
|
|
|
2015-11-12 16:08:37 +09:00
|
|
|
def trigger_callback(self, event, *args):
|
2018-06-21 21:06:56 +02:00
|
|
|
with self.callback_lock:
|
2015-08-30 21:18:10 +09:00
|
|
|
callbacks = self.callbacks[event][:]
|
2018-09-07 19:34:28 +02:00
|
|
|
for callback in callbacks:
|
2018-09-20 21:07:31 +02:00
|
|
|
# FIXME: if callback throws, we will lose the traceback
|
2018-09-07 19:34:28 +02:00
|
|
|
if asyncio.iscoroutinefunction(callback):
|
|
|
|
|
asyncio.run_coroutine_threadsafe(callback(event, *args), self.asyncio_loop)
|
|
|
|
|
else:
|
2018-09-20 21:07:31 +02:00
|
|
|
self.asyncio_loop.call_soon_threadsafe(callback, event, *args)
|
2015-08-30 21:18:10 +09:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
def _read_recent_servers(self):
|
2015-04-02 10:12:51 +02:00
|
|
|
if not self.config.path:
|
|
|
|
|
return []
|
|
|
|
|
path = os.path.join(self.config.path, "recent_servers")
|
|
|
|
|
try:
|
2018-03-23 21:47:51 +01:00
|
|
|
with open(path, "r", encoding='utf-8') as f:
|
2015-04-02 10:12:51 +02:00
|
|
|
data = f.read()
|
|
|
|
|
return json.loads(data)
|
|
|
|
|
except:
|
|
|
|
|
return []
|
|
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_recent_servers_lock
|
2018-09-25 16:38:26 +02:00
|
|
|
def _save_recent_servers(self):
|
2015-04-02 10:12:51 +02:00
|
|
|
if not self.config.path:
|
|
|
|
|
return
|
|
|
|
|
path = os.path.join(self.config.path, "recent_servers")
|
|
|
|
|
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
|
|
|
|
|
try:
|
2018-03-23 21:47:51 +01:00
|
|
|
with open(path, "w", encoding='utf-8') as f:
|
2015-04-02 10:12:51 +02:00
|
|
|
f.write(s)
|
|
|
|
|
except:
|
|
|
|
|
pass
|
|
|
|
|
|
2014-07-25 16:32:19 +02:00
|
|
|
def get_server_height(self):
|
2018-09-25 16:38:26 +02:00
|
|
|
interface = self.interface
|
|
|
|
|
return interface.tip if interface else 0
|
2014-07-25 16:32:19 +02:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def _server_is_lagging(self):
|
2015-05-25 17:45:01 +09:00
|
|
|
sh = self.get_server_height()
|
|
|
|
|
if not sh:
|
2015-03-12 21:39:05 +01:00
|
|
|
self.print_error('no height for main interface')
|
2016-02-15 15:58:08 +01:00
|
|
|
return True
|
2015-05-25 17:45:01 +09:00
|
|
|
lh = self.get_local_height()
|
|
|
|
|
result = (lh - sh) > 1
|
|
|
|
|
if result:
|
|
|
|
|
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
|
|
|
|
|
return result
|
2014-07-25 16:32:19 +02:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
def _set_status(self, status):
|
2014-07-24 23:14:47 +02:00
|
|
|
self.connection_status = status
|
2014-07-30 10:19:15 +02:00
|
|
|
self.notify('status')
|
2014-07-24 23:14:47 +02:00
|
|
|
|
2013-10-04 13:51:46 +02:00
|
|
|
def is_connected(self):
|
2018-09-19 16:35:30 +02:00
|
|
|
interface = self.interface
|
|
|
|
|
return interface is not None and interface.ready.done()
|
2015-06-03 00:03:33 +09:00
|
|
|
|
2015-08-30 21:18:10 +09:00
|
|
|
def is_connecting(self):
|
|
|
|
|
return self.connection_status == 'connecting'
|
|
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
    async def _request_server_info(self, interface):
        """Once *interface* is ready, fetch server metadata concurrently.

        Retrieves the banner, donation address, peer list, relay fee and
        fee estimates, firing GUI notifications as each result arrives.
        """
        await interface.ready
        session = interface.session

        async def get_banner():
            self.banner = await session.send_request('server.banner')
            self.notify('banner')
        async def get_donation_address():
            self.donation_address = await session.send_request('server.donation_address')
        async def get_server_peers():
            self.server_peers = parse_servers(await session.send_request('server.peers.subscribe'))
            self.notify('servers')
        async def get_relay_fee():
            relayfee = await session.send_request('blockchain.relayfee')
            if relayfee is None:
                self.relay_fee = None
            else:
                # convert to satoshis and clamp to be non-negative
                relayfee = int(relayfee * COIN)
                self.relay_fee = max(0, relayfee)

        # run all requests concurrently; the TaskGroup waits for completion
        async with TaskGroup() as group:
            await group.spawn(get_banner)
            await group.spawn(get_donation_address)
            await group.spawn(get_server_peers)
            await group.spawn(get_relay_fee)
            await group.spawn(self._request_fee_estimates(interface))
|
2018-08-31 16:46:49 +02:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
    async def _request_fee_estimates(self, interface):
        """Fetch the mempool fee histogram and per-target fee estimates.

        Results are stored on self.config and announced via the
        'fee_histogram' and 'fee' callbacks.
        """
        session = interface.session
        from .simple_config import FEE_ETA_TARGETS
        # record the request time so the config can rate-limit refreshes
        self.config.requested_fee_estimates()
        async with TaskGroup() as group:
            histogram_task = await group.spawn(session.send_request('mempool.get_fee_histogram'))
            fee_tasks = []
            for i in FEE_ETA_TARGETS:
                fee_tasks.append((i, await group.spawn(session.send_request('blockchain.estimatefee', [i]))))
        self.config.mempool_fees = histogram = histogram_task.result()
        self.print_error('fee_histogram', histogram)
        self.notify('fee_histogram')
        for i, task in fee_tasks:
            fee = int(task.result() * COIN)
            self.print_error("fee_estimates[%d]" % i, fee)
            # a negative estimate means the server has no data for this target
            if fee < 0: continue
            self.config.update_fee_estimates(i, fee)
        self.notify('fee')
|
|
|
|
|
|
2014-07-27 11:33:02 +02:00
|
|
|
def get_status_value(self, key):
|
|
|
|
|
if key == 'status':
|
|
|
|
|
value = self.connection_status
|
|
|
|
|
elif key == 'banner':
|
|
|
|
|
value = self.banner
|
2015-08-04 07:15:54 +02:00
|
|
|
elif key == 'fee':
|
2017-01-09 09:22:17 +01:00
|
|
|
value = self.config.fee_estimates
|
2017-11-22 12:09:56 +01:00
|
|
|
elif key == 'fee_histogram':
|
|
|
|
|
value = self.config.mempool_fees
|
2014-07-27 11:33:02 +02:00
|
|
|
elif key == 'servers':
|
|
|
|
|
value = self.get_servers()
|
2018-09-18 16:49:48 +02:00
|
|
|
else:
|
|
|
|
|
raise Exception('unexpected trigger key {}'.format(key))
|
2014-07-27 11:33:02 +02:00
|
|
|
return value
|
|
|
|
|
|
2014-07-30 10:19:15 +02:00
|
|
|
def notify(self, key):
|
2015-08-30 21:18:10 +09:00
|
|
|
if key in ['status', 'updated']:
|
|
|
|
|
self.trigger_callback(key)
|
|
|
|
|
else:
|
2015-11-12 16:08:37 +09:00
|
|
|
self.trigger_callback(key, self.get_status_value(key))
|
2013-09-08 17:23:01 +02:00
|
|
|
|
2018-09-10 00:59:53 +02:00
|
|
|
def get_parameters(self) -> NetworkParameters:
|
2015-03-12 18:06:28 +01:00
|
|
|
host, port, protocol = deserialize_server(self.default_server)
|
2018-09-10 00:59:53 +02:00
|
|
|
return NetworkParameters(host, port, protocol, self.proxy, self.auto_connect)
|
2014-07-25 09:11:56 +02:00
|
|
|
|
2016-02-15 16:17:07 +01:00
|
|
|
def get_donation_address(self):
|
|
|
|
|
if self.is_connected():
|
|
|
|
|
return self.donation_address
|
|
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
def get_interfaces(self) -> List[str]:
|
|
|
|
|
"""The list of servers for the connected interfaces."""
|
|
|
|
|
with self.interfaces_lock:
|
|
|
|
|
return list(self.interfaces)
|
2013-09-08 17:23:01 +02:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_recent_servers_lock
|
2013-09-12 08:41:27 +02:00
|
|
|
def get_servers(self):
|
2018-09-13 16:06:41 +02:00
|
|
|
# start with hardcoded servers
|
2018-03-04 22:10:59 +01:00
|
|
|
out = constants.net.DEFAULT_SERVERS
|
2018-09-13 16:06:41 +02:00
|
|
|
# add recent servers
|
|
|
|
|
for s in self.recent_servers:
|
|
|
|
|
try:
|
|
|
|
|
host, port, protocol = deserialize_server(s)
|
|
|
|
|
except:
|
|
|
|
|
continue
|
|
|
|
|
if host not in out:
|
|
|
|
|
out[host] = {protocol: port}
|
|
|
|
|
# add servers received from main interface
|
2018-09-25 16:38:26 +02:00
|
|
|
server_peers = self.server_peers
|
|
|
|
|
if server_peers:
|
|
|
|
|
out.update(filter_version(server_peers.copy()))
|
2018-09-13 16:06:41 +02:00
|
|
|
# potentially filter out some
|
2018-07-31 20:25:53 +02:00
|
|
|
if self.config.get('noonion'):
|
|
|
|
|
out = filter_noonion(out)
|
2013-10-04 14:30:23 +02:00
|
|
|
return out
|
2013-09-12 08:41:27 +02:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
def _start_interface(self, server):
|
2018-09-13 21:20:55 +02:00
|
|
|
if server not in self.interfaces and server not in self.connecting:
|
2015-05-22 09:45:51 +09:00
|
|
|
if server == self.default_server:
|
2015-09-01 17:28:10 +09:00
|
|
|
self.print_error("connecting to %s as new interface" % server)
|
2018-09-25 16:38:26 +02:00
|
|
|
self._set_status('connecting')
|
2015-10-17 07:07:10 +02:00
|
|
|
self.connecting.add(server)
|
2018-09-13 21:20:55 +02:00
|
|
|
self.server_queue.put(server)
|
2013-09-10 17:52:43 +02:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
def _start_random_interface(self):
|
|
|
|
|
with self.interfaces_lock:
|
2018-09-20 21:07:31 +02:00
|
|
|
exclude_set = self.disconnected_servers | set(self.interfaces) | self.connecting
|
2015-05-24 11:14:39 +09:00
|
|
|
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
|
2013-09-10 17:52:43 +02:00
|
|
|
if server:
|
2018-09-25 16:38:26 +02:00
|
|
|
self._start_interface(server)
|
2018-08-27 20:39:36 +02:00
|
|
|
return server
|
2013-09-10 17:52:43 +02:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
    def _set_proxy(self, proxy: Optional[dict]):
        """Install *proxy* and monkey-patch socket.getaddrinfo accordingly.

        With a proxy set, name resolution is short-circuited (the proxy
        resolves names) to prevent DNS leaks.  Without one, the original
        resolver is restored, except on Windows where a faster custom
        resolver is used instead.
        """
        self.proxy = proxy
        # Store these somewhere so we can un-monkey-patch
        if not hasattr(socket, "_getaddrinfo"):
            socket._getaddrinfo = socket.getaddrinfo
        if proxy:
            self.print_error('setting proxy', proxy)
            # prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
            socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
        else:
            if sys.platform == 'win32':
                # On Windows, socket.getaddrinfo takes a mutex, and might hold it for up to 10 seconds
                # when dns-resolving. To speed it up drastically, we resolve dns ourselves, outside that lock.
                # see #4421
                socket.getaddrinfo = self._fast_getaddrinfo
            else:
                socket.getaddrinfo = socket._getaddrinfo
        self.trigger_callback('proxy_set', self.proxy)
|
2015-03-13 12:00:08 +01:00
|
|
|
|
2018-07-02 00:59:28 +02:00
|
|
|
    @staticmethod
    def _fast_getaddrinfo(host, *args, **kwargs):
        """Resolve *host* via dnspython, then delegate to the saved getaddrinfo.

        Installed on Windows to avoid the global lock inside the native
        socket.getaddrinfo (see #4421).  Raises socket.gaierror on genuine
        DNS failures; falls back to the stock resolver on internal
        dnspython errors.
        """
        def needs_dns_resolving(host2):
            # literal IP addresses and localhost need no lookup
            try:
                ipaddress.ip_address(host2)
                return False  # already valid IP
            except ValueError:
                pass  # not an IP
            # NOTE(review): checks the outer 'host' rather than 'host2';
            # equivalent at the sole call site (host2 == host) -- confirm
            if str(host) in ('localhost', 'localhost.',):
                return False
            return True
        try:
            if needs_dns_resolving(host):
                answers = dns.resolver.query(host)
                addr = str(answers[0])
            else:
                addr = host
        except dns.exception.DNSException as e:
            # dns failed for some reason, e.g. dns.resolver.NXDOMAIN
            # this is normal. Simply report back failure:
            raise socket.gaierror(11001, 'getaddrinfo failed') from e
        except BaseException as e:
            # Possibly internal error in dnspython :( see #4483
            # Fall back to original socket.getaddrinfo to resolve dns.
            print_error('dnspython failed to resolve dns with error:', e)
            addr = host
        # socket._getaddrinfo is the pristine resolver saved by _set_proxy
        return socket._getaddrinfo(addr, *args, **kwargs)
|
|
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
    @aiosafe
    async def set_parameters(self, net_params: NetworkParameters):
        """Apply new network settings, restarting/switching connections as needed.

        Settings are validated and persisted to config first; we abort
        silently if they are malformed or the config rejects the change.
        Under restart_lock: a proxy or protocol change triggers a full
        stop/start, a mere server change switches interface, and otherwise
        we only check whether the current server is lagging.
        """
        proxy = net_params.proxy
        proxy_str = serialize_proxy(proxy)
        host, port, protocol = net_params.host, net_params.port, net_params.protocol
        server_str = serialize_server(host, port, protocol)
        # sanitize parameters
        try:
            deserialize_server(serialize_server(host, port, protocol))
            if proxy:
                # raises if mode unknown or port non-numeric
                proxy_modes.index(proxy["mode"]) + 1
                int(proxy['port'])
        except:
            return
        self.config.set_key('auto_connect', net_params.auto_connect, False)
        self.config.set_key("proxy", proxy_str, False)
        self.config.set_key("server", server_str, True)
        # abort if changes were not allowed by config
        if self.config.get('server') != server_str or self.config.get('proxy') != proxy_str:
            return

        async with self.restart_lock:
            self.auto_connect = net_params.auto_connect
            if self.proxy != proxy or self.protocol != protocol:
                # Restart the network defaulting to the given server
                await self._stop()
                self.default_server = server_str
                await self._start()
            elif self.default_server != server_str:
                await self.switch_to_interface(server_str)
            else:
                await self.switch_lagging_interface()
|
2013-10-05 10:01:33 +02:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def _switch_to_random_interface(self):
|
2015-06-03 00:03:33 +09:00
|
|
|
'''Switch to a random connected server other than the current one'''
|
2015-05-23 22:59:29 +09:00
|
|
|
servers = self.get_interfaces() # Those in connected state
|
2015-06-03 00:03:33 +09:00
|
|
|
if self.default_server in servers:
|
2017-09-14 02:06:08 +02:00
|
|
|
servers.remove(self.default_server)
|
2015-05-23 22:59:29 +09:00
|
|
|
if servers:
|
2018-09-25 16:38:26 +02:00
|
|
|
await self.switch_to_interface(random.choice(servers))
|
2013-10-05 10:01:33 +02:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def switch_lagging_interface(self):
|
2015-05-25 17:45:01 +09:00
|
|
|
'''If auto_connect and lagging, switch interface'''
|
2018-09-25 16:38:26 +02:00
|
|
|
if await self._server_is_lagging() and self.auto_connect:
|
2017-05-29 09:03:39 +02:00
|
|
|
# switch to one that has the correct header (not height)
|
|
|
|
|
header = self.blockchain().read_header(self.get_local_height())
|
2018-08-14 17:50:20 +02:00
|
|
|
def filt(x):
|
|
|
|
|
a = x[1].tip_header
|
|
|
|
|
b = header
|
|
|
|
|
assert type(a) is type(b)
|
|
|
|
|
return a == b
|
2018-09-25 16:38:26 +02:00
|
|
|
|
|
|
|
|
with self.interfaces_lock: interfaces_items = list(self.interfaces.items())
|
|
|
|
|
filtered = list(map(lambda x: x[0], filter(filt, interfaces_items)))
|
2017-05-29 09:03:39 +02:00
|
|
|
if filtered:
|
|
|
|
|
choice = random.choice(filtered)
|
2018-09-25 16:38:26 +02:00
|
|
|
await self.switch_to_interface(choice)
|
|
|
|
|
|
|
|
|
|
    async def switch_to_interface(self, server: str):
        """Switch to server as our main interface. If no connection exists,
        queue interface to be started. The actual switch will
        happen when the interface becomes ready.
        """
        self.default_server = server
        old_interface = self.interface
        old_server = old_interface.server if old_interface else None

        # Stop any current interface in order to terminate subscriptions,
        # and to cancel tasks in interface.group.
        # However, for headers sub, give preference to this interface
        # over unknown ones, i.e. start it again right away.
        if old_server and old_server != server:
            await self._close_interface(old_interface)
            if len(self.interfaces) <= self.num_server:
                self._start_interface(old_server)

        # no live connection yet: queue one and return; we'll be called
        # again once it is ready
        if server not in self.interfaces:
            self.interface = None
            self._start_interface(server)
            return

        i = self.interfaces[server]
        if old_interface != i:
            self.print_error("switching to", server)
            # compare blockchains before installing the new main interface
            blockchain_updated = i.blockchain != self.blockchain()
            self.interface = i
            await i.group.spawn(self._request_server_info(i))
            self.trigger_callback('default_server_changed')
            self._set_status('connected')
            self.trigger_callback('network_updated')
            if blockchain_updated: self.trigger_callback('blockchain_updated')
|
2013-10-02 12:13:07 +02:00
|
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def _close_interface(self, interface):
    """Close *interface* and drop our references to it.

    Removes it from self.interfaces (only if it is still the registered
    entry for its server), clears self.interface when it was the main
    interface, and awaits the actual close. No-op for a falsy argument.
    """
    if not interface:
        return
    # un-register first, so concurrent code stops handing out this interface
    with self.interfaces_lock:
        if self.interfaces.get(interface.server) == interface:
            self.interfaces.pop(interface.server)
    if interface.server == self.default_server:
        self.interface = None
    await interface.close()
|
|
@with_recent_servers_lock
def _add_recent_server(self, server):
    """Record *server* as most-recently-used and persist the list.

    The list is kept ordered most-recent-first and capped at 20 entries.
    """
    servers = self.recent_servers
    # move-to-front: drop any previous occurrence, then insert at the head
    try:
        servers.remove(server)
    except ValueError:
        pass
    servers.insert(0, server)
    self.recent_servers = servers[:20]
    self._save_recent_servers()
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def connection_down(self, server):
    """A connection to *server* either went down, or was never made.

    We distinguish the two cases by whether the server is present in
    self.interfaces. The server is blacklisted into
    self.disconnected_servers either way.
    """
    self.disconnected_servers.add(server)
    if server == self.default_server:
        self._set_status('disconnected')
    iface = self.interfaces.get(server, None)
    if iface:
        await self._close_interface(iface)
    self.trigger_callback('network_updated')
|
2018-08-14 17:50:20 +02:00
|
|
|
@aiosafe
async def _run_new_interface(self, server):
    """Open a connection to *server* and register it in self.interfaces.

    Runs as its own task (wrapped by @aiosafe). If the handshake does
    not become ready within the timeout, the interface is closed and
    discarded. If *server* is the chosen default server, we also switch
    to it.
    """
    interface = Interface(self, server, self.config.path, self.proxy)
    # proxied connections are slower to establish; allow extra time
    timeout = 10 if not self.proxy else 20
    try:
        await asyncio.wait_for(interface.ready, timeout)
    except BaseException as e:
        #traceback.print_exc()
        self.print_error(server, "couldn't launch because", str(e), str(type(e)))
        await interface.close()
        return
    else:
        with self.interfaces_lock:
            assert server not in self.interfaces
            self.interfaces[server] = interface
    finally:
        # whatever happened, this server is no longer "connecting";
        # it may already have been removed elsewhere, hence the KeyError pass
        try: self.connecting.remove(server)
        except KeyError: pass

    if server == self.default_server:
        await self.switch_to_interface(server)

    self._add_recent_server(server)
    self.trigger_callback('network_updated')
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def _init_headers_file(self):
    """Ensure the headers file exists and is at least large enough to
    hold all checkpointed chunks, then refresh the blockchain object's
    notion of its own size.
    """
    b = blockchain.blockchains[0]
    filename = b.path()
    # one chunk of 2016 headers per checkpoint
    length = HEADER_SIZE * len(constants.net.CHECKPOINTS) * 2016
    if not os.path.exists(filename) or os.path.getsize(filename) < length:
        # NOTE(review): opening with 'wb' truncates an existing undersized
        # file — presumably acceptable because such a file is incomplete
        with open(filename, 'wb') as f:
            if length > 0:
                # seek-and-write allocates the file without materialising
                # all of it (made sparse explicitly below)
                f.seek(length-1)
                f.write(b'\x00')
        util.ensure_sparse_file(filename)
    with b.lock:
        b.update_size()
|
2018-09-27 18:01:25 +02:00
|
|
|
def best_effort_reliable(func):
    """Decorator for async Network methods that proxy a request to the
    main interface.

    Retries the wrapped call (up to 10 attempts) while there is no usable
    main interface, when the interface disconnects mid-flight, or when
    the request times out. Raises a generic Exception once all attempts
    are exhausted.

    Fixes vs previous version: the wrapper now preserves *func* metadata
    (functools.wraps), and a request task orphaned by a mid-flight
    disconnect is cancelled instead of being leaked.
    """
    import functools

    @functools.wraps(func)
    async def make_reliable_wrapper(self, *args, **kwargs):
        for _attempt in range(10):
            iface = self.interface
            # retry until there is a main interface
            if not iface:
                await asyncio.sleep(0.1)
                continue  # try again
            # wait for it to be usable
            iface_ready = iface.ready
            iface_disconnected = iface.got_disconnected
            await asyncio.wait([iface_ready, iface_disconnected], return_when=asyncio.FIRST_COMPLETED)
            if not iface_ready.done() or iface_ready.cancelled():
                await asyncio.sleep(0.1)
                continue  # try again
            # try actual request
            success_fut = asyncio.ensure_future(func(self, *args, **kwargs))
            await asyncio.wait([success_fut, iface_disconnected], return_when=asyncio.FIRST_COMPLETED)
            if success_fut.done() and not success_fut.cancelled():
                if success_fut.exception():
                    try:
                        raise success_fut.exception()
                    except RequestTimedOut:
                        # force-close this interface and wait for the
                        # disconnect to settle before retrying
                        await iface.close()
                        await iface_disconnected
                        continue  # try again
                return success_fut.result()
            # the interface disconnected before the request finished:
            # cancel the now-orphaned request task so it does not leak,
            # then try again (likely on a new main interface)
            success_fut.cancel()
        raise Exception('no interface to do request on... gave up.')
    return make_reliable_wrapper
|
|
|
|
|
@best_effort_reliable
async def get_merkle_for_transaction(self, tx_hash: str, tx_height: int) -> dict:
    """Ask the main server for the merkle branch of *tx_hash* mined at *tx_height*."""
    session = self.interface.session
    return await session.send_request('blockchain.transaction.get_merkle', [tx_hash, tx_height])
|
2018-09-27 18:01:25 +02:00
|
|
|
@best_effort_reliable
async def broadcast_transaction(self, tx, timeout=10):
    """Try to broadcast *tx* via the main server.

    Returns a (success, message) pair; *message* is the txid on success
    and an "error: ..." string otherwise. Server-side failures are
    reported in the pair rather than raised.
    """
    request_args = [str(tx)]
    try:
        out = await self.interface.session.send_request('blockchain.transaction.broadcast', request_args, timeout=timeout)
    except RequestTimedOut:
        return False, "error: operation timed out"
    except Exception as e:
        return False, "error: " + str(e)
    if out == tx.txid():
        return True, out
    # the server echoed something other than our txid: treat it as error text
    return False, "error: " + out
|
|
2018-09-27 18:01:25 +02:00
|
|
|
@best_effort_reliable
async def request_chunk(self, height, tip=None, *, can_return_early=False):
    """Fetch, via the main interface, the header chunk containing *height*."""
    main_iface = self.interface
    return await main_iface.request_chunk(height, tip=tip, can_return_early=can_return_early)
|
|
2018-09-27 18:01:25 +02:00
|
|
|
@best_effort_reliable
async def get_transaction(self, tx_hash: str) -> str:
    """Fetch the raw transaction for *tx_hash* from the main server."""
    session = self.interface.session
    return await session.send_request('blockchain.transaction.get', [tx_hash])
|
|
|
|
|
@best_effort_reliable
async def get_history_for_scripthash(self, sh: str) -> List[dict]:
    """Fetch the history of scripthash *sh* from the main server."""
    session = self.interface.session
    return await session.send_request('blockchain.scripthash.get_history', [sh])
|
|
|
|
|
@best_effort_reliable
async def listunspent_for_scripthash(self, sh: str) -> List[dict]:
    """Fetch the unspent outputs of scripthash *sh* from the main server."""
    session = self.interface.session
    return await session.send_request('blockchain.scripthash.listunspent', [sh])
|
|
|
|
|
@best_effort_reliable
async def get_balance_for_scripthash(self, sh: str) -> dict:
    """Fetch the balance of scripthash *sh* from the main server."""
    session = self.interface.session
    return await session.send_request('blockchain.scripthash.get_balance', [sh])
|
|
|
|
|
def blockchain(self) -> Blockchain:
    """Return the Blockchain object we currently follow.

    Side effect: when a main interface with a known chain is available,
    self.blockchain_index is refreshed from it first.
    """
    iface = self.interface
    if iface and iface.blockchain is not None:
        self.blockchain_index = iface.blockchain.forkpoint
    return blockchain.blockchains[self.blockchain_index]
|
2017-07-18 21:37:04 +02:00
|
|
|
def get_blockchains(self):
    """Return a dict mapping blockchain_id -> list of interfaces that
    currently follow that chain (chains with no followers are omitted).
    """
    # snapshot both shared structures under their locks
    with blockchain.blockchains_lock:
        chain_items = list(blockchain.blockchains.items())
    with self.interfaces_lock:
        ifaces = list(self.interfaces.values())
    out = {}  # blockchain_id -> list(interfaces)
    for chain_id, bc in chain_items:
        followers = [iface for iface in ifaces if iface.blockchain == bc]
        if followers:
            out[chain_id] = followers
    return out
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def disconnect_from_interfaces_on_given_blockchain(self, chain: Blockchain) -> Sequence[Interface]:
    """Disconnect every interface currently following *chain*.

    Returns the interfaces that were disconnected (possibly empty).
    """
    followers = self.get_blockchains().get(chain.forkpoint) or []
    for iface in followers:
        await self.connection_down(iface.server)
    return followers
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def follow_chain(self, chain_id):
    """Make *chain_id* the chain we follow, and switch the main
    interface to a server that is on it.

    Raises a generic Exception when no blockchain with that id exists.
    """
    bc = blockchain.blockchains.get(chain_id)
    if bc:
        self.blockchain_index = chain_id
        self.config.set_key('blockchain_index', chain_id)
        with self.interfaces_lock: interfaces_values = list(self.interfaces.values())
        # switch to the first connected interface already on this chain
        # (if none is, we keep the current main interface)
        for iface in interfaces_values:
            if iface.blockchain == bc:
                await self.switch_to_interface(iface.server)
                break
    else:
        raise Exception('blockchain not found', chain_id)

    if self.interface:
        # persist the (possibly new) main server in the network parameters
        net_params = self.get_parameters()
        host, port, protocol = deserialize_server(self.interface.server)
        net_params = net_params._replace(host=host, port=port, protocol=protocol)
        await self.set_parameters(net_params)
|
2014-03-10 20:53:05 +01:00
|
|
|
def get_local_height(self):
    """Return the height of the blockchain we currently follow."""
    chain = self.blockchain()
    return chain.height()
|
|
2017-12-12 11:10:50 +01:00
|
|
|
def export_checkpoints(self, path):
    """Run manually to generate blockchain checkpoints.

    Kept for console use only. Writes the current chain's checkpoints
    to *path* as indented JSON.
    """
    checkpoints = self.blockchain().get_checkpoints()
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(checkpoints, f, indent=4)
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def _start(self, jobs=None):
    """Bring the network up: reset connection state and spawn the main
    task group (session maintenance plus any extra *jobs* coroutines).

    Must not be called while already started (asserted below).
    """
    if jobs is None: jobs = self._jobs
    self._jobs = jobs
    assert not self.main_taskgroup
    self.main_taskgroup = SilentTaskGroup()

    async def main():
        try:
            await self._init_headers_file()
            # the taskgroup runs until cancelled or one of its tasks fails
            async with self.main_taskgroup as group:
                await group.spawn(self._maintain_sessions())
                [await group.spawn(job) for job in jobs]
        except Exception as e:
            traceback.print_exc(file=sys.stderr)
            raise e
    asyncio.run_coroutine_threadsafe(main(), self.asyncio_loop)

    assert not self.interface and not self.interfaces
    assert not self.connecting and not self.server_queue
    self.print_error('starting network')
    self.disconnected_servers = set([])
    # third component of a deserialized server is its protocol
    self.protocol = deserialize_server(self.default_server)[2]
    self.server_queue = queue.Queue()
    self._set_proxy(deserialize_proxy(self.config.get('proxy')))
    self._start_interface(self.default_server)
    self.trigger_callback('network_updated')
|
|
|
|
|
def start(self, jobs=None):
    """Thread-safe entry point: schedule network start-up on our event loop."""
    startup_coro = self._start(jobs=jobs)
    asyncio.run_coroutine_threadsafe(startup_coro, self.asyncio_loop)
|
|
|
|
|
async def _stop(self, full_shutdown=False):
    """Tear the network down: cancel all tasks and reset connection state.

    With full_shutdown=True, also resolve self._run_forever so the
    network thread's event loop can terminate.
    """
    self.print_error("stopping network")
    try:
        # bug fix: wait_for() must be handed the un-awaited coroutine and
        # must itself be awaited. Previously `cancel_remaining()` was
        # awaited directly (so the 2s timeout never applied) and the
        # wait_for coroutine was never awaited at all.
        await asyncio.wait_for(self.main_taskgroup.cancel_remaining(), timeout=2)
    except asyncio.TimeoutError:
        pass
    self.main_taskgroup = None

    assert self.interface is None
    assert not self.interfaces
    self.connecting.clear()
    self.server_queue = None
    self.trigger_callback('network_updated')

    if full_shutdown:
        self._run_forever.set_result(1)
|
|
|
|
def stop(self):
    """Fully shut the network down and block until it is done.

    Must be called from a thread other than the network thread.
    """
    assert self._thread != threading.current_thread(), 'must not be called from network thread'
    shutdown_fut = asyncio.run_coroutine_threadsafe(self._stop(full_shutdown=True), self.asyncio_loop)
    shutdown_fut.result()
|
|
|
|
|
def join(self):
    """Wait (at most one second) for the network thread to finish."""
    self._thread.join(timeout=1)
|
|
2018-10-01 18:16:37 +02:00
|
|
|
async def _ensure_there_is_a_main_interface(self):
    """Try to make sure we have a main interface (connected or connecting).

    With auto_connect enabled, a random connected server may be chosen;
    otherwise the user-chosen default server is retried, rate-limited by
    SERVER_RETRY_INTERVAL.
    """
    if self.is_connected():
        return
    now = time.time()
    # if auto_connect is set, try a different server
    if self.auto_connect and not self.is_connecting():
        await self._switch_to_random_interface()
    # if auto_connect is not set, or still no main interface, retry current
    if not self.is_connected() and not self.is_connecting():
        if self.default_server in self.disconnected_servers:
            if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
                # enough time has passed: stop blacklisting the default
                # server so a later iteration may reconnect to it
                self.disconnected_servers.remove(self.default_server)
                self.server_retry_time = now
        else:
            await self.switch_to_interface(self.default_server)
|
|
|
2018-09-25 16:38:26 +02:00
|
|
|
async def _maintain_sessions(self):
    """Forever-loop that keeps our server connections healthy.

    Each iteration: spawn queued-up interfaces, top the pool up towards
    self.num_server connections, periodically forgive disconnected
    servers, make sure there is a main interface, and request fee
    estimates when due.
    """
    while True:
        # launch already queued up new interfaces
        while self.server_queue.qsize() > 0:
            server = self.server_queue.get()
            await self.main_taskgroup.spawn(self._run_new_interface(server))

        # maybe queue new interfaces to be launched later
        now = time.time()
        for i in range(self.num_server - len(self.interfaces) - len(self.connecting)):
            self._start_random_interface()
        if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
            # periodically clear the blacklist so all servers get retried
            self.print_error('network: retrying connections')
            self.disconnected_servers = set([])
            self.nodes_retry_time = now

        # main interface
        await self._ensure_there_is_a_main_interface()
        if self.is_connected():
            if self.config.is_fee_estimates_update_required():
                await self.interface.group.spawn(self._request_fee_estimates, self.interface)

        await asyncio.sleep(0.1)