Files
pallectrum/electrum/network.py

815 lines
32 KiB
Python
Raw Normal View History

2016-02-23 11:36:42 +01:00
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
2017-09-04 14:43:31 +02:00
import queue
import os
import random
import re
from collections import defaultdict
import threading
import socket
2015-04-02 10:12:51 +02:00
import json
import sys
2018-07-02 00:59:28 +02:00
import ipaddress
import asyncio
2018-09-10 00:59:53 +02:00
from typing import NamedTuple, Optional
import dns
import dns.resolver
2018-09-08 15:36:16 +02:00
from aiorpcx import TaskGroup
2017-01-22 21:25:24 +03:00
from . import util
2018-08-14 17:50:20 +02:00
from .util import PrintError, print_error, aiosafe, bfh
2018-06-19 17:36:01 +01:00
from .bitcoin import COIN
2018-03-04 22:10:59 +01:00
from . import constants
2017-01-22 21:25:24 +03:00
from . import blockchain
from .interface import Interface, serialize_server, deserialize_server
from .version import PROTOCOL_VERSION
2018-09-10 00:59:53 +02:00
from .simple_config import SimpleConfig
2017-01-07 16:58:23 +01:00
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
2014-07-30 10:43:15 +02:00
2013-09-12 08:41:27 +02:00
2014-02-11 09:48:02 +01:00
def parse_servers(result):
    """Parse a 'server.peers.subscribe' result into a hostmap.

    Each item looks like [ip, hostname, [feature_str, ...]] where features
    are e.g. 's50002' (ssl port), 't50001' (tcp port), 'v1.4' (protocol
    version), 'p10000' (pruning limit).  Returns
    {hostname: {'s': port, 't': port, 'pruning': str, 'version': str}}.
    Hosts advertising no usable protocol are omitted.
    """
    servers = {}
    for item in result:
        host = item[1]
        out = {}
        version = None
        pruning_level = '-'
        if len(item) > 2:
            for v in item[2]:
                # raw strings: '\d' in a non-raw literal is an invalid escape
                if re.match(r"[st]\d*", v):
                    protocol, port = v[0], v[1:]
                    # bare 's'/'t' means "default port for that protocol"
                    if port == '': port = constants.net.DEFAULT_PORTS[protocol]
                    out[protocol] = port
                elif re.match(r"v(.?)+", v):
                    version = v[1:]
                elif re.match(r"p\d*", v):
                    pruning_level = v[1:]
                if pruning_level == '': pruning_level = '0'
        if out:
            out['pruning'] = pruning_level
            out['version'] = version
            servers[host] = out
    return servers
2018-06-01 14:03:22 +02:00
def filter_version(servers):
    """Keep only the servers whose advertised protocol version is at least
    our PROTOCOL_VERSION; entries with missing/unparseable versions are dropped."""
    def _recent(ver):
        try:
            return util.versiontuple(ver) >= util.versiontuple(PROTOCOL_VERSION)
        except Exception:
            # missing or malformed version string -> not recent
            return False
    out = {}
    for server, info in servers.items():
        if _recent(info.get('version')):
            out[server] = info
    return out
def filter_noonion(servers):
    """Return the hostmap with all Tor hidden-service (.onion) hosts removed."""
    out = {}
    for host, info in servers.items():
        if not host.endswith('.onion'):
            out[host] = info
    return out
2018-06-01 14:03:22 +02:00
def filter_protocol(hostmap, protocol='s'):
    '''Filters the hostmap for those implementing protocol.
    The result is a list in serialized form.'''
    # a host qualifies when it advertises a truthy port for the protocol
    return [serialize_server(host, ports[protocol], protocol)
            for host, ports in hostmap.items()
            if ports.get(protocol)]
2018-06-01 14:03:22 +02:00
2017-01-07 16:58:23 +01:00
def pick_random_server(hostmap=None, protocol='s', exclude_set=None):
    """Pick a random server (serialized form) from hostmap that implements
    protocol and is not in exclude_set; None if nothing is eligible.

    hostmap defaults to the hardcoded DEFAULT_SERVERS.  exclude_set now
    defaults to None instead of a shared mutable `set()` default, which is
    a Python anti-pattern (one set object shared across all calls).
    """
    if hostmap is None:
        hostmap = constants.net.DEFAULT_SERVERS
    if exclude_set is None:
        exclude_set = set()
    eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
    return random.choice(eligible) if eligible else None
2018-06-01 14:03:22 +02:00
2018-09-10 00:59:53 +02:00
class NetworkParameters(NamedTuple):
    """User-visible network settings: the chosen server endpoint, the proxy
    configuration (or None), and the auto-connect policy."""
    host: str
    port: str
    protocol: str
    proxy: Optional[dict]
    auto_connect: bool


# Proxy types understood by serialize_proxy/deserialize_proxy.
proxy_modes = ['socks4', 'socks5']
2017-01-30 12:36:56 +03:00
def serialize_proxy(p):
    """Encode a proxy dict as 'mode:host:port:user:password'; None if the
    argument is not a dict (e.g. no proxy configured)."""
    if not isinstance(p, dict):
        return None
    fields = [p.get('mode'), p.get('host'), p.get('port'),
              p.get('user', ''), p.get('password', '')]
    return ':'.join(fields)
2017-01-30 12:36:56 +03:00
2018-09-10 00:59:53 +02:00
def deserialize_proxy(s: str) -> Optional[dict]:
    """Parse a 'mode:host:port:user:password' string into a proxy dict.

    Any trailing fields may be omitted; mode defaults to socks5, host to
    localhost, port to 1080 (8080 for http).  Returns None for non-strings
    or the literal string 'none' (any case).
    """
    if not isinstance(s, str):
        return None
    if s.lower() == 'none':
        return None
    proxy = {"mode": "socks5", "host": "localhost"}
    # FIXME raw IPv6 address fails here
    parts = s.split(':')
    idx = 0
    if parts[idx] in proxy_modes:
        proxy["mode"] = parts[idx]
        idx += 1
    if idx < len(parts):
        proxy["host"] = parts[idx]
        idx += 1
    if idx < len(parts):
        proxy["port"] = parts[idx]
        idx += 1
    else:
        proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
    if idx < len(parts):
        proxy["user"] = parts[idx]
        idx += 1
    if idx < len(parts):
        proxy["password"] = parts[idx]
    return proxy
2014-07-24 23:14:47 +02:00
2017-01-30 12:36:56 +03:00
INSTANCE = None
class Network(PrintError):
    """The Network class manages a set of connections to remote electrum
    servers, each connected socket is handled by an Interface() object.
    Connections are initiated by a Connection() thread which stops once
    the connection succeeds or fails.
    Our external API:
    - Member functions get_header(), get_interfaces(), get_local_height(),
      get_parameters(), get_server_height(), get_status_value(),
      is_connected(), set_parameters(), stop()
    """
    # Log-filter tag used by the PrintError mixin for this class's output.
    verbosity_filter = 'n'
2013-09-08 17:23:01 +02:00
2015-12-03 11:18:10 +01:00
def __init__(self, config=None):
    """Create the network manager and immediately start connecting.

    config may be a dict (wrapped into a SimpleConfig) or a SimpleConfig.
    Side effects: registers self as the module singleton, loads blockchains
    and the recent-server list from disk, and calls start_network().
    """
    global INSTANCE
    INSTANCE = self
    if config is None:
        config = {}  # Do not use mutables as default values!
    self.config = SimpleConfig(config) if isinstance(config, dict) else config
    # 'oneserver' mode disables the pool of redundant connections
    self.num_server = 10 if not self.config.get('oneserver') else 0
    blockchain.blockchains = blockchain.read_blockchains(self.config)  # note: needs self.blockchains_lock
    self.print_error("blockchains", list(blockchain.blockchains.keys()))
    # NOTE(review): reads the raw `config` argument, not self.config — confirm intended
    self.blockchain_index = config.get('blockchain_index', 0)
    if self.blockchain_index not in blockchain.blockchains.keys():
        self.blockchain_index = 0
    # Server for addresses and transactions
    self.default_server = self.config.get('server', None)
    # Sanitize default server
    if self.default_server:
        try:
            deserialize_server(self.default_server)
        except:
            self.print_error('Warning: failed to parse server-string; falling back to random.')
            self.default_server = None
    if not self.default_server:
        self.default_server = pick_random_server()
    # locks: if you need to take multiple ones, acquire them in the order they are defined here!
    self.bhi_lock = asyncio.Lock()
    self.interface_lock = threading.RLock()            # <- re-entrant
    self.callback_lock = threading.Lock()
    self.recent_servers_lock = threading.RLock()       # <- re-entrant
    self.blockchains_lock = threading.Lock()
    self.server_peers = {}  # returned by interface (servers that the main interface knows about)
    self.recent_servers = self.read_recent_servers()  # note: needs self.recent_servers_lock
    self.banner = ''
    self.donation_address = ''
    self.relay_fee = None
    # callbacks set by the GUI
    self.callbacks = defaultdict(list)  # note: needs self.callback_lock
    dir_path = os.path.join(self.config.path, 'certs')
    util.make_dir(dir_path)
    # retry times
    self.server_retry_time = time.time()
    self.nodes_retry_time = time.time()
    # kick off the network. interface is the main server we are currently
    # communicating with. interfaces is the set of servers we are connecting
    # to or have an ongoing connection with
    self.interface = None   # note: needs self.interface_lock
    self.interfaces = {}    # note: needs self.interface_lock
    self.auto_connect = self.config.get('auto_connect', True)
    self.connecting = set()
    self.server_queue = None
    self.server_queue_group = None
    self.asyncio_loop = asyncio.get_event_loop()
    self.start_network(deserialize_server(self.default_server)[2],
                       deserialize_proxy(self.config.get('proxy')))
@staticmethod
def get_instance():
    """Return the most recently created Network instance, or None."""
    return INSTANCE
2018-06-21 21:06:56 +02:00
def with_interface_lock(func):
    """Method decorator: run the wrapped method holding self.interface_lock.
    The lock is re-entrant, so decorated methods may call each other.
    NOTE(review): no functools.wraps, so the wrapper hides func's name/doc."""
    def func_wrapper(self, *args, **kwargs):
        with self.interface_lock:
            return func(self, *args, **kwargs)
    return func_wrapper
def with_recent_servers_lock(func):
    """Method decorator: run the wrapped method holding self.recent_servers_lock
    (re-entrant).  NOTE(review): no functools.wraps on the wrapper."""
    def func_wrapper(self, *args, **kwargs):
        with self.recent_servers_lock:
            return func(self, *args, **kwargs)
    return func_wrapper
def register_callback(self, callback, events):
    """Subscribe callback to each event name in events (GUI hook)."""
    with self.callback_lock:
        for event in events:
            self.callbacks[event].append(callback)
def unregister_callback(self, callback):
    """Remove callback from every event it was registered for."""
    with self.callback_lock:
        for callbacks in self.callbacks.values():
            if callback in callbacks:
                callbacks.remove(callback)
2015-08-30 21:18:10 +09:00
def trigger_callback(self, event, *args):
    """Invoke all callbacks registered for event.

    The callback list is copied under the lock but callbacks run outside it,
    so a callback may (de)register callbacks without deadlocking.  Coroutine
    callbacks are scheduled on the network's event loop.
    """
    with self.callback_lock:
        callbacks = self.callbacks[event][:]
    for callback in callbacks:
        if asyncio.iscoroutinefunction(callback):
            # FIXME: if callback throws, we will lose the traceback
            asyncio.run_coroutine_threadsafe(callback(event, *args), self.asyncio_loop)
        else:
            callback(event, *args)
2015-08-30 21:18:10 +09:00
2015-04-02 10:12:51 +02:00
def read_recent_servers(self):
    """Load the recently-used server list from <config.path>/recent_servers.

    Returns [] when there is no config path, the file is missing/unreadable,
    or its JSON is corrupt.  (Previously a bare `except:`; narrowed to the
    failures that can actually occur here — OSError from open/read and
    ValueError, which json.JSONDecodeError subclasses.)
    """
    if not self.config.path:
        return []
    path = os.path.join(self.config.path, "recent_servers")
    try:
        with open(path, "r", encoding='utf-8') as f:
            data = f.read()
        return json.loads(data)
    except (OSError, ValueError):
        return []
2018-06-21 21:06:56 +02:00
@with_recent_servers_lock
def save_recent_servers(self):
    """Best-effort persist of self.recent_servers to disk.

    A failed write is deliberately ignored (recent servers are a cache);
    the previous bare `except:` is narrowed to OSError, the only expected
    failure from open/write.
    """
    if not self.config.path:
        return
    path = os.path.join(self.config.path, "recent_servers")
    s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
    try:
        with open(path, "w", encoding='utf-8') as f:
            f.write(s)
    except OSError:
        pass
2018-06-21 21:06:56 +02:00
@with_interface_lock
def get_server_height(self):
    """Return the main interface's reported chain tip height, 0 if none."""
    return self.interface.tip if self.interface else 0
def server_is_lagging(self):
    """True when the main server's tip is more than one block behind our
    local chain height (or when it reports no height at all)."""
    sh = self.get_server_height()
    if not sh:
        self.print_error('no height for main interface')
        return True
    lh = self.get_local_height()
    # tolerate being exactly one block behind (propagation delay)
    result = (lh - sh) > 1
    if result:
        self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
    return result
2014-07-24 23:14:47 +02:00
def set_status(self, status):
    """Record the connection status string and notify 'status' listeners."""
    self.connection_status = status
    self.notify('status')
2014-07-24 23:14:47 +02:00
2013-10-04 13:51:46 +02:00
def is_connected(self):
    """True when a main interface exists and its handshake has completed."""
    return self.interface is not None and self.interface.ready.done()
2015-06-03 00:03:33 +09:00
2015-08-30 21:18:10 +09:00
def is_connecting(self):
    """True while a connection attempt to the main server is in flight."""
    return self.connection_status == 'connecting'
async def request_server_info(self, interface):
    """Once interface is ready, fetch banner, donation address, peer list,
    relay fee and fee estimates from it, concurrently in one TaskGroup."""
    await interface.ready
    session = interface.session
    async def get_banner():
        self.banner = await session.send_request('server.banner')
        self.notify('banner')
    async def get_donation_address():
        self.donation_address = await session.send_request('server.donation_address')
    async def get_server_peers():
        self.server_peers = parse_servers(await session.send_request('server.peers.subscribe'))
        self.notify('servers')
    async def get_relay_fee():
        relayfee = await session.send_request('blockchain.relayfee')
        if relayfee is None:
            self.relay_fee = None
        else:
            # server reports BTC/kB; store in satoshis, clamped non-negative
            relayfee = int(relayfee * COIN)
            self.relay_fee = max(0, relayfee)
    async with TaskGroup() as group:
        await group.spawn(get_banner)
        await group.spawn(get_donation_address)
        await group.spawn(get_server_peers)
        await group.spawn(get_relay_fee)
        await group.spawn(self.request_fee_estimates(interface))
async def request_fee_estimates(self, interface):
    """Fetch the mempool fee histogram and per-target fee estimates from
    interface, store them in the config, and notify listeners."""
    session = interface.session
    from .simple_config import FEE_ETA_TARGETS
    # record the request time so the caller doesn't re-request too often
    self.config.requested_fee_estimates()
    async with TaskGroup() as group:
        histogram_task = await group.spawn(session.send_request('mempool.get_fee_histogram'))
        fee_tasks = []
        for i in FEE_ETA_TARGETS:
            fee_tasks.append((i, await group.spawn(session.send_request('blockchain.estimatefee', [i]))))
    # all tasks completed once the TaskGroup context exits
    self.config.mempool_fees = histogram = histogram_task.result()
    self.print_error('fee_histogram', histogram)
    self.notify('fee_histogram')
    for i, task in fee_tasks:
        # estimatefee returns BTC/kB; convert to satoshis
        fee = int(task.result() * COIN)
        self.config.update_fee_estimates(i, fee)
        self.print_error("fee_estimates[%d]" % i, fee)
    self.notify('fee')
def get_status_value(self, key):
    """Return the current value associated with a notification key.
    NOTE(review): an unknown key leaves `value` unbound and raises
    NameError — callers must pass one of the keys below."""
    if key == 'status':
        value = self.connection_status
    elif key == 'banner':
        value = self.banner
    elif key == 'fee':
        value = self.config.fee_estimates
    elif key == 'fee_histogram':
        value = self.config.mempool_fees
    elif key == 'updated':
        value = (self.get_local_height(), self.get_server_height())
    elif key == 'servers':
        value = self.get_servers()
    elif key == 'interfaces':
        value = self.get_interfaces()
    return value
2014-07-30 10:19:15 +02:00
def notify(self, key):
    """Fire callbacks for key; 'status'/'updated' carry no payload, other
    keys pass their current value from get_status_value()."""
    if key in ['status', 'updated']:
        self.trigger_callback(key)
    else:
        self.trigger_callback(key, self.get_status_value(key))
2013-09-08 17:23:01 +02:00
2018-09-10 00:59:53 +02:00
def get_parameters(self) -> NetworkParameters:
    """Return the current user-facing network settings as a NetworkParameters."""
    host, port, protocol = deserialize_server(self.default_server)
    return NetworkParameters(host, port, protocol, self.proxy, self.auto_connect)
2014-07-25 09:11:56 +02:00
2016-02-15 16:17:07 +01:00
def get_donation_address(self):
    """Return the server's donation address, or None when not connected
    (falls through without an explicit return)."""
    if self.is_connected():
        return self.donation_address
2018-06-21 21:06:56 +02:00
@with_interface_lock
def get_interfaces(self):
    '''The interfaces that are in connected state'''
    return list(self.interfaces.keys())
2013-09-08 17:23:01 +02:00
2018-06-21 21:06:56 +02:00
@with_recent_servers_lock
def get_servers(self):
    """Return a hostmap of all known servers: hardcoded defaults, then
    recently-used servers, then peers reported by the main interface
    (version-filtered), optionally with .onion hosts removed.

    Fix: the previous code assigned `out = constants.net.DEFAULT_SERVERS`
    and then mutated `out`, permanently polluting the shared constants
    module; we now work on a shallow copy.
    """
    # start with hardcoded servers — copy, never mutate the constant
    out = dict(constants.net.DEFAULT_SERVERS)
    # add recent servers
    for s in self.recent_servers:
        try:
            host, port, protocol = deserialize_server(s)
        except Exception:
            continue  # skip malformed entries from the on-disk list
        if host not in out:
            out[host] = {protocol: port}
    # add servers received from main interface
    if self.server_peers:
        out.update(filter_version(self.server_peers.copy()))
    # potentially filter out some
    if self.config.get('noonion'):
        out = filter_noonion(out)
    return out
2013-09-12 08:41:27 +02:00
2018-06-21 21:06:56 +02:00
@with_interface_lock
def start_interface(self, server):
    """Queue a connection attempt to server unless one exists or is pending."""
    if server not in self.interfaces and server not in self.connecting:
        if server == self.default_server:
            self.print_error("connecting to %s as new interface" % server)
            self.set_status('connecting')
        self.connecting.add(server)
        # maintain_sessions() drains this queue and spawns new_interface()
        self.server_queue.put(server)
def start_random_interface(self):
    """Start connecting to a random eligible server not already known or
    disconnected; return the chosen server string (or None)."""
    with self.interface_lock:
        exclude_set = self.disconnected_servers.union(set(self.interfaces))
        server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
        if server:
            self.start_interface(server)
        return server
2018-09-10 00:59:53 +02:00
def set_proxy(self, proxy: Optional[dict]):
    """Install proxy settings, monkey-patching socket.getaddrinfo:
    with a proxy, DNS resolution is skipped entirely to avoid leaks;
    without one, the platform-appropriate resolver is restored."""
    self.proxy = proxy
    # Store these somewhere so we can un-monkey-patch
    if not hasattr(socket, "_getaddrinfo"):
        socket._getaddrinfo = socket.getaddrinfo
    if proxy:
        self.print_error('setting proxy', proxy)
        # prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
        socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
    else:
        if sys.platform == 'win32':
            # On Windows, socket.getaddrinfo takes a mutex, and might hold it for up to 10 seconds
            # when dns-resolving. To speed it up drastically, we resolve dns ourselves, outside that lock.
            # see #4421
            socket.getaddrinfo = self._fast_getaddrinfo
        else:
            socket.getaddrinfo = socket._getaddrinfo
    self.trigger_callback('proxy_set', self.proxy)
2015-03-13 12:00:08 +01:00
2018-07-02 00:59:28 +02:00
@staticmethod
def _fast_getaddrinfo(host, *args, **kwargs):
    """getaddrinfo replacement (Windows): resolve DNS via dnspython outside
    the OS resolver's lock, then delegate to the saved socket._getaddrinfo."""
    def needs_dns_resolving(host2):
        try:
            ipaddress.ip_address(host2)
            return False  # already valid IP
        except ValueError:
            pass  # not an IP
        # NOTE(review): checks the outer `host`, not `host2`; equivalent here
        # since the only call site passes host — confirm if reused
        if str(host) in ('localhost', 'localhost.',):
            return False
        return True
    try:
        if needs_dns_resolving(host):
            answers = dns.resolver.query(host)
            addr = str(answers[0])
        else:
            addr = host
    except dns.exception.DNSException:
        # dns failed for some reason, e.g. dns.resolver.NXDOMAIN
        # this is normal. Simply report back failure:
        raise socket.gaierror(11001, 'getaddrinfo failed')
    except BaseException as e:
        # Possibly internal error in dnspython :( see #4483
        # Fall back to original socket.getaddrinfo to resolve dns.
        print_error('dnspython failed to resolve dns with error:', e)
        addr = host
    return socket._getaddrinfo(addr, *args, **kwargs)
2018-06-21 21:06:56 +02:00
@with_interface_lock
def start_network(self, protocol: str, proxy: Optional[dict]):
    """Bring the network up from a fully-stopped state: reset bookkeeping,
    start the server queue worker, apply the proxy, and connect to the
    default server.  Asserts guard against double-start."""
    assert not self.interface and not self.interfaces
    assert not self.connecting and not self.server_queue
    assert not self.server_queue_group
    self.print_error('starting network')
    self.disconnected_servers = set([])  # note: needs self.interface_lock
    self.protocol = protocol
    self._init_server_queue()
    self.set_proxy(proxy)
    self.start_interface(self.default_server)
def _init_server_queue(self):
    """Create the pending-connection queue and a TaskGroup that stays open
    forever (until cancelled) to host new_interface() tasks."""
    self.server_queue = queue.Queue()
    self.server_queue_group = server_queue_group = TaskGroup()
    async def job():
        # keep the group alive indefinitely; _stop_server_queue cancels it
        forever = asyncio.Event()
        async with server_queue_group as group:
            await group.spawn(forever.wait())
    asyncio.run_coroutine_threadsafe(job(), self.asyncio_loop)
2018-06-21 21:06:56 +02:00
@with_interface_lock
def stop_network(self):
    """Close every interface (including the main one), clear pending
    connections, and tear down the server queue."""
    self.print_error("stopping network")
    for interface in list(self.interfaces.values()):
        self.close_interface(interface)
    if self.interface:
        self.close_interface(self.interface)
    assert self.interface is None
    assert not self.interfaces
    self.connecting.clear()
    self._stop_server_queue()
def _stop_server_queue(self):
    """Drop the pending-connection queue and cancel its task group."""
    # Get a new queue - no old pending connections thanks!
    self.server_queue = None
    asyncio.run_coroutine_threadsafe(self.server_queue_group.cancel_remaining(), self.asyncio_loop)
    self.server_queue_group = None
2018-09-10 00:59:53 +02:00
def set_parameters(self, net_params: NetworkParameters):
    """Apply new user settings: validate, persist to config, then either
    restart the network (proxy/protocol changed), switch servers (server
    changed), or just re-check for a lagging interface."""
    proxy = net_params.proxy
    proxy_str = serialize_proxy(proxy)
    host, port, protocol = net_params.host, net_params.port, net_params.protocol
    server_str = serialize_server(host, port, protocol)
    # sanitize parameters
    try:
        deserialize_server(serialize_server(host, port, protocol))
        if proxy:
            # raises if mode unknown or port not an int
            proxy_modes.index(proxy["mode"]) + 1
            int(proxy['port'])
    except:
        return
    self.config.set_key('auto_connect', net_params.auto_connect, False)
    self.config.set_key("proxy", proxy_str, False)
    self.config.set_key("server", server_str, True)
    # abort if changes were not allowed by config
    if self.config.get('server') != server_str or self.config.get('proxy') != proxy_str:
        return
    self.auto_connect = net_params.auto_connect
    if self.proxy != proxy or self.protocol != protocol:
        # Restart the network defaulting to the given server
        with self.interface_lock:
            self.stop_network()
            self.default_server = server_str
            self.start_network(protocol, proxy)
    elif self.default_server != server_str:
        self.switch_to_interface(server_str)
    else:
        self.switch_lagging_interface()
        self.notify('updated')
2013-10-05 10:01:33 +02:00
def switch_to_random_interface(self):
    '''Switch to a random connected server other than the current one'''
    servers = self.get_interfaces()  # Those in connected state
    if self.default_server in servers:
        servers.remove(self.default_server)
    if servers:
        self.switch_to_interface(random.choice(servers))
2018-06-21 21:06:56 +02:00
@with_interface_lock
def switch_lagging_interface(self):
    '''If auto_connect and lagging, switch interface'''
    if self.server_is_lagging() and self.auto_connect:
        # switch to one that has the correct header (not height)
        header = self.blockchain().read_header(self.get_local_height())
        def filt(x):
            # x is an (server, interface) item; compare its tip header to ours
            a = x[1].tip_header
            b = header
            assert type(a) is type(b)
            return a == b
        filtered = list(map(lambda x: x[0], filter(filt, self.interfaces.items())))
        if filtered:
            choice = random.choice(filtered)
            self.switch_to_interface(choice)
2018-06-21 21:06:56 +02:00
@with_interface_lock
def switch_to_interface(self, server):
    '''Switch to server as our interface. If no connection exists nor
    being opened, start a thread to connect. The actual switch will
    happen on receipt of the connection notification. Do nothing
    if server already is our interface.'''
    self.default_server = server
    if server not in self.interfaces:
        self.interface = None
        self.start_interface(server)
        return

    i = self.interfaces[server]
    if self.interface != i:
        self.print_error("switching to", server)
        if self.interface is not None:
            # Stop any current interface in order to terminate subscriptions,
            # and to cancel tasks in interface.group.
            # However, for headers sub, give preference to this interface
            # over unknown ones, i.e. start it again right away.
            old_server = self.interface.server
            self.close_interface(self.interface)
            if old_server != server and len(self.interfaces) <= self.num_server:
                self.start_interface(old_server)
        self.interface = i
        # kick off server-info requests on the new main interface
        asyncio.get_event_loop().create_task(
            i.group.spawn(self.request_server_info(i)))
        self.trigger_callback('default_server_changed')
        self.set_status('connected')
        self.notify('updated')
        self.notify('interfaces')
2013-10-02 12:13:07 +02:00
2018-06-21 21:06:56 +02:00
@with_interface_lock
def close_interface(self, interface):
    """Remove interface from the pool (clearing the main-interface slot if
    it was the default server) and close its connection."""
    if interface:
        if interface.server in self.interfaces:
            self.interfaces.pop(interface.server)
        if interface.server == self.default_server:
            self.interface = None
        interface.close()
2018-06-21 21:06:56 +02:00
@with_recent_servers_lock
def add_recent_server(self, server):
    """Move server to the front of the most-recently-used list (capped
    at 20 entries) and persist it."""
    # list is ordered
    if server in self.recent_servers:
        self.recent_servers.remove(server)
    self.recent_servers.insert(0, server)
    self.recent_servers = self.recent_servers[0:20]
    self.save_recent_servers()
2013-09-10 19:59:58 +02:00
2018-06-21 21:06:56 +02:00
@with_interface_lock
def connection_down(self, server):
    '''A connection to server either went down, or was never made.
    We distinguish by whether it is in self.interfaces.'''
    self.disconnected_servers.add(server)
    if server == self.default_server:
        self.set_status('disconnected')
    if server in self.interfaces:
        self.close_interface(self.interfaces[server])
    self.notify('interfaces')
2018-08-14 17:50:20 +02:00
@aiosafe
async def new_interface(self, server):
    """Open a connection to server; on success register it (switching to it
    if it is the default server), on failure mark the server down.  In
    either case the server is removed from self.connecting."""
    interface = Interface(self, server, self.config.path, self.proxy)
    # proxied connections are slower to establish
    timeout = 10 if not self.proxy else 20
    try:
        await asyncio.wait_for(interface.ready, timeout)
    except BaseException as e:
        #import traceback
        #traceback.print_exc()
        self.print_error(interface.server, "couldn't launch because", str(e), str(type(e)))
        # note: connection_down will not call interface.close() as
        # interface is not yet in self.interfaces. OTOH, calling
        # interface.close() here will sometimes raise deep inside the
        # asyncio internal select.select... instead, interface will close
        # itself when it detects the cancellation of interface.ready;
        # however this might take several seconds...
        self.connection_down(interface.server)
        return
    finally:
        try: self.connecting.remove(server)
        except KeyError: pass

    with self.interface_lock:
        self.interfaces[server] = interface
    if server == self.default_server:
        self.switch_to_interface(server)
    self.add_recent_server(server)
    self.notify('interfaces')
2015-06-03 00:03:33 +09:00
def init_headers_file(self):
    """Ensure the headers file of the base blockchain is pre-allocated to
    cover all checkpointed chunks (80-byte headers, 2016 per chunk)."""
    b = blockchain.blockchains[0]
    filename = b.path()
    length = 80 * len(constants.net.CHECKPOINTS) * 2016
    if not os.path.exists(filename) or os.path.getsize(filename) < length:
        with open(filename, 'wb') as f:
            if length>0:
                # sparse-extend the file to the target size
                f.seek(length-1)
                f.write(b'\x00')
    with b.lock:
        b.update_size()
2018-08-14 17:50:20 +02:00
async def get_merkle_for_transaction(self, tx_hash, tx_height):
    """Ask the main server for the merkle proof of tx_hash at tx_height."""
    return await self.interface.session.send_request('blockchain.transaction.get_merkle', [tx_hash, tx_height])
def broadcast_transaction_from_non_network_thread(self, tx, timeout=10):
    """Synchronous wrapper for broadcast_transaction."""
    # note: calling this from the network thread will deadlock it
    fut = asyncio.run_coroutine_threadsafe(self.broadcast_transaction(tx, timeout=timeout), self.asyncio_loop)
    return fut.result()
async def broadcast_transaction(self, tx, timeout=10):
    """Broadcast tx via the main server.

    Returns (True, txid) on success, else (False, "error: ...").  The
    server's reply must echo the txid; anything else is treated as an error
    message.
    """
    try:
        out = await self.interface.session.send_request('blockchain.transaction.broadcast', [str(tx)], timeout=timeout)
    except asyncio.TimeoutError as e:
        return False, "error: operation timed out"
    except Exception as e:
        return False, "error: " + str(e)
    if out != tx.txid():
        return False, "error: " + out
    return True, out
2018-08-14 17:50:20 +02:00
async def request_chunk(self, height, tip=None, *, can_return_early=False):
    """Delegate a header-chunk download to the main interface."""
    return await self.interface.request_chunk(height, tip=tip, can_return_early=can_return_early)
2018-08-14 17:50:20 +02:00
2018-06-21 21:06:56 +02:00
@with_interface_lock
def blockchain(self):
    """Return the blockchain we are following; tracks the main interface's
    fork when one is known."""
    if self.interface and self.interface.blockchain is not None:
        self.blockchain_index = self.interface.blockchain.forkpoint
    return blockchain.blockchains[self.blockchain_index]
2014-03-10 20:53:05 +01:00
2018-06-21 21:06:56 +02:00
@with_interface_lock
def get_blockchains(self):
    """Map blockchain key -> list of interfaces currently on that chain;
    chains with no interface are omitted."""
    out = {}
    with self.blockchains_lock:
        blockchain_items = list(blockchain.blockchains.items())
    for k, b in blockchain_items:
        r = list(filter(lambda i: i.blockchain==b, list(self.interfaces.values())))
        if r:
            out[k] = r
    return out
def follow_chain(self, index):
    """Start following the blockchain fork with the given index: persist the
    choice, switch to an interface on that chain if one exists, then update
    the network parameters to match the (possibly new) main server.
    Raises if no blockchain with that index is known."""
    bc = blockchain.blockchains.get(index)
    if bc:
        self.blockchain_index = index
        self.config.set_key('blockchain_index', index)
        with self.interface_lock:
            interfaces = list(self.interfaces.values())
        for i in interfaces:
            if i.blockchain == bc:
                self.switch_to_interface(i.server)
                break
    else:
        raise Exception('blockchain not found', index)

    with self.interface_lock:
        if self.interface:
            net_params = self.get_parameters()
            host, port, protocol = deserialize_server(self.interface.server)
            net_params = net_params._replace(host=host, port=port, protocol=protocol)
            self.set_parameters(net_params)
2014-03-10 20:53:05 +01:00
def get_local_height(self):
    """Height of the locally-stored chain we are following."""
    return self.blockchain().height()
2015-08-30 21:18:10 +09:00
def export_checkpoints(self, path):
    """Write the current chain's checkpoints to path as JSON."""
    # run manually from the console to generate checkpoints
    cp = self.blockchain().get_checkpoints()
    with open(path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(cp, indent=4))
def start(self, fx=None):
    """Run the network: spawn maintain_sessions() (and the optional fx
    coroutine, e.g. exchange-rate updates) in a dedicated thread driving
    the asyncio loop."""
    self.main_taskgroup = TaskGroup()
    async def main():
        self.init_headers_file()
        async with self.main_taskgroup as group:
            await group.spawn(self.maintain_sessions())
            if fx: await group.spawn(fx)
    self._wrapper_thread = threading.Thread(target=self.asyncio_loop.run_until_complete, args=(main(),))
    self._wrapper_thread.start()
def stop(self):
    """Request shutdown by cancelling the main task group (asynchronous)."""
    asyncio.run_coroutine_threadsafe(self.main_taskgroup.cancel_remaining(), self.asyncio_loop)
def join(self):
    """Wait (up to 1s) for the network thread to finish."""
    self._wrapper_thread.join(1)
async def maintain_sessions(self):
    """Main network loop (runs forever, ~10 Hz): drain the pending-server
    queue, reap errored interfaces, top up the connection pool, retry
    previously-failed servers, keep a main interface selected, and refresh
    fee estimates when due."""
    while True:
        # launch connections queued by start_interface()
        while self.server_queue.qsize() > 0:
            server = self.server_queue.get()
            await self.server_queue_group.spawn(self.new_interface(server))
        # reap interfaces whose network task errored
        remove = []
        for k, i in self.interfaces.items():
            if i.fut.done() and not i.exception:
                assert False, "interface future should not finish without exception"
            if i.exception:
                if not i.fut.done():
                    try: i.fut.cancel()
                    except Exception as e: self.print_error('exception while cancelling fut', e)
                # re-raise purely to log a uniform error message
                try:
                    raise i.exception
                except BaseException as e:
                    self.print_error(i.server, "errored because:", str(e), str(type(e)))
                remove.append(k)
        for k in remove:
            self.connection_down(k)
        # nodes
        now = time.time()
        for i in range(self.num_server - len(self.interfaces) - len(self.connecting)):
            self.start_random_interface()
        if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
            self.print_error('network: retrying connections')
            self.disconnected_servers = set([])
            self.nodes_retry_time = now
        # main interface
        if not self.is_connected():
            if self.auto_connect:
                if not self.is_connecting():
                    self.switch_to_random_interface()
            else:
                if self.default_server in self.disconnected_servers:
                    if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
                        self.disconnected_servers.remove(self.default_server)
                        self.server_retry_time = now
                else:
                    self.switch_to_interface(self.default_server)
        else:
            if self.config.is_fee_estimates_update_required():
                await self.interface.group.spawn(self.request_fee_estimates(self.interface))
        await asyncio.sleep(0.1)