2016-02-23 11:36:42 +01:00
|
|
|
# Electrum - Lightweight Bitcoin Client
|
|
|
|
|
# Copyright (c) 2011-2016 Thomas Voegtlin
|
|
|
|
|
#
|
|
|
|
|
# Permission is hereby granted, free of charge, to any person
|
|
|
|
|
# obtaining a copy of this software and associated documentation files
|
|
|
|
|
# (the "Software"), to deal in the Software without restriction,
|
|
|
|
|
# including without limitation the rights to use, copy, modify, merge,
|
|
|
|
|
# publish, distribute, sublicense, and/or sell copies of the Software,
|
|
|
|
|
# and to permit persons to whom the Software is furnished to do so,
|
|
|
|
|
# subject to the following conditions:
|
|
|
|
|
#
|
|
|
|
|
# The above copyright notice and this permission notice shall be
|
|
|
|
|
# included in all copies or substantial portions of the Software.
|
|
|
|
|
#
|
|
|
|
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
|
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
|
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
|
|
|
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
|
|
|
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
|
|
|
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
|
|
|
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
|
# SOFTWARE.
|
2018-08-29 18:41:51 +02:00
|
|
|
import concurrent.futures
|
2015-01-29 11:32:58 +01:00
|
|
|
import time
|
2017-09-04 14:43:31 +02:00
|
|
|
import queue
|
2015-01-29 11:32:58 +01:00
|
|
|
import os
|
|
|
|
|
import random
|
2017-11-12 22:54:04 -06:00
|
|
|
import re
|
|
|
|
|
from collections import defaultdict
|
2017-05-29 09:03:39 +02:00
|
|
|
import threading
|
2015-03-12 12:56:06 +01:00
|
|
|
import socket
|
2015-04-02 10:12:51 +02:00
|
|
|
import json
|
2018-07-01 23:53:55 +02:00
|
|
|
import sys
|
2018-07-02 00:59:28 +02:00
|
|
|
import ipaddress
|
2018-09-08 00:25:38 +02:00
|
|
|
import asyncio
|
|
|
|
|
import concurrent.futures
|
2015-01-29 11:32:58 +01:00
|
|
|
|
2018-07-01 23:53:55 +02:00
|
|
|
import dns
|
|
|
|
|
import dns.resolver
|
2018-09-08 15:36:16 +02:00
|
|
|
from aiorpcx import TaskGroup
|
2018-07-01 23:53:55 +02:00
|
|
|
|
2017-01-22 21:25:24 +03:00
|
|
|
from . import util
|
2018-08-14 17:50:20 +02:00
|
|
|
from .util import PrintError, print_error, aiosafe, bfh
|
2018-06-19 17:36:01 +01:00
|
|
|
from .bitcoin import COIN
|
2018-03-04 22:10:59 +01:00
|
|
|
from . import constants
|
2017-01-22 21:25:24 +03:00
|
|
|
from . import blockchain
|
2018-08-16 18:16:25 +02:00
|
|
|
from .interface import Interface
|
2018-08-29 18:41:51 +02:00
|
|
|
from .version import PROTOCOL_VERSION
|
2017-01-07 16:58:23 +01:00
|
|
|
|
2015-04-02 15:12:01 +02:00
|
|
|
# Seconds to wait before retrying previously-disconnected nodes / the main server.
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
|
2014-07-30 10:43:15 +02:00
|
|
|
|
2013-09-12 08:41:27 +02:00
|
|
|
|
2014-02-11 09:48:02 +01:00
|
|
|
def parse_servers(result):
    """Parse a server list (as returned by 'server.peers.subscribe') into a dict.

    Each item looks like [ip, host, feature_list]; features encode protocol
    ports ('s50002', 't50001'), the version ('v1.2') and pruning ('p100').
    Returns {host: {'s': port, 't': port, 'pruning': level, 'version': ver}},
    skipping hosts that advertise no usable protocol port.
    """
    servers = {}
    for item in result:
        host = item[1]
        out = {}
        version = None
        pruning_level = '-'
        if len(item) > 2:
            for v in item[2]:
                # NOTE: raw strings so that \d is a regex digit class,
                # not an (invalid) string escape.
                if re.match(r"[st]\d*", v):
                    protocol, port = v[0], v[1:]
                    if port == '':
                        # missing port -> use network default for this protocol
                        port = constants.net.DEFAULT_PORTS[protocol]
                    out[protocol] = port
                elif re.match(r"v(.?)+", v):
                    version = v[1:]
                elif re.match(r"p\d*", v):
                    pruning_level = v[1:]
                    if pruning_level == '':
                        pruning_level = '0'
        if out:
            out['pruning'] = pruning_level
            out['version'] = version
            servers[host] = out
    return servers
|
|
|
|
|
|
2018-06-01 14:03:22 +02:00
|
|
|
|
2017-09-07 09:41:21 +02:00
|
|
|
def filter_version(servers):
    """Return only those servers advertising a recent-enough protocol version."""
    def _recent(ver):
        # Missing or malformed version strings are treated as too old.
        try:
            ok = util.versiontuple(ver) >= util.versiontuple(PROTOCOL_VERSION)
        except Exception:
            ok = False
        return ok
    return {host: info for host, info in servers.items() if _recent(info.get('version'))}
|
|
|
|
|
|
|
|
|
|
|
2018-07-31 20:25:53 +02:00
|
|
|
def filter_noonion(servers):
    """Drop Tor hidden-service ('.onion') hosts from a server dict."""
    filtered = {}
    for host, info in servers.items():
        if not host.endswith('.onion'):
            filtered[host] = info
    return filtered
|
|
|
|
|
|
|
|
|
|
|
2018-06-01 14:03:22 +02:00
|
|
|
def filter_protocol(hostmap, protocol='s'):
    '''Filters the hostmap for those implementing protocol.

    The result is a list in serialized form.'''
    return [serialize_server(host, portmap.get(protocol), protocol)
            for host, portmap in hostmap.items()
            if portmap.get(protocol)]
|
|
|
|
|
|
2018-06-01 14:03:22 +02:00
|
|
|
|
2017-01-07 16:58:23 +01:00
|
|
|
def pick_random_server(hostmap=None, protocol='s', exclude_set=None):
    """Pick a random server implementing *protocol*, in serialized form.

    hostmap defaults to the network's hardcoded server list; servers in
    exclude_set are never returned. Returns None if nothing is eligible.
    """
    # Avoid mutable default arguments: a shared default set() object would be
    # reused across calls.
    if exclude_set is None:
        exclude_set = set()
    if hostmap is None:
        hostmap = constants.net.DEFAULT_SERVERS
    eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
    return random.choice(eligible) if eligible else None
|
2013-09-18 16:55:19 +00:00
|
|
|
|
2018-06-01 14:03:22 +02:00
|
|
|
|
2017-01-22 21:25:24 +03:00
|
|
|
from .simple_config import SimpleConfig
|
2013-09-08 17:23:01 +02:00
|
|
|
|
2018-09-06 16:18:45 +02:00
|
|
|
# Supported proxy protocols. Order matters: set_parameters/set_proxy use
# proxy_modes.index(mode) + 1 as the socks version number.
proxy_modes = ['socks4', 'socks5']
|
2015-03-12 12:56:06 +01:00
|
|
|
|
2017-01-30 12:36:56 +03:00
|
|
|
|
2015-03-12 12:56:06 +01:00
|
|
|
def serialize_proxy(p):
    """Encode a proxy dict as 'mode:host:port:user:password', or None if p is not a dict."""
    if not isinstance(p, dict):
        return None
    fields = [p.get('mode'), p.get('host'), p.get('port'),
              p.get('user', ''), p.get('password', '')]
    return ':'.join(fields)
|
2015-03-12 12:56:06 +01:00
|
|
|
|
2017-01-30 12:36:56 +03:00
|
|
|
|
2015-03-12 12:56:06 +01:00
|
|
|
def deserialize_proxy(s):
    """Decode a 'mode:host:port:user:password' string into a proxy dict.

    Inverse of serialize_proxy(). All fields are optional; sensible defaults
    are filled in. Returns None for non-strings and for the literal 'none'.
    """
    if not isinstance(s, str):
        return None
    if s.lower() == 'none':
        return None
    proxy = {"mode": "socks5", "host": "localhost"}
    args = s.split(':')
    idx = 0
    # leading field may optionally be a recognized proxy mode
    if args[idx] in proxy_modes:
        proxy["mode"] = args[idx]
        idx += 1
    if idx < len(args):
        proxy["host"] = args[idx]
        idx += 1
    if idx < len(args):
        proxy["port"] = args[idx]
        idx += 1
    else:
        proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
    if idx < len(args):
        proxy["user"] = args[idx]
        idx += 1
    if idx < len(args):
        proxy["password"] = args[idx]
    return proxy
|
2014-07-24 23:14:47 +02:00
|
|
|
|
2017-01-30 12:36:56 +03:00
|
|
|
|
2015-03-12 18:06:28 +01:00
|
|
|
def deserialize_server(server_str):
    """Split 'host:port:protocol' into its parts, validating port and protocol.

    Raises ValueError for an unknown protocol or a non-integer port.
    """
    parts = str(server_str).rsplit(':', 2)
    host, port, protocol = parts
    if protocol not in 'st':
        raise ValueError('invalid network protocol: {}'.format(protocol))
    int(port)  # validation only: raises if port is not an integer
    return host, port, protocol
|
|
|
|
|
|
2017-01-30 12:36:56 +03:00
|
|
|
|
2015-03-12 18:06:28 +01:00
|
|
|
def serialize_server(host, port, protocol):
    """Join host, port and protocol into the canonical 'host:port:protocol' form."""
    return ':'.join([host, port, protocol])
|
|
|
|
|
|
2018-09-07 11:34:56 +02:00
|
|
|
# Module-level singleton: set to the most recently constructed Network
# (see Network.__init__ / Network.get_instance()).
INSTANCE = None
|
|
|
|
|
|
2018-08-15 19:01:28 +02:00
|
|
|
class Network(PrintError):
|
2015-06-03 00:03:33 +09:00
|
|
|
"""The Network class manages a set of connections to remote electrum
|
|
|
|
|
servers, each connected socket is handled by an Interface() object.
|
|
|
|
|
Connections are initiated by a Connection() thread which stops once
|
|
|
|
|
the connection succeeds or fails.
|
|
|
|
|
|
|
|
|
|
Our external API:
|
2015-05-22 10:36:45 +09:00
|
|
|
|
2015-08-30 21:18:10 +09:00
|
|
|
- Member functions get_header(), get_interfaces(), get_local_height(),
|
|
|
|
|
get_parameters(), get_server_height(), get_status_value(),
|
2016-02-21 17:42:33 +01:00
|
|
|
is_connected(), set_parameters(), stop()
|
2015-05-22 10:36:45 +09:00
|
|
|
"""
|
2018-07-18 13:31:41 +02:00
|
|
|
verbosity_filter = 'n'
|
2013-09-08 17:23:01 +02:00
|
|
|
|
2015-12-03 11:18:10 +01:00
|
|
|
    def __init__(self, config=None):
        """Create the Network and kick off networking.

        ``config`` may be a plain dict (wrapped in a SimpleConfig) or an
        existing SimpleConfig. Registers itself as the module singleton.
        """
        global INSTANCE
        INSTANCE = self
        if config is None:
            config = {}  # Do not use mutables as default values!
        self.config = SimpleConfig(config) if isinstance(config, dict) else config
        # 'oneserver' mode disables extra peer connections
        self.num_server = 10 if not self.config.get('oneserver') else 0
        blockchain.blockchains = blockchain.read_blockchains(self.config)  # note: needs self.blockchains_lock
        self.print_error("blockchains", list(blockchain.blockchains.keys()))
        self.blockchain_index = config.get('blockchain_index', 0)
        if self.blockchain_index not in blockchain.blockchains.keys():
            self.blockchain_index = 0
        # Server for addresses and transactions
        self.default_server = self.config.get('server', None)
        # Sanitize default server
        if self.default_server:
            try:
                deserialize_server(self.default_server)
            except:
                self.print_error('Warning: failed to parse server-string; falling back to random.')
                self.default_server = None
        if not self.default_server:
            self.default_server = pick_random_server()

        # locks: if you need to take multiple ones, acquire them in the order they are defined here!
        self.bhi_lock = asyncio.Lock()
        self.interface_lock = threading.RLock()            # <- re-entrant
        self.callback_lock = threading.Lock()
        self.pending_sends_lock = threading.Lock()
        self.recent_servers_lock = threading.RLock()       # <- re-entrant
        self.blockchains_lock = threading.Lock()

        self.pending_sends = []
        self.message_id = 0
        self.debug = False
        self.irc_servers = {} # returned by interface (list from irc)
        self.recent_servers = self.read_recent_servers()  # note: needs self.recent_servers_lock

        self.banner = ''
        self.donation_address = ''
        self.relay_fee = None
        # callbacks passed with subscriptions
        self.subscriptions = defaultdict(list)  # note: needs self.callback_lock
        self.sub_cache = {}                     # note: needs self.interface_lock
        # callbacks set by the GUI
        self.callbacks = defaultdict(list)      # note: needs self.callback_lock

        # directory used for per-server TLS certificates
        dir_path = os.path.join(self.config.path, 'certs')
        util.make_dir(dir_path)

        # subscriptions and requests
        self.h2addr = {}
        # Requests from client we've not seen a response to
        self.unanswered_requests = {}
        # retry times
        self.server_retry_time = time.time()
        self.nodes_retry_time = time.time()
        # kick off the network. interface is the main server we are currently
        # communicating with. interfaces is the set of servers we are connecting
        # to or have an ongoing connection with
        self.interface = None  # note: needs self.interface_lock
        self.interfaces = {}   # note: needs self.interface_lock
        self.auto_connect = self.config.get('auto_connect', True)
        self.connecting = set()
        self.requested_chunks = set()
        self.socket_queue = queue.Queue()
        self.start_network(deserialize_server(self.default_server)[2],
                           deserialize_proxy(self.config.get('proxy')))
        self.asyncio_loop = asyncio.get_event_loop()
|
2015-03-12 12:56:06 +01:00
|
|
|
|
2018-09-07 11:34:56 +02:00
|
|
|
    @staticmethod
    def get_instance():
        """Return the most recently constructed Network, or None if none exists."""
        return INSTANCE
|
|
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
def with_interface_lock(func):
|
|
|
|
|
def func_wrapper(self, *args, **kwargs):
|
|
|
|
|
with self.interface_lock:
|
|
|
|
|
return func(self, *args, **kwargs)
|
|
|
|
|
return func_wrapper
|
|
|
|
|
|
|
|
|
|
def with_recent_servers_lock(func):
|
|
|
|
|
def func_wrapper(self, *args, **kwargs):
|
|
|
|
|
with self.recent_servers_lock:
|
|
|
|
|
return func(self, *args, **kwargs)
|
|
|
|
|
return func_wrapper
|
|
|
|
|
|
2015-11-13 22:42:21 +09:00
|
|
|
def register_callback(self, callback, events):
|
2018-06-21 21:06:56 +02:00
|
|
|
with self.callback_lock:
|
2015-11-13 22:42:21 +09:00
|
|
|
for event in events:
|
|
|
|
|
self.callbacks[event].append(callback)
|
|
|
|
|
|
|
|
|
|
def unregister_callback(self, callback):
|
2018-06-21 21:06:56 +02:00
|
|
|
with self.callback_lock:
|
2015-11-13 22:42:21 +09:00
|
|
|
for callbacks in self.callbacks.values():
|
|
|
|
|
if callback in callbacks:
|
|
|
|
|
callbacks.remove(callback)
|
2015-08-30 21:18:10 +09:00
|
|
|
|
2015-11-12 16:08:37 +09:00
|
|
|
def trigger_callback(self, event, *args):
|
2018-06-21 21:06:56 +02:00
|
|
|
with self.callback_lock:
|
2015-08-30 21:18:10 +09:00
|
|
|
callbacks = self.callbacks[event][:]
|
2018-09-07 19:34:28 +02:00
|
|
|
for callback in callbacks:
|
|
|
|
|
if asyncio.iscoroutinefunction(callback):
|
|
|
|
|
asyncio.run_coroutine_threadsafe(callback(event, *args), self.asyncio_loop)
|
|
|
|
|
else:
|
|
|
|
|
callback(event, *args)
|
2015-08-30 21:18:10 +09:00
|
|
|
|
2015-04-02 10:12:51 +02:00
|
|
|
def read_recent_servers(self):
|
|
|
|
|
if not self.config.path:
|
|
|
|
|
return []
|
|
|
|
|
path = os.path.join(self.config.path, "recent_servers")
|
|
|
|
|
try:
|
2018-03-23 21:47:51 +01:00
|
|
|
with open(path, "r", encoding='utf-8') as f:
|
2015-04-02 10:12:51 +02:00
|
|
|
data = f.read()
|
|
|
|
|
return json.loads(data)
|
|
|
|
|
except:
|
|
|
|
|
return []
|
|
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_recent_servers_lock
|
2015-04-02 10:12:51 +02:00
|
|
|
def save_recent_servers(self):
|
|
|
|
|
if not self.config.path:
|
|
|
|
|
return
|
|
|
|
|
path = os.path.join(self.config.path, "recent_servers")
|
|
|
|
|
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
|
|
|
|
|
try:
|
2018-03-23 21:47:51 +01:00
|
|
|
with open(path, "w", encoding='utf-8') as f:
|
2015-04-02 10:12:51 +02:00
|
|
|
f.write(s)
|
|
|
|
|
except:
|
|
|
|
|
pass
|
|
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_interface_lock
|
2014-07-25 16:32:19 +02:00
|
|
|
def get_server_height(self):
|
2017-07-17 09:32:12 +02:00
|
|
|
return self.interface.tip if self.interface else 0
|
2014-07-25 16:32:19 +02:00
|
|
|
|
|
|
|
|
def server_is_lagging(self):
|
2015-05-25 17:45:01 +09:00
|
|
|
sh = self.get_server_height()
|
|
|
|
|
if not sh:
|
2015-03-12 21:39:05 +01:00
|
|
|
self.print_error('no height for main interface')
|
2016-02-15 15:58:08 +01:00
|
|
|
return True
|
2015-05-25 17:45:01 +09:00
|
|
|
lh = self.get_local_height()
|
|
|
|
|
result = (lh - sh) > 1
|
|
|
|
|
if result:
|
|
|
|
|
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
|
|
|
|
|
return result
|
2014-07-25 16:32:19 +02:00
|
|
|
|
2014-07-24 23:14:47 +02:00
|
|
|
    def set_status(self, status):
        """Record the connection status string and broadcast a 'status' notification."""
        self.connection_status = status
        self.notify('status')
|
2014-07-24 23:14:47 +02:00
|
|
|
|
2013-10-04 13:51:46 +02:00
|
|
|
def is_connected(self):
|
2018-08-31 16:46:49 +02:00
|
|
|
return self.interface is not None and self.interface.ready.done()
|
2015-06-03 00:03:33 +09:00
|
|
|
|
2015-08-30 21:18:10 +09:00
|
|
|
    def is_connecting(self):
        """True while we are still trying to reach the main server."""
        return self.connection_status == 'connecting'
|
|
|
|
|
|
2018-08-31 16:46:49 +02:00
|
|
|
    async def request_server_info(self, interface):
        """Once *interface* has completed its handshake, fetch server metadata
        (banner, donation address, peer list) and fee information, notifying
        listeners as each piece arrives.
        """
        await interface.ready
        session = interface.session
        self.banner = await session.send_request('server.banner')
        self.notify('banner')
        self.donation_address = await session.send_request('server.donation_address')
        self.irc_servers = parse_servers(await session.send_request('server.peers.subscribe'))
        self.notify('servers')
        await self.request_fee_estimates(interface)
        relayfee = await session.send_request('blockchain.relayfee')
        # store in satoshis (multiply by COIN); None means the server gave no value
        self.relay_fee = int(relayfee * COIN) if relayfee is not None else None
|
|
|
|
|
|
|
|
|
|
    async def request_fee_estimates(self, interface):
        """Fetch the mempool fee histogram and per-target fee estimates from the
        server, store them on the config, and notify listeners.

        All requests are spawned concurrently in one TaskGroup; results are
        consumed after the group has joined (i.e. once all tasks finished).
        """
        session = interface.session
        from .simple_config import FEE_ETA_TARGETS
        self.config.requested_fee_estimates()
        async with TaskGroup() as group:
            histogram_task = await group.spawn(session.send_request('mempool.get_fee_histogram'))
            fee_tasks = []
            for i in FEE_ETA_TARGETS:
                fee_tasks.append((i, await group.spawn(session.send_request('blockchain.estimatefee', [i]))))
        self.config.mempool_fees = histogram_task.result()
        self.notify('fee_histogram')
        for i, task in fee_tasks:
            # convert the returned coin amount to satoshis
            fee = int(task.result() * COIN)
            self.config.update_fee_estimates(i, fee)
            self.print_error("fee_estimates[%d]" % i, fee)
        self.notify('fee')
|
|
|
|
|
|
2014-07-27 11:33:02 +02:00
|
|
|
def get_status_value(self, key):
|
|
|
|
|
if key == 'status':
|
|
|
|
|
value = self.connection_status
|
|
|
|
|
elif key == 'banner':
|
|
|
|
|
value = self.banner
|
2015-08-04 07:15:54 +02:00
|
|
|
elif key == 'fee':
|
2017-01-09 09:22:17 +01:00
|
|
|
value = self.config.fee_estimates
|
2017-11-22 12:09:56 +01:00
|
|
|
elif key == 'fee_histogram':
|
|
|
|
|
value = self.config.mempool_fees
|
2014-07-27 11:33:02 +02:00
|
|
|
elif key == 'updated':
|
|
|
|
|
value = (self.get_local_height(), self.get_server_height())
|
|
|
|
|
elif key == 'servers':
|
|
|
|
|
value = self.get_servers()
|
|
|
|
|
elif key == 'interfaces':
|
|
|
|
|
value = self.get_interfaces()
|
|
|
|
|
return value
|
|
|
|
|
|
2014-07-30 10:19:15 +02:00
|
|
|
def notify(self, key):
|
2015-08-30 21:18:10 +09:00
|
|
|
if key in ['status', 'updated']:
|
|
|
|
|
self.trigger_callback(key)
|
|
|
|
|
else:
|
2015-11-12 16:08:37 +09:00
|
|
|
self.trigger_callback(key, self.get_status_value(key))
|
2013-09-08 17:23:01 +02:00
|
|
|
|
2014-07-25 09:11:56 +02:00
|
|
|
def get_parameters(self):
|
2015-03-12 18:06:28 +01:00
|
|
|
host, port, protocol = deserialize_server(self.default_server)
|
2015-07-03 11:50:53 +09:00
|
|
|
return host, port, protocol, self.proxy, self.auto_connect
|
2014-07-25 09:11:56 +02:00
|
|
|
|
2016-02-15 16:17:07 +01:00
|
|
|
def get_donation_address(self):
|
|
|
|
|
if self.is_connected():
|
|
|
|
|
return self.donation_address
|
|
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_interface_lock
|
2014-07-25 09:11:56 +02:00
|
|
|
def get_interfaces(self):
|
2015-05-23 22:59:29 +09:00
|
|
|
'''The interfaces that are in connected state'''
|
2017-01-30 12:36:56 +03:00
|
|
|
return list(self.interfaces.keys())
|
2013-09-08 17:23:01 +02:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_recent_servers_lock
|
2013-09-12 08:41:27 +02:00
|
|
|
def get_servers(self):
|
2018-03-04 22:10:59 +01:00
|
|
|
out = constants.net.DEFAULT_SERVERS
|
2014-03-13 19:23:26 +01:00
|
|
|
if self.irc_servers:
|
2017-09-12 15:04:57 +02:00
|
|
|
out.update(filter_version(self.irc_servers.copy()))
|
2014-03-13 19:23:26 +01:00
|
|
|
else:
|
|
|
|
|
for s in self.recent_servers:
|
2015-03-28 19:17:07 +01:00
|
|
|
try:
|
|
|
|
|
host, port, protocol = deserialize_server(s)
|
|
|
|
|
except:
|
|
|
|
|
continue
|
2014-03-13 19:23:26 +01:00
|
|
|
if host not in out:
|
2018-06-01 14:03:22 +02:00
|
|
|
out[host] = {protocol: port}
|
2018-07-31 20:25:53 +02:00
|
|
|
if self.config.get('noonion'):
|
|
|
|
|
out = filter_noonion(out)
|
2013-10-04 14:30:23 +02:00
|
|
|
return out
|
2013-09-12 08:41:27 +02:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_interface_lock
|
2013-09-10 17:52:43 +02:00
|
|
|
def start_interface(self, server):
|
2015-06-03 00:03:33 +09:00
|
|
|
if (not server in self.interfaces and not server in self.connecting):
|
2015-05-22 09:45:51 +09:00
|
|
|
if server == self.default_server:
|
2015-09-01 17:28:10 +09:00
|
|
|
self.print_error("connecting to %s as new interface" % server)
|
2015-05-22 09:45:51 +09:00
|
|
|
self.set_status('connecting')
|
2015-10-17 07:07:10 +02:00
|
|
|
self.connecting.add(server)
|
2018-08-15 19:01:28 +02:00
|
|
|
self.socket_queue.put(server)
|
2013-09-10 17:52:43 +02:00
|
|
|
|
|
|
|
|
def start_random_interface(self):
|
2018-06-21 21:06:56 +02:00
|
|
|
with self.interface_lock:
|
|
|
|
|
exclude_set = self.disconnected_servers.union(set(self.interfaces))
|
2015-05-24 11:14:39 +09:00
|
|
|
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
|
2013-09-10 17:52:43 +02:00
|
|
|
if server:
|
|
|
|
|
self.start_interface(server)
|
2018-08-27 20:39:36 +02:00
|
|
|
return server
|
2013-09-10 17:52:43 +02:00
|
|
|
|
2015-03-12 12:56:06 +01:00
|
|
|
    def set_proxy(self, proxy):
        """Install or remove the proxy by monkey-patching socket.getaddrinfo,
        then broadcast a 'proxy_set' event."""
        self.proxy = proxy
        # Store these somewhere so we can un-monkey-patch
        if not hasattr(socket, "_socketobject"):
            socket._getaddrinfo = socket.getaddrinfo
        if proxy:
            self.print_error('setting proxy', proxy)
            # NOTE(review): proxy_mode is unused here; the .index() call also
            # validates the mode (raises ValueError for an unknown one)
            proxy_mode = proxy_modes.index(proxy["mode"]) + 1
            # prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
            socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
        else:
            if sys.platform == 'win32':
                # On Windows, socket.getaddrinfo takes a mutex, and might hold it for up to 10 seconds
                # when dns-resolving. To speed it up drastically, we resolve dns ourselves, outside that lock.
                # see #4421
                socket.getaddrinfo = self._fast_getaddrinfo
            else:
                # restore the original implementation saved above
                socket.getaddrinfo = socket._getaddrinfo
        self.trigger_callback('proxy_set', self.proxy)
|
2015-03-13 12:00:08 +01:00
|
|
|
|
2018-07-02 00:59:28 +02:00
|
|
|
@staticmethod
|
|
|
|
|
def _fast_getaddrinfo(host, *args, **kwargs):
|
|
|
|
|
def needs_dns_resolving(host2):
|
|
|
|
|
try:
|
|
|
|
|
ipaddress.ip_address(host2)
|
|
|
|
|
return False # already valid IP
|
|
|
|
|
except ValueError:
|
|
|
|
|
pass # not an IP
|
|
|
|
|
if str(host) in ('localhost', 'localhost.',):
|
|
|
|
|
return False
|
|
|
|
|
return True
|
|
|
|
|
try:
|
|
|
|
|
if needs_dns_resolving(host):
|
|
|
|
|
answers = dns.resolver.query(host)
|
|
|
|
|
addr = str(answers[0])
|
|
|
|
|
else:
|
|
|
|
|
addr = host
|
|
|
|
|
except dns.exception.DNSException:
|
|
|
|
|
# dns failed for some reason, e.g. dns.resolver.NXDOMAIN
|
|
|
|
|
# this is normal. Simply report back failure:
|
|
|
|
|
raise socket.gaierror(11001, 'getaddrinfo failed')
|
|
|
|
|
except BaseException as e:
|
|
|
|
|
# Possibly internal error in dnspython :( see #4483
|
|
|
|
|
# Fall back to original socket.getaddrinfo to resolve dns.
|
|
|
|
|
print_error('dnspython failed to resolve dns with error:', e)
|
|
|
|
|
addr = host
|
|
|
|
|
return socket._getaddrinfo(addr, *args, **kwargs)
|
|
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_interface_lock
|
2015-05-22 07:43:09 +09:00
|
|
|
def start_network(self, protocol, proxy):
|
|
|
|
|
assert not self.interface and not self.interfaces
|
2015-06-03 00:03:33 +09:00
|
|
|
assert not self.connecting and self.socket_queue.empty()
|
2015-05-22 07:43:09 +09:00
|
|
|
self.print_error('starting network')
|
2018-06-21 21:06:56 +02:00
|
|
|
self.disconnected_servers = set([]) # note: needs self.interface_lock
|
2015-05-22 07:43:09 +09:00
|
|
|
self.protocol = protocol
|
|
|
|
|
self.set_proxy(proxy)
|
2018-08-31 14:43:02 +02:00
|
|
|
self.start_interface(self.default_server)
|
2015-05-22 07:43:09 +09:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_interface_lock
|
2015-05-22 07:43:09 +09:00
|
|
|
def stop_network(self):
|
|
|
|
|
self.print_error("stopping network")
|
2017-01-30 12:36:56 +03:00
|
|
|
for interface in list(self.interfaces.values()):
|
2015-06-03 00:03:33 +09:00
|
|
|
self.close_interface(interface)
|
2017-03-04 08:38:14 -08:00
|
|
|
if self.interface:
|
|
|
|
|
self.close_interface(self.interface)
|
2015-06-03 00:03:33 +09:00
|
|
|
assert self.interface is None
|
|
|
|
|
assert not self.interfaces
|
2018-08-16 18:16:25 +02:00
|
|
|
self.connecting.clear()
|
2015-06-03 00:03:33 +09:00
|
|
|
# Get a new queue - no old pending connections thanks!
|
2017-01-22 21:25:24 +03:00
|
|
|
self.socket_queue = queue.Queue()
|
2015-03-12 12:56:06 +01:00
|
|
|
|
2013-10-05 11:16:09 +02:00
|
|
|
    def set_parameters(self, host, port, protocol, proxy, auto_connect):
        """Apply new connection parameters.

        Silently returns if the parameters fail to parse or if the config
        refuses the change. Restarts the whole network when proxy or protocol
        changed; otherwise just switches servers (or away from a lagging one).
        """
        proxy_str = serialize_proxy(proxy)
        server = serialize_server(host, port, protocol)
        # sanitize parameters
        try:
            deserialize_server(serialize_server(host, port, protocol))
            if proxy:
                # both calls raise on invalid input; values are discarded
                proxy_modes.index(proxy["mode"]) + 1
                int(proxy['port'])
        except:
            return
        self.config.set_key('auto_connect', auto_connect, False)
        self.config.set_key("proxy", proxy_str, False)
        self.config.set_key("server", server, True)
        # abort if changes were not allowed by config
        if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
            return
        self.auto_connect = auto_connect
        if self.proxy != proxy or self.protocol != protocol:
            # Restart the network defaulting to the given server
            with self.interface_lock:
                self.stop_network()
                self.default_server = server
                self.start_network(protocol, proxy)
        elif self.default_server != server:
            self.switch_to_interface(server)
        else:
            self.switch_lagging_interface()
            self.notify('updated')
|
2013-10-05 10:01:33 +02:00
|
|
|
|
|
|
|
|
def switch_to_random_interface(self):
|
2015-06-03 00:03:33 +09:00
|
|
|
'''Switch to a random connected server other than the current one'''
|
2015-05-23 22:59:29 +09:00
|
|
|
servers = self.get_interfaces() # Those in connected state
|
2015-06-03 00:03:33 +09:00
|
|
|
if self.default_server in servers:
|
2017-09-14 02:06:08 +02:00
|
|
|
servers.remove(self.default_server)
|
2015-05-23 22:59:29 +09:00
|
|
|
if servers:
|
|
|
|
|
self.switch_to_interface(random.choice(servers))
|
2013-10-05 10:01:33 +02:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_interface_lock
|
2017-05-29 09:03:39 +02:00
|
|
|
def switch_lagging_interface(self):
|
2015-05-25 17:45:01 +09:00
|
|
|
'''If auto_connect and lagging, switch interface'''
|
2015-07-03 11:50:53 +09:00
|
|
|
if self.server_is_lagging() and self.auto_connect:
|
2017-05-29 09:03:39 +02:00
|
|
|
# switch to one that has the correct header (not height)
|
|
|
|
|
header = self.blockchain().read_header(self.get_local_height())
|
2018-08-14 17:50:20 +02:00
|
|
|
def filt(x):
|
|
|
|
|
a = x[1].tip_header
|
|
|
|
|
b = header
|
|
|
|
|
assert type(a) is type(b)
|
|
|
|
|
return a == b
|
|
|
|
|
filtered = list(map(lambda x: x[0], filter(filt, self.interfaces.items())))
|
2017-05-29 09:03:39 +02:00
|
|
|
if filtered:
|
|
|
|
|
choice = random.choice(filtered)
|
|
|
|
|
self.switch_to_interface(choice)
|
2015-05-25 17:45:01 +09:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_interface_lock
def switch_to_interface(self, server):
    '''Switch to server as our interface. If no connection exists nor
    being opened, start a thread to connect. The actual switch will
    happen on receipt of the connection notification. Do nothing
    if server already is our interface.'''
    self.default_server = server
    if server not in self.interfaces:
        # no connection yet: kick one off; the switch completes later
        # when the connection notification arrives
        self.interface = None
        self.start_interface(server)
        return

    i = self.interfaces[server]
    if self.interface != i:
        self.print_error("switching to", server)
        if self.interface is not None:
            # Stop any current interface in order to terminate subscriptions,
            # and to cancel tasks in interface.group.
            # However, for headers sub, give preference to this interface
            # over unknown ones, i.e. start it again right away.
            # NOTE: default_server was already reassigned above, so
            # close_interface() will not clear self.interface here.
            self.close_interface(self.interface)
            if len(self.interfaces) <= self.num_server:
                self.start_interface(self.interface.server)

        self.interface = i
        # schedule the server-info request on the interface's task group
        asyncio.get_event_loop().create_task(
            i.group.spawn(self.request_server_info(i)))
        self.trigger_callback('default_server_changed')
        self.set_status('connected')
        self.notify('updated')
        self.notify('interfaces')
|
2013-10-02 12:13:07 +02:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_interface_lock
def close_interface(self, interface):
    """Close `interface`, drop it from the interface map, and clear
    self.interface if it was the default server's connection."""
    if not interface:
        return
    server = interface.server
    self.interfaces.pop(server, None)
    if server == self.default_server:
        self.interface = None
    interface.close()
|
2013-10-09 10:04:32 +02:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_recent_servers_lock
def add_recent_server(self, server):
    """Move `server` to the front of the (ordered) recent-servers list,
    cap the list at 20 entries, and persist it."""
    recent = self.recent_servers
    if server in recent:
        recent.remove(server)
    recent.insert(0, server)
    self.recent_servers = recent[:20]
    self.save_recent_servers()
|
2013-09-10 19:59:58 +02:00
|
|
|
|
2015-11-27 11:30:14 +01:00
|
|
|
def process_response(self, interface, response, callbacks):
    """Handle a JSON-RPC response/notification received from `interface`.

    A known set of methods updates network state in place; every
    response is then forwarded to the supplied `callbacks`.
    """
    if self.debug:
        self.print_error(interface.host, "<--", response)
    error = response.get('error')
    result = response.get('result')
    method = response.get('method')
    params = response.get('params')

    # We handle some responses; return the rest to the client.
    if method == 'server.version':
        interface.server_version = result
    elif method == 'blockchain.headers.subscribe':
        if error is None:
            self.on_notify_header(interface, result)
        else:
            # no point in keeping this connection without headers sub
            self.connection_down(interface.server)
            return
    elif method == 'server.peers.subscribe':
        if error is None:
            self.irc_servers = parse_servers(result)
            self.notify('servers')
    elif method == 'server.banner':
        if error is None:
            self.banner = result
            self.notify('banner')
    elif method == 'server.donation_address':
        if error is None:
            self.donation_address = result
    elif method == 'mempool.get_fee_histogram':
        if error is None:
            self.print_error('fee_histogram', result)
            self.config.mempool_fees = result
            self.notify('fee_histogram')
    elif method == 'blockchain.estimatefee':
        # only cache positive estimates; result is scaled by COIN to
        # satoshis (server reports in coin units)
        if error is None and result > 0:
            i = params[0]  # echo of the requested confirmation target
            fee = int(result*COIN)
            self.config.update_fee_estimates(i, fee)
            self.print_error("fee_estimates[%d]" % i, fee)
            self.notify('fee')
    elif method == 'blockchain.relayfee':
        if error is None:
            self.relay_fee = int(result * COIN) if result is not None else None
            self.print_error("relayfee", self.relay_fee)
    elif method == 'blockchain.block.headers':
        self.on_block_headers(interface, response)
    elif method == 'blockchain.block.get_header':
        self.on_get_header(interface, response)

    # forward the (possibly already handled) response to the client
    for callback in callbacks:
        callback(response)
|
2014-07-28 00:13:40 +02:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@classmethod
def get_index(cls, method, params):
    """Hashable index for subscriptions and cache."""
    key = str(method)
    if params:
        # only the first parameter participates in the index
        key += ':' + str(params[0])
    return key
|
2015-12-06 13:32:50 +01:00
|
|
|
|
2015-11-12 08:40:58 +09:00
|
|
|
def unsubscribe(self, callback):
    '''Unsubscribe a callback to free object references to enable GC.'''
    # Note: we can't unsubscribe from the server, so if we receive
    # subsequent notifications process_response() will emit a harmless
    # "received unexpected notification" warning
    with self.callback_lock:
        for subscribers in self.subscriptions.values():
            try:
                subscribers.remove(callback)
            except ValueError:
                # callback was not subscribed under this key
                pass
|
2015-11-12 08:40:58 +09:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_interface_lock
def connection_down(self, server):
    '''A connection to server either went down, or was never made.
    We distinguish by whether it is in self.interfaces.'''
    self.disconnected_servers.add(server)
    if server == self.default_server:
        self.set_status('disconnected')
    iface = self.interfaces.get(server)
    if iface is not None:
        self.close_interface(iface)
    self.notify('interfaces')
    # a chain that was catching up via this server must pick a new source
    with self.blockchains_lock:
        for chain in blockchain.blockchains.values():
            if chain.catch_up == server:
                chain.catch_up = None
|
2017-05-29 09:03:39 +02:00
|
|
|
|
2018-08-14 17:50:20 +02:00
|
|
|
@aiosafe
async def new_interface(self, server):
    """Create an Interface for `server`, wait up to 5s for it to become
    ready, then register it (and switch to it if it is the default
    server).  On failure the connection is marked down.  In all cases
    `server` is removed from the pending `self.connecting` set."""
    # todo: get tip first, then decide which checkpoint to use.
    self.add_recent_server(server)

    interface = Interface(self, server, self.config.path, self.proxy)
    try:
        await asyncio.wait_for(interface.ready, 5)
    except BaseException as e:
        # BaseException: also covers cancellation, not just Exception
        #import traceback
        #traceback.print_exc()
        self.print_error(interface.server, "couldn't launch because", str(e), str(type(e)))
        self.connection_down(interface.server)
        return
    finally:
        # success or failure, this server is no longer "connecting"
        try: self.connecting.remove(server)
        except KeyError: pass

    with self.interface_lock:
        self.interfaces[server] = interface

    # if this is the server the user asked for, make it the active one
    if server == self.default_server:
        self.switch_to_interface(server)

    #self.notify('interfaces')
|
2015-06-03 00:03:33 +09:00
|
|
|
|
2017-05-29 09:03:39 +02:00
|
|
|
def init_headers_file(self):
    """Pre-allocate the headers file for the base chain so it covers
    all checkpointed chunks (80-byte headers, 2016 per chunk)."""
    chain = blockchain.blockchains[0]
    filename = chain.path()
    expected = 80 * 2016 * len(constants.net.CHECKPOINTS)
    if not os.path.exists(filename) or os.path.getsize(filename) < expected:
        # extend by seeking to the final byte and writing a single null
        with open(filename, 'wb') as f:
            if expected > 0:
                f.seek(expected - 1)
                f.write(b'\x00')
    with chain.lock:
        chain.update_size()
|
2017-05-29 09:03:39 +02:00
|
|
|
|
2018-08-14 17:50:20 +02:00
|
|
|
async def get_merkle_for_transaction(self, tx_hash, tx_height):
    """Ask the current server for the merkle branch of `tx_hash` at
    `tx_height`."""
    request = ('blockchain.transaction.get_merkle', [tx_hash, tx_height])
    return await self.interface.session.send_request(*request)
|
|
|
|
|
|
2018-09-07 17:07:15 +02:00
|
|
|
def broadcast_transaction(self, tx, timeout=5):
    """Broadcast `tx` via the default server and block for the result.

    Returns (True, txid) on success, otherwise (False, "error: ...").
    Thread-safe: the request is scheduled onto the network event loop.
    """
    coro = self.interface.session.send_request(
        'blockchain.transaction.broadcast', [str(tx)])
    fut = asyncio.run_coroutine_threadsafe(coro, self.asyncio_loop)
    try:
        out = fut.result(timeout)
    except (concurrent.futures.TimeoutError, asyncio.TimeoutError):
        # BUGFIX: concurrent.futures.Future.result raises
        # concurrent.futures.TimeoutError, which on Python < 3.11 is NOT
        # asyncio.TimeoutError; catching only the latter let timeouts
        # fall through to the generic handler with an empty message.
        return False, "error: operation timed out"
    except Exception as e:
        return False, "error: " + str(e)

    # the server echoes the txid on success; anything else is an error text
    if out != tx.txid():
        return False, "error: " + out
    return True, out
|
2018-08-14 17:50:20 +02:00
|
|
|
|
2018-09-08 19:11:02 +02:00
|
|
|
async def request_chunk(self, height, tip, session=None, can_return_early=False):
    """Download and connect the 2016-header chunk containing `height`.

    Returns None if the chunk is already in flight and
    `can_return_early` is set; otherwise (connect_result, header_count).
    """
    if session is None:
        session = self.interface.session
    index = height // 2016
    if can_return_early and index in self.requested_chunks:
        return
    # clamp the request size when the server tip falls inside this chunk
    size = 2016
    if tip is not None:
        size = max(0, min(size, tip - index * 2016))
    try:
        self.requested_chunks.add(index)
        res = await asyncio.wait_for(
            session.send_request('blockchain.block.headers', [index * 2016, size]),
            20)
    finally:
        # always release the in-flight marker, even on timeout/cancel
        self.requested_chunks.discard(index)
    conn = self.blockchain().connect_chunk(index, res['hex'])
    if not conn:
        return conn, 0
    return conn, res['count']
|
|
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_interface_lock
def blockchain(self):
    """Return the chain our current interface follows (refreshing the
    cached index from its fork point), else the last known chain."""
    iface = self.interface
    if iface and iface.blockchain is not None:
        self.blockchain_index = iface.blockchain.forkpoint
    return blockchain.blockchains[self.blockchain_index]
|
2014-03-10 20:53:05 +01:00
|
|
|
|
2018-06-21 21:06:56 +02:00
|
|
|
@with_interface_lock
def get_blockchains(self):
    """Map each known chain id to the interfaces currently on it;
    chains with no followers are omitted."""
    with self.blockchains_lock:
        chain_items = list(blockchain.blockchains.items())
    result = {}
    for chain_id, chain in chain_items:
        followers = [iface for iface in list(self.interfaces.values())
                     if iface.blockchain == chain]
        if followers:
            result[chain_id] = followers
    return result
|
|
|
|
|
|
2017-07-04 11:35:04 +02:00
|
|
|
def follow_chain(self, index):
    """Make the client follow the blockchain fork with id `index`.

    Persists the choice, prefers an interface already on that chain,
    then re-applies network parameters so the default server matches.
    Raises Exception if no such chain is known.
    """
    bc = blockchain.blockchains.get(index)
    if bc:
        self.blockchain_index = index
        self.config.set_key('blockchain_index', index)
        with self.interface_lock:
            interfaces = list(self.interfaces.values())
        for i in interfaces:
            if i.blockchain == bc:
                self.switch_to_interface(i.server)
                break
    else:
        raise Exception('blockchain not found', index)

    with self.interface_lock:
        if self.interface:
            server = self.interface.server
            # keep proxy/auto_connect from current settings; host, port and
            # protocol are re-derived from the (possibly switched) server
            host, port, protocol, proxy, auto_connect = self.get_parameters()
            host, port, protocol = server.split(':')
            self.set_parameters(host, port, protocol, proxy, auto_connect)
|
2017-07-10 13:51:13 +02:00
|
|
|
|
2014-03-10 20:53:05 +01:00
|
|
|
def get_local_height(self):
    """Height of our locally followed chain."""
    chain = self.blockchain()
    return chain.height()
|
2015-08-30 21:18:10 +09:00
|
|
|
|
2017-12-12 11:10:50 +01:00
|
|
|
def export_checkpoints(self, path):
    """Write this chain's checkpoints to `path` as indented JSON.

    Run manually from the console to generate checkpoints.
    """
    checkpoints = self.blockchain().get_checkpoints()
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(checkpoints, f, indent=4)
|
|
|
|
|
|
2018-09-06 16:18:45 +02:00
|
|
|
def start(self, fx=None):
    """Start the network: run the main task group (session maintenance
    plus an optional extra coroutine `fx`) on a dedicated thread that
    drives self.asyncio_loop."""
    self.main_taskgroup = TaskGroup()
    async def _main():
        self.init_headers_file()
        async with self.main_taskgroup as group:
            await group.spawn(self.maintain_sessions())
            if fx:
                await group.spawn(fx)
    self._wrapper_thread = threading.Thread(
        target=self.asyncio_loop.run_until_complete, args=(_main(),))
    self._wrapper_thread.start()
|
2018-08-15 19:01:28 +02:00
|
|
|
|
|
|
|
|
def stop(self):
    """Ask the event loop (thread-safely) to cancel all remaining tasks
    in the main task group."""
    coro = self.main_taskgroup.cancel_remaining()
    asyncio.run_coroutine_threadsafe(coro, self.asyncio_loop)
|
2018-08-15 19:01:28 +02:00
|
|
|
|
|
|
|
|
def join(self):
    """Wait (at most one second) for the event-loop wrapper thread."""
    wrapper = self._wrapper_thread
    wrapper.join(1)
|
2018-08-15 19:01:28 +02:00
|
|
|
|
|
|
|
|
async def maintain_sessions(self):
    """Main connection-maintenance loop (runs forever, once per second).

    Each pass: launches interfaces for queued sockets, reaps errored
    interfaces, tops up random server connections, retries previously
    disconnected servers after their intervals, keeps the main
    interface connected, and refreshes fee estimates when connected.
    """
    while True:
        # drain the queue of freshly accepted sockets into interfaces
        while self.socket_queue.qsize() > 0:
            server = self.socket_queue.get()
            asyncio.get_event_loop().create_task(self.new_interface(server))
        remove = []
        for k, i in self.interfaces.items():
            # an interface's future must only ever finish by raising
            if i.fut.done() and not i.exception:
                assert False, "interface future should not finish without exception"
            if i.exception:
                if not i.fut.done():
                    try: i.fut.cancel()
                    except Exception as e: self.print_error('exception while cancelling fut', e)
                # re-raise purely to log the error with its type
                try:
                    raise i.exception
                except BaseException as e:
                    self.print_error(i.server, "errored because:", str(e), str(type(e)))
                remove.append(k)
        changed = False
        for k in remove:
            self.connection_down(k)
            changed = True

        # nodes
        now = time.time()
        # top up to num_server connections (minus those still connecting)
        for i in range(self.num_server - len(self.interfaces) - len(self.connecting)):
            if self.start_random_interface():
                changed = True
        if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
            # periodically forget past failures and retry everything
            self.print_error('network: retrying connections')
            self.disconnected_servers = set([])
            self.nodes_retry_time = now

        # main interface
        if not self.is_connected():
            if self.auto_connect:
                if not self.is_connecting():
                    self.switch_to_random_interface()
                    changed = True
            else:
                # user pinned a server: retry it on its own interval
                if self.default_server in self.disconnected_servers:
                    if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
                        self.disconnected_servers.remove(self.default_server)
                        self.server_retry_time = now
                        changed = True
                else:
                    self.switch_to_interface(self.default_server)
                    changed = True
        else:
            if self.config.is_fee_estimates_update_required():
                await self.interface.group.spawn(self.attempt_fee_estimate_update())

        if changed:
            self.notify('updated')
        await asyncio.sleep(1)
|
2018-08-29 18:41:51 +02:00
|
|
|
|
2018-08-31 16:46:49 +02:00
|
|
|
async def attempt_fee_estimate_update(self):
    """Request fee estimates from the current interface, with a
    five-second timeout."""
    pending = self.request_fee_estimates(self.interface)
    await asyncio.wait_for(pending, 5)
|