2018-08-14 17:50:20 +02:00
|
|
|
import asyncio
|
|
|
|
|
import tempfile
|
|
|
|
|
import unittest
|
2025-06-09 18:59:31 +00:00
|
|
|
from typing import List
|
2018-08-14 17:50:20 +02:00
|
|
|
|
2018-09-09 05:00:09 +02:00
|
|
|
from electrum import constants
|
2018-08-14 17:50:20 +02:00
|
|
|
from electrum.simple_config import SimpleConfig
|
|
|
|
|
from electrum import blockchain
|
2025-06-06 16:42:15 +00:00
|
|
|
from electrum.interface import Interface, ServerAddr, ChainResolutionMode
|
2018-11-20 18:57:16 +01:00
|
|
|
from electrum.crypto import sha256
|
2023-09-06 15:52:33 +00:00
|
|
|
from electrum.util import OldTaskGroup
|
asyncio: stop using get_event_loop(). introduce ~singleton loop.
asyncio.get_event_loop() became deprecated in python3.10. (see https://github.com/python/cpython/issues/83710)
```
.../electrum/electrum/daemon.py:470: DeprecationWarning: There is no current event loop
self.asyncio_loop = asyncio.get_event_loop()
.../electrum/electrum/network.py:276: DeprecationWarning: There is no current event loop
self.asyncio_loop = asyncio.get_event_loop()
```
Also, according to that thread, "set_event_loop() [... is] not deprecated by oversight".
So, we stop using get_event_loop() and set_event_loop() in our own code.
Note that libraries we use (such as the stdlib for python <3.10), might call get_event_loop,
which then relies on us having called set_event_loop e.g. for the GUI thread. To work around
this, a custom event loop policy providing a get_event_loop implementation is used.
Previously, we have been using a single asyncio event loop, created with
util.create_and_start_event_loop, and code in many places got a reference to this loop
using asyncio.get_event_loop().
Now, we still use a single asyncio event loop, but it is now stored as a global in
util._asyncio_event_loop (access with util.get_asyncio_loop()).
I believe these changes also fix https://github.com/spesmilo/electrum/issues/5376
2022-04-29 18:24:49 +02:00
|
|
|
from electrum import util
|
2018-11-20 18:57:16 +01:00
|
|
|
|
2019-09-22 20:46:01 +02:00
|
|
|
from . import ElectrumTestCase
|
|
|
|
|
|
2018-08-14 17:50:20 +02:00
|
|
|
|
2025-06-06 16:42:15 +00:00
|
|
|
CRM = ChainResolutionMode  # short alias: the mock header dicts key on these modes
|
|
|
|
|
|
|
|
|
|
|
2025-06-09 18:59:31 +00:00
|
|
|
class MockBlockchain:
    """In-memory stand-in for `blockchain.Blockchain`.

    A chain is just an ordered list of opaque header-id strings (e.g. "05a");
    each "header" passed in is a dict carrying its height plus a
    ``header['mock']`` sub-dict with ``id`` and ``prev_id`` fields.
    """

    def __init__(self, headers: List[str]):
        self._headers = headers
        # NOTE(review): mirrors the attribute the real Blockchain exposes;
        # here it is simply the initial chain length.
        self.forkpoint = len(headers)

    def height(self) -> int:
        """Height of the chain tip (genesis is height 0)."""
        return len(self._headers) - 1

    def save_header(self, header: dict) -> None:
        """Append *header* at the tip; it must extend the chain by exactly one."""
        assert header['block_height'] == self.height()+1, f"new {header['block_height']=}, cur {self.height()=}"
        self._headers.append(header['mock']['id'])

    def check_header(self, header: dict) -> bool:
        """Return whether *header* is already part of this chain."""
        return header['mock']['id'] in self._headers

    def can_connect(self, header: dict, *, check_height: bool = True) -> bool:
        """Return whether *header* extends (or is already on) this chain."""
        new_height = header['block_height']
        if check_height and new_height - 1 != self.height():
            return False
        # Either we already have it, or its parent is known to us.
        return self.check_header(header) or header['mock']['prev_id'] in self._headers

    def fork(parent, header: dict) -> 'MockBlockchain':
        """Create a new chain that branches off *parent* at *header*.

        The new chain shares *parent*'s prefix below the forkpoint and is
        registered in the global ``blockchain.blockchains`` registry under
        the forking header's id.
        """
        if not parent.can_connect(header, check_height=False):
            raise Exception("forking header does not connect to parent chain")
        fork_height = header.get('block_height')
        branch = MockBlockchain(parent._headers[:fork_height])
        branch.save_header(header)
        with blockchain.blockchains_lock:
            blockchain.blockchains[header['mock']['id']] = branch
        return branch
|
|
|
|
|
|
|
|
|
|
|
2018-09-25 17:00:43 +02:00
|
|
|
class MockNetwork:
    """Bare-bones stand-in for `Network`.

    Provides only the attributes that `Interface.__init__` and the code
    under test actually read from their ``network`` reference.
    """

    def __init__(self, config: SimpleConfig):
        self.asyncio_loop = util.get_asyncio_loop()  # app-wide singleton loop
        self.taskgroup = OldTaskGroup()
        self.proxy = None  # tests never go through a proxy
        self.config = config
|
2018-09-25 17:00:43 +02:00
|
|
|
|
2018-09-04 16:57:07 +02:00
|
|
|
class MockInterface(Interface):
    """`Interface` whose server responses are scripted through a queue.

    Each test pre-loads ``self.q`` with the headers the fake server will
    serve, in the exact order the chain-resolution code is expected to
    request them; any deviation trips an assertion.
    """

    def __init__(self, config: SimpleConfig):
        self.config = config
        super().__init__(
            network=MockNetwork(config),
            server=ServerAddr.from_str('mock-server:50000:t'),
        )
        self.q = asyncio.Queue()  # scripted header responses, FIFO

    async def get_block_header(self, height: int, *, mode: ChainResolutionMode) -> dict:
        # An empty queue means the code under test made an extra,
        # unscripted request — fail loudly rather than hang on get().
        assert self.q.qsize() > 0, (height, mode)
        item = await self.q.get()
        self.logger.debug(f"step with {height=}. {mode=}. will get {item=}")
        # The request must match what the script expected next.
        assert item['block_height'] == height, (item['block_height'], height)
        assert mode in item['mock'], (mode, item)
        return item

    async def run(self):
        pass  # disable the real network main-loop

    async def _maybe_warm_headers_cache(self, *args, **kwargs):
        pass  # header-cache warming is irrelevant for these mocks
|
|
|
|
|
|
2023-09-06 15:52:33 +00:00
|
|
|
|
2025-06-09 20:20:47 +00:00
|
|
|
class TestHeaderChainResolution(ElectrumTestCase):
    """Tests for `Interface.sync_until` / header chain resolution.

    Each test scripts the fake server's responses by pre-loading
    ``MockInterface.q`` with the exact sequence of headers that
    ``sync_until`` is expected to request (height, resolution mode, and
    header id are all asserted by the mock). At the end, the queue must be
    empty and the global ``blockchain.blockchains`` registry must contain
    the expected number of chains.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Run on regtest so chain parameters are test-friendly.
        constants.BitcoinRegtest.set_as_network()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        # Restore mainnet for any subsequently-run test modules.
        constants.BitcoinMainnet.set_as_network()

    def tearDown(self):
        # Reset the global chain registry mutated by the tests.
        blockchain.blockchains = {}
        super().tearDown()

    async def asyncSetUp(self):
        await super().asyncSetUp()
        self.config = SimpleConfig({'electrum_path': self.electrum_path})
        self.interface = MockInterface(self.config)

    async def test_catchup_one_block_behind(self):
        """Single chain, but client is behind. The client's height is 5, server is on block 6.
        - first missing block found during *catchup* phase
        """
        ifa = self.interface
        ifa.tip = 6  # server's tip
        ifa.blockchain = MockBlockchain(["00a", "01a", "02a", "03a", "04a", "05a"])
        blockchain.blockchains = {
            "00a": ifa.blockchain,
        }
        # Script: server serves the one missing block, which connects directly.
        ifa.q.put_nowait({'block_height': 6, 'mock': {CRM.CATCHUP:1, 'id': '06a', 'prev_id': '05a'}})
        res = await ifa.sync_until(ifa.tip)
        self.assertEqual((CRM.CATCHUP, 7), res)
        self.assertEqual(ifa.q.qsize(), 0)
        self.assertEqual(len(blockchain.blockchains), 1)

    async def test_catchup_already_up_to_date(self):
        """Single chain, local chain tip already matches server tip."""
        ifa = self.interface
        ifa.tip = 5
        ifa.blockchain = MockBlockchain(["00a", "01a", "02a", "03a", "04a", "05a"])
        blockchain.blockchains = {
            "00a": ifa.blockchain,
        }
        # Script: server re-serves the tip header we already have.
        ifa.q.put_nowait({'block_height': 5, 'mock': {CRM.CATCHUP:1, 'id': '05a', 'prev_id': '04a'}})
        res = await ifa.sync_until(ifa.tip)
        self.assertEqual((CRM.CATCHUP, 6), res)
        self.assertEqual(ifa.q.qsize(), 0)
        self.assertEqual(len(blockchain.blockchains), 1)

    async def test_catchup_client_ahead_of_lagging_server(self):
        """Single chain, server is lagging."""
        ifa = self.interface
        ifa.tip = 3  # server is behind our local height of 5
        ifa.blockchain = MockBlockchain(["00a", "01a", "02a", "03a", "04a", "05a"])
        blockchain.blockchains = {
            "00a": ifa.blockchain,
        }
        ifa.q.put_nowait({'block_height': 3, 'mock': {CRM.CATCHUP:1, 'id': '03a', 'prev_id': '02a'}})
        res = await ifa.sync_until(ifa.tip)
        self.assertEqual((CRM.CATCHUP, 4), res)
        self.assertEqual(ifa.q.qsize(), 0)
        self.assertEqual(len(blockchain.blockchains), 1)

    async def test_catchup_fast_forward(self):
        """Single chain, but client is behind. The client's height is 5, server is already on block 12.
        - first missing block found during *backward* phase
        """
        ifa = self.interface
        ifa.tip = 12
        ifa.blockchain = MockBlockchain(["00a", "01a", "02a", "03a", "04a", "05a"])
        blockchain.blockchains = {
            "00a": ifa.blockchain,
        }
        # Script: tip header does not connect -> backward search finds 06a
        # (which connects), then catchup walks forward from there.
        ifa.q.put_nowait({'block_height': 12, 'mock': {CRM.CATCHUP:1, 'id': '12a', 'prev_id': '11a'}})
        ifa.q.put_nowait({'block_height': 6, 'mock': {CRM.BACKWARD:1, 'id': '06a', 'prev_id': '05a'}})
        ifa.q.put_nowait({'block_height': 7, 'mock': {CRM.CATCHUP: 1, 'id': '07a', 'prev_id': '06a'}})
        ifa.q.put_nowait({'block_height': 8, 'mock': {CRM.CATCHUP: 1, 'id': '08a', 'prev_id': '07a'}})
        ifa.q.put_nowait({'block_height': 9, 'mock': {CRM.CATCHUP: 1, 'id': '09a', 'prev_id': '08a'}})
        res = await ifa.sync_until(ifa.tip, next_height=9)
        self.assertEqual((CRM.CATCHUP, 10), res)
        self.assertEqual(ifa.q.qsize(), 0)
        self.assertEqual(len(blockchain.blockchains), 1)

    async def test_fork(self):
        """client starts on main chain, has no knowledge of any fork.
        server is on other side of chain split, the last common block is height 6.
        - first missing block found during *binary* phase
        - is *new* fork
        """
        ifa = self.interface
        ifa.tip = 8
        ifa.blockchain = MockBlockchain(["00a", "01a", "02a", "03a", "04a", "05a", "06a", "07a", "08a", "09a", "10a", "11a", "12a"])
        blockchain.blockchains = {
            "00a": ifa.blockchain,
        }
        # Script: 08b/07b do not connect -> backward search down to 05a
        # (connects), then binary search pins the forkpoint at 06a; 07b
        # starts a brand-new fork.
        ifa.q.put_nowait({'block_height': 8, 'mock': {CRM.CATCHUP:1, 'id': '08b', 'prev_id': '07b'}})
        ifa.q.put_nowait({'block_height': 7, 'mock': {CRM.BACKWARD:1, 'id': '07b', 'prev_id': '06a'}})
        ifa.q.put_nowait({'block_height': 5, 'mock': {CRM.BACKWARD:1, 'id': '05a', 'prev_id': '04a'}})
        ifa.q.put_nowait({'block_height': 6, 'mock': {CRM.BINARY:1, 'id': '06a', 'prev_id': '05a'}})
        res = await ifa.sync_until(ifa.tip, next_height=7)
        self.assertEqual((CRM.FORK, 8), res)
        self.assertEqual(ifa.q.qsize(), 0)
        # The new fork was registered alongside the main chain.
        self.assertEqual(len(blockchain.blockchains), 2)

    async def test_can_connect_during_backward(self):
        """client starts on main chain. client already knows about another fork, which has local height 4.
        server is on that fork but has more blocks.
        - first missing block found during *backward* phase
        - is *existing* fork
        """
        ifa = self.interface
        ifa.tip = 8
        ifa.blockchain = MockBlockchain(["00a", "01a", "02a", "03a", "04a", "05a", "06a", "07a", "08a", "09a", "10a", "11a", "12a"])
        blockchain.blockchains = {
            "00a": ifa.blockchain,
            # Pre-existing fork that branched at height 3.
            "03b": MockBlockchain(["00a", "01a", "02a", "03b", "04b"]),
        }
        # Script: backward search reaches 05b which connects to the known
        # "03b" fork, so no binary phase is needed — straight to catchup.
        ifa.q.put_nowait({'block_height': 8, 'mock': {CRM.CATCHUP:1, 'id': '08b', 'prev_id': '07b'}})
        ifa.q.put_nowait({'block_height': 7, 'mock': {CRM.BACKWARD:1, 'id': '07b', 'prev_id': '06b'}})
        ifa.q.put_nowait({'block_height': 5, 'mock': {CRM.BACKWARD:1, 'id': '05b', 'prev_id': '04b'}})
        ifa.q.put_nowait({'block_height': 6, 'mock': {CRM.CATCHUP:1, 'id': '06b', 'prev_id': '05b'}})
        res = await ifa.sync_until(ifa.tip, next_height=6)
        self.assertEqual((CRM.CATCHUP, 7), res)
        self.assertEqual(ifa.q.qsize(), 0)
        self.assertEqual(len(blockchain.blockchains), 2)

    async def test_chain_false_during_binary(self):
        """client starts on main chain, has no knowledge of any fork.
        server is on other side of chain split, the last common block is height 3.
        - first missing block found during *binary* phase
        - is *new* fork
        """
        ifa = self.interface
        ifa.tip = 8
        ifa.blockchain = MockBlockchain(["00a", "01a", "02a", "03a", "04a", "05a", "06a", "07a", "08a", "09a", "10a", "11a", "12a"])
        blockchain.blockchains = {
            "00a": ifa.blockchain,
        }
        # Script: backward search overshoots past the forkpoint (down to
        # height 1), binary search then narrows it to 03a; 04b starts a
        # new fork and catchup continues on it.
        ifa.q.put_nowait({'block_height': 8, 'mock': {CRM.CATCHUP:1, 'id': '08b', 'prev_id': '07b'}})
        ifa.q.put_nowait({'block_height': 7, 'mock': {CRM.BACKWARD:1, 'id': '07b', 'prev_id': '06b'}})
        ifa.q.put_nowait({'block_height': 5, 'mock': {CRM.BACKWARD:1, 'id': '05b', 'prev_id': '04b'}})
        ifa.q.put_nowait({'block_height': 1, 'mock': {CRM.BACKWARD:1, 'id': '01a', 'prev_id': '00a'}})
        ifa.q.put_nowait({'block_height': 3, 'mock': {CRM.BINARY:1, 'id': '03a', 'prev_id': '02a'}})
        ifa.q.put_nowait({'block_height': 4, 'mock': {CRM.BINARY:1, 'id': '04b', 'prev_id': '03a'}})
        ifa.q.put_nowait({'block_height': 5, 'mock': {CRM.CATCHUP:1, 'id': '05b', 'prev_id': '04b'}})
        ifa.q.put_nowait({'block_height': 6, 'mock': {CRM.CATCHUP:1, 'id': '06b', 'prev_id': '05b'}})
        res = await ifa.sync_until(ifa.tip, next_height=6)
        self.assertEqual((CRM.CATCHUP, 7), res)
        self.assertEqual(ifa.q.qsize(), 0)
        self.assertEqual(len(blockchain.blockchains), 2)

    async def test_chain_true_during_binary(self):
        """client starts on main chain. client already knows about another fork, which has local height 10.
        server is on that fork but has more blocks.
        - first missing block found during *binary* phase
        - is *existing* fork
        """
        ifa = self.interface
        ifa.tip = 20
        ifa.blockchain = MockBlockchain(["00a", "01a", "02a", "03a", "04a", "05a", "06a", "07a", "08a", "09a", "10a", "11a", "12a", "13a", "14a"])
        blockchain.blockchains = {
            "00a": ifa.blockchain,
            # Pre-existing fork that branched at height 7, local height 10.
            "07b": MockBlockchain(["00a", "01a", "02a", "03a", "04a", "05a", "06a", "07b", "08b", "09b", "10b"]),
        }
        # Script: backward search down to 09b (on the known fork), then
        # binary search confirms 10b is the last locally-known block;
        # catchup extends the existing fork from height 11.
        ifa.q.put_nowait({'block_height': 20, 'mock': {CRM.CATCHUP:1, 'id': '20b', 'prev_id': '19b'}})
        ifa.q.put_nowait({'block_height': 15, 'mock': {CRM.BACKWARD:1, 'id': '15b', 'prev_id': '14b'}})
        ifa.q.put_nowait({'block_height': 13, 'mock': {CRM.BACKWARD:1, 'id': '13b', 'prev_id': '12b'}})
        ifa.q.put_nowait({'block_height': 9, 'mock': {CRM.BACKWARD:1, 'id': '09b', 'prev_id': '08b'}})
        ifa.q.put_nowait({'block_height': 11, 'mock': {CRM.BINARY:1, 'id': '11b', 'prev_id': '10b'}})
        ifa.q.put_nowait({'block_height': 10, 'mock': {CRM.BINARY:1, 'id': '10b', 'prev_id': '09b'}})
        ifa.q.put_nowait({'block_height': 11, 'mock': {CRM.CATCHUP:1, 'id': '11b', 'prev_id': '10b'}})
        ifa.q.put_nowait({'block_height': 12, 'mock': {CRM.CATCHUP:1, 'id': '12b', 'prev_id': '11b'}})
        ifa.q.put_nowait({'block_height': 13, 'mock': {CRM.CATCHUP:1, 'id': '13b', 'prev_id': '12b'}})
        res = await ifa.sync_until(ifa.tip, next_height=13)
        self.assertEqual((CRM.CATCHUP, 14), res)
        self.assertEqual(ifa.q.qsize(), 0)
        self.assertEqual(len(blockchain.blockchains), 2)
|
2018-08-14 17:50:20 +02:00
|
|
|
|
|
|
|
|
|
2024-09-16 15:27:45 +00:00
|
|
|
if __name__ == "__main__":
    # Tests assume regtest chain parameters (see setUpClass).
    constants.BitcoinRegtest.set_as_network()
    unittest.main()
|