1717from dataclasses import dataclass
1818from glob import glob
1919from typing import Dict , List , Sequence , Tuple , Optional , TYPE_CHECKING , Union
20+ from functools import partial
2021
2122import attr
2223from aiorpcx import run_in_thread , sleep
@@ -54,6 +55,7 @@ class FlushData:
5455 headers = attr .ib ()
5556 block_tx_hashes = attr .ib () # type: List[bytes]
5657 undo_block_tx_hashes = attr .ib () # type: List[bytes]
58+ block_wtxids = attr .ib () # type: List[bytes]
5759 undo_historical_spends = attr .ib () # type: List[bytes]
5860 # The following are flushed to the UTXO DB if undo_infos is not None
5961 undo_infos = attr .ib () # type: List[Tuple[Sequence[bytes], int]]
@@ -132,6 +134,8 @@ def __init__(self, env: 'Env'):
132134 self .tx_counts_file = util .LogicalFile ('meta/txcounts' , 2 , 2000000 )
133135 # on-disk: 32 byte txids in chain order, allows (tx_num -> txid) map
134136 self .hashes_file = util .LogicalFile ('meta/hashes' , 4 , 16000000 )
137+ # on-disk: 32 byte wtxids in chain order, allows (tx_num -> wtxid) map
138+ self .wtxids_file = util .LogicalFile ('meta/wtxids' , 4 , 16000000 )
135139 if not self .coin .STATIC_BLOCK_HEADERS :
136140 self .headers_offsets_file = util .LogicalFile (
137141 'meta/headers_offsets' , 2 , 16000000 )
@@ -221,6 +225,7 @@ def assert_flushed(self, flush_data: FlushData):
221225 assert not flush_data .headers
222226 assert not flush_data .block_tx_hashes
223227 assert not flush_data .undo_block_tx_hashes
228+ assert not flush_data .block_wtxids
224229 assert not flush_data .undo_historical_spends
225230 assert not flush_data .adds
226231 assert not flush_data .deletes
@@ -280,15 +285,21 @@ def flush_fs(self, flush_data: FlushData):
280285 prior_tx_count = (self .tx_counts [self .fs_height ]
281286 if self .fs_height >= 0 else 0 )
282287 assert len (flush_data .block_tx_hashes ) == len (flush_data .headers )
288+ assert len (flush_data .block_wtxids ) == len (flush_data .headers )
283289 assert flush_data .height == self .fs_height + len (flush_data .headers )
284290 assert flush_data .tx_count == (self .tx_counts [- 1 ] if self .tx_counts
285291 else 0 )
286292 assert len (self .tx_counts ) == flush_data .height + 1
293+
287294 hashes = b'' .join (flush_data .block_tx_hashes )
288295 flush_data .block_tx_hashes .clear ()
289296 assert len (hashes ) % 32 == 0
290297 assert len (hashes ) // 32 == flush_data .tx_count - prior_tx_count
291298
299+ wtxids = b'' .join (flush_data .block_wtxids )
300+ flush_data .block_wtxids .clear ()
301+ assert len (wtxids ) == len (hashes )
302+
292303 # Write the headers, tx counts, and tx hashes
293304 start_time = time .monotonic ()
294305 height_start = self .fs_height + 1
@@ -302,6 +313,8 @@ def flush_fs(self, flush_data: FlushData):
302313 self .tx_counts [height_start :].tobytes ())
303314 offset = prior_tx_count * 32
304315 self .hashes_file .write (offset , hashes )
316+ offset = prior_tx_count * 32
317+ self .wtxids_file .write (offset , wtxids )
305318
306319 self .fs_height = flush_data .height
307320 self .fs_tx_count = flush_data .tx_count
@@ -370,6 +383,7 @@ def flush_backup(self, flush_data: FlushData, touched_hashxs):
370383 '''Like flush_dbs() but when backing up. All UTXOs are flushed.'''
371384 assert not flush_data .headers
372385 assert not flush_data .block_tx_hashes
386+ assert not flush_data .block_wtxids
373387 assert flush_data .height < self .db_height
374388 self .history .assert_flushed ()
375389 assert len (flush_data .undo_block_tx_hashes ) == self .db_height - flush_data .height
@@ -461,18 +475,19 @@ def read_headers():
461475
462476 return await run_in_thread (read_headers )
463477
def fs_tx_hash(self, tx_num: int, *, wtxid: bool = False) -> Tuple[Optional[bytes], int]:
    '''Return a pair (tx_hash, tx_height) for the given tx number.

    If wtxid is true the 32-byte witness tx hash (wtxid) is returned in
    place of the txid; both are stored as 32-byte hashes in chain order,
    so the same tx_num offsets into either file.

    If the tx_height is not on disk, returns (None, tx_height).
    '''
    # tx_counts is cumulative per-height, so bisect maps tx_num -> height.
    tx_height = bisect_right(self.tx_counts, tx_num)
    if tx_height > self.db_height:
        # Not yet flushed to disk; caller still learns the height.
        tx_hash = None
    else:
        # Select the backing file only when a read will actually happen.
        file = self.wtxids_file if wtxid else self.hashes_file
        tx_hash = file.read(tx_num * 32, 32)
    return tx_hash, tx_height
474489
475- def fs_tx_hashes_at_blockheight (self , block_height ) :
490+ def fs_tx_hashes_at_blockheight (self , block_height , * , wtxid : bool = False ) -> Sequence [ bytes ] :
476491 '''Return a list of tx_hashes at given block height,
477492 in the same order as in the block.
478493 '''
@@ -484,12 +499,16 @@ def fs_tx_hashes_at_blockheight(self, block_height):
484499 else :
485500 first_tx_num = 0
486501 num_txs_in_block = self .tx_counts [block_height ] - first_tx_num
487- tx_hashes = self .hashes_file .read (first_tx_num * 32 , num_txs_in_block * 32 )
502+ file = self .wtxids_file if wtxid else self .hashes_file
503+ tx_hashes = file .read (first_tx_num * 32 , num_txs_in_block * 32 )
488504 assert num_txs_in_block == len (tx_hashes ) // 32
489505 return [tx_hashes [idx * 32 : (idx + 1 ) * 32 ] for idx in range (num_txs_in_block )]
490506
async def tx_hashes_at_blockheight(
        self, block_height, *, wtxid: bool = False,
) -> Sequence[bytes]:
    '''Asynchronously read the tx hashes (or, if wtxid is true, the
    wtxids) of the block at the given height, in block order, without
    blocking the event loop.'''
    read_hashes = partial(
        self.fs_tx_hashes_at_blockheight, block_height, wtxid=wtxid)
    return await run_in_thread(read_hashes)
493512
494513 async def fs_block_hashes (self , height , count ):
495514 headers_concat , headers_count = await self .read_headers (height , count )
0 commit comments