# Nimbus
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
{.push raises: [].}
|
|
|
|
|
|
|
|
import
|
2024-10-27 22:20:04 +00:00
|
|
|
chronicles,
|
2024-06-26 00:27:48 +00:00
|
|
|
std/tables,
|
|
|
|
../../common,
|
|
|
|
../../db/core_db,
|
|
|
|
../../evm/types,
|
|
|
|
../../evm/state,
|
|
|
|
../validate,
|
2024-12-02 17:49:53 +00:00
|
|
|
../executor/process_block,
|
2024-12-19 12:02:36 +00:00
|
|
|
./forked_chain/[chain_desc, chain_kvt]
|
2024-06-26 00:27:48 +00:00
|
|
|
|
2024-06-30 07:40:14 +00:00
|
|
|
export
|
2024-12-02 17:49:53 +00:00
|
|
|
BlockDesc,
|
|
|
|
ForkedChainRef,
|
2024-12-19 12:02:36 +00:00
|
|
|
chain_kvt,
|
2024-06-30 07:40:14 +00:00
|
|
|
common,
|
|
|
|
core_db
|
|
|
|
|
2024-06-26 00:27:48 +00:00
|
|
|
const
  BaseDistance = 128
    ## Default number of blocks kept below the latest head so that state
    ## queries about recent history can still be answered from memory.

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
template shouldNotKeyError(info: string, body: untyped) =
  ## Run `body`, converting an unexpected `KeyError` into an assertion
  ## failure tagged with `info`. Used where a missing table key would be
  ## a programming error rather than a recoverable condition.
  try:
    body
  except KeyError as exc:
    raiseAssert info & ": name=" & $exc.name & " msg=" & exc.msg
proc deleteLineage(c: ForkedChainRef; top: Hash32) =
  ## Starting at argument `top`, delete all entries from `c.blocks[]` along
  ## the ancestor chain, stopping at the first hash with no table entry.
  var probe = top
  while true:
    c.blocks.withValue(probe, val):
      let doomed = probe
      probe = val.blk.header.parentHash
      c.blocks.del(doomed)
      continue
    break
|
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Private functions
|
|
|
|
# ------------------------------------------------------------------------------
|
2024-06-26 00:27:48 +00:00
|
|
|
|
2024-06-27 05:54:52 +00:00
|
|
|
proc processBlock(c: ForkedChainRef,
                  parent: Header,
                  blk: Block): Result[seq[Receipt], string] =
  ## Execute `blk` on top of the state implied by `parent`, persist its
  ## header and return the receipts produced by execution.
  template header(): Header =
    blk.header

  let vmState = BaseVMState()
  vmState.init(parent, header, c.com)

  if c.extraValidation:
    ?c.com.validateHeaderAndKinship(blk, vmState.parent)

  ?vmState.processBlock(
    blk,
    skipValidation = false,
    skipReceipts = false,
    skipUncles = true,
  )

  # We still need to write header to database
  # because validateUncles still need it
  let blockHash = header.blockHash()
  ?c.db.persistHeader(
    blockHash,
    header,
    c.com.startOfHistory)

  # update currentBlock *after* we persist it
  # so the rpc return consistent result
  # between eth_blockNumber and eth_syncing
  c.com.syncCurrent = header.number

  ok(move(vmState.receipts))
|
|
|
|
|
2024-06-27 05:54:52 +00:00
|
|
|
func updateCursorHeads(c: ForkedChainRef,
                       cursorHash: Hash32,
                       header: Header) =
  ## Advance the cursor head that `header` extends to `cursorHash`, or
  ## register a brand-new fork head.
  #
  # Example of cursorHeads and cursor
  #
  #     -- A1 - A2 - A3      -- D5 - D6
  #    /                    /
  # base - B1 - B2 - B3 - B4
  #             \
  #              --- C3 - C4
  #
  # A3, B4, C4, and D6, are in cursorHeads.
  # Any one of them with blockHash == cursorHash is the active chain with
  # cursor pointing to the latest block of that chain.
  for idx in 0..<c.cursorHeads.len:
    if c.cursorHeads[idx].hash == header.parentHash:
      # `header` extends an existing head: slide that head forward.
      c.cursorHeads[idx].hash = cursorHash
      return

  # No head matched: this block starts a new fork.
  c.cursorHeads.add CursorDesc(
    hash: cursorHash,
    forkJunction: header.number,
  )
|
|
|
|
|
2024-06-27 05:54:52 +00:00
|
|
|
func updateCursor(c: ForkedChainRef,
                  blk: Block,
                  receipts: sink seq[Receipt]) =
  ## Make `blk` the current cursor block, storing it (with its receipts)
  ## in the in-memory block table.
  template header(): Header =
    blk.header

  c.cursorHeader = header
  c.cursorHash = header.blockHash

  c.blocks.withValue(c.cursorHash, val):
    # Block already known, only refresh the receipts.
    val.receipts = receipts
  do:
    # New block => record it and update the fork heads.
    c.blocks[c.cursorHash] = BlockDesc(
      blk: blk,
      receipts: move(receipts))
    c.updateCursorHeads(c.cursorHash, header)
|
2024-06-26 00:27:48 +00:00
|
|
|
|
2024-06-27 05:54:52 +00:00
|
|
|
proc validateBlock(c: ForkedChainRef,
                   parent: Header,
                   blk: Block,
                   updateCursor: bool = true): Result[void, string] =
  ## Execute and validate `blk` inside its own db transaction; commit on
  ## success, roll back on failure. Also indexes the block's transactions
  ## in `c.txRecords`.
  let dbTx = c.db.ctx.txFrameBegin()
  defer:
    dbTx.dispose()

  var res = c.processBlock(parent, blk)
  if res.isErr:
    dbTx.rollback()
    return err(res.error)

  dbTx.commit()
  if updateCursor:
    c.updateCursor(blk, move(res.value))

  let blkHash = blk.header.blockHash
  for i, tx in blk.transactions:
    c.txRecords[rlpHash(tx)] = (blkHash, uint64(i))

  ok()
|
|
|
|
|
2024-11-02 09:30:45 +00:00
|
|
|
proc replaySegment*(c: ForkedChainRef, target: Hash32) =
  ## Replay all blocks from `base`+1 up to and including `target`,
  ## rebuilding the staging transaction state from scratch.
  var
    prevHash = target
    segment = newSeq[Block]()

  # Collect the chain from `target` back to (but excluding) `base`.
  shouldNotKeyError "replaySegment(target)":
    while prevHash != c.baseHash:
      segment.add c.blocks[prevHash].blk
      prevHash = segment[^1].header.parentHash

  c.stagingTx.rollback()
  c.stagingTx = c.db.ctx.txFrameBegin()
  c.cursorHeader = c.baseHeader
  # Re-execute oldest-first; these blocks were validated before.
  for i in countdown(segment.high, segment.low):
    c.validateBlock(c.cursorHeader, segment[i],
      updateCursor = false).expect("have been validated before")
    c.cursorHeader = segment[i].header
  c.cursorHash = target
|
|
|
|
|
|
|
|
proc replaySegment(c: ForkedChainRef,
                   target: Hash32,
                   parent: Header,
                   parentHash: Hash32) =
  ## Replay from `parent`+1 up to and including `target`, assuming the
  ## last committed state is at `parent`.
  var
    prevHash = target
    segment = newSeq[Block]()

  shouldNotKeyError "replaySegment(target,parent)":
    while prevHash != parentHash:
      segment.add c.blocks[prevHash].blk
      prevHash = segment[^1].header.parentHash

  c.cursorHeader = parent
  # Re-execute oldest-first; these blocks were validated before.
  for i in countdown(segment.high, segment.low):
    c.validateBlock(c.cursorHeader, segment[i],
      updateCursor = false).expect("have been validated before")
    c.cursorHeader = segment[i].header
  c.cursorHash = target
|
2024-06-26 00:27:48 +00:00
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
proc writeBaggage(c: ForkedChainRef, target: Hash32) =
  ## Persist transactions, receipts, uncles and withdrawals ("baggage")
  ## for every block from `base`+1 up to `target`, dropping the tx index
  ## entries for persisted transactions.
  template header(): Header =
    blk.blk.header

  shouldNotKeyError "writeBaggage":
    var prevHash = target
    var count = 0'u64
    while prevHash != c.baseHash:
      let blk = c.blocks[prevHash]
      c.db.persistTransactions(header.number, header.txRoot, blk.blk.transactions)
      c.db.persistReceipts(header.receiptsRoot, blk.receipts)
      discard c.db.persistUncles(blk.blk.uncles)
      if blk.blk.withdrawals.isSome:
        c.db.persistWithdrawals(
          header.withdrawalsRoot.expect("WithdrawalsRoot should be verified before"),
          blk.blk.withdrawals.get)
      # Persisted transactions no longer need in-memory lookup records.
      for tx in blk.blk.transactions:
        c.txRecords.del(rlpHash(tx))
      prevHash = header.parentHash
      count.inc

    notice "Finalized blocks persisted",
      numberOfBlocks = count,
      last = target.short,
      baseNumber = c.baseHeader.number,
      baseHash = c.baseHash.short
|
2024-06-26 00:27:48 +00:00
|
|
|
|
2024-12-05 06:01:57 +00:00
|
|
|
func updateBase(c: ForkedChainRef, pvarc: PivotArc) =
  ## Remove obsolete chains, example:
  ##
  ##     A1 - A2 - A3          D5 - D6
  ##    /                     /
  ##  base - B1 - B2 - [B3] - B4 - B5
  ##         \           \
  ##          C2 - C3     E4 - E5
  ##
  ## where `B1..B5` is the `pvarc.cursor` arc and `[B3]` is the `pvarc.pv`.
  ##
  ## The `base` will be moved to position `[B3]`. Both chains `A` and `C`
  ## will be removed but not so for `D` and `E`, and `pivot` arc `B` will
  ## be curtailed below `B4`.
  ##
  var survivors: seq[CursorDesc]   # Will become the new `c.cursorHeads`
  for ch in c.cursorHeads:
    if pvarc.pvNumber < ch.forkJunction:
      # On the example, this would be any of chain `D` or `E`.
      survivors.add ch
    elif ch.hash == pvarc.cursor.hash:
      # On the example, this would be chain `B`.
      survivors.add CursorDesc(
        hash: ch.hash,
        forkJunction: pvarc.pvNumber + 1)
    else:
      # On the example, this would be either chain `A` or `C`.
      c.deleteLineage ch.hash

  # Cleanup in-memory blocks starting from newBase backward
  # while blocks from newBase+1 to canonicalCursor not deleted
  # e.g. B4 onward
  c.deleteLineage pvarc.pvHash

  # Implied deletion of chain heads (if any)
  c.cursorHeads.swap survivors

  c.baseHeader = pvarc.pvHeader
  c.baseHash = pvarc.pvHash
|
2024-06-26 00:27:48 +00:00
|
|
|
|
2024-12-05 06:01:57 +00:00
|
|
|
func findCursorArc(c: ForkedChainRef, hash: Hash32): Result[PivotArc, string] =
  ## Find the `cursor` arc that contains the block relative to the
  ## argument `hash`.
  ##
  if hash == c.baseHash:
    # The cursor hash here should not be used for the next step
    # because it does not point to any active chain.
    return ok PivotArc(
      pvHash: c.baseHash,
      pvHeader: c.baseHeader,
      cursor: CursorDesc(
        forkJunction: c.baseHeader.number,
        hash: c.baseHash))

  # Walk each fork head back towards its junction, looking for `hash`.
  for ch in c.cursorHeads:
    var top = ch.hash
    while true:
      c.blocks.withValue(top, val):
        if ch.forkJunction <= val.blk.header.number:
          if top == hash:
            return ok PivotArc(
              pvHash: hash,
              pvHeader: val.blk.header,
              cursor: ch)
        if ch.forkJunction < val.blk.header.number:
          top = val.blk.header.parentHash
          continue
      break

  err("Block hash is not part of any active chain")
|
|
|
|
|
2024-12-05 06:01:57 +00:00
|
|
|
func findHeader(
    c: ForkedChainRef;
    itHash: Hash32;
    headHash: Hash32;
      ): Result[Header, string] =
  ## Find header for argument `itHash` on argument `headHash` ancestor chain.
  ##
  if itHash == c.baseHash:
    return ok(c.baseHeader)

  # Walk the ancestor lineage of `headHash` looking for `itHash`.
  var probe = headHash
  while true:
    c.blocks.withValue(probe, val):
      if probe == itHash:
        return ok(val.blk.header)
      probe = val.blk.header.parentHash
      continue
    break

  err("Block not in argument head ancestor lineage")
|
2024-06-26 00:27:48 +00:00
|
|
|
|
2024-12-05 06:01:57 +00:00
|
|
|
func calculateNewBase(
    c: ForkedChainRef;
    finalized: BlockNumber;
    pvarc: PivotArc;
      ): PivotArc =
  ## It is required that the `finalized` argument is on the `pvarc` arc, i.e.
  ## it ranges between `pvarc.cursor.forkJunction` and
  ## `c.blocks[pvarc.cursor.head].number`.
  ##
  ## The function returns a cursor arc containing a new base position. It is
  ## calculated as follows.
  ##
  ## Starting at the argument `pvarc.pvHead` searching backwards, the new base
  ## is the position of the block with number `finalized`.
  ##
  ## Before searching backwards, the `finalized` argument might be adjusted
  ## and made smaller so that a minimum distance to the head on the cursor arc
  ## applies.
  ##
  # It's important to have base at least `baseDistance` behind head
  # so we can answer state queries about history that deep.
  let target = min(finalized,
    max(pvarc.pvNumber, c.baseDistance) - c.baseDistance)

  # Can only increase base block number.
  if target <= c.baseHeader.number:
    return PivotArc(
      pvHash: c.baseHash,
      pvHeader: c.baseHeader,
      cursor: CursorDesc(
        forkJunction: c.baseHeader.number,
        hash: c.baseHash))

  var probe = pvarc.pvHash
  while true:
    c.blocks.withValue(probe, val):
      if target == val.blk.header.number:
        if pvarc.cursor.forkJunction <= target:
          # OK, new base stays on the argument pivot arc.
          # ::
          #                  B1 - B2 - B3 - B4
          #                 /     ^    ^    ^
          #   base - A1 - A2      |    |    |
          #                       |    pv   CCH
          #                       |
          #                       target
          #
          return PivotArc(
            pvHash: probe,
            pvHeader: val.blk.header,
            cursor: pvarc.cursor)
        else:
          # The new base (aka target) falls out of the argument pivot branch,
          # ending up somewhere on a parent branch.
          # ::
          #                  B1 - B2 - B3 - B4
          #                 /          ^    ^
          #   base - A1 - A2           |    |
          #           ^                pv   CCH
          #           |
          #           target
          #
          return c.findCursorArc(probe).expect "valid cursor arc"
      probe = val.blk.header.parentHash
      continue
    break

  doAssert(false, "Unreachable code, finalized block outside cursor arc")
|
2024-06-26 00:27:48 +00:00
|
|
|
|
2024-12-05 06:01:57 +00:00
|
|
|
func trimCursorArc(c: ForkedChainRef, pvarc: PivotArc) =
  ## Curb argument `pvarc.cursor` head so that it ends up at `pvarc.pv`.
  ##
  # Maybe the current active chain is longer than canonical chain
  shouldNotKeyError "trimCanonicalChain":
    var probe = pvarc.cursor.hash
    while probe != c.baseHash:
      let header = c.blocks[probe].blk.header
      if header.number > pvarc.pvNumber:
        c.blocks.del(probe)
      else:
        break
      probe = header.parentHash

  if c.cursorHeads.len == 0:
    return

  # Update cursorHeads if indeed we trim
  for idx in 0..<c.cursorHeads.len:
    if c.cursorHeads[idx].hash == pvarc.cursor.hash:
      c.cursorHeads[idx].hash = pvarc.pvHash
      return

  doAssert(false, "Unreachable code")
|
|
|
|
|
2024-12-05 06:01:57 +00:00
|
|
|
proc setHead(c: ForkedChainRef, pvarc: PivotArc) =
  ## Mark `pvarc.pv` as canonical head in the database and update the
  ## global sync-highest marker.
  # TODO: db.setHead should not read from db anymore;
  # all canonical chain marking should be done from here.
  discard c.db.setHead(pvarc.pvHash)

  # update global syncHighest
  c.com.syncHighest = pvarc.pvNumber
|
2024-06-30 07:40:14 +00:00
|
|
|
|
2024-12-05 06:01:57 +00:00
|
|
|
proc updateHeadIfNecessary(c: ForkedChainRef, pvarc: PivotArc) =
  ## Update head if the new head differs from the current head or lives
  ## on a different chain.
  if c.cursorHash != pvarc.cursor.hash:
    # Switching arcs: rebuild staging state up to the new pivot.
    if not c.stagingTx.isNil:
      c.stagingTx.rollback()
    c.stagingTx = c.db.ctx.txFrameBegin()
    c.replaySegment(pvarc.pvHash)

  c.trimCursorArc(pvarc)
  if c.cursorHash != pvarc.pvHash:
    c.cursorHeader = pvarc.pvHeader
    c.cursorHash = pvarc.pvHash

  if c.stagingTx.isNil:
    # setHead below must not go straight to db
    c.stagingTx = c.db.ctx.txFrameBegin()

  c.setHead(pvarc)
|
2024-09-27 00:53:27 +00:00
|
|
|
|
2024-12-02 17:49:53 +00:00
|
|
|
proc autoUpdateBase(c: ForkedChainRef): Result[void, string] =
  ## To be called after `importBlock()` for implied `base` update so that
  ## there is no need to know about a finalised block. Here the `base` is
  ## kept at a certain distance from the current `latest` cursor head.
  ##
  # This function code is a tweaked version of `importBlockBlindly()`
  # from draft PR #2845.
  #
  let
    distanceFromBase = c.cursorHeader.number - c.baseHeader.number
    # Finalizer threshold is baseDistance + 25% of baseDistance, capped at 32.
    hysteresis = max(1'u64, min(c.baseDistance div 4'u64, 32'u64))
  if distanceFromBase < c.baseDistance + hysteresis:
    return ok()

  # Move the base forward and stay away `baseDistance` blocks from
  # the top block.
  let
    target = c.cursorHeader.number - c.baseDistance
    pvarc = ?c.findCursorArc(c.cursorHash)
    newBase = c.calculateNewBase(target, pvarc)

  doAssert newBase.pvHash != c.baseHash

  # Write segment from base+1 to newBase into database
  c.stagingTx.rollback()
  c.stagingTx = c.db.ctx.txFrameBegin()
  c.replaySegment(newBase.pvHash)
  c.writeBaggage(newBase.pvHash)
  c.stagingTx.commit()
  c.stagingTx = nil

  # Update base forward to newBase
  c.updateBase(newBase)
  c.db.persistent(newBase.pvNumber).isOkOr:
    return err("Failed to save state: " & $$error)

  # Move chain state forward to current head
  c.stagingTx = c.db.ctx.txFrameBegin()
  c.replaySegment(pvarc.pvHash)
  c.setHead(pvarc)

  ok()
|
|
|
|
|
2024-06-26 00:27:48 +00:00
|
|
|
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
|
|
|
|
|
2024-10-17 12:14:09 +00:00
|
|
|
proc init*(
    T: type ForkedChainRef;
    com: CommonRef;
    baseDistance = BaseDistance.uint64;
    extraValidation = true;
      ): T =
  ## Constructor that uses the current database ledger state for initialising.
  ## This state coincides with the canonical head that would be used for
  ## setting up the descriptor.
  ##
  ## With `ForkedChainRef` based import, the canonical state lives only inside
  ## a level one database transaction. Thus it will readily be available on the
  ## running system with tools such as `getCanonicalHead()`. But it will never
  ## be saved on the database.
  ##
  ## This constructor also works well when resuming import after running
  ## `persistentBlocks()` used for `Era1` or `Era` import.
  ##
  let
    base = com.db.getSavedStateBlockNumber
    baseHash = com.db.getBlockHash(base).expect("baseHash exists")
    baseHeader = com.db.getBlockHeader(baseHash).expect("base header exists")

  # update global syncStart
  com.syncStart = baseHeader.number

  T(com: com,
    db: com.db,
    baseHeader: baseHeader,
    cursorHash: baseHash,
    baseHash: baseHash,
    cursorHeader: baseHeader,
    extraValidation: extraValidation,
    baseDistance: baseDistance)
|
2024-10-17 12:14:09 +00:00
|
|
|
|
2024-06-30 07:40:14 +00:00
|
|
|
proc newForkedChain*(com: CommonRef,
                     baseHeader: Header,
                     baseDistance: uint64 = BaseDistance,
                     extraValidation: bool = true): ForkedChainRef =
  ## This constructor allows to set up the base state which might be needed
  ## for some particular test or other applications. Otherwise consider
  ## `init()`.
  let baseHash = baseHeader.blockHash
  let chain = ForkedChainRef(
    com: com,
    db : com.db,
    baseHeader : baseHeader,
    cursorHash : baseHash,
    baseHash : baseHash,
    cursorHeader: baseHeader,
    extraValidation: extraValidation,
    baseDistance: baseDistance)

  # update global syncStart
  com.syncStart = baseHeader.number
  chain
|
2024-06-26 00:27:48 +00:00
|
|
|
|
2024-12-02 17:49:53 +00:00
|
|
|
proc importBlock*(
    c: ForkedChainRef;
    blk: Block;
    autoRebase = false;
      ): Result[void, string] =
  ## Try to import block to canonical or side chain.
  ## Returns an error if the block is invalid.
  if c.stagingTx.isNil:
    c.stagingTx = c.db.ctx.txFrameBegin()

  template header(): Header =
    blk.header

  if header.parentHash == c.cursorHash:
    # Fast path: block extends the current cursor head.
    ?c.validateBlock(c.cursorHeader, blk)
    if autoRebase:
      return c.autoUpdateBase()
    return ok()

  if header.parentHash == c.baseHash:
    # Block forks directly off the base: restart staging from scratch.
    c.stagingTx.rollback()
    c.stagingTx = c.db.ctx.txFrameBegin()
    return c.validateBlock(c.baseHeader, blk)

  if header.parentHash notin c.blocks:
    # If its parent is an invalid block
    # there is no hope the descendant is valid.
    debug "Parent block not found",
      blockHash = header.blockHash.short,
      parentHash = header.parentHash.short
    return err("Block is not part of valid chain")

  # TODO: If engine API keep importing blocks
  # but not finalized it, e.g. current chain length > StagedBlocksThreshold
  # We need to persist some of the in-memory stuff
  # to a "staging area" or disk-backed memory but it must not afect `base`.
  # `base` is the point of no return, we only update it on finality.
  c.replaySegment(header.parentHash)
  ?c.validateBlock(c.cursorHeader, blk)
  if autoRebase:
    return c.autoUpdateBase()

  ok()
|
|
|
|
|
2024-06-26 00:27:48 +00:00
|
|
|
|
2024-06-27 05:54:52 +00:00
|
|
|
proc forkChoice*(c: ForkedChainRef,
                 headHash: Hash32,
                 finalizedHash: Hash32): Result[void, string] =
  ## Apply an engine-API style fork choice: make `headHash` the canonical
  ## head and, when `finalizedHash` is non-zero, advance the base towards
  ## the finalized block.
  if headHash == c.cursorHash and finalizedHash == static(default(Hash32)):
    # Do nothing if the new head is already our current head
    # and there is no request for new finality.
    return ok()

  # Find the unique cursor arc where `headHash` is a member of.
  let pvarc = ?c.findCursorArc(headHash)

  if finalizedHash == static(default(Hash32)):
    # Skip newBase calculation and skip chain finalization
    # if finalizedHash is zero.
    c.updateHeadIfNecessary(pvarc)
    return ok()

  # Finalized block must be parent or on the new canonical chain which is
  # represented by `pvarc`.
  let finalizedHeader = ?c.findHeader(finalizedHash, pvarc.pvHash)

  let newBase = c.calculateNewBase(finalizedHeader.number, pvarc)

  if newBase.pvHash == c.baseHash:
    # The base is not updated but the cursor may need an update.
    c.updateHeadIfNecessary(pvarc)
    return ok()

  # At this point cursorHeader.number > baseHeader.number
  if newBase.pvHash == c.cursorHash:
    # Paranoid check, guaranteed by `newBase.hash == c.cursorHash`
    doAssert(not c.stagingTx.isNil)

    # CL decide to move backward and then forward?
    if c.cursorHeader.number < pvarc.pvNumber:
      c.replaySegment(pvarc.pvHash, c.cursorHeader, c.cursorHash)

    # Current segment is canonical chain
    c.writeBaggage(newBase.pvHash)
    c.setHead(pvarc)

    c.stagingTx.commit()
    c.stagingTx = nil

    # Move base to newBase
    c.updateBase(newBase)

    # Save and record the block number before the last saved block state.
    c.db.persistent(newBase.pvNumber).isOkOr:
      return err("Failed to save state: " & $$error)

    return ok()

  # At this point finalizedHeader.number is <= headHeader.number
  # and possibly switched to other chain beside the one with cursor
  doAssert(finalizedHeader.number <= pvarc.pvNumber)
  doAssert(newBase.pvNumber <= finalizedHeader.number)

  # Write segment from base+1 to newBase into database
  c.stagingTx.rollback()
  c.stagingTx = c.db.ctx.txFrameBegin()

  if newBase.pvNumber > c.baseHeader.number:
    c.replaySegment(newBase.pvHash)
    c.writeBaggage(newBase.pvHash)

    c.stagingTx.commit()
    c.stagingTx = nil

    # Update base forward to newBase
    c.updateBase(newBase)
    c.db.persistent(newBase.pvNumber).isOkOr:
      return err("Failed to save state: " & $$error)

  if c.stagingTx.isNil:
    # replaySegment or setHead below don't go straight to db
    c.stagingTx = c.db.ctx.txFrameBegin()

  # Move chain state forward to current head
  if newBase.pvNumber < pvarc.pvNumber:
    c.replaySegment(pvarc.pvHash)

  c.setHead(pvarc)

  # Move cursor to current head
  c.trimCursorArc(pvarc)
  if c.cursorHash != pvarc.pvHash:
    c.cursorHeader = pvarc.pvHeader
    c.cursorHash = pvarc.pvHash

  ok()
|
2024-06-30 07:40:14 +00:00
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
func haveBlockAndState*(c: ForkedChainRef, blockHash: Hash32): bool =
  ## True when `blockHash` is held in memory (side-chain table or base).
  c.blocks.hasKey(blockHash) or c.baseHash == blockHash
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
proc haveBlockLocally*(c: ForkedChainRef, blockHash: Hash32): bool =
  ## True when `blockHash` is known in memory or on the local database.
  if c.blocks.hasKey(blockHash) or c.baseHash == blockHash:
    return true
  c.db.headerExists(blockHash)
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
func stateReady*(c: ForkedChainRef, header: Header): bool =
  ## True when the in-memory state currently sits exactly at `header`.
  header.blockHash == c.cursorHash
|
|
|
|
|
|
|
|
func com*(c: ForkedChainRef): CommonRef =
  ## Getter for the shared `CommonRef` descriptor.
  c.com
|
|
|
|
|
|
|
|
func db*(c: ForkedChainRef): CoreDbRef =
  ## Getter for the backing database handle.
  c.db
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
func latestHeader*(c: ForkedChainRef): Header =
  ## Header of the current cursor (latest) block.
  c.cursorHeader
|
|
|
|
|
2024-09-04 09:54:54 +00:00
|
|
|
func latestNumber*(c: ForkedChainRef): BlockNumber =
  ## Block number of the current cursor (latest) block.
  c.cursorHeader.number
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
func latestHash*(c: ForkedChainRef): Hash32 =
  ## Hash of the current cursor (latest) block.
  c.cursorHash
|
|
|
|
|
2024-09-04 09:54:54 +00:00
|
|
|
func baseNumber*(c: ForkedChainRef): BlockNumber =
  ## Block number of the persisted base block.
  c.baseHeader.number
|
|
|
|
|
2024-10-27 22:20:04 +00:00
|
|
|
func baseHash*(c: ForkedChainRef): Hash32 =
  ## Hash of the persisted base block.
  c.baseHash
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
func txRecords*(c: ForkedChainRef, txHash: Hash32): (Hash32, uint64) =
  ## Look up the `(blockHash, txIndex)` record for a transaction hash;
  ## returns zeroed values when the hash is unknown.
  c.txRecords.getOrDefault(txHash, (default(Hash32), 0'u64))
|
2024-10-04 07:59:38 +00:00
|
|
|
|
2024-11-02 09:30:45 +00:00
|
|
|
func isInMemory*(c: ForkedChainRef, blockHash: Hash32): bool =
  ## True when the block is held in the in-memory block table.
  blockHash in c.blocks
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
func memoryBlock*(c: ForkedChainRef, blockHash: Hash32): BlockDesc =
  ## Fetch the cached block descriptor; a default-initialized
  ## `BlockDesc` is returned when the hash is not cached.
  c.blocks.getOrDefault(blockHash)
|
|
|
|
|
2024-11-02 09:30:45 +00:00
|
|
|
func memoryTransaction*(c: ForkedChainRef, txHash: Hash32): Opt[Transaction] =
  ## Resolve a transaction by hash via the tx-records index, provided
  ## its enclosing block is still held in memory.
  let (blockHash, txIndex) = c.txRecords.getOrDefault(txHash, (default(Hash32), 0'u64))
  c.blocks.withValue(blockHash, item) do:
    return Opt.some(item.blk.transactions[txIndex])
  Opt.none(Transaction)
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
proc latestBlock*(c: ForkedChainRef): Block =
  ## Full block at the cursor head; falls back to the database when the
  ## cursor block is not cached in memory.
  c.blocks.withValue(c.cursorHash, item) do:
    return item.blk
  c.db.getEthBlock(c.cursorHash).expect("cursorBlock exists")
|
2024-09-04 09:54:54 +00:00
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
proc headerByNumber*(c: ForkedChainRef, number: BlockNumber): Result[Header, string] =
  ## Resolve a header by block number: cursor head, base, database
  ## (below base), or the in-memory segment between base and cursor.
  if number > c.cursorHeader.number:
    return err("Requested block number not exists: " & $number)

  if number == c.cursorHeader.number:
    return ok(c.cursorHeader)

  if number == c.baseHeader.number:
    return ok(c.baseHeader)

  if number < c.baseHeader.number:
    # Older than the base: served from the database.
    return c.db.getBlockHeader(number)

  # Strictly between base and cursor: walk the cached ancestor chain.
  shouldNotKeyError "headerByNumber":
    var walkHash = c.cursorHeader.parentHash
    while walkHash != c.baseHash:
      let hdr = c.blocks[walkHash].blk.header
      if hdr.number == number:
        return ok(hdr)
      walkHash = hdr.parentHash

  doAssert(false, "headerByNumber: Unreachable code")
|
2024-09-04 09:54:54 +00:00
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
proc headerByHash*(c: ForkedChainRef, blockHash: Hash32): Result[Header, string] =
  ## Resolve a header by hash from the in-memory table, the base, or
  ## finally the database.
  c.blocks.withValue(blockHash, item) do:
    return ok(item.blk.header)
  do:
    if blockHash == c.baseHash:
      return ok(c.baseHeader)
    return c.db.getBlockHeader(blockHash)
|
2024-09-04 09:54:54 +00:00
|
|
|
|
2024-11-07 01:24:21 +00:00
|
|
|
proc blockByHash*(c: ForkedChainRef, blockHash: Hash32): Result[Block, string] =
  ## Resolve a full block by hash, in-memory first, then the database.
  # used by getPayloadBodiesByHash
  # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/shanghai.md#specification-3
  # 4. Client software MAY NOT respond to requests for finalized blocks by hash.
  c.blocks.withValue(blockHash, item) do:
    return ok(item.blk)
  do:
    return c.db.getEthBlock(blockHash)
|
2024-09-04 09:54:54 +00:00
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
proc blockByNumber*(c: ForkedChainRef, number: BlockNumber): Result[Block, string] =
  ## Resolve a full block by number: from the database for `number`
  ## at or below the base, otherwise from the in-memory segment
  ## between base and cursor.
  if number > c.cursorHeader.number:
    return err("Requested block number not exists: " & $number)

  if number <= c.baseHeader.number:
    # Fix: previously only `number < base` was served from the database.
    # The in-memory walk below stops *before* reaching the base block,
    # so `number == base` fell through and always produced an error,
    # inconsistent with `headerByNumber` which serves the base header.
    # The base (and everything below it) is persisted, so read it from
    # the database.
    return c.db.getEthBlock(number)

  shouldNotKeyError "blockByNumber":
    var walkHash = c.cursorHash
    while walkHash != c.baseHash:
      c.blocks.withValue(walkHash, item):
        if item.blk.header.number == number:
          return ok(item.blk)
        walkHash = item.blk.header.parentHash

  return err("Block not found, number = " & $number)
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
func blockFromBaseTo*(c: ForkedChainRef, number: BlockNumber): seq[Block] =
  ## Collect the in-memory blocks from the cursor down towards (but
  ## excluding) the base, keeping only those numbered <= `number`.
  ## The result is in reverse (descending) order.
  shouldNotKeyError "blockFromBaseTo":
    var walkHash = c.cursorHash
    while walkHash != c.baseHash:
      c.blocks.withValue(walkHash, item):
        if item.blk.header.number <= number:
          result.add item.blk
        walkHash = item.blk.header.parentHash
|
|
|
|
|
2024-10-16 01:34:12 +00:00
|
|
|
func isCanonical*(c: ForkedChainRef, blockHash: Hash32): bool =
  ## A hash is canonical when it is the base block or lies on the
  ## cursor-to-base ancestor line held in memory.
  if blockHash == c.baseHash:
    return true

  shouldNotKeyError "isCanonical":
    var walkHash = c.cursorHash
    while walkHash != c.baseHash:
      c.blocks.withValue(walkHash, item):
        if walkHash == blockHash:
          return true
        walkHash = item.blk.header.parentHash
|
2024-09-24 10:53:18 +00:00
|
|
|
|
|
|
|
proc isCanonicalAncestor*(c: ForkedChainRef,
                          blockNumber: BlockNumber,
                          blockHash: Hash32): bool =
  ## True when `(blockNumber, blockHash)` designates a strict ancestor
  ## of the current cursor head on the canonical line.
  if blockNumber >= c.cursorHeader.number:
    return false

  if blockHash == c.cursorHash:
    return false

  if c.baseHeader.number < c.cursorHeader.number:
    # The in-memory canonical segment is headed by cursorHeader:
    # walk its ancestors first.
    shouldNotKeyError "isCanonicalAncestor":
      var walkHash = c.cursorHeader.parentHash
      while walkHash != c.baseHash:
        let hdr = c.blocks[walkHash].blk.header
        if walkHash == blockHash and blockNumber == hdr.number:
          return true
        walkHash = hdr.parentHash

  # Below the base: the canonical chain in the database keeps a
  # number-to-hash marker.
  let canonHash = c.db.getBlockHash(blockNumber).valueOr:
    return false
  canonHash == blockHash
|