Mirror of https://github.com/status-im/nimbus-eth2.git (synced 2025-01-09 13:56:23 +00:00)
Commit 18409a69e1
* Initial commit.
* Add hybrid syncing.
* Compilation fixes.
* Cast custom event for our purposes.
* Instantiate AsyncEventQueue properly.
* Fix mistype.
* Further research on optimistic updates.
* Fixing circular deps.
* Add backfilling.
* Add block download feature.
* Add block store.
* Update backfill information before storing block.
* Use custom block verifier for backfilling sync.
* Skip signature verification in backfilling.
* Add one more generic reload to storeBackfillBlock().
* Add block verification debugging statements.
* Add more debugging.
* Do not use database for backfilling, part 1.
* Fix for stash.
* Stash fixes part 2.
* Prepare for testing.
* Fix assertion.
* Fix post-restart syncing process.
* Update backfill loading log statement. Use proper backfill slot callback for sync manager.
* Add handling of Duplicates.
* Fix store duration and block backfilled log statements.
* Add proper syncing state log statement.
* Add snappy compression to beaconchain_file. Format syncing speed properly.
* Add blobs verification.
* Add `slot` number to file structure for easy navigation over stream of compressed objects.
* Change database filename.
* Fix structure size.
* Add more consistency properties.
* Fix checkRepair() issues.
* Preparation to state rebuild process.
* Add plain & compressed size.
* Debugging snappy encode process.
* Add one more debugging line.
* Dump blocks.
* One more filedump.
* Fix chunk corruption code.
* Fix detection issue.
* Some fixes in state rebuilding process.
* Add more clearance steps.
* Move updateHead() back to block_processor.
* Fix compilation issues.
* Make code more async friendly.
* Fix async issues. Add more information when proposer verification failed.
* Fix 8192 slots issue.
* Fix Future double completion issue.
* Pass updateFlags to some of the core procedures.
* Fix tests.
* Improve initial sync handling mechanism.
* Fix checkStateTransition() performance improvements.
* Add some performance tuning and meters.
* Light client performance tuning.
* Remove debugging statement.
* Use single file descriptor for blockchain file.
* Attempt to fix LC.
* Fix timeleft calculation when untrusted sync backfilling started right after LC block received.
* Workaround for `chronicles` + `results` `error` issue. Remove some compilation warnings. Fix `CatchableError` leaks on Windows.
* Address review comments.
* Address review comments part 2.
* Address review comments part 1.
* Rebase and fix the issues.
* Address review comments part 3.
* Add tests and fix some issues in auto-repair mechanism.
* Add tests to all_tests.
* Rename binary test file to pass restrictions.
* Add `bin` extension to excluded list. Recover binary test data.
* Rename fixture file to .bin again.
* Update AllTests.
* Address review comments part 4.
* Address review comments part 5 and fix tests.
* Address review comments part 6.
* Eliminate foldl and combine from blobs processing. Add some tests to ensure that checkResponse() also checks for correct order.
* Fix forgotten place.
* Post rebase fixes.
* Add unique slots tests.
* Optimize updateHead() code.
* Add forgotten changes.
* Address review comments on state as argument.
248 lines
7.9 KiB
Nim
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import std/sequtils, stew/io2, chronicles, chronos, metrics,
  ../spec/forks,
  ../[beacon_chain_file, beacon_clock],
  ../sszdump

from ./block_pools_types import VerifierError, BlockData
from ../spec/state_transition_block import validate_blobs
from std/os import `/`

export beacon_chain_file

const
  ChainFileName = "nbc.bfdata"

type
  ChainListRef* = ref object
    path*: string
    handle*: Opt[ChainFileHandle]
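
# The backfill data is kept in a single file (ChainFileName) inside the
# node's directory; these helpers resolve its on-disk path.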
template chainFilePath*(directory: string): string =
  directory / ChainFileName

template filePath*(clist: ChainListRef): string =
  chainFilePath(clist.path)
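
# Opens an existing backfill file in `directory` with the auto-repair flag,
# or starts with an empty handle when the file does not exist; an unreadable
# file is fatal.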
proc init*(T: type ChainListRef, directory: string): ChainListRef =
  let
    filename = directory.chainFilePath()
    handle =
      if not(isFile(filename)):
        Opt.none(ChainFileHandle)
      else:
        let
          flags = {ChainFileFlag.Repair}
          res = ChainFileHandle.init(filename, flags)
        if res.isErr():
          fatal "Unexpected failure while loading backfill data",
            filename = filename, reason = res.error
          quit 1
        Opt.some(res.get())
  ChainListRef(path: directory, handle: handle)
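
# Opens (or creates, via OpenAlways) the backfill file and positions it at
# `slot`, propagating any file or seek error to the caller.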
proc init*(T: type ChainListRef, directory: string,
           slot: Slot): Result[ChainListRef, string] =
  let
    flags = {ChainFileFlag.Repair, ChainFileFlag.OpenAlways}
    filename = directory.chainFilePath()
    handle = ? ChainFileHandle.init(filename, flags)
    offset {.used.} = ? seekForSlot(handle, slot)
  ok(ChainListRef(path: directory, handle: Opt.some(handle)))
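
# Positions an existing ChainListRef at `slot`, opening the underlying file
# on demand; the returned offset itself is not needed here.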
proc seekForSlot*(clist: ChainListRef, slot: Slot): Result[void, string] =
  if clist.handle.isNone():
    let
      flags = {ChainFileFlag.Repair, ChainFileFlag.OpenAlways}
      filename = clist.path.chainFilePath()
      handle = ? ChainFileHandle.init(filename, flags)
    clist.handle = Opt.some(handle)

  let offset {.used.} = ? seekForSlot(clist.handle.get(), slot)
  ok()
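
# `close` releases the file handle if one is open; `clear` additionally
# wipes the on-disk file and resets the handle.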
proc close*(clist: ChainListRef): Result[void, string] =
  if clist.handle.isNone():
    return ok()
  ? clist.handle.get().close()
  ok()

proc clear*(clist: ChainListRef): Result[void, string] =
  ? clist.close()
  ? clearFile(clist.path.chainFilePath())
  clist.handle = Opt.none(ChainFileHandle)
  ok()
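
# Convenience accessors over BlockData and ForkedSignedBeaconBlock, used by
# the log statements and chain checks below.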
template slot*(data: BlockData): Slot =
  data.blck.slot

template parent_root*(data: ForkedSignedBeaconBlock): Eth2Digest =
  withBlck(data): forkyBlck.message.parent_root

template parent_root*(data: BlockData): Eth2Digest =
  data.blck.parent_root()

template root*(data: BlockData): Eth2Digest =
  withBlck(data.blck): forkyBlck.root

template shortLog*(x: BlockData): string =
  let count = if x.blob.isSome(): $len(x.blob.get()) else: "0"
  $(x.slot()) & "@" & shortLog(x.parent_root()) & "#" & count

template shortLog*(x: Opt[BlockData]): string =
  if x.isNone():
    "[none]"
  else:
    shortLog(x.get())
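
# `head` is the first block written to the file and `tail` the most recently
# written one; with backward syncing this makes `head` the highest slot and
# `tail` the lowest slot stored so far.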
func tail*(clist: ChainListRef): Opt[BlockData] =
  if clist.handle.isSome():
    clist.handle.get().data.tail
  else:
    Opt.none(BlockData)

func head*(clist: ChainListRef): Opt[BlockData] =
  if clist.handle.isSome():
    clist.handle.get().data.head
  else:
    Opt.none(BlockData)
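
# Update the stored head/tail markers: the handle is copied out, mutated and
# written back so the change is retained inside the Opt wrapper.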
proc setHead*(clist: ChainListRef, bdata: BlockData) =
  doAssert(clist.handle.isSome())
  var handle = clist.handle.get()
  handle.setHead(bdata)
  clist.handle = Opt.some(handle)

proc setTail*(clist: ChainListRef, bdata: BlockData) =
  doAssert(clist.handle.isSome())
  var handle = clist.handle.get()
  handle.setTail(bdata)
  clist.handle = Opt.some(handle)
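
# Appends a block and its optional blob sidecars to the backfill file,
# opening the file first if no handle exists yet.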
proc store*(clist: ChainListRef, signedBlock: ForkedSignedBeaconBlock,
            blobs: Opt[BlobSidecars]): Result[void, string] =
  if clist.handle.isNone():
    let
      filename = clist.path.chainFilePath()
      flags = {ChainFileFlag.Repair, ChainFileFlag.OpenAlways}
      handle = ? ChainFileHandle.init(filename, flags)
    clist.handle = Opt.some(handle)
    store(handle, signedBlock, blobs)
  else:
    store(clist.handle.get(), signedBlock, blobs)
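
# For Deneb and later blocks, verifies that the supplied blob sidecars match
# the block's KZG commitments in number and that their KZG proofs validate;
# any mismatch is reported as VerifierError.Invalid.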
proc checkBlobs(signedBlock: ForkedSignedBeaconBlock,
                blobsOpt: Opt[BlobSidecars]): Result[void, VerifierError] =
  withBlck(signedBlock):
    when consensusFork >= ConsensusFork.Deneb:
      if blobsOpt.isSome():
        let blobs = blobsOpt.get()

        template blob_kzg_commitments(): untyped =
          forkyBlck.message.body.blob_kzg_commitments.asSeq

        if len(blobs) > 0:
          if len(blobs) != len(blob_kzg_commitments):
            return err(VerifierError.Invalid)
          let res =
            validate_blobs(blob_kzg_commitments,
                           blobs.mapIt(KzgBlob(bytes: it.blob)),
                           blobs.mapIt(it.kzg_proof))
          if res.isErr():
            debug "Blob validation failed",
              block_root = shortLog(forkyBlck.root),
              blobs = shortLog(blobs),
              blck = shortLog(forkyBlck.message),
              kzg_commits = mapIt(blob_kzg_commitments, shortLog(it)),
              signature = shortLog(forkyBlck.signature),
              msg = res.error()
            return err(VerifierError.Invalid)
  ok()
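
# Accepts the next block of the backward (untrusted) sync. The first block
# becomes both head and tail; afterwards each block must be the direct parent
# of the current tail: a same-slot, same-root block is a Duplicate, a
# same-slot or higher-slot mismatch is an UnviableFork, and a root that does
# not equal the tail's parent_root is a MissingParent.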
proc addBackfillBlockData*(
    clist: ChainListRef, signedBlock: ForkedSignedBeaconBlock,
    blobsOpt: Opt[BlobSidecars]): Result[void, VerifierError] =
  doAssert(not(isNil(clist)))

  logScope:
    backfill_tail = shortLog(clist.tail)
    signed_block_slot = signedBlock.slot
    signed_block_root = signedBlock.root
    signed_block_parent_root = signedBlock.parent_root

  let verifyBlockTick = Moment.now()

  if clist.tail.isNone():
    ? checkBlobs(signedBlock, blobsOpt)

    let storeBlockTick = Moment.now()

    store(clist, signedBlock, blobsOpt).isOkOr:
      fatal "Unexpected failure while trying to store data",
        filename = chainFilePath(clist.path), reason = error
      quit 1

    let bdata = BlockData(blck: signedBlock, blob: blobsOpt)
    clist.setTail(bdata)
    if clist.head.isNone():
      clist.setHead(bdata)

    debug "Initial block backfilled",
      verify_block_duration = shortLog(storeBlockTick - verifyBlockTick),
      store_block_duration = shortLog(Moment.now() - storeBlockTick)

    return ok()

  let tail = clist.tail.get()

  if signedBlock.slot == tail.slot:
    if signedBlock.root == tail.root:
      debug "Duplicate block"
      return err(VerifierError.Duplicate)
    else:
      debug "Block from unviable fork"
      return err(VerifierError.UnviableFork)
  elif signedBlock.slot > tail.slot:
    debug "Block from unviable fork"
    return err(VerifierError.UnviableFork)

  if tail.parent_root != signedBlock.root:
    debug "Block does not match expected backfill root"
    return err(VerifierError.MissingParent)

  ? checkBlobs(signedBlock, blobsOpt)

  let storeBlockTick = Moment.now()

  store(clist, signedBlock, blobsOpt).isOkOr:
    fatal "Unexpected failure while trying to store data",
      filename = chainFilePath(clist.path), reason = error
    quit 1

  debug "Block backfilled",
    verify_block_duration = shortLog(storeBlockTick - verifyBlockTick),
    store_block_duration = shortLog(Moment.now() - storeBlockTick)

  clist.setTail(BlockData(blck: signedBlock, blob: blobsOpt))

  ok()
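
# Adapter matching the asynchronous block-verifier callback shape used by the
# sync machinery: it wraps the synchronous addBackfillBlockData() result in an
# already-completed Future. `maybeFinalized` is accepted only for signature
# compatibility and is not used here.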
proc untrustedBackfillVerifier*(
    clist: ChainListRef,
    signedBlock: ForkedSignedBeaconBlock,
    blobs: Opt[BlobSidecars],
    maybeFinalized: bool
): Future[Result[void, VerifierError]] {.
    async: (raises: [CancelledError], raw: true).} =
  let retFuture = newFuture[Result[void, VerifierError]]()
  retFuture.complete(clist.addBackfillBlockData(signedBlock, blobs))
  retFuture
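
# A minimal usage sketch (hypothetical wiring; `datadir`, `signedBlock` and
# the surrounding async context are assumptions, and the real setup lives in
# the sync code elsewhere in this repository):
#
#   let clist = ChainListRef.init(datadir)
#   let res = await clist.untrustedBackfillVerifier(
#     signedBlock, Opt.none(BlobSidecars), maybeFinalized = false)
#   if res.isErr():
#     debug "Backfill block rejected", reason = res.error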