nimbus-eth2/tests/test_beacon_chain_file.nim

# beacon_chain
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}
{.used.}

import
  results, unittest2, stew/io2, nimcrypto/hash,
  ../beacon_chain/spec/forks,
  ../beacon_chain/beacon_chain_file

# Size of a single chunk as stored on disk: payload length plus the
# per-chunk header and footer.
template onDiskChunkSize(data: int): int =
  sizeof(ChainFileFooter) + sizeof(ChainFileHeader) + data
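
# Expected block roots, blob counts and chunk payload sizes for the three
# consecutive blocks stored in the `bfdata-test.bin` fixture. The *FullSize
# constants add the per-chunk on-disk overhead via `onDiskChunkSize`.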
const
  FixtureFile =
    currentSourcePath().dirname() & DirSep & "fixtures" & DirSep &
    "bfdata-test.bin"

  Block0Root =
    "4bbd1c7468626d6520e27a534ce9f3ee305160860367431528404697c60ce222".toDigest
  Block0BlobsCount = 1
  Block0BlockChunkSize = 45127
  Block0Blob0ChunkSize = 7043

  Block1Root =
    "133a92629a94cb9664eea57a649ee2d4a16fa48cac93aa5ccc0e9df727b5d9bd".toDigest
  Block1BlobsCount = 3
  Block1BlockChunkSize = 36321
  Block1Blob0ChunkSize = 7090
  Block1Blob1ChunkSize = 7016
  Block1Blob2ChunkSize = 131886

  Block2Root =
    "f92b453230c5b1914c5b8f868bdd9692d38b5231b8e365f2b8049b1d22cca396".toDigest
  Block2BlobsCount = 3
  Block2BlockChunkSize = 36248
  Block2Blob0ChunkSize = 7090
  Block2Blob1ChunkSize = 7090
  Block2Blob2ChunkSize = 7056

  Block0FullSize = onDiskChunkSize(Block0BlockChunkSize) +
    onDiskChunkSize(Block0Blob0ChunkSize)
  Block1FullSize = onDiskChunkSize(Block1BlockChunkSize) +
    onDiskChunkSize(Block1Blob0ChunkSize) +
    onDiskChunkSize(Block1Blob1ChunkSize) +
    onDiskChunkSize(Block1Blob2ChunkSize)
  Block2FullSize = onDiskChunkSize(Block2BlockChunkSize) +
    onDiskChunkSize(Block2Blob0ChunkSize) +
    onDiskChunkSize(Block2Blob1ChunkSize) +
    onDiskChunkSize(Block2Blob2ChunkSize)

# Result of a single auto check/repair run: the recovered head/tail chain
# data and the size of the file left on disk after repair.
type
  AutoRepairObject = object
    data: ChainFileData
    size: int64
suite "Beacon chain file test suite":
var fixtureData: seq[byte]
proc doAutoCheckRepairTest(id, size: int): Result[AutoRepairObject, string] =
let path =
block:
let res = getTempPath().valueOr:
return err(ioErrorMsg(error))
res & DirSep & "tmp_" & $id & "_" & $size & ".tmp"
discard removeFile(path)
io2.writeFile(path, fixtureData.toOpenArray(0, size - 1)).isOkOr:
return err(ioErrorMsg(error))
let
flags = {ChainFileFlag.Repair}
fres = ? ChainFileHandle.init(path, flags)
closeFile(fres.handle).isOkOr:
return err(ioErrorMsg(error))
let filesize = getFileSize(path).valueOr:
return err(ioErrorMsg(error))
removeFile(path).isOkOr:
return err(ioErrorMsg(error))
ok(AutoRepairObject(data: fres.data, size: filesize))
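
  # `check01` verifies that repair kept blocks 0 and 1 together with all of
  # their blobs; `check0` verifies that only block 0 survived.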
  template check01(adata: untyped): untyped =
    check:
      adata.data.head.isSome()
      adata.data.tail.isSome()
    let
      head = adata.data.head.get()
      tail = adata.data.tail.get()
      headRoot = withBlck(head.blck): forkyBlck.root
      tailRoot = withBlck(tail.blck): forkyBlck.root
    check:
      head.blob.isSome()
      tail.blob.isSome()
      headRoot == Block0Root
      tailRoot == Block1Root
      len(head.blob.get()) == Block0BlobsCount
      len(tail.blob.get()) == Block1BlobsCount
      adata.size == Block0FullSize + Block1FullSize

  template check0(adata: untyped): untyped =
    check:
      adata.data.head.isSome()
      adata.data.tail.isSome()
    let
      head = adata.data.head.get()
      tail = adata.data.tail.get()
      headRoot = withBlck(head.blck): forkyBlck.root
      tailRoot = withBlck(tail.blck): forkyBlck.root
    check:
      head.blob.isSome()
      tail.blob.isSome()
      headRoot == Block0Root
      tailRoot == Block0Root
      len(head.blob.get()) == Block0BlobsCount
      len(tail.blob.get()) == Block0BlobsCount
      adata.size == Block0FullSize
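
  # Load the fixture once into `fixtureData` (used by all repair tests below)
  # and verify that the intact file spans blocks 0 through 2.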
test "Fixture file validation":
check isFile(FixtureFile) == true
fixtureData = readAllBytes(FixtureFile).valueOr:
default(seq[byte])
check len(fixtureData) > 0
let hres = ChainFileHandle.init(FixtureFile, {})
check hres.isOk()
let handle = hres.get()
check:
handle.head.isSome()
handle.tail.isSome()
let
head = handle.head.get()
tail = handle.tail.get()
headRoot = withBlck(head.blck): forkyBlck.root
tailRoot = withBlck(tail.blck): forkyBlck.root
check:
head.blob.isSome()
tail.blob.isSome()
headRoot == Block0Root
tailRoot == Block2Root
len(head.blob.get()) == Block0BlobsCount
len(tail.blob.get()) == Block2BlobsCount
let cres = close(handle)
check cres.isOk()
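
  # Cutting anywhere inside the trailing chunk footer leaves the block 2 group
  # incomplete, so auto-repair is expected to roll the file back to the last
  # complete block group (block 1).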
test "Auto check/repair test (missing footer)":
let
hiLimit = len(fixtureData) - 1
loLimit = len(fixtureData) - sizeof(ChainFileFooter)
var counter = 1
for size in countdown(hiLimit, loLimit):
let tres = doAutoCheckRepairTest(counter, size)
check tres.isOk()
let adata = tres.get()
check01(adata)
inc(counter)
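
  # Remove whole trailing chunks one at a time: repair should fall back to
  # block 1, then to block 0 only, and finally to an empty file.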
test "Auto check/repair test (missing last chunk)":
var size = len(fixtureData)
block:
size -= onDiskChunkSize(Block2Blob2ChunkSize)
let tres = doAutoCheckRepairTest(1, size)
check tres.isOk()
let adata = tres.get()
check01(adata)
block:
size -= onDiskChunkSize(Block2Blob1ChunkSize)
let tres = doAutoCheckRepairTest(2, size)
check tres.isOk()
let adata = tres.get()
check01(adata)
block:
size -= onDiskChunkSize(Block2Blob0ChunkSize)
let tres = doAutoCheckRepairTest(3, size)
check tres.isOk()
let adata = tres.get()
check01(adata)
block:
size -= onDiskChunkSize(Block2BlockChunkSize)
let tres = doAutoCheckRepairTest(4, size)
check tres.isOk()
let adata = tres.get()
check01(adata)
block:
size -= onDiskChunkSize(Block1Blob2ChunkSize)
let tres = doAutoCheckRepairTest(5, size)
check tres.isOk()
let adata = tres.get()
check0(adata)
block:
size -= onDiskChunkSize(Block1Blob1ChunkSize)
let tres = doAutoCheckRepairTest(6, size)
check tres.isOk()
let adata = tres.get()
check0(adata)
block:
size -= onDiskChunkSize(Block1Blob0ChunkSize)
let tres = doAutoCheckRepairTest(7, size)
check tres.isOk()
let adata = tres.get()
check0(adata)
block:
size -= onDiskChunkSize(Block1BlockChunkSize)
let tres = doAutoCheckRepairTest(8, size)
check tres.isOk()
let adata = tres.get()
check0(adata)
block:
size -= onDiskChunkSize(Block0Blob0ChunkSize)
let tres = doAutoCheckRepairTest(9, size)
check tres.isOk()
let adata = tres.get()
check:
adata.data.head.isNone()
adata.data.tail.isNone()
adata.size == 0
block:
size -= onDiskChunkSize(Block0BlockChunkSize)
let tres = doAutoCheckRepairTest(10, size)
check tres.isOk()
let adata = tres.get()
check:
adata.data.head.isNone()
adata.data.tail.isNone()
adata.size == 0
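
  # Same truncation points as above, but with the next chunk's header left
  # dangling at the end of the file; repair must discard it as well.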
test "Auto check/repair test (only header)":
var size = len(fixtureData)
block:
size -= onDiskChunkSize(Block2Blob2ChunkSize)
let tres = doAutoCheckRepairTest(1, size + sizeof(ChainFileHeader))
check tres.isOk()
let adata = tres.get()
check01(adata)
block:
size -= onDiskChunkSize(Block2Blob1ChunkSize)
let tres = doAutoCheckRepairTest(2, size + sizeof(ChainFileHeader))
check tres.isOk()
let adata = tres.get()
check01(adata)
block:
size -= onDiskChunkSize(Block2Blob0ChunkSize)
let tres = doAutoCheckRepairTest(3, size + sizeof(ChainFileHeader))
check tres.isOk()
let adata = tres.get()
check01(adata)
block:
size -= onDiskChunkSize(Block2BlockChunkSize)
let tres = doAutoCheckRepairTest(4, size + sizeof(ChainFileHeader))
check tres.isOk()
let adata = tres.get()
check01(adata)
block:
size -= onDiskChunkSize(Block1Blob2ChunkSize)
let tres = doAutoCheckRepairTest(5, size + sizeof(ChainFileHeader))
check tres.isOk()
let adata = tres.get()
check0(adata)
block:
size -= onDiskChunkSize(Block1Blob1ChunkSize)
let tres = doAutoCheckRepairTest(6, size + sizeof(ChainFileHeader))
check tres.isOk()
let adata = tres.get()
check0(adata)
block:
size -= onDiskChunkSize(Block1Blob0ChunkSize)
let tres = doAutoCheckRepairTest(7, size + sizeof(ChainFileHeader))
check tres.isOk()
let adata = tres.get()
check0(adata)
block:
size -= onDiskChunkSize(Block1BlockChunkSize)
let tres = doAutoCheckRepairTest(8, size + sizeof(ChainFileHeader))
check tres.isOk()
let adata = tres.get()
check0(adata)
block:
size -= onDiskChunkSize(Block0Blob0ChunkSize)
let tres = doAutoCheckRepairTest(9, size + sizeof(ChainFileHeader))
check tres.isOk()
let adata = tres.get()
check:
adata.data.head.isNone()
adata.data.tail.isNone()
adata.size == 0
block:
size -= onDiskChunkSize(Block0BlockChunkSize)
let tres = doAutoCheckRepairTest(10, size + sizeof(ChainFileHeader))
check tres.isOk()
let adata = tres.get()
check:
adata.data.head.isNone()
adata.data.tail.isNone()
adata.size == 0
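
  # Truncate the fixture in 4 KiB steps and verify that repair keeps exactly
  # the block groups that still fit completely within the remaining size.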
test "Auto check/repair test (missing data)":
let
limit1 = Block0FullSize + Block1FullSize + Block2FullSize
limit2 = Block0FullSize + Block1FullSize
limit3 = Block0FullSize
var
size = len(fixtureData)
counter = 1
while size > 0:
size = max(0, size - 4096)
let tres = doAutoCheckRepairTest(counter, size)
check tres.isOk()
let adata = tres.get()
if (size < limit1) and (size >= limit2):
check01(adata)
elif (size < limit2) and (size >= limit3):
check0(adata)
else:
check:
adata.data.head.isNone()
adata.data.tail.isNone()
adata.size == 0
inc(counter)