nimbus-eth2/tests/test_sync_manager.nim


# beacon_chain
# Copyright (c) 2020-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
{.used.}
import std/[strutils, sequtils]
import unittest2
import chronos, chronos/unittest2/asynctests
import ../beacon_chain/networking/peer_scores
import ../beacon_chain/gossip_processing/block_processor,
../beacon_chain/sync/sync_manager,
../beacon_chain/sync/sync_queue,
../beacon_chain/spec/forks
type
SomeTPeer = ref object
id: string
score: int
func init(t: typedesc[SomeTPeer], id: string, score = 1000): SomeTPeer =
SomeTPeer(id: id, score: score)
func `$`(peer: SomeTPeer): string =
"peer#" & peer.id
template shortLog(peer: SomeTPeer): string =
$peer
func updateScore(peer: SomeTPeer, score: int) =
peer[].score += score
func updateStats(peer: SomeTPeer, index: SyncResponseKind, score: uint64) =
discard
func getStats(peer: SomeTPeer, index: SyncResponseKind): uint64 =
0
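# Builds a slot callback that always reports the same, fixed slot; the tests
# use it as a stand-in for the safe/finalized-slot callback that SyncQueue
# expects.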
func getStaticSlotCb(slot: Slot): GetSlotCallback =
proc getSlot(): Slot =
slot
getSlot
type
BlockEntry = object
blck*: ForkedSignedBeaconBlock
resfut*: Future[Result[void, VerifierError]]
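# Creates a contiguous chain of (Deneb) blocks, one block for every slot in
# the given range.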
func createChain(slots: Slice[Slot]): seq[ref ForkedSignedBeaconBlock] =
var res = newSeqOfCap[ref ForkedSignedBeaconBlock](len(slots))
for slot in slots:
let item = newClone ForkedSignedBeaconBlock(kind: ConsensusFork.Deneb)
item[].denebData.message.slot = slot
res.add(item)
res
proc createChain(srange: SyncRange): seq[ref ForkedSignedBeaconBlock] =
createChain(srange.slot .. (srange.slot + srange.count - 1))
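# For every requested slot that matches one of the given blocks, a default KZG
# commitment is appended to that block and a corresponding blob sidecar is
# produced (blocks without matching slots are left without blobs).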
func createBlobs(
blocks: var seq[ref ForkedSignedBeaconBlock],
slots: openArray[Slot]
): seq[ref BlobSidecar] =
var res = newSeq[ref BlobSidecar](len(slots))
for blck in blocks:
withBlck(blck[]):
when consensusFork >= ConsensusFork.Deneb:
template kzgs: untyped = forkyBlck.message.body.blob_kzg_commitments
for i, slot in slots:
if slot == forkyBlck.message.slot:
doAssert kzgs.add default(KzgCommitment)
if kzgs.len > 0:
forkyBlck.root = hash_tree_root(forkyBlck.message)
var
kzg_proofs: KzgProofs
blobs: Blobs
for _ in kzgs:
doAssert kzg_proofs.add default(KzgProof)
doAssert blobs.add default(Blob)
let sidecars = forkyBlck.create_blob_sidecars(kzg_proofs, blobs)
var sidecarIdx = 0
for i, slot in slots:
if slot == forkyBlck.message.slot:
res[i] = newClone sidecars[sidecarIdx]
inc sidecarIdx
res
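# Returns a BlockVerifier callback that stores every pushed block in `queue`
# together with a fresh future; the scenario verifier below completes that
# future to simulate the outcome of block processing.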
func collector(queue: AsyncQueue[BlockEntry]): BlockVerifier =
proc verify(
signedBlock: ForkedSignedBeaconBlock,
blobs: Opt[BlobSidecars],
maybeFinalized: bool
): Future[Result[void, VerifierError]] {.
async: (raises: [CancelledError], raw: true).} =
let fut =
Future[Result[void, VerifierError]].Raising([CancelledError]).init()
try:
queue.addLastNoWait(BlockEntry(blck: signedBlock, resfut: fut))
except CatchableError as exc:
raiseAssert exc.msg
fut
verify
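# Starts a verifier task that consumes the collected blocks in the order
# described by `sc` (slot ranges paired with an optional error to answer with)
# and asserts that blocks arrive exactly in that order for the given kind.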
proc setupVerifier(
skind: SyncQueueKind,
sc: openArray[tuple[slots: Slice[Slot], code: Opt[VerifierError]]]
): tuple[collector: BlockVerifier, verifier: Future[void]] =
doAssert(len(sc) > 0, "Empty scenarios are not allowed")
var
scenario = @sc
aq = newAsyncQueue[BlockEntry]()
template done(b: BlockEntry) =
b.resfut.complete(Result[void, VerifierError].ok())
template fail(b: BlockEntry, e: untyped) =
b.resfut.complete(Result[void, VerifierError].err(e))
template verifyBlock(i, e, s, v: untyped): untyped =
let item = await queue.popFirst()
if item.blck.slot == s:
if e.code.isSome():
item.fail(e.code.get())
else:
item.done()
else:
raiseAssert "Verifier got block from incorrect slot, " &
"expected " & $s & ", got " &
$item.blck.slot & ", position [" &
$i & ", " & $s & "]"
inc(v)
proc verifier(queue: AsyncQueue[BlockEntry]) {.async: (raises: []).} =
var slotsVerified = 0
try:
for index, entry in scenario.pairs():
case skind
of SyncQueueKind.Forward:
for slot in countup(entry.slots.a, entry.slots.b):
verifyBlock(index, entry, slot, slotsVerified)
of SyncQueueKind.Backward:
for slot in countdown(entry.slots.b, entry.slots.a):
verifyBlock(index, entry, slot, slotsVerified)
except CancelledError:
raiseAssert "Scenario is not completed, " &
"number of slots passed " & $slotsVerified
(collector(aq), verifier(aq))
suite "SyncManager test suite":
for kind in [SyncQueueKind.Forward, SyncQueueKind.Backward]:
asyncTest "[SyncQueue# & " & $kind & "] Smoke [single peer] test":
      # Four ranges are distributed to a single peer only.
let
scenario = [
(Slot(0) .. Slot(127), Opt.none(VerifierError))
]
verifier = setupVerifier(kind, scenario)
sq =
case kind
of SyncQueueKind.Forward:
SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(127),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(0)),
verifier.collector)
of SyncQueueKind.Backward:
SyncQueue.init(SomeTPeer, kind, Slot(127), Slot(0),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(127)),
verifier.collector)
peer = SomeTPeer.init("1")
r1 = sq.pop(Slot(127), peer)
r2 = sq.pop(Slot(127), peer)
r3 = sq.pop(Slot(127), peer)
d1 = createChain(r1.data)
d2 = createChain(r2.data)
d3 = createChain(r3.data)
let
f1 = sq.push(r1, d1, Opt.none(seq[BlobSidecars]))
f2 = sq.push(r2, d2, Opt.none(seq[BlobSidecars]))
f3 = sq.push(r3, d3, Opt.none(seq[BlobSidecars]))
check:
f1.finished == false
f2.finished == false
f3.finished == false
await noCancel f1
check:
f1.finished == true
f2.finished == false
f3.finished == false
await noCancel f2
check:
f1.finished == true
f2.finished == true
f3.finished == false
await noCancel f3
check:
f1.finished == true
f2.finished == true
f3.finished == true
let
r4 = sq.pop(Slot(127), peer)
d4 = createChain(r4.data)
f4 = sq.push(r4, d4, Opt.none(seq[BlobSidecars]))
await noCancel f4
check:
f1.finished == true
f2.finished == true
f3.finished == true
f4.finished == true
await noCancel wait(verifier.verifier, 2.seconds)
asyncTest "[SyncQueue# & " & $kind & "] Smoke [3 peers] test":
      # Three ranges are distributed between 3 peers; every range is going to
      # be pushed by all peers.
let
scenario = [
(Slot(0) .. Slot(127), Opt.none(VerifierError))
]
verifier = setupVerifier(kind, scenario)
sq =
case kind
of SyncQueueKind.Forward:
SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(127),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(0)),
verifier.collector)
of SyncQueueKind.Backward:
SyncQueue.init(SomeTPeer, kind, Slot(127), Slot(0),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(127)),
verifier.collector)
peer1 = SomeTPeer.init("1")
peer2 = SomeTPeer.init("2")
peer3 = SomeTPeer.init("3")
r11 = sq.pop(Slot(127), peer1)
r12 = sq.pop(Slot(127), peer2)
r13 = sq.pop(Slot(127), peer3)
d11 = createChain(r11.data)
d12 = createChain(r12.data)
d13 = createChain(r13.data)
r21 = sq.pop(Slot(127), peer1)
r22 = sq.pop(Slot(127), peer2)
r23 = sq.pop(Slot(127), peer3)
d21 = createChain(r21.data)
d22 = createChain(r22.data)
d23 = createChain(r23.data)
r31 = sq.pop(Slot(127), peer1)
r32 = sq.pop(Slot(127), peer2)
r33 = sq.pop(Slot(127), peer3)
d31 = createChain(r31.data)
d32 = createChain(r32.data)
d33 = createChain(r33.data)
let
f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars]))
f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars]))
f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars]))
f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars]))
f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars]))
f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars]))
f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars]))
f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars]))
f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars]))
await noCancel f11
check:
f11.finished == true
# We do not check f12 and f13 here because their state is undefined
# at this time.
f21.finished == false
f22.finished == false
f23.finished == false
f31.finished == false
f32.finished == false
f33.finished == false
await noCancel f22
check:
f11.finished == true
f12.finished == true
f13.finished == true
f22.finished == true
# We do not check f21 and f23 here because their state is undefined
# at this time.
f31.finished == false
f32.finished == false
f33.finished == false
await noCancel f33
check:
f11.finished == true
f12.finished == true
f13.finished == true
f21.finished == true
f22.finished == true
f23.finished == true
f33.finished == true
# We do not check f31 and f32 here because their state is undefined
# at this time.
let
r41 = sq.pop(Slot(127), peer1)
d41 = createChain(r41.data)
await noCancel sq.push(r41, d41, Opt.none(seq[BlobSidecars]))
check:
f11.finished == true
f12.finished == true
f13.finished == true
f21.finished == true
f22.finished == true
f23.finished == true
f31.finished == true
f32.finished == true
f33.finished == true
await noCancel wait(verifier.verifier, 2.seconds)
asyncTest "[SyncQueue# & " & $kind & "] Failure request push test":
let
scenario =
case kind
of SyncQueueKind.Forward:
[
(Slot(0) .. Slot(31), Opt.none(VerifierError)),
(Slot(32) .. Slot(63), Opt.none(VerifierError))
]
of SyncQueueKind.Backward:
[
(Slot(32) .. Slot(63), Opt.none(VerifierError)),
(Slot(0) .. Slot(31), Opt.none(VerifierError))
]
verifier = setupVerifier(kind, scenario)
sq =
case kind
of SyncQueueKind.Forward:
SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(63),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(0)),
verifier.collector)
of SyncQueueKind.Backward:
SyncQueue.init(SomeTPeer, kind, Slot(63), Slot(0),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(63)),
verifier.collector)
peer1 = SomeTPeer.init("1")
peer2 = SomeTPeer.init("2")
peer3 = SomeTPeer.init("3")
block:
let
r11 = sq.pop(Slot(63), peer1)
r12 = sq.pop(Slot(63), peer2)
r13 = sq.pop(Slot(63), peer3)
sq.push(r11)
sq.push(r12)
sq.push(r13)
        # The next couple of calls should be detected as not relevant
sq.push(r11)
sq.push(r12)
sq.push(r13)
block:
let
r11 = sq.pop(Slot(63), peer1)
r12 = sq.pop(Slot(63), peer2)
r13 = sq.pop(Slot(63), peer3)
d12 = createChain(r12.data)
sq.push(r11)
await noCancel sq.push(r12, d12, Opt.none(seq[BlobSidecars]))
sq.push(r13)
        # The next couple of calls should be detected as not relevant
sq.push(r11)
sq.push(r12)
sq.push(r13)
block:
let
r11 = sq.pop(Slot(63), peer1)
r12 = sq.pop(Slot(63), peer2)
r13 = sq.pop(Slot(63), peer3)
d13 = createChain(r13.data)
sq.push(r11)
sq.push(r12)
await noCancel sq.push(r13, d13, Opt.none(seq[BlobSidecars]))
        # The next couple of calls should be detected as not relevant
sq.push(r11)
sq.push(r12)
sq.push(r13)
await noCancel wait(verifier.verifier, 2.seconds)
asyncTest "[SyncQueue# & " & $kind & "] Invalid block [3 peers] test":
      # This scenario tests 2 cases.
      # 1. When the first error is encountered, the response is simply dropped
      #    and `failuresCounter` is increased.
      # 2. When another error is encountered, the whole queue is reset to the
      #    last known good/safe point (rewind process).
let
scenario =
case kind
of SyncQueueKind.Forward:
[
(Slot(0) .. Slot(31), Opt.none(VerifierError)),
(Slot(32) .. Slot(40), Opt.none(VerifierError)),
(Slot(41) .. Slot(41), Opt.some(VerifierError.Invalid)),
(Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)),
(Slot(41) .. Slot(41), Opt.some(VerifierError.Invalid)),
(Slot(0) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)),
(Slot(41) .. Slot(41), Opt.none(VerifierError)),
(Slot(42) .. Slot(63), Opt.none(VerifierError))
]
of SyncQueueKind.Backward:
[
(Slot(32) .. Slot(63), Opt.none(VerifierError)),
(Slot(22) .. Slot(31), Opt.none(VerifierError)),
(Slot(21) .. Slot(21), Opt.some(VerifierError.Invalid)),
(Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(21) .. Slot(21), Opt.some(VerifierError.Invalid)),
(Slot(32) .. Slot(63), Opt.some(VerifierError.Duplicate)),
(Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(21) .. Slot(21), Opt.none(VerifierError)),
(Slot(0) .. Slot(20), Opt.none(VerifierError)),
]
verifier = setupVerifier(kind, scenario)
sq =
case kind
of SyncQueueKind.Forward:
SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(63),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(0)),
verifier.collector)
of SyncQueueKind.Backward:
SyncQueue.init(SomeTPeer, kind, Slot(63), Slot(0),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(63)),
verifier.collector)
peer1 = SomeTPeer.init("1")
peer2 = SomeTPeer.init("2")
peer3 = SomeTPeer.init("3")
r11 = sq.pop(Slot(63), peer1)
r12 = sq.pop(Slot(63), peer2)
r13 = sq.pop(Slot(63), peer3)
d11 = createChain(r11.data)
d12 = createChain(r12.data)
d13 = createChain(r13.data)
r21 = sq.pop(Slot(63), peer1)
r22 = sq.pop(Slot(63), peer2)
r23 = sq.pop(Slot(63), peer3)
d21 = createChain(r21.data)
d22 = createChain(r22.data)
d23 = createChain(r23.data)
let
f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars]))
f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars]))
f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars]))
await noCancel f11
check f11.finished == true
let
f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars]))
f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars]))
f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars]))
await noCancel f21
check:
f21.finished == true
f11.finished == true
f12.finished == true
f13.finished == true
await noCancel f22
check:
f21.finished == true
f22.finished == true
f11.finished == true
f12.finished == true
f13.finished == true
await noCancel f23
check:
f21.finished == true
f22.finished == true
f23.finished == true
f11.finished == true
f12.finished == true
f13.finished == true
let
r31 = sq.pop(Slot(63), peer1)
r32 = sq.pop(Slot(63), peer2)
r33 = sq.pop(Slot(63), peer3)
d31 = createChain(r31.data)
d32 = createChain(r32.data)
d33 = createChain(r33.data)
r41 = sq.pop(Slot(63), peer1)
r42 = sq.pop(Slot(63), peer2)
r43 = sq.pop(Slot(63), peer3)
d41 = createChain(r41.data)
d42 = createChain(r42.data)
d43 = createChain(r43.data)
let
f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars]))
f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars]))
f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars]))
f42 = sq.push(r42, d42, Opt.none(seq[BlobSidecars]))
f41 = sq.push(r41, d41, Opt.none(seq[BlobSidecars]))
f43 = sq.push(r43, d43, Opt.none(seq[BlobSidecars]))
await noCancel f31
check:
f31.finished == true
await noCancel f42
check:
f31.finished == true
f32.finished == true
f33.finished == true
f42.finished == true
await noCancel f43
check:
f31.finished == true
f32.finished == true
f33.finished == true
f41.finished == true
f42.finished == true
f43.finished == true
await noCancel wait(verifier.verifier, 2.seconds)
asyncTest "[SyncQueue# & " & $kind & "] Unviable block [3 peers] test":
      # This scenario tests 2 cases.
      # 1. When the first error is encountered, the response is simply dropped
      #    and `failuresCounter` is increased.
      # 2. When another error is encountered, the whole queue is reset to the
      #    last known good/safe point (rewind process).
      # Unviable fork blocks are processed differently from invalid blocks:
      # all such blocks should be added to the quarantine, so the blocks range
      # does not get failed immediately.
let
scenario =
case kind
of SyncQueueKind.Forward:
[
(Slot(0) .. Slot(31), Opt.none(VerifierError)),
(Slot(32) .. Slot(40), Opt.none(VerifierError)),
(Slot(41) .. Slot(63), Opt.some(VerifierError.UnviableFork)),
(Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)),
(Slot(41) .. Slot(63), Opt.some(VerifierError.UnviableFork)),
(Slot(0) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)),
(Slot(41) .. Slot(63), Opt.none(VerifierError))
]
of SyncQueueKind.Backward:
[
(Slot(32) .. Slot(63), Opt.none(VerifierError)),
(Slot(22) .. Slot(31), Opt.none(VerifierError)),
(Slot(0) .. Slot(21), Opt.some(VerifierError.UnviableFork)),
(Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(0) .. Slot(21), Opt.some(VerifierError.UnviableFork)),
(Slot(32) .. Slot(63), Opt.some(VerifierError.Duplicate)),
(Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(0) .. Slot(21), Opt.none(VerifierError))
]
verifier = setupVerifier(kind, scenario)
sq =
case kind
of SyncQueueKind.Forward:
SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(63),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(0)),
verifier.collector)
of SyncQueueKind.Backward:
SyncQueue.init(SomeTPeer, kind, Slot(63), Slot(0),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(63)),
verifier.collector)
peer1 = SomeTPeer.init("1")
peer2 = SomeTPeer.init("2")
peer3 = SomeTPeer.init("3")
r11 = sq.pop(Slot(63), peer1)
r12 = sq.pop(Slot(63), peer2)
r13 = sq.pop(Slot(63), peer3)
d11 = createChain(r11.data)
d12 = createChain(r12.data)
d13 = createChain(r13.data)
r21 = sq.pop(Slot(63), peer1)
r22 = sq.pop(Slot(63), peer2)
r23 = sq.pop(Slot(63), peer3)
d21 = createChain(r21.data)
d22 = createChain(r22.data)
d23 = createChain(r23.data)
let
f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars]))
f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars]))
f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars]))
await noCancel f11
check f11.finished == true
let
f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars]))
f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars]))
f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars]))
await noCancel f21
check:
f21.finished == true
f11.finished == true
f12.finished == true
f13.finished == true
await noCancel f22
check:
f21.finished == true
f22.finished == true
f11.finished == true
f12.finished == true
f13.finished == true
await noCancel f23
check:
f21.finished == true
f22.finished == true
f23.finished == true
f11.finished == true
f12.finished == true
f13.finished == true
let
r31 = sq.pop(Slot(63), peer1)
r32 = sq.pop(Slot(63), peer2)
r33 = sq.pop(Slot(63), peer3)
let
d31 = createChain(r31.data)
d32 = createChain(r32.data)
d33 = createChain(r33.data)
r41 = sq.pop(Slot(63), peer1)
r42 = sq.pop(Slot(63), peer2)
r43 = sq.pop(Slot(63), peer3)
d41 = createChain(r41.data)
d42 = createChain(r42.data)
d43 = createChain(r43.data)
let
f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars]))
f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars]))
f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars]))
f42 = sq.push(r42, d42, Opt.none(seq[BlobSidecars]))
f41 = sq.push(r41, d41, Opt.none(seq[BlobSidecars]))
f43 = sq.push(r43, d43, Opt.none(seq[BlobSidecars]))
await noCancel f31
check:
f31.finished == true
await noCancel f42
check:
f31.finished == true
f32.finished == true
f33.finished == true
f42.finished == true
await noCancel f43
check:
f31.finished == true
f32.finished == true
f33.finished == true
f41.finished == true
f42.finished == true
f43.finished == true
await noCancel wait(verifier.verifier, 2.seconds)
asyncTest "[SyncQueue# & " & $kind & "] Combination of missing parent " &
"and good blocks [3 peers] test":
let
scenario =
case kind
of SyncQueueKind.Forward:
[
(Slot(0) .. Slot(31), Opt.none(VerifierError)),
(Slot(32) .. Slot(40), Opt.none(VerifierError)),
(Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent)),
(Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)),
(Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent)),
(Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)),
(Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent)),
(Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)),
(Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent)),
(Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)),
(Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent)),
(Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)),
(Slot(41) .. Slot(41), Opt.some(VerifierError.MissingParent)),
(Slot(32) .. Slot(40), Opt.some(VerifierError.Duplicate)),
(Slot(41) .. Slot(63), Opt.none(VerifierError))
]
of SyncQueueKind.Backward:
[
(Slot(32) .. Slot(63), Opt.none(VerifierError)),
(Slot(22) .. Slot(31), Opt.none(VerifierError)),
(Slot(21) .. Slot(21), Opt.some(VerifierError.MissingParent)),
(Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(21) .. Slot(21), Opt.some(VerifierError.MissingParent)),
(Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(21) .. Slot(21), Opt.some(VerifierError.MissingParent)),
(Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(21) .. Slot(21), Opt.some(VerifierError.MissingParent)),
(Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(21) .. Slot(21), Opt.some(VerifierError.MissingParent)),
(Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(21) .. Slot(21), Opt.some(VerifierError.MissingParent)),
(Slot(22) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(0) .. Slot(21), Opt.none(VerifierError)),
]
verifier = setupVerifier(kind, scenario)
sq =
case kind
of SyncQueueKind.Forward:
SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(63),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(0)),
verifier.collector)
of SyncQueueKind.Backward:
SyncQueue.init(SomeTPeer, kind, Slot(63), Slot(0),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(63)),
verifier.collector)
peer1 = SomeTPeer.init("1")
peer2 = SomeTPeer.init("2")
peer3 = SomeTPeer.init("3")
r11 = sq.pop(Slot(63), peer1)
r12 = sq.pop(Slot(63), peer2)
r13 = sq.pop(Slot(63), peer3)
d11 = createChain(r11.data)
d12 = createChain(r12.data)
d13 = createChain(r13.data)
r21 = sq.pop(Slot(63), peer1)
r22 = sq.pop(Slot(63), peer2)
r23 = sq.pop(Slot(63), peer3)
d21 = createChain(r21.data)
d22 = createChain(r22.data)
d23 = createChain(r23.data)
let
f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars]))
f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars]))
f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars]))
await noCancel f11
check f11.finished == true
let
f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars]))
f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars]))
f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars]))
await noCancel f21
check:
f21.finished == true
f11.finished == true
f12.finished == true
f13.finished == true
await noCancel f22
check:
f21.finished == true
f22.finished == true
f11.finished == true
f12.finished == true
f13.finished == true
await noCancel f23
check:
f21.finished == true
f22.finished == true
f23.finished == true
f11.finished == true
f12.finished == true
f13.finished == true
let
r31 = sq.pop(Slot(63), peer1)
r32 = sq.pop(Slot(63), peer2)
r33 = sq.pop(Slot(63), peer3)
d31 = createChain(r31.data)
d32 = createChain(r32.data)
d33 = createChain(r33.data)
f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars]))
f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars]))
f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars]))
await noCancel f31
await noCancel f32
await noCancel f33
let
r41 = sq.pop(Slot(63), peer1)
r42 = sq.pop(Slot(63), peer2)
r43 = sq.pop(Slot(63), peer3)
d41 = createChain(r41.data)
d42 = createChain(r42.data)
d43 = createChain(r43.data)
f42 = sq.push(r32, d42, Opt.none(seq[BlobSidecars]))
f41 = sq.push(r31, d41, Opt.none(seq[BlobSidecars]))
f43 = sq.push(r33, d43, Opt.none(seq[BlobSidecars]))
await noCancel allFutures(f42, f41, f43)
await noCancel wait(verifier.verifier, 2.seconds)
asyncTest "[SyncQueue#Forward] Missing parent and exponential rewind " &
"[3 peers] test":
let
scenario =
[
(Slot(0) .. Slot(31), Opt.none(VerifierError)),
# .. 3 ranges are empty
(Slot(128) .. Slot(128), Opt.some(VerifierError.MissingParent)),
(Slot(128) .. Slot(128), Opt.some(VerifierError.MissingParent)),
# 1st rewind should be to (failed_slot - 1 * epoch) = 96
(Slot(128) .. Slot(128), Opt.some(VerifierError.MissingParent)),
(Slot(128) .. Slot(128), Opt.some(VerifierError.MissingParent)),
# 2nd rewind should be to (failed_slot - 2 * epoch) = 64
(Slot(128) .. Slot(128), Opt.some(VerifierError.MissingParent)),
(Slot(128) .. Slot(128), Opt.some(VerifierError.MissingParent)),
# 3rd rewind should be to (failed_slot - 4 * epoch) = 0
(Slot(0) .. Slot(31), Opt.some(VerifierError.Duplicate)),
(Slot(32) .. Slot(63), Opt.none(VerifierError)),
(Slot(64) .. Slot(95), Opt.none(VerifierError)),
(Slot(96) .. Slot(127), Opt.none(VerifierError)),
(Slot(128) .. Slot(159), Opt.none(VerifierError)),
]
kind = SyncQueueKind.Forward
verifier = setupVerifier(kind, scenario)
sq = SyncQueue.init(SomeTPeer, kind, Slot(0), Slot(159),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(0)),
verifier.collector)
peer1 = SomeTPeer.init("1")
peer2 = SomeTPeer.init("2")
peer3 = SomeTPeer.init("3")
r11 = sq.pop(Slot(159), peer1)
r12 = sq.pop(Slot(159), peer2)
r13 = sq.pop(Slot(159), peer3)
d11 = createChain(r11.data)
d12 = createChain(r12.data)
d13 = createChain(r13.data)
f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars]))
f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars]))
f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars]))
await noCancel f11
await noCancel f12
await noCancel f13
for i in 0 ..< 3:
let
re1 = sq.pop(Slot(159), peer1)
re2 = sq.pop(Slot(159), peer2)
re3 = sq.pop(Slot(159), peer3)
de1 = default(seq[ref ForkedSignedBeaconBlock])
de2 = default(seq[ref ForkedSignedBeaconBlock])
de3 = default(seq[ref ForkedSignedBeaconBlock])
fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars]))
fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars]))
fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars]))
await noCancel fe1
await noCancel fe2
await noCancel fe3
let
r21 = sq.pop(Slot(159), peer1)
r22 = sq.pop(Slot(159), peer2)
r23 = sq.pop(Slot(159), peer3)
d21 = createChain(r21.data)
d22 = createChain(r22.data)
d23 = createChain(r23.data)
f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars]))
f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars]))
f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars]))
await noCancel f21
await noCancel f22
await noCancel f23
for i in 0 ..< 1:
let
re1 = sq.pop(Slot(159), peer1)
re2 = sq.pop(Slot(159), peer2)
re3 = sq.pop(Slot(159), peer3)
de1 = default(seq[ref ForkedSignedBeaconBlock])
de2 = default(seq[ref ForkedSignedBeaconBlock])
de3 = default(seq[ref ForkedSignedBeaconBlock])
fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars]))
fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars]))
fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars]))
await noCancel fe1
await noCancel fe2
await noCancel fe3
let
r31 = sq.pop(Slot(159), peer1)
r32 = sq.pop(Slot(159), peer2)
r33 = sq.pop(Slot(159), peer3)
d31 = createChain(r31.data)
d32 = createChain(r32.data)
d33 = createChain(r33.data)
f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars]))
f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars]))
f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars]))
await noCancel f31
await noCancel f32
await noCancel f33
for i in 0 ..< 2:
let
re1 = sq.pop(Slot(159), peer1)
re2 = sq.pop(Slot(159), peer2)
re3 = sq.pop(Slot(159), peer3)
de1 = default(seq[ref ForkedSignedBeaconBlock])
de2 = default(seq[ref ForkedSignedBeaconBlock])
de3 = default(seq[ref ForkedSignedBeaconBlock])
fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars]))
fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars]))
fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars]))
await noCancel fe1
await noCancel fe2
await noCancel fe3
let
r41 = sq.pop(Slot(159), peer1)
r42 = sq.pop(Slot(159), peer2)
r43 = sq.pop(Slot(159), peer3)
d41 = createChain(r41.data)
d42 = createChain(r42.data)
d43 = createChain(r43.data)
f41 = sq.push(r41, d41, Opt.none(seq[BlobSidecars]))
f42 = sq.push(r42, d42, Opt.none(seq[BlobSidecars]))
f43 = sq.push(r43, d43, Opt.none(seq[BlobSidecars]))
await noCancel f41
await noCancel f42
await noCancel f43
for i in 0 ..< 5:
let
rf1 = sq.pop(Slot(159), peer1)
rf2 = sq.pop(Slot(159), peer2)
rf3 = sq.pop(Slot(159), peer3)
df1 = createChain(rf1.data)
df2 = createChain(rf2.data)
df3 = createChain(rf3.data)
ff1 = sq.push(rf1, df1, Opt.none(seq[BlobSidecars]))
ff2 = sq.push(rf2, df2, Opt.none(seq[BlobSidecars]))
ff3 = sq.push(rf3, df3, Opt.none(seq[BlobSidecars]))
await noCancel ff1
await noCancel ff2
await noCancel ff3
await noCancel wait(verifier.verifier, 2.seconds)
asyncTest "[SyncQueue#Backward] Missing parent and exponential rewind " &
"[3 peers] test":
let
scenario =
[
(Slot(128) .. Slot(159), Opt.none(VerifierError)),
# .. 3 ranges are empty
(Slot(31) .. Slot(31), Opt.some(VerifierError.MissingParent)),
(Slot(31) .. Slot(31), Opt.some(VerifierError.MissingParent)),
(Slot(128) .. Slot(159), Opt.some(VerifierError.Duplicate)),
(Slot(96) .. Slot(127), Opt.none(VerifierError)),
# .. 2 ranges are empty
(Slot(31) .. Slot(31), Opt.some(VerifierError.MissingParent)),
(Slot(31) .. Slot(31), Opt.some(VerifierError.MissingParent)),
(Slot(128) .. Slot(159), Opt.some(VerifierError.Duplicate)),
(Slot(96) .. Slot(127), Opt.some(VerifierError.Duplicate)),
(Slot(64) .. Slot(95), Opt.none(VerifierError)),
# .. 1 range is empty
(Slot(31) .. Slot(31), Opt.some(VerifierError.MissingParent)),
(Slot(31) .. Slot(31), Opt.some(VerifierError.MissingParent)),
(Slot(128) .. Slot(159), Opt.some(VerifierError.Duplicate)),
(Slot(96) .. Slot(127), Opt.some(VerifierError.Duplicate)),
(Slot(64) .. Slot(95), Opt.some(VerifierError.Duplicate)),
(Slot(32) .. Slot(63), Opt.none(VerifierError)),
(Slot(0) .. Slot(31), Opt.none(VerifierError))
]
kind = SyncQueueKind.Backward
verifier = setupVerifier(kind, scenario)
sq = SyncQueue.init(SomeTPeer, kind, Slot(159), Slot(0),
32'u64, # 32 slots per request
3, # 3 concurrent requests
2, # 2 failures allowed
getStaticSlotCb(Slot(159)),
verifier.collector)
peer1 = SomeTPeer.init("1")
peer2 = SomeTPeer.init("2")
peer3 = SomeTPeer.init("3")
r11 = sq.pop(Slot(159), peer1)
r12 = sq.pop(Slot(159), peer2)
r13 = sq.pop(Slot(159), peer3)
d11 = createChain(r11.data)
d12 = createChain(r12.data)
d13 = createChain(r13.data)
f11 = sq.push(r11, d11, Opt.none(seq[BlobSidecars]))
f12 = sq.push(r12, d12, Opt.none(seq[BlobSidecars]))
f13 = sq.push(r13, d13, Opt.none(seq[BlobSidecars]))
await noCancel f11
await noCancel f12
await noCancel f13
for i in 0 ..< 3:
let
re1 = sq.pop(Slot(159), peer1)
re2 = sq.pop(Slot(159), peer2)
re3 = sq.pop(Slot(159), peer3)
de1 = default(seq[ref ForkedSignedBeaconBlock])
de2 = default(seq[ref ForkedSignedBeaconBlock])
de3 = default(seq[ref ForkedSignedBeaconBlock])
fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars]))
fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars]))
fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars]))
await noCancel fe1
await noCancel fe2
await noCancel fe3
let
r21 = sq.pop(Slot(159), peer1)
r22 = sq.pop(Slot(159), peer2)
r23 = sq.pop(Slot(159), peer3)
d21 = createChain(r21.data)
d22 = createChain(r22.data)
d23 = createChain(r23.data)
f21 = sq.push(r21, d21, Opt.none(seq[BlobSidecars]))
f22 = sq.push(r22, d22, Opt.none(seq[BlobSidecars]))
f23 = sq.push(r23, d23, Opt.none(seq[BlobSidecars]))
await noCancel f21
await noCancel f22
await noCancel f23
for i in 0 ..< 2:
let
r31 = sq.pop(Slot(159), peer1)
r32 = sq.pop(Slot(159), peer2)
r33 = sq.pop(Slot(159), peer3)
d31 = createChain(r31.data)
d32 = createChain(r32.data)
d33 = createChain(r33.data)
f31 = sq.push(r31, d31, Opt.none(seq[BlobSidecars]))
f32 = sq.push(r32, d32, Opt.none(seq[BlobSidecars]))
f33 = sq.push(r33, d33, Opt.none(seq[BlobSidecars]))
await noCancel f31
await noCancel f32
await noCancel f33
for i in 0 ..< 2:
let
re1 = sq.pop(Slot(159), peer1)
re2 = sq.pop(Slot(159), peer2)
re3 = sq.pop(Slot(159), peer3)
de1 = default(seq[ref ForkedSignedBeaconBlock])
de2 = default(seq[ref ForkedSignedBeaconBlock])
de3 = default(seq[ref ForkedSignedBeaconBlock])
fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars]))
fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars]))
fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars]))
await noCancel fe1
await noCancel fe2
await noCancel fe3
let
r41 = sq.pop(Slot(159), peer1)
r42 = sq.pop(Slot(159), peer2)
r43 = sq.pop(Slot(159), peer3)
d41 = createChain(r41.data)
d42 = createChain(r42.data)
d43 = createChain(r43.data)
f41 = sq.push(r41, d41, Opt.none(seq[BlobSidecars]))
f42 = sq.push(r42, d42, Opt.none(seq[BlobSidecars]))
f43 = sq.push(r43, d43, Opt.none(seq[BlobSidecars]))
await noCancel f41
await noCancel f42
await noCancel f43
for i in 0 ..< 3:
let
r51 = sq.pop(Slot(159), peer1)
r52 = sq.pop(Slot(159), peer2)
r53 = sq.pop(Slot(159), peer3)
d51 = createChain(r51.data)
d52 = createChain(r52.data)
d53 = createChain(r53.data)
f51 = sq.push(r51, d51, Opt.none(seq[BlobSidecars]))
f52 = sq.push(r52, d52, Opt.none(seq[BlobSidecars]))
f53 = sq.push(r53, d53, Opt.none(seq[BlobSidecars]))
await noCancel f51
await noCancel f52
await noCancel f53
for i in 0 ..< 1:
let
re1 = sq.pop(Slot(159), peer1)
re2 = sq.pop(Slot(159), peer2)
re3 = sq.pop(Slot(159), peer3)
de1 = default(seq[ref ForkedSignedBeaconBlock])
de2 = default(seq[ref ForkedSignedBeaconBlock])
de3 = default(seq[ref ForkedSignedBeaconBlock])
fe1 = sq.push(re1, de1, Opt.none(seq[BlobSidecars]))
fe2 = sq.push(re2, de2, Opt.none(seq[BlobSidecars]))
fe3 = sq.push(re3, de3, Opt.none(seq[BlobSidecars]))
await noCancel fe1
await noCancel fe2
await noCancel fe3
let
r61 = sq.pop(Slot(159), peer1)
r62 = sq.pop(Slot(159), peer2)
r63 = sq.pop(Slot(159), peer3)
d61 = createChain(r61.data)
d62 = createChain(r62.data)
d63 = createChain(r63.data)
f61 = sq.push(r61, d61, Opt.none(seq[BlobSidecars]))
f62 = sq.push(r62, d62, Opt.none(seq[BlobSidecars]))
f63 = sq.push(r63, d63, Opt.none(seq[BlobSidecars]))
await noCancel f61
await noCancel f62
await noCancel f63
for i in 0 ..< 5:
let
r71 = sq.pop(Slot(159), peer1)
r72 = sq.pop(Slot(159), peer2)
r73 = sq.pop(Slot(159), peer3)
d71 = createChain(r71.data)
d72 = createChain(r72.data)
d73 = createChain(r73.data)
f71 = sq.push(r71, d71, Opt.none(seq[BlobSidecars]))
f72 = sq.push(r72, d72, Opt.none(seq[BlobSidecars]))
f73 = sq.push(r73, d73, Opt.none(seq[BlobSidecars]))
await noCancel f71
await noCancel f72
await noCancel f73
await noCancel wait(verifier.verifier, 2.seconds)
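  # For forward sync the rewind point is expected to stay at the finalized slot
  # while the failure is close to it, and otherwise to move an exponentially
  # growing number of epochs (1, 2, 4, ...) behind the failed slot, which is
  # what the blocks below check.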
test "[SyncQueue#Forward] getRewindPoint() test":
let aq = newAsyncQueue[BlockEntry]()
block:
let
queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward,
Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64),
1'u64, 3, 2, getStaticSlotCb(Slot(0)),
collector(aq))
finalizedSlot = start_slot(Epoch(0'u64))
epochStartSlot = start_slot(Epoch(0'u64)) + 1'u64
finishSlot = start_slot(Epoch(2'u64))
for i in uint64(epochStartSlot) ..< uint64(finishSlot):
check queue.getRewindPoint(Slot(i), finalizedSlot) == finalizedSlot
block:
let
queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward,
Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64),
1'u64, 3, 2, getStaticSlotCb(Slot(0)),
collector(aq))
finalizedSlot = start_slot(Epoch(1'u64))
epochStartSlot = start_slot(Epoch(1'u64)) + 1'u64
finishSlot = start_slot(Epoch(3'u64))
for i in uint64(epochStartSlot) ..< uint64(finishSlot) :
check queue.getRewindPoint(Slot(i), finalizedSlot) == finalizedSlot
block:
let
queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward,
Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64),
1'u64, 3, 2, getStaticSlotCb(Slot(0)),
collector(aq))
finalizedSlot = start_slot(Epoch(0'u64))
failSlot = Slot(0xFFFF_FFFF_FFFF_FFFFF'u64)
failEpoch = epoch(failSlot)
var counter = 1'u64
for i in 0 ..< 64:
if counter >= failEpoch:
break
let rewindEpoch = failEpoch - counter
let rewindSlot = start_slot(rewindEpoch)
check queue.getRewindPoint(failSlot, finalizedSlot) == rewindSlot
counter = counter shl 1
block:
let
queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Forward,
Slot(0), Slot(0xFFFF_FFFF_FFFF_FFFFF'u64),
1'u64, 3, 2, getStaticSlotCb(Slot(0)),
collector(aq))
let
finalizedSlot = start_slot(Epoch(1'u64))
failSlot = Slot(0xFFFF_FFFF_FFFF_FFFFF'u64)
failEpoch = epoch(failSlot)
var counter = 1'u64
for i in 0 ..< 64:
if counter >= failEpoch:
break
let
rewindEpoch = failEpoch - counter
rewindSlot = start_slot(rewindEpoch)
check queue.getRewindPoint(failSlot, finalizedSlot) == rewindSlot
counter = counter shl 1
test "[SyncQueue#Backward] getRewindPoint() test":
let aq = newAsyncQueue[BlockEntry]()
block:
let
getSafeSlot = getStaticSlotCb(Slot(1024))
queue = SyncQueue.init(SomeTPeer, SyncQueueKind.Backward,
Slot(1024), Slot(0),
1'u64, 3, 2, getSafeSlot, collector(aq))
safeSlot = getSafeSlot()
for i in countdown(1023, 0):
check queue.getRewindPoint(Slot(i), safeSlot) == safeSlot
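  # hasEndGap() should report a gap whenever the response lacks a block for the
  # last slot of the requested range; an empty response always counts as having
  # a gap.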
test "[SyncQueue] hasEndGap() test":
let
chain1 = createChain(Slot(1) .. Slot(1))
chain2 = newSeq[ref ForkedSignedBeaconBlock]()
for counter in countdown(32'u64, 2'u64):
let
srange = SyncRange.init(Slot(1), counter)
req = SyncRequest[SomeTPeer](data: srange)
check req.hasEndGap(chain1) == true
let req = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(1), 1'u64))
check:
req.hasEndGap(chain1) == false
req.hasEndGap(chain2) == true
test "[SyncQueue] checkResponse() test":
let
r1 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 1'u64))
r2 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 2'u64))
r3 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 3'u64))
check:
checkResponse(r1, [Slot(11)]).isOk() == true
checkResponse(r1, @[]).isOk() == true
checkResponse(r1, @[Slot(11), Slot(11)]).isOk() == false
checkResponse(r1, [Slot(10)]).isOk() == false
checkResponse(r1, [Slot(12)]).isOk() == false
checkResponse(r2, [Slot(11)]).isOk() == true
checkResponse(r2, [Slot(12)]).isOk() == true
checkResponse(r2, @[]).isOk() == true
checkResponse(r2, [Slot(11), Slot(12)]).isOk() == true
checkResponse(r2, [Slot(12)]).isOk() == true
checkResponse(r2, [Slot(11), Slot(12), Slot(13)]).isOk() == false
checkResponse(r2, [Slot(10), Slot(11)]).isOk() == false
checkResponse(r2, [Slot(10)]).isOk() == false
checkResponse(r2, [Slot(12), Slot(11)]).isOk() == false
checkResponse(r2, [Slot(12), Slot(13)]).isOk() == false
checkResponse(r2, [Slot(13)]).isOk() == false
checkResponse(r2, [Slot(11), Slot(11)]).isOk() == false
checkResponse(r2, [Slot(12), Slot(12)]).isOk() == false
checkResponse(r3, @[Slot(11)]).isOk() == true
checkResponse(r3, @[Slot(12)]).isOk() == true
checkResponse(r3, @[Slot(13)]).isOk() == true
checkResponse(r3, @[Slot(11), Slot(12)]).isOk() == true
checkResponse(r3, @[Slot(11), Slot(13)]).isOk() == true
checkResponse(r3, @[Slot(12), Slot(13)]).isOk() == true
checkResponse(r3, @[Slot(11), Slot(13), Slot(12)]).isOk() == false
checkResponse(r3, @[Slot(12), Slot(13), Slot(11)]).isOk() == false
checkResponse(r3, @[Slot(13), Slot(12), Slot(11)]).isOk() == false
checkResponse(r3, @[Slot(13), Slot(11)]).isOk() == false
checkResponse(r3, @[Slot(13), Slot(12)]).isOk() == false
checkResponse(r3, @[Slot(12), Slot(11)]).isOk() == false
checkResponse(r3, @[Slot(11), Slot(11), Slot(11)]).isOk() == false
checkResponse(r3, @[Slot(11), Slot(12), Slot(12)]).isOk() == false
checkResponse(r3, @[Slot(11), Slot(13), Slot(13)]).isOk() == false
checkResponse(r3, @[Slot(12), Slot(13), Slot(13)]).isOk() == false
checkResponse(r3, @[Slot(12), Slot(12), Slot(12)]).isOk() == false
checkResponse(r3, @[Slot(13), Slot(13), Slot(13)]).isOk() == false
checkResponse(r3, @[Slot(11), Slot(11)]).isOk() == false
checkResponse(r3, @[Slot(12), Slot(12)]).isOk() == false
checkResponse(r3, @[Slot(13), Slot(13)]).isOk() == false
test "[SyncQueue] checkBlobsResponse() test":
let
r1 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 1'u64))
r2 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 2'u64))
r3 = SyncRequest[SomeTPeer](data: SyncRange.init(Slot(11), 3'u64))
d1 = Slot(11).repeat(MAX_BLOBS_PER_BLOCK)
d2 = Slot(12).repeat(MAX_BLOBS_PER_BLOCK)
d3 = Slot(13).repeat(MAX_BLOBS_PER_BLOCK)
check:
checkBlobsResponse(r1, [Slot(11)]).isOk() == true
checkBlobsResponse(r1, @[]).isOk() == true
checkBlobsResponse(r1, [Slot(11), Slot(11)]).isOk() == true
checkBlobsResponse(r1, [Slot(11), Slot(11), Slot(11)]).isOk() == true
checkBlobsResponse(r1, d1).isOk() == true
checkBlobsResponse(r1, d1 & @[Slot(11)]).isOk() == false
checkBlobsResponse(r1, [Slot(10)]).isOk() == false
checkBlobsResponse(r1, [Slot(12)]).isOk() == false
checkBlobsResponse(r2, [Slot(11)]).isOk() == true
checkBlobsResponse(r2, [Slot(12)]).isOk() == true
checkBlobsResponse(r2, @[]).isOk() == true
checkBlobsResponse(r2, [Slot(11), Slot(12)]).isOk() == true
checkBlobsResponse(r2, [Slot(11), Slot(11)]).isOk() == true
checkBlobsResponse(r2, [Slot(12), Slot(12)]).isOk() == true
checkBlobsResponse(r2, d1).isOk() == true
checkBlobsResponse(r2, d2).isOk() == true
checkBlobsResponse(r2, d1 & d2).isOk() == true
checkBlobsResponse(r2, [Slot(11), Slot(12), Slot(11)]).isOk() == false
checkBlobsResponse(r2, [Slot(12), Slot(11)]).isOk() == false
checkBlobsResponse(r2, d1 & @[Slot(11)]).isOk() == false
checkBlobsResponse(r2, d2 & @[Slot(12)]).isOk() == false
checkBlobsResponse(r2, @[Slot(11)] & d2 & @[Slot(12)]).isOk() == false
checkBlobsResponse(r2, d1 & d2 & @[Slot(12)]).isOk() == false
checkBlobsResponse(r2, d2 & d1).isOk() == false
checkBlobsResponse(r3, [Slot(11)]).isOk() == true
checkBlobsResponse(r3, [Slot(12)]).isOk() == true
checkBlobsResponse(r3, [Slot(13)]).isOk() == true
checkBlobsResponse(r3, @[]).isOk() == true
checkBlobsResponse(r3, [Slot(11), Slot(12)]).isOk() == true
checkBlobsResponse(r3, [Slot(11), Slot(11)]).isOk() == true
checkBlobsResponse(r3, [Slot(12), Slot(12)]).isOk() == true
checkBlobsResponse(r3, [Slot(11), Slot(13)]).isOk() == true
checkBlobsResponse(r3, [Slot(12), Slot(13)]).isOk() == true
checkBlobsResponse(r3, [Slot(13), Slot(13)]).isOk() == true
checkBlobsResponse(r3, d1).isOk() == true
checkBlobsResponse(r3, d2).isOk() == true
checkBlobsResponse(r3, d3).isOk() == true
checkBlobsResponse(r3, d1 & d2).isOk() == true
checkBlobsResponse(r3, d1 & d3).isOk() == true
checkBlobsResponse(r3, d2 & d3).isOk() == true
checkBlobsResponse(r3, [Slot(11), Slot(12), Slot(11)]).isOk() == false
checkBlobsResponse(r3, [Slot(11), Slot(13), Slot(12)]).isOk() == false
checkBlobsResponse(r3, [Slot(12), Slot(13), Slot(11)]).isOk() == false
checkBlobsResponse(r3, [Slot(12), Slot(11)]).isOk() == false
checkBlobsResponse(r3, [Slot(13), Slot(12)]).isOk() == false
checkBlobsResponse(r3, [Slot(13), Slot(11)]).isOk() == false
checkBlobsResponse(r3, d1 & @[Slot(11)]).isOk() == false
checkBlobsResponse(r3, d2 & @[Slot(12)]).isOk() == false
checkBlobsResponse(r3, d3 & @[Slot(13)]).isOk() == false
checkBlobsResponse(r3, @[Slot(11)] & d2 & @[Slot(12)]).isOk() == false
checkBlobsResponse(r3, @[Slot(12)] & d3 & @[Slot(13)]).isOk() == false
checkBlobsResponse(r3, @[Slot(11)] & d3 & @[Slot(13)]).isOk() == false
checkBlobsResponse(r2, d1 & d2 & @[Slot(12)]).isOk() == false
checkBlobsResponse(r2, d1 & d3 & @[Slot(13)]).isOk() == false
checkBlobsResponse(r2, d2 & d3 & @[Slot(13)]).isOk() == false
checkBlobsResponse(r2, d2 & d1).isOk() == false
checkBlobsResponse(r2, d3 & d2).isOk() == false
checkBlobsResponse(r2, d3 & d1).isOk() == false
test "[SyncManager] groupBlobs() test":
var
blocks = createChain(Slot(10) .. Slot(15))
blobs = createBlobs(blocks, @[Slot(11), Slot(11), Slot(12), Slot(14)])
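    # Blob layout: two sidecars at slot 11, one each at slots 12 and 14;
    # slots 10, 13 and 15 have no sidecars.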
let groupedRes = groupBlobs(blocks, blobs)
check groupedRes.isOk()
let grouped = groupedRes.get()
check:
len(grouped) == 6
# slot 10
len(grouped[0]) == 0
# slot 11
len(grouped[1]) == 2
grouped[1][0].signed_block_header.message.slot == Slot(11)
grouped[1][1].signed_block_header.message.slot == Slot(11)
# slot 12
len(grouped[2]) == 1
grouped[2][0].signed_block_header.message.slot == Slot(12)
# slot 13
len(grouped[3]) == 0
# slot 14
len(grouped[4]) == 1
grouped[4][0].signed_block_header.message.slot == Slot(14)
# slot 15
len(grouped[5]) == 0
# Add block with a gap from previous block.
let block17 = newClone ForkedSignedBeaconBlock(kind: ConsensusFork.Deneb)
block17[].denebData.message.slot = Slot(17)
blocks.add(block17)
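    # Slot 16 has no block, but the gap should not break grouping: the
    # appended block at slot 17 is expected to get an empty blob list.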
let groupedRes2 = groupBlobs(blocks, blobs)
check:
groupedRes2.isOk()
let grouped2 = groupedRes2.get()
check:
len(grouped2) == 7
len(grouped2[6]) == 0 # slot 17
let blob18 = new (ref BlobSidecar)
blob18[].signed_block_header.message.slot = Slot(18)
blobs.add(blob18)
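    # A sidecar at slot 18 has no matching block, so groupBlobs() is
    # expected to return an error.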
let groupedRes3 = groupBlobs(blocks, blobs)
check:
groupedRes3.isErr()