Make accumulator finite at merge block (#1256)

- Let the accumulator finish its last pre-merge epoch (hash_tree_root
on the incomplete epoch).
- Adjust code to use isPreMerge and remove inCurrentEpoch
- Split up tests into a set that runs with the mainnet merge block
number and a set that runs with a testing value.
Kim De Mey 2022-10-10 12:59:55 +02:00 committed by GitHub
parent 538efad325
commit 78f7de1344
13 changed files with 310 additions and 279 deletions
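A minimal standalone sketch of the new merge boundary, using the constants from the accumulator diff below (plain Nim, no project dependencies; isPreMerge mirrors the helper in the diff, but this is an illustration, not the module itself):

const
  epochSize = 8192'u64                 # blocks per epoch accumulator
  mergeBlockNumber = 15537394'u64      # mainnet value; the real module makes this an intdefine
  # ceil(mergeBlockNumber / epochSize): the last pre-merge epoch is incomplete
  preMergeEpochs = (mergeBlockNumber + epochSize - 1) div epochSize

func isPreMerge(blockNumber: uint64): bool =
  blockNumber < mergeBlockNumber

when isMainModule:
  doAssert preMergeEpochs == 1897
  doAssert isPreMerge(mergeBlockNumber - 1)    # last proof-of-work block
  doAssert not isPreMerge(mergeBlockNumber)    # the merge block itself is post-merge
  echo "headers in the last pre-merge epoch: ", mergeBlockNumber mod epochSize  # 5362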

View File

@ -14,14 +14,21 @@ import
../../common/common_types,
./history_content
export ssz_serialization, merkleization, proofs
export ssz_serialization, merkleization, proofs, eth_types_rlp
# Header Accumulator, as per specification:
# https://github.com/ethereum/portal-network-specs/blob/master/history-network.md#the-header-accumulator
# But with the adjustment to finish the accumulator at merge point.
const
epochSize* = 8192 # blocks
maxHistoricalEpochs = 131072 # 2^17
# Allow this to be adjusted at compile time. If more constants need to be
# adjusted we can add some presets file.
mergeBlockNumber* {.intdefine.}: uint64 = 15537394
# Note: This is like a ceil(mergeBlockNumber / epochSize)
# Could use ceilDiv(mergeBlockNumber, epochSize) in future versions
preMergeEpochs* = (mergeBlockNumber + epochSize - 1) div epochSize
type
HeaderRecord* = object
@ -31,36 +38,39 @@ type
EpochAccumulator* = List[HeaderRecord, epochSize]
Accumulator* = object
historicalEpochs*: List[Bytes32, maxHistoricalEpochs]
historicalEpochs*: List[Bytes32, int(preMergeEpochs)]
currentEpoch*: EpochAccumulator
BlockHashResultType* = enum
BHash, HEpoch, UnknownBlockNumber
FinishedAccumulator* = object
historicalEpochs*: List[Bytes32, int(preMergeEpochs)]
BlockHashResult* = object
case kind*: BlockHashResultType
of BHash:
blockHash*: BlockHash
of HEpoch:
epochHash*: Bytes32
epochIndex*: uint64
blockRelativeIndex*: uint64
of UnknownBlockNumber:
discard
BlockEpochData* = object
epochHash*: Bytes32
blockRelativeIndex*: uint64
func init*(T: type Accumulator): T =
Accumulator(
historicalEpochs: List[Bytes32, maxHistoricalEpochs].init(@[]),
historicalEpochs: List[Bytes32, int(preMergeEpochs)].init(@[]),
currentEpoch: EpochAccumulator.init(@[])
)
func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
# TODO:
# Could probably also make this work with TTD instead of merge block number.
func updateAccumulator*(
a: var Accumulator, header: BlockHeader) =
doAssert(header.blockNumber.truncate(uint64) < mergeBlockNumber,
"No post merge blocks for header accumulator")
let lastTotalDifficulty =
if a.currentEpoch.len() == 0:
0.stuint(256)
else:
a.currentEpoch[^1].totalDifficulty
# TODO: It is a bit annoying to require an extra header + update call to
# finish an epoch. However, if we were to move this after adding the
# `HeaderRecord`, there would be no way to get the current total difficulty,
# unless another field is introduced in the `Accumulator` object.
if a.currentEpoch.len() == epochSize:
let epochHash = hash_tree_root(a.currentEpoch)
@ -75,6 +85,14 @@ func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
let res = a.currentEpoch.add(headerRecord)
doAssert(res, "Can't fail because of currentEpoch length check")
func isFinished*(a: Accumulator): bool =
a.historicalEpochs.len() == (int)(preMergeEpochs)
func finishAccumulator*(a: var Accumulator) =
let epochHash = hash_tree_root(a.currentEpoch)
doAssert(a.historicalEpochs.add(epochHash.data))
func hash*(a: Accumulator): hashes.Hash =
# TODO: This is used for the CountTable but it will be expensive.
hash(hash_tree_root(a).data)
@ -84,6 +102,9 @@ func buildAccumulator*(headers: seq[BlockHeader]): Accumulator =
for header in headers:
updateAccumulator(accumulator, header)
if header.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
finishAccumulator(accumulator)
accumulator
func buildAccumulatorData*(headers: seq[BlockHeader]):
@ -93,6 +114,9 @@ func buildAccumulatorData*(headers: seq[BlockHeader]):
for header in headers:
updateAccumulator(accumulator, header)
# TODO: By allowing updateAccumulator and finishAccumulator to return
# optionally the finished epoch accumulators we would avoid double
# hash_tree_root computations.
if accumulator.currentEpoch.len() == epochSize:
let
rootHash = accumulator.currentEpoch.hash_tree_root()
@ -103,26 +127,23 @@ func buildAccumulatorData*(headers: seq[BlockHeader]):
epochAccumulators.add((key, accumulator.currentEpoch))
if header.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
let
rootHash = accumulator.currentEpoch.hash_tree_root()
key = ContentKey(
contentType: epochAccumulator,
epochAccumulatorKey: EpochAccumulatorKey(
epochHash: rootHash))
epochAccumulators.add((key, accumulator.currentEpoch))
finishAccumulator(accumulator)
epochAccumulators
## Calls and helper calls for building header proofs and verifying headers
## against the Accumulator and the header proofs.
func inCurrentEpoch*(blockNumber: uint64, a: Accumulator): bool =
# Note:
# Block numbers start at 0, so historical epochs are set as:
# 0 -> 8191 -> len = 1 * 8192
# 8192 -> 16383 -> len = 2 * 8192
# ...
# A block number is in the current epoch if it is bigger than the last block
# number in the last historical epoch. Which is the same as being equal or
# bigger than current length of historical epochs * epochSize.
blockNumber >= uint64(a.historicalEpochs.len() * epochSize)
func inCurrentEpoch*(header: BlockHeader, a: Accumulator): bool =
let blockNumber = header.blockNumber.truncate(uint64)
blockNumber.inCurrentEpoch(a)
func getEpochIndex*(blockNumber: uint64): uint64 =
blockNumber div epochSize
@ -139,6 +160,12 @@ func getHeaderRecordIndex*(header: BlockHeader, epochIndex: uint64): uint64 =
## Get the relative header index for the epoch accumulator
getHeaderRecordIndex(header.blockNumber.truncate(uint64), epochIndex)
func isPreMerge*(blockNumber: uint64): bool =
blockNumber < mergeBlockNumber
func isPreMerge*(header: BlockHeader): bool =
isPreMerge(header.blockNumber.truncate(uint64))
func verifyProof*(
a: Accumulator, header: BlockHeader, proof: openArray[Digest]): bool =
let
@ -154,42 +181,26 @@ func verifyProof*(
verify_merkle_multiproof(@[leave], proof, @[gIndex], epochAccumulatorHash)
func verifyHeader*(
accumulator: Accumulator, header: BlockHeader, proof: Option[seq[Digest]]):
a: Accumulator, header: BlockHeader, proof: openArray[Digest]):
Result[void, string] =
if header.inCurrentEpoch(accumulator):
let blockNumber = header.blockNumber.truncate(uint64)
let relIndex = blockNumber - uint64(accumulator.historicalEpochs.len()) * epochSize
if relIndex > uint64(accumulator.currentEpoch.len() - 1):
return err("Blocknumber ahead of accumulator")
if accumulator.currentEpoch[relIndex].blockHash == header.blockHash():
if header.isPreMerge():
if a.verifyProof(header, proof):
ok()
else:
err("Header not part of canonical chain")
err("Proof verification failed")
else:
if proof.isSome():
if accumulator.verifyProof(header, proof.get):
ok()
else:
err("Proof verification failed")
else:
err("Need proof to verify header")
err("Cannot verify post merge header with accumulator proof")
func getHeaderHashForBlockNumber*(a: Accumulator, bn: UInt256): BlockHashResult=
func getBlockEpochDataForBlockNumber*(
a: Accumulator, bn: UInt256): Result[BlockEpochData, string] =
let blockNumber = bn.truncate(uint64)
if blockNumber.inCurrentEpoch(a):
let relIndex = blockNumber - uint64(a.historicalEpochs.len()) * epochSize
if relIndex > uint64(a.currentEpoch.len() - 1):
return BlockHashResult(kind: UnknownBlockNumber)
return BlockHashResult(kind: BHash, blockHash: a.currentEpoch[relIndex].blockHash)
else:
if blockNumber.isPreMerge:
let epochIndex = getEpochIndex(blockNumber)
return BlockHashResult(
kind: HEpoch,
ok(BlockEpochData(
epochHash: a.historicalEpochs[epochIndex],
epochIndex: epochIndex,
blockRelativeIndex: getHeaderRecordIndex(blockNumber, epochIndex)
)
blockRelativeIndex: getHeaderRecordIndex(blockNumber, epochIndex))
)
else:
err("Block number is post merge: " & $blockNumber)

View File

@ -38,7 +38,7 @@ type
blockHash*: BlockHash
EpochAccumulatorKey* = object
epochHash*: Digest
epochHash*: Digest # TODO: Perhaps this should be called epochRoot in the spec instead
MasterAccumulatorKeyType* = enum
latest = 0x00 # An SSZ Union None

View File

@ -514,30 +514,26 @@ proc getBlock*(
let accumulator = accumulatorOpt.unsafeGet()
let hashResponse = accumulator.getHeaderHashForBlockNumber(bn)
let epochDataRes = accumulator.getBlockEpochDataForBlockNumber(bn)
case hashResponse.kind
of BHash:
# we got header hash in current epoch accumulator, try to retrieve it from network
let blockResponse = await n.getBlock(hashResponse.blockHash)
return ok(blockResponse)
of HEpoch:
let digest = Digest(data: hashResponse.epochHash)
let epochOpt = await n.getEpochAccumulator(digest)
if epochDataRes.isOk():
let
epochData = epochDataRes.get()
digest = Digest(data: epochData.epochHash)
epochOpt = await n.getEpochAccumulator(digest)
if epochOpt.isNone():
return err("Cannot retrieve epoch accumulator for given block number")
let
epoch = epochOpt.unsafeGet()
blockHash = epoch[hashResponse.blockRelativeIndex].blockHash
blockHash = epoch[epochData.blockRelativeIndex].blockHash
let maybeBlock = await n.getBlock(blockHash)
return ok(maybeBlock)
of UnknownBlockNumber:
return err("Block number not included in master accumulator")
else:
return err(epochDataRes.error)
proc getInitialMasterAccumulator*(
n: HistoryNetwork):
@ -615,18 +611,12 @@ proc verifyCanonicalChain(
# epoch accumulators for it, and could just verify it with those. But the
# idea here is that eventually this gets changed so that the proof is sent
# together with the header.
let proofOpt =
if header.inCurrentEpoch(accumulator):
none(seq[Digest])
else:
let proof = await n.buildProof(header)
if proof.isErr():
# Can't verify without master and epoch accumulators
return err("Cannot build proof: " & proof.error)
else:
some(proof.get())
return verifyHeader(accumulator, header, proofOpt)
let proof = await n.buildProof(header)
if proof.isOk():
return verifyHeader(accumulator, header, proof.get())
else:
# Can't verify without master and epoch accumulators
return err("Cannot build proof: " & proof.error)
proc validateContent(
n: HistoryNetwork, content: seq[byte], contentKey: ByteList):
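The reworked getBlock path above maps a block number to its epoch hash via getBlockEpochDataForBlockNumber and then fetches that epoch accumulator from the network to read the header hash. A rough offline sketch of the same two-step lookup, with the network fetch replaced by a local table and the hash types simplified to strings (illustrative only; lookupBlockHash is a hypothetical helper, not part of the module):

import std/tables

const epochSize = 4'u64   # toy value for the example; the history network uses 8192

proc lookupBlockHash(
    historicalEpochs: seq[string],                   # one root per finished epoch
    epochAccumulators: Table[string, seq[string]],   # epoch root -> header hashes
    blockNumber: uint64): string =
  # Step 1: block number -> epoch root and relative index.
  let
    epochHash = historicalEpochs[int(blockNumber div epochSize)]
    relIndex = int(blockNumber mod epochSize)
  # Step 2: look up the epoch accumulator (locally here, over the network in
  # the code above) and read the header hash at the relative index.
  epochAccumulators[epochHash][relIndex]

when isMainModule:
  let
    historicalEpochs = @["epoch0-root", "epoch1-root"]
    epochAccumulators = {
      "epoch0-root": @["h0", "h1", "h2", "h3"],
      "epoch1-root": @["h4", "h5", "h6", "h7"]}.toTable
  doAssert lookupBlockHash(historicalEpochs, epochAccumulators, 5) == "h5"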

View File

@ -8,16 +8,12 @@
{. warning[UnusedImport]:off .}
import
./test_portal_wire_encoding,
./test_portal_wire_protocol,
./test_state_distance,
./test_state_content,
./test_state_network,
./test_history_content,
./test_accumulator,
./test_history_validation,
./test_history_network,
./test_header_content,
./test_accumulator,
./test_content_db,
./test_discovery_rpc,
./test_bridge_parser

View File

@ -0,0 +1,15 @@
# Nimbus
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{. warning[UnusedImport]:off .}
import
./test_portal_wire_encoding,
./test_history_content,
./test_header_content,
./test_state_content,
./test_accumulator_root

View File

@ -0,0 +1,50 @@
# Nimbus
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.used.}
{.push raises: [Defect].}
import
unittest2, stint, stew/byteutils,
eth/common/eth_types_rlp,
../../../data/history_data_parser,
../../../network/history/[history_content, accumulator]
suite "Header Accumulator Root":
test "Header Accumulator Update":
const
hashTreeRoots = [
"53521984da4bbdbb011fe8a1473bf71bdf1040b14214a05cd1ce6932775bc7fa",
"ae48c6d4e1b0a68324f346755645ba7e5d99da3dd1c38a9acd10e2fe4f43cfb4",
"52f7bd6204be2d98cb9d09aa375b4355140e0d65744ce7b2f3ea34d8e6453572"]
dataFile = "./fluffy/tests/blocks/mainnet_blocks_1-2.json"
let blockDataRes = readJsonType(dataFile, BlockDataTable)
check blockDataRes.isOk()
let blockData = blockDataRes.get()
var headers: seq[BlockHeader]
# Len of headers from blockdata + genesis header
headers.setLen(blockData.len() + 1)
headers[0] = getGenesisHeader()
for k, v in blockData.pairs:
let res = v.readBlockHeader()
check res.isOk()
let header = res.get()
headers[header.blockNumber.truncate(int)] = header
var accumulator: Accumulator
for i, hash in hashTreeRoots:
updateAccumulator(accumulator, headers[i])
check accumulator.hash_tree_root().data.toHex() == hashTreeRoots[i]

View File

@ -11,7 +11,7 @@
import
unittest2, stew/byteutils,
../network/header/header_content
../../../network/header/header_content
suite "Header Gossip ContentKey Encodings":
test "BlockHeader":

View File

@ -9,7 +9,8 @@
import
unittest2, stew/byteutils, stint,
../network/history/history_content
ssz_serialization, ssz_serialization/[proofs, merkleization],
../../../network/history/[history_content, accumulator]
# According to test vectors:
# https://github.com/ethereum/portal-network-specs/blob/master/content-keys-test-vectors.md#history-network-keys

View File

@ -9,7 +9,7 @@
import
unittest2, stint, stew/[byteutils, results], eth/p2p/discoveryv5/enr,
../network/wire/messages
../../../network/wire/messages
# According to test vectors:
# https://github.com/ethereum/portal-network-specs/blob/master/portal-wire-test-vectors.md

View File

@ -9,7 +9,7 @@
import
unittest2, stew/byteutils,
../network/state/state_content
../../../network/state/state_content
# According to test vectors:
# https://github.com/ethereum/portal-network-specs/blob/master/content-keys-test-vectors.md#state-network-keys

View File

@ -10,7 +10,7 @@
{.push raises: [Defect].}
import
unittest2, stint, stew/byteutils,
unittest2, stint,
eth/common/eth_types_rlp,
../data/history_data_parser,
../network/history/[history_content, accumulator]
@ -30,44 +30,12 @@ func buildProof(
return epochAccumulator.build_proof(gIndex)
suite "Header Accumulator":
test "Header Accumulator Update":
const
hashTreeRoots = [
"b629833240bb2f5eabfb5245be63d730ca4ed30d6a418340ca476e7c1f1d98c0",
"00cbebed829e1babb93f2300bebe7905a98cb86993c7fc09bb5b04626fd91ae5",
"88cce8439ebc0c1d007177ffb6831c15c07b4361984cc52235b6fd728434f0c7"]
dataFile = "./fluffy/tests/blocks/mainnet_blocks_1-2.json"
let blockDataRes = readJsonType(dataFile, BlockDataTable)
check blockDataRes.isOk()
let blockData = blockDataRes.get()
var headers: seq[BlockHeader]
# Len of headers from blockdata + genesis header
headers.setLen(blockData.len() + 1)
headers[0] = getGenesisHeader()
for k, v in blockData.pairs:
let res = v.readBlockHeader()
check res.isOk()
let header = res.get()
headers[header.blockNumber.truncate(int)] = header
var accumulator: Accumulator
for i, hash in hashTreeRoots:
updateAccumulator(accumulator, headers[i])
check accumulator.hash_tree_root().data.toHex() == hashTreeRoots[i]
test "Header Accumulator Canonical Verification":
const
# Amount of headers to be created and added to the accumulator
amount = 25000
# Headers to test verification for
amount = mergeBlockNumber
# Headers to test verification for.
# Note: This test assumes at least 5 epochs
headersToTest = [
0,
epochSize - 1,
@ -77,7 +45,7 @@ suite "Header Accumulator":
epochSize*3 - 1,
epochSize*3,
epochSize*3 + 1,
amount - 1]
int(amount) - 1]
var headers: seq[BlockHeader]
for i in 0..<amount:
@ -94,125 +62,81 @@ suite "Header Accumulator":
block: # Test valid headers
for i in headersToTest:
let header = headers[i]
let proofOpt =
if header.inCurrentEpoch(accumulator):
none(seq[Digest])
else:
let proof = buildProof(accumulator, epochAccumulators, header)
check proof.isOk()
let proof = buildProof(accumulator, epochAccumulators, header)
check:
proof.isOk()
verifyHeader(accumulator, header, proof.get()).isOk()
some(proof.get())
block: # Test invalid headers
# Post-merge block number must fail (> latest header in the accumulator)
let header = BlockHeader(blockNumber: mergeBlockNumber.stuint(256))
check verifyHeader(accumulator, header, @[]).isErr()
check verifyHeader(accumulator, header, proofOpt).isOk()
block: # Test some invalid headers
# Test a header with block number > than latest in accumulator
let header = BlockHeader(blockNumber: 25000.stuint(256))
check verifyHeader(accumulator, header, none(seq[Digest])).isErr()
# Test different block headers by altering the difficulty
# Test altered block headers by altering the difficulty
for i in headersToTest:
let proof = buildProof(accumulator, epochAccumulators, headers[i])
check:
proof.isOk()
# Alter the block header so the proof no longer matches
let header = BlockHeader(
blockNumber: i.stuint(256), difficulty: 2.stuint(256))
check verifyHeader(accumulator, header, none(seq[Digest])).isErr()
check verifyHeader(accumulator, header, proof.get()).isErr()
test "Header Accumulator Canonical Verification - No Historical Epochs":
const
# Amount of headers to be created and added to the accumulator
amount = epochSize
# Headers to test verification for
headersToTest = [
0,
epochSize - 1]
block: # Test invalid proofs
var proof: seq[Digest]
for i in 0..14:
var digest: Digest
proof.add(digest)
for i in headersToTest:
check verifyHeader(accumulator, headers[i], proof).isErr()
test "Header BlockNumber to EpochAccumulator Root":
# Note: This test assumes at least 3 epochs
const amount = mergeBlockNumber
var
headerHashes: seq[Hash256] = @[]
headers: seq[BlockHeader]
var headers: seq[BlockHeader]
for i in 0..<amount:
# Note: These test headers will not be a blockchain, as the parent hashes
# are not properly filled in. That's fine however for this test, as that
# is not the way the headers are verified with the accumulator.
headers.add(BlockHeader(
blockNumber: i.stuint(256), difficulty: 1.stuint(256)))
let header = BlockHeader(blockNumber: u256(i), difficulty: u256(1))
headers.add(header)
headerHashes.add(header.blockHash())
let
accumulator = buildAccumulator(headers)
epochAccumulators = buildAccumulatorData(headers)
let accumulator = buildAccumulator(headers)
block: # Test valid headers
for i in headersToTest:
let header = headers[i]
let proofOpt =
if header.inCurrentEpoch(accumulator):
none(seq[Digest])
else:
let proof = buildProof(accumulator, epochAccumulators, header)
check proof.isOk()
some(proof.get())
check verifyHeader(accumulator, header, proofOpt).isOk()
block: # Test some invalid headers
# Test a header with block number > than latest in accumulator
let header = BlockHeader(blockNumber: (amount).stuint(256))
check verifyHeader(accumulator, header, none(seq[Digest])).isErr()
# Test different block headers by altering the difficulty
for i in headersToTest:
let header = BlockHeader(
blockNumber: i.stuint(256), difficulty: 2.stuint(256))
check verifyHeader(accumulator, header, none(seq[Digest])).isErr()
test "Header Accumulator Blocknumber to Header Hash":
var acc = Accumulator.init()
let
numEpochs = 2
numHeadersInCurrentEpoch = 5
numHeaders = numEpochs * epochSize + numHeadersInCurrentEpoch
var headerHashes: seq[Hash256] = @[]
for i in 0..numHeaders:
var bh = BlockHeader()
bh.blockNumber = u256(i)
bh.difficulty = u256(1)
headerHashes.add(bh.blockHash())
acc.updateAccumulator(bh)
# get valid response for epoch 0
# Valid response for block numbers in epoch 0
block:
for i in 0..epochSize-1:
let res = acc.getHeaderHashForBlockNumber(u256(i))
for i in 0..<epochSize:
let res = accumulator.getBlockEpochDataForBlockNumber(u256(i))
check:
res.kind == HEpoch
res.epochIndex == 0
res.isOk()
res.get().epochHash == accumulator.historicalEpochs[0]
# get valid response for epoch 1
# Valid response for block numbers in epoch 1
block:
for i in epochSize..(2 * epochSize)-1:
let res = acc.getHeaderHashForBlockNumber(u256(i))
for i in epochSize..<(2 * epochSize):
let res = accumulator.getBlockEpochDataForBlockNumber(u256(i))
check:
res.kind == HEpoch
res.epochIndex == 1
res.isOk()
res.get().epochHash == accumulator.historicalEpochs[1]
# get valid response from current epoch (epoch 3)
# Valid response for block numbers in the incomplete (= last) epoch
block:
for i in (2 * epochSize)..(2 * epochSize) + numHeadersInCurrentEpoch:
let res = acc.getHeaderHashForBlockNumber(u256(i))
const startIndex = mergeBlockNumber - (mergeBlockNumber mod epochSize)
for i in startIndex..<mergeBlockNumber:
let res = accumulator.getBlockEpochDataForBlockNumber(u256(i))
check:
res.kind == BHash
res.blockHash == headerHashes[i]
res.isOk()
res.get().epochHash ==
accumulator.historicalEpochs[preMergeEpochs - 1]
# get valid response when getting unknown hash
# Error for block number at and past merge
block:
let firstUknownBlockNumber = (2 * epochSize) + numHeadersInCurrentEpoch + 1
let res = acc.getHeaderHashForBlockNumber(u256(firstUknownBlockNumber))
check:
res.kind == UnknownBlockNumber
let res1 = acc.getHeaderHashForBlockNumber(u256(3 * epochSize))
check:
res1.kind == UnknownBlockNumber
accumulator.getBlockEpochDataForBlockNumber(
u256(mergeBlockNumber)).isErr()
accumulator.getBlockEpochDataForBlockNumber(
u256(mergeBlockNumber + 1)).isErr()

View File

@ -74,14 +74,25 @@ procSuite "History Content Network":
let rng = newRng()
asyncTest "Get Block by Number":
# enough headers for one historical epoch in the master accumulator
const lastBlockNumber = 9000
const
lastBlockNumber = mergeBlockNumber - 1
headersToTest = [
0,
epochSize - 1,
epochSize,
epochSize*2 - 1,
epochSize*2,
epochSize*3 - 1,
epochSize*3,
epochSize*3 + 1,
int(lastBlockNumber)]
let
historyNode1 = newHistoryNode(rng, 20302)
historyNode2 = newHistoryNode(rng, 20303)
headers = createEmptyHeaders(0, lastBlockNumber)
headers = createEmptyHeaders(0, int(lastBlockNumber))
masterAccumulator = buildAccumulator(headers)
epochAccumulators = buildAccumulatorData(headers)
@ -115,7 +126,7 @@ procSuite "History Content Network":
(await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
(await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
for i in 0..lastBlockNumber:
for i in headersToTest:
let blockResponse = await historyNode1.historyNetwork.getBlock(u256(i))
check blockResponse.isOk()
@ -132,7 +143,15 @@ procSuite "History Content Network":
await historyNode2.stop()
asyncTest "Offer - Maximum Content Keys in 1 Message":
# Need to provide enough headers to have 1 epoch accumulator "finalized", as
# otherwise no headers with proofs can be generated.
const lastBlockNumber = epochSize
let
headers = createEmptyHeaders(0, lastBlockNumber)
masterAccumulator = buildAccumulator(headers)
epochAccumulators = buildAccumulatorData(headers)
historyNode1 = newHistoryNode(rng, 20302)
historyNode2 = newHistoryNode(rng, 20303)
@ -150,57 +169,65 @@ procSuite "History Content Network":
let maxOfferedHistoryContent = getMaxOfferedContentKeys(
uint32(len(historyProtocolId)), maxContentKeySize)
# one header too many to fit an offer message, talkReq with this amount of
# headers must fail
let headers = createEmptyHeaders(0, maxOfferedHistoryContent)
let masterAccumulator = buildAccumulator(headers)
await historyNode1.historyNetwork.initMasterAccumulator(some(masterAccumulator))
await historyNode2.historyNetwork.initMasterAccumulator(some(masterAccumulator))
let contentInfos = headersToContentInfo(headers)
# One of the nodes needs to have the epochAccumulator to build proofs from
# for the offered headers.
for (contentKey, epochAccumulator) in epochAccumulators:
let contentId = toContentId(contentKey)
historyNode2.portalProtocol().storeContent(
contentId, SSZ.encode(epochAccumulator))
# node 1 will offer content so it need to have it in its database
for ci in contentInfos:
let id = toContentId(ci.contentKey)
historyNode1.portalProtocol.storeContent(id, ci.content)
# This is one header more than maxOfferedHistoryContent
let contentInfos = headersToContentInfo(headers[0..maxOfferedHistoryContent])
let offerResultTooMany = await historyNode1.portalProtocol.offer(
historyNode2.localNode(),
contentInfos
)
# node 1 will offer the content so it needs to have it in its database
for contentInfo in contentInfos:
let id = toContentId(contentInfo.contentKey)
historyNode1.portalProtocol.storeContent(id, contentInfo.content)
# failing due timeout, as remote side must drop too large discv5 packets
check offerResultTooMany.isErr()
# Offering 1 content item too much which should result in a discv5 packet
# that is too large and thus not get any response.
block:
let offerResult = await historyNode1.portalProtocol.offer(
historyNode2.localNode(),
contentInfos
)
for ci in contentInfos:
let id = toContentId(ci.contentKey)
check historyNode2.containsId(id) == false
# Fails due to timeout, as the remote side must drop the too large discv5 packet
check offerResult.isErr()
# one content key less should make offer go through
let correctInfos = contentInfos[0..<len(contentInfos)-1]
let offerResultCorrect = await historyNode1.portalProtocol.offer(
historyNode2.localNode(),
correctInfos
)
check offerResultCorrect.isOk()
for i, ci in contentInfos:
let id = toContentId(ci.contentKey)
if i < len(contentInfos) - 1:
check historyNode2.containsId(id) == true
else:
for contentInfo in contentInfos:
let id = toContentId(contentInfo.contentKey)
check historyNode2.containsId(id) == false
# One content key less should make the offer succeed and should result
# in the content being transferred and stored on the other node.
block:
let offerResult = await historyNode1.portalProtocol.offer(
historyNode2.localNode(),
contentInfos[0..<maxOfferedHistoryContent]
)
check offerResult.isOk()
for i, contentInfo in contentInfos:
let id = toContentId(contentInfo.contentKey)
if i < len(contentInfos) - 1:
check historyNode2.containsId(id) == true
else:
check historyNode2.containsId(id) == false
await historyNode1.stop()
await historyNode2.stop()
asyncTest "Offer - Headers with Historical Epochs":
asyncTest "Offer - Headers with Historical Epochs - Stopped at Epoch":
const
lastBlockNumber = 9000
headersToTest = [0, epochSize - 1, lastBlockNumber]
# Needs one extra header currently due to the way that updateAccumulator
# works
lastBlockNumber = epochSize
headersToTest = [0, 1, lastBlockNumber div 2, lastBlockNumber - 1]
let
headers = createEmptyHeaders(0, lastBlockNumber)
@ -251,14 +278,21 @@ procSuite "History Content Network":
await historyNode1.stop()
await historyNode2.stop()
asyncTest "Offer - Headers with No Historical Epochs":
asyncTest "Offer - Headers with No Historical Epochs - Stopped at Merge Block":
const
lastBlockNumber = epochSize - 1
headersToTest = [0, lastBlockNumber]
lastBlockNumber = int(mergeBlockNumber - 1)
headersToTest = [
0,
1,
epochSize div 2,
epochSize - 1,
lastBlockNumber - 1,
lastBlockNumber]
let
headers = createEmptyHeaders(0, lastBlockNumber)
masterAccumulator = buildAccumulator(headers)
epochAccumulators = buildAccumulatorData(headers)
historyNode1 = newHistoryNode(rng, 20302)
historyNode2 = newHistoryNode(rng, 20303)
@ -270,6 +304,13 @@ procSuite "History Content Network":
(await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
(await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
# Need to store the epochAccumulators, as otherwise the headers currently
# cannot be verified as being part of the canonical chain.
for (contentKey, epochAccumulator) in epochAccumulators:
let contentId = toContentId(contentKey)
historyNode1.portalProtocol.storeContent(
contentId, SSZ.encode(epochAccumulator))
# Need to run start to get the processContentLoop running
historyNode1.start()
historyNode2.start()

View File

@ -89,7 +89,10 @@ task test_portal_testnet, "Build test_portal_testnet":
task testfluffy, "Run fluffy tests":
# Need the nimbus_db_backend in state network tests as we need a Hexary to
# start from, even though it only uses the MemoryDb.
test "fluffy/tests", "all_fluffy_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false -d:canonicalVerify=true"
test "fluffy/tests/portal_spec_tests/mainnet", "all_fluffy_portal_spec_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false -d:canonicalVerify=true"
# Running tests with a low `mergeBlockNumber` to make the tests faster.
# Using the real mainnet merge block number is not realistic for these tests.
test "fluffy/tests", "all_fluffy_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false -d:canonicalVerify=true -d:mergeBlockNumber:38130"
task testlcproxy, "Run light proxy tests":
test "lc_proxy/tests", "test_proof_validation", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false"