Fix bug in inCurrentEpoch and improve accumulator related tests (#1217)

* Fix bug in inCurrentEpoch and improve accumulator related tests

- Fix negative wraparound / underflow in inCurrentEpoch (see the sketch below)
- Add tests in the accumulator tests to verify the above
- Add header offer tests with an accumulator that does and doesn't
contain historical epochs
- Additional clean-up of the history tests
- Enable canonicalVerify in the tests
Kim De Mey, 2022-09-09 13:12:09 +02:00
commit 621c6a31a7 (parent 9d10f8fbae)
5 changed files with 232 additions and 65 deletions
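The wraparound in the first bullet, in isolation: with no historical epochs yet, historicalEpochs.len() * epochSize is 0, and subtracting 1 from an unsigned 0 wraps around to the maximum uint64 value, so the old predicate could never be true. A standalone Nim sketch of the two predicates (not the fluffy code itself):

  const epochSize = 8192          # same constant as in the accumulator code
  let historicalEpochsLen = 0     # accumulator with no completed epochs yet

  # Old predicate: uint64(0) - 1 wraps to 18446744073709551615, so no block
  # number can satisfy it, and headers of the first epoch were never seen as
  # being in the current epoch.
  doAssert not (5'u64 > uint64(historicalEpochsLen * epochSize) - 1)

  # Fixed predicate: no subtraction, no wraparound.
  doAssert 5'u64 >= uint64(historicalEpochsLen * epochSize)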


@@ -109,7 +109,15 @@ func buildAccumulatorData*(headers: seq[BlockHeader]):
 ## against the Accumulator and the header proofs.
 func inCurrentEpoch*(blockNumber: uint64, a: Accumulator): bool =
-  blockNumber > uint64(a.historicalEpochs.len() * epochSize) - 1
+  # Note:
+  # Block numbers start at 0, so historical epochs are filled as:
+  # 0 -> 8191 -> len = 1 * 8192
+  # 8192 -> 16383 -> len = 2 * 8192
+  # ...
+  # A block number is in the current epoch if it is bigger than the last block
+  # number in the last historical epoch, which is the same as being greater
+  # than or equal to the current length of historical epochs * epochSize.
+  blockNumber >= uint64(a.historicalEpochs.len() * epochSize)

 func inCurrentEpoch*(header: BlockHeader, a: Accumulator): bool =
   let blockNumber = header.blockNumber.truncate(uint64)

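Worked boundary check for the comment in the hunk above, with one completed historical epoch (illustrative, standalone Nim):

  const epochSize = 8192
  let historicalEpochsLen = 1   # blocks 0..8191 already flushed

  # last block of epoch 0 is historical; first block of epoch 1 is current
  doAssert not (8191'u64 >= uint64(historicalEpochsLen * epochSize))
  doAssert 8192'u64 >= uint64(historicalEpochsLen * epochSize)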

@@ -63,7 +63,7 @@ suite "Header Accumulator":
       check accumulator.hash_tree_root().data.toHex() == hashTreeRoots[i]

-  test "Header Accumulator Proofs":
+  test "Header Accumulator Canonical Verification":
     const
       # Amount of headers to be created and added to the accumulator
       amount = 25000
@@ -117,7 +117,54 @@ suite "Header Accumulator":
       check verifyHeader(accumulator, header, none(seq[Digest])).isErr()

-  test "Header Accumulator header hash for blocknumber":
+  test "Header Accumulator Canonical Verification - No Historical Epochs":
+    const
+      # Amount of headers to be created and added to the accumulator
+      amount = epochSize
+      # Headers to test verification for
+      headersToTest = [
+        0,
+        epochSize - 1]
+
+    var headers: seq[BlockHeader]
+    for i in 0..<amount:
+      # Note: These test headers will not form a blockchain, as the parent
+      # hashes are not properly filled in. That is fine for this test however,
+      # as the headers are not verified against the accumulator that way.
+      headers.add(BlockHeader(
+        blockNumber: i.stuint(256), difficulty: 1.stuint(256)))
+
+    let
+      accumulator = buildAccumulator(headers)
+      epochAccumulators = buildAccumulatorData(headers)
+
+    block: # Test valid headers
+      for i in headersToTest:
+        let header = headers[i]
+        let proofOpt =
+          if header.inCurrentEpoch(accumulator):
+            none(seq[Digest])
+          else:
+            let proof = buildProof(accumulator, epochAccumulators, header)
+            check proof.isOk()
+            some(proof.get())
+
+        check verifyHeader(accumulator, header, proofOpt).isOk()
+
+    block: # Test some invalid headers
+      # Test a header with a block number beyond the latest in the accumulator
+      let header = BlockHeader(blockNumber: (amount).stuint(256))
+      check verifyHeader(accumulator, header, none(seq[Digest])).isErr()
+
+      # Test altered block headers by changing the difficulty
+      for i in headersToTest:
+        let header = BlockHeader(
+          blockNumber: i.stuint(256), difficulty: 2.stuint(256))
+
+        check verifyHeader(accumulator, header, none(seq[Digest])).isErr()
+
+  test "Header Accumulator Blocknumber to Header Hash":
     var acc = Accumulator.init()

     let
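Why the "No Historical Epochs" test above pins down exactly the fixed bug: assuming the accumulator only flushes a full current epoch into historicalEpochs once a header of the next epoch arrives, building it from precisely epochSize headers (blocks 0..8191) leaves historicalEpochs empty. The fixed predicate then classifies both tested headers as current epoch, where the old one underflowed and classified neither (standalone sketch, flush behaviour as assumed above):

  const epochSize = 8192
  let historicalEpochsLen = 0   # full current epoch, nothing flushed yet

  for blockNumber in [0'u64, uint64(epochSize - 1)]:  # the headersToTest values
    # in the current epoch, so verifyHeader needs no proof for these headers
    doAssert blockNumber >= uint64(historicalEpochsLen * epochSize)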

@@ -8,7 +8,6 @@
 {.used.}

 import
-  std/algorithm,
   unittest2, stint,
   eth/keys,
   ../network/state/state_content,


@@ -30,10 +30,10 @@ proc newHistoryNode(rng: ref HmacDrbgContext, port: int): HistoryNode =
   return HistoryNode(discoveryProtocol: node, historyNetwork: hn)

-proc portalWireProtocol(hn: HistoryNode): PortalProtocol =
+proc portalProtocol(hn: HistoryNode): PortalProtocol =
   hn.historyNetwork.portalProtocol

-proc localNodeInfo(hn: HistoryNode): Node =
+proc localNode(hn: HistoryNode): Node =
   hn.discoveryProtocol.localNode

 proc start(hn: HistoryNode) =
@@ -61,87 +61,97 @@ proc createEmptyHeaders(fromNum: int, toNum: int): seq[BlockHeader] =
 proc headersToContentInfo(headers: seq[BlockHeader]): seq[ContentInfo] =
   var contentInfos: seq[ContentInfo]
   for h in headers:
-    let headerHash = h.blockHash()
-    let bk = BlockKey(chainId: 1'u16, blockHash: headerHash)
-    let ck = encode(ContentKey(contentType: blockHeader, blockHeaderKey: bk))
-    let headerEncoded = rlp.encode(h)
-    let ci = ContentInfo(contentKey: ck, content: headerEncoded)
+    let
+      headerHash = h.blockHash()
+      bk = BlockKey(chainId: 1'u16, blockHash: headerHash)
+      ck = encode(ContentKey(contentType: blockHeader, blockHeaderKey: bk))
+      headerEncoded = rlp.encode(h)
+      ci = ContentInfo(contentKey: ck, content: headerEncoded)
     contentInfos.add(ci)
   return contentInfos

 procSuite "History Content Network":
   let rng = newRng()

-  asyncTest "Get block by block number":
+  asyncTest "Get Block by Number":
+    # enough headers for one historical epoch in the master accumulator
+    const lastBlockNumber = 9000
+
     let
       historyNode1 = newHistoryNode(rng, 20302)
       historyNode2 = newHistoryNode(rng, 20303)
-    # enough headers so there will be at least two epochs
-    let numHeaders = 9000
-    var headers: seq[BlockHeader] = createEmptyHeaders(0, numHeaders)
-    let masterAccumulator = buildAccumulator(headers)
-    let epochAccumulators = buildAccumulatorData(headers)
+      headers = createEmptyHeaders(0, lastBlockNumber)
+      masterAccumulator = buildAccumulator(headers)
+      epochAccumulators = buildAccumulatorData(headers)

-    # both nodes start with the same master accumulator, but only node2 have all
-    # headers and all epoch accumulators
+    # Note:
+    # Both nodes start with the same master accumulator, but only node 2 has
+    # all headers and all epoch accumulators.
+    # Node 2 requires the master accumulator to do the block number to block
+    # hash mapping.
     await historyNode1.historyNetwork.initMasterAccumulator(some(masterAccumulator))
     await historyNode2.historyNetwork.initMasterAccumulator(some(masterAccumulator))

     for h in headers:
-      let headerHash = h.blockHash()
-      let bk = BlockKey(chainId: 1'u16, blockHash: headerHash)
-      let ck = ContentKey(contentType: blockHeader, blockHeaderKey: bk)
-      let ci = toContentId(ck)
-      let headerEncoded = rlp.encode(h)
-      historyNode2.portalWireProtocol().storeContent(ci, headerEncoded)
+      let
+        headerHash = h.blockHash()
+        blockKey = BlockKey(chainId: 1'u16, blockHash: headerHash)
+        contentKey = ContentKey(
+          contentType: blockHeader, blockHeaderKey: blockKey)
+        contentId = toContentId(contentKey)
+        headerEncoded = rlp.encode(h)
+      historyNode2.portalProtocol().storeContent(contentId, headerEncoded)

-    for ad in epochAccumulators:
-      let (ck, epochAccumulator) = ad
-      let id = toContentId(ck)
-      let bytes = SSZ.encode(epochAccumulator)
-      historyNode2.portalWireProtocol().storeContent(id, bytes)
+    for (contentKey, epochAccumulator) in epochAccumulators:
+      let contentId = toContentId(contentKey)
+      historyNode2.portalProtocol().storeContent(
+        contentId, SSZ.encode(epochAccumulator))

-    check historyNode1.portalWireProtocol().addNode(historyNode2.localNodeInfo()) == Added
-    check historyNode2.portalWireProtocol().addNode(historyNode1.localNodeInfo()) == Added
-
-    check (await historyNode1.portalWireProtocol().ping(historyNode2.localNodeInfo())).isOk()
-    check (await historyNode2.portalWireProtocol().ping(historyNode1.localNodeInfo())).isOk()
+    check:
+      historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
+      historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added
+      (await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
+      (await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()

-    for i in 0..numHeaders:
+    for i in 0..lastBlockNumber:
       let blockResponse = await historyNode1.historyNetwork.getBlock(1'u16, u256(i))
-      check:
-        blockResponse.isOk()
+      check blockResponse.isOk()

       let blockOpt = blockResponse.get()
-      check:
-        blockOpt.isSome()
+      check blockOpt.isSome()

       let (blockHeader, blockBody) = blockOpt.unsafeGet()
-      check:
-        blockHeader == headers[i]
+      check blockHeader == headers[i]

     await historyNode1.stop()
     await historyNode2.stop()

-  asyncTest "Offer maximum amout of content in one offer message":
+  asyncTest "Offer - Maximum Content Keys in 1 Message":
     let
       historyNode1 = newHistoryNode(rng, 20302)
       historyNode2 = newHistoryNode(rng, 20303)

-    check historyNode1.portalWireProtocol().addNode(historyNode2.localNodeInfo()) == Added
-    check historyNode2.portalWireProtocol().addNode(historyNode1.localNodeInfo()) == Added
-
-    check (await historyNode1.portalWireProtocol().ping(historyNode2.localNodeInfo())).isOk()
-    check (await historyNode2.portalWireProtocol().ping(historyNode1.localNodeInfo())).isOk()
+    check:
+      historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
+      historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added
+      (await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
+      (await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
+
+    # Need to run start to get the processContentLoop running
+    historyNode1.start()
+    historyNode2.start()

     let maxOfferedHistoryContent = getMaxOfferedContentKeys(
       uint32(len(historyProtocolId)), maxContentKeySize)

-    # one header too many to fit offer message, talkReq with this amout of header will fail
+    # one header too many to fit an offer message; a talkReq with this amount
+    # of headers must fail
     let headers = createEmptyHeaders(0, maxOfferedHistoryContent)
     let masterAccumulator = buildAccumulator(headers)
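An aside on the block number to block hash mapping mentioned in the note above: it starts from plain index arithmetic over the fixed epoch size, and the epoch accumulator selected this way then yields the header record. The helper names below are illustrative, not fluffy API:

  const epochSize = 8192

  func epochIndex(blockNumber: uint64): uint64 = blockNumber div epochSize
  func recordIndex(blockNumber: uint64): uint64 = blockNumber mod epochSize

  doAssert epochIndex(9000) == 1     # block 9000 sits in the second epoch
  doAssert recordIndex(9000) == 808  # at offset 808 within that epoch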
@@ -153,33 +163,136 @@ procSuite "History Content Network":
     # node 1 will offer content, so it needs to have it in its database
     for ci in contentInfos:
       let id = toContentId(ci.contentKey)
-      historyNode1.portalWireProtocol.storeContent(id, ci.content)
+      historyNode1.portalProtocol.storeContent(id, ci.content)

-    let offerResultTooMany = await historyNode1.portalWireProtocol.offer(
-      historyNode2.localNodeInfo(),
+    let offerResultTooMany = await historyNode1.portalProtocol.offer(
+      historyNode2.localNode(),
       contentInfos
     )

-    check:
-      # failing due timeout, as remote side won't respond to large discv5 packets
-      offerResultTooMany.isErr()
+    # failing due to timeout, as the remote side must drop too-large discv5
+    # packets
+    check offerResultTooMany.isErr()

     for ci in contentInfos:
       let id = toContentId(ci.contentKey)
-      check:
-        historyNode2.containsId(id) == false
+      check historyNode2.containsId(id) == false

-    # one contentkey less should make offer go through
+    # one content key less should make the offer go through
     let correctInfos = contentInfos[0..<len(contentInfos)-1]

-    let offerResultCorrect = await historyNode1.portalWireProtocol.offer(
-      historyNode2.localNodeInfo(),
+    let offerResultCorrect = await historyNode1.portalProtocol.offer(
+      historyNode2.localNode(),
       correctInfos
     )

-    check:
-      offerResultCorrect.isOk()
+    check offerResultCorrect.isOk()
+
+    for i, ci in contentInfos:
+      let id = toContentId(ci.contentKey)
+      if i < len(contentInfos) - 1:
+        check historyNode2.containsId(id) == true
+      else:
+        check historyNode2.containsId(id) == false
+
+    await historyNode1.stop()
+    await historyNode2.stop()
+
+  asyncTest "Offer - Headers with Historical Epochs":
+    const
+      lastBlockNumber = 9000
+      headersToTest = [0, epochSize - 1, lastBlockNumber]
+
+    let
+      headers = createEmptyHeaders(0, lastBlockNumber)
+      masterAccumulator = buildAccumulator(headers)
+      epochAccumulators = buildAccumulatorData(headers)
+      historyNode1 = newHistoryNode(rng, 20302)
+      historyNode2 = newHistoryNode(rng, 20303)
+
+    check:
+      historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
+      historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added
+      (await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
+      (await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
+
+    # Need to store the epoch accumulators, because otherwise the headers
+    # cannot currently be verified as being part of the canonical chain
+    for (contentKey, epochAccumulator) in epochAccumulators:
+      let contentId = toContentId(contentKey)
+      historyNode1.portalProtocol.storeContent(
+        contentId, SSZ.encode(epochAccumulator))
+
+    # Need to run start to get the processContentLoop running
+    historyNode1.start()
+    historyNode2.start()
+
+    await historyNode1.historyNetwork.initMasterAccumulator(some(masterAccumulator))
+    await historyNode2.historyNetwork.initMasterAccumulator(some(masterAccumulator))
+
+    let contentInfos = headersToContentInfo(headers)
+
+    for header in headersToTest:
+      let id = toContentId(contentInfos[header].contentKey)
+      historyNode1.portalProtocol.storeContent(id, contentInfos[header].content)
+
+      let offerResult = await historyNode1.portalProtocol.offer(
+        historyNode2.localNode(),
+        contentInfos[header..header]
+      )
+
+      check offerResult.isOk()
+
+    for header in headersToTest:
+      let id = toContentId(contentInfos[header].contentKey)
+      check historyNode2.containsId(id) == true
+
+    await historyNode1.stop()
+    await historyNode2.stop()
+
+  asyncTest "Offer - Headers with No Historical Epochs":
+    const
+      lastBlockNumber = epochSize - 1
+      headersToTest = [0, lastBlockNumber]
+
+    let
+      headers = createEmptyHeaders(0, lastBlockNumber)
+      masterAccumulator = buildAccumulator(headers)
+      historyNode1 = newHistoryNode(rng, 20302)
+      historyNode2 = newHistoryNode(rng, 20303)
+
+    check:
+      historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
+      historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added
+      (await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
+      (await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
+
+    # Need to run start to get the processContentLoop running
+    historyNode1.start()
+    historyNode2.start()
+
+    await historyNode1.historyNetwork.initMasterAccumulator(some(masterAccumulator))
+    await historyNode2.historyNetwork.initMasterAccumulator(some(masterAccumulator))
+
+    let contentInfos = headersToContentInfo(headers)
+
+    for header in headersToTest:
+      let id = toContentId(contentInfos[header].contentKey)
+      historyNode1.portalProtocol.storeContent(id, contentInfos[header].content)
+
+      let offerResult = await historyNode1.portalProtocol.offer(
+        historyNode2.localNode(),
+        contentInfos[header..header]
+      )
+
+      check offerResult.isOk()
+
+    for header in headersToTest:
+      let id = toContentId(contentInfos[header].contentKey)
+      check historyNode2.containsId(id) == true

     await historyNode1.stop()
     await historyNode2.stop()
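Back-of-envelope for the "one key too many" failure mode in the offer test above: discv5 caps a packet at 1280 bytes, so the number of content keys in one offer is bounded by what fits after the talkReq and protocol framing. The overhead and key-size values below are placeholders for illustration, not the numbers getMaxOfferedContentKeys actually derives:

  const
    maxDiscv5PacketSize = 1280   # discv5 wire spec packet limit
    messageOverhead = 136        # placeholder: talkReq + offer framing
    contentKeySize = 34          # placeholder: one encoded content key

  let maxKeys = (maxDiscv5PacketSize - messageOverhead) div contentKeySize
  # offering maxKeys + 1 keys no longer fits a single packet, the remote side
  # drops it, and the offer fails with a timeout
  echo maxKeys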


@@ -89,7 +89,7 @@ task test_portal_testnet, "Build test_portal_testnet":
 task testfluffy, "Run fluffy tests":
   # Need the nimbus_db_backend in state network tests as we need a Hexary to
   # start from, even though it only uses the MemoryDb.
-  test "fluffy/tests", "all_fluffy_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false"
+  test "fluffy/tests", "all_fluffy_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false -d:canonicalVerify=true"

 task testlcproxy, "Run light proxy tests":
   test "lc_proxy/tests", "test_proof_validation", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false"