Fix bug in inCurrentEpoch and improve accumulator related tests (#1217)

* Fix bug in inCurrentEpoch and improve accumulator related tests

- Fix negative wraparound / underflow in inCurrentEpoch
- Add tests in accumulator tests to verify the above
- Add header offer tests with accumulator that does and doesn't
contain historical epochs
- Additional clean-up of history tests
- enable canonicalVerify in the tests
This commit is contained in:
Kim De Mey 2022-09-09 13:12:09 +02:00 committed by GitHub
parent 9d10f8fbae
commit 621c6a31a7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 232 additions and 65 deletions

View File

@ -109,7 +109,15 @@ func buildAccumulatorData*(headers: seq[BlockHeader]):
## against the Accumulator and the header proofs.
func inCurrentEpoch*(blockNumber: uint64, a: Accumulator): bool =
## Returns true when `blockNumber` belongs to the epoch that is still being
## accumulated, i.e. it is not covered by any entry in `a.historicalEpochs`.
# Removed (pre-fix) expression below: with an empty `historicalEpochs`
# the `uint64(0) - 1` term wraps around to high(uint64), so the comparison
# was always false — the underflow bug this commit fixes.
blockNumber > uint64(a.historicalEpochs.len() * epochSize) - 1
# Note:
# Block numbers start at 0, so historical epochs are set as:
# 0 -> 8191 -> len = 1 * 8192
# 8192 -> 16383 -> len = 2 * 8192
# ...
# A block number is in the current epoch if it is bigger than the last block
# number in the last historical epoch, which is the same as being equal to or
# bigger than the current length of historical epochs * epochSize.
blockNumber >= uint64(a.historicalEpochs.len() * epochSize)
func inCurrentEpoch*(header: BlockHeader, a: Accumulator): bool =
let blockNumber = header.blockNumber.truncate(uint64)

View File

@ -63,7 +63,7 @@ suite "Header Accumulator":
check accumulator.hash_tree_root().data.toHex() == hashTreeRoots[i]
test "Header Accumulator Proofs":
test "Header Accumulator Canonical Verification":
const
# Amount of headers to be created and added to the accumulator
amount = 25000
@ -117,7 +117,54 @@ suite "Header Accumulator":
check verifyHeader(accumulator, header, none(seq[Digest])).isErr()
test "Header Accumulator header hash for blocknumber":
test "Header Accumulator Canonical Verification - No Historical Epochs":
# Verifies headers against an accumulator that holds exactly one (still
# current) epoch, so no proofs against historical epochs are needed; this
# exercises the `inCurrentEpoch` underflow fix for the empty-epochs case.
const
# Amount of headers to be created and added to the accumulator
amount = epochSize
# Headers to test verification for
headersToTest = [
0,
epochSize - 1]
var headers: seq[BlockHeader]
for i in 0..<amount:
# Note: These test headers will not be a blockchain, as the parent hashes
# are not properly filled in. That's fine however for this test, as that
# is not the way the headers are verified with the accumulator.
headers.add(BlockHeader(
blockNumber: i.stuint(256), difficulty: 1.stuint(256)))
let
accumulator = buildAccumulator(headers)
epochAccumulators = buildAccumulatorData(headers)
block: # Test valid headers
for i in headersToTest:
let header = headers[i]
# Headers in the current epoch are verified without a proof; all
# headers here are expected to take the `none` branch.
let proofOpt =
if header.inCurrentEpoch(accumulator):
none(seq[Digest])
else:
let proof = buildProof(accumulator, epochAccumulators, header)
check proof.isOk()
some(proof.get())
check verifyHeader(accumulator, header, proofOpt).isOk()
block: # Test some invalid headers
# Test a header with block number > than latest in accumulator
let header = BlockHeader(blockNumber: (amount).stuint(256))
check verifyHeader(accumulator, header, none(seq[Digest])).isErr()
# Test different block headers by altering the difficulty
for i in headersToTest:
let header = BlockHeader(
blockNumber: i.stuint(256), difficulty: 2.stuint(256))
check verifyHeader(accumulator, header, none(seq[Digest])).isErr()
test "Header Accumulator Blocknumber to Header Hash":
var acc = Accumulator.init()
let

View File

@ -8,7 +8,6 @@
{.used.}
import
std/algorithm,
unittest2, stint,
eth/keys,
../network/state/state_content,
@ -81,10 +80,10 @@ suite "Content Database":
db.del(u256(2))
db.del(u256(1))
let realSize1 = db.realSize()
let size5 = db.size()
check:
size4 == size5
# the real size will be smaller, because after del there are free pages in sqlite

View File

@ -30,10 +30,10 @@ proc newHistoryNode(rng: ref HmacDrbgContext, port: int): HistoryNode =
return HistoryNode(discoveryProtocol: node, historyNetwork: hn)
proc portalWireProtocol(hn: HistoryNode): PortalProtocol =
proc portalProtocol(hn: HistoryNode): PortalProtocol =
## Convenience accessor for the Portal wire protocol of this node's
## history network.
return hn.historyNetwork.portalProtocol
proc localNodeInfo(hn: HistoryNode): Node =
proc localNode(hn: HistoryNode): Node =
## Convenience accessor for the discv5 local node of this history node.
result = hn.discoveryProtocol.localNode
proc start(hn: HistoryNode) =
@ -61,87 +61,97 @@ proc createEmptyHeaders(fromNum: int, toNum: int): seq[BlockHeader] =
proc headersToContentInfo(headers: seq[BlockHeader]): seq[ContentInfo] =
## Builds, for each header, a `ContentInfo` pairing the encoded
## block-header content key with the RLP-encoded header.
var contentInfos: seq[ContentInfo]
for h in headers:
# NOTE(review): the diff rendering shows both the old per-line `let`s and
# the new grouped `let` block; only one of the two variants exists in the
# actual file.
let headerHash = h.blockHash()
let bk = BlockKey(chainId: 1'u16, blockHash: headerHash)
let ck = encode(ContentKey(contentType: blockHeader, blockHeaderKey: bk))
let headerEncoded = rlp.encode(h)
let ci = ContentInfo(contentKey: ck, content: headerEncoded)
let
headerHash = h.blockHash()
bk = BlockKey(chainId: 1'u16, blockHash: headerHash)
ck = encode(ContentKey(contentType: blockHeader, blockHeaderKey: bk))
headerEncoded = rlp.encode(h)
ci = ContentInfo(contentKey: ck, content: headerEncoded)
contentInfos.add(ci)
return contentInfos
procSuite "History Content Network":
let rng = newRng()
asyncTest "Get block by block number":
asyncTest "Get Block by Number":
# enough headers for one historical epoch in the master accumulator
const lastBlockNumber = 9000
let
historyNode1 = newHistoryNode(rng, 20302)
historyNode2 = newHistoryNode(rng, 20303)
# enough headers so there will be at least two epochs
let numHeaders = 9000
var headers: seq[BlockHeader] = createEmptyHeaders(0, numHeaders)
headers = createEmptyHeaders(0, lastBlockNumber)
masterAccumulator = buildAccumulator(headers)
epochAccumulators = buildAccumulatorData(headers)
let masterAccumulator = buildAccumulator(headers)
let epochAccumulators = buildAccumulatorData(headers)
# both nodes start with the same master accumulator, but only node2 have all
# headers and all epoch accumulators
# Note:
# Both nodes start with the same master accumulator, but only node 2 has all
# headers and all epoch accumulators.
# node 2 requires the master accumulator to do the block number to block
# hash mapping.
await historyNode1.historyNetwork.initMasterAccumulator(some(masterAccumulator))
await historyNode2.historyNetwork.initMasterAccumulator(some(masterAccumulator))
for h in headers:
let headerHash = h.blockHash()
let bk = BlockKey(chainId: 1'u16, blockHash: headerHash)
let ck = ContentKey(contentType: blockHeader, blockHeaderKey: bk)
let ci = toContentId(ck)
let headerEncoded = rlp.encode(h)
historyNode2.portalWireProtocol().storeContent(ci, headerEncoded)
let
headerHash = h.blockHash()
blockKey = BlockKey(chainId: 1'u16, blockHash: headerHash)
contentKey = ContentKey(
contentType: blockHeader, blockHeaderKey: blockKey)
contentId = toContentId(contentKey)
headerEncoded = rlp.encode(h)
historyNode2.portalProtocol().storeContent(contentId, headerEncoded)
for ad in epochAccumulators:
let (ck, epochAccumulator) = ad
let id = toContentId(ck)
let bytes = SSZ.encode(epochAccumulator)
historyNode2.portalWireProtocol().storeContent(id, bytes)
for (contentKey, epochAccumulator) in epochAccumulators:
let contentId = toContentId(contentKey)
historyNode2.portalProtocol().storeContent(
contentId, SSZ.encode(epochAccumulator))
check historyNode1.portalWireProtocol().addNode(historyNode2.localNodeInfo()) == Added
check historyNode2.portalWireProtocol().addNode(historyNode1.localNodeInfo()) == Added
check:
historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added
check (await historyNode1.portalWireProtocol().ping(historyNode2.localNodeInfo())).isOk()
check (await historyNode2.portalWireProtocol().ping(historyNode1.localNodeInfo())).isOk()
(await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
(await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
for i in 0..numHeaders:
for i in 0..lastBlockNumber:
let blockResponse = await historyNode1.historyNetwork.getBlock(1'u16, u256(i))
check:
blockResponse.isOk()
check blockResponse.isOk()
let blockOpt = blockResponse.get()
check:
blockOpt.isSome()
check blockOpt.isSome()
let (blockHeader, blockBody) = blockOpt.unsafeGet()
check:
blockHeader == headers[i]
check blockHeader == headers[i]
await historyNode1.stop()
await historyNode2.stop()
asyncTest "Offer maximum amout of content in one offer message":
asyncTest "Offer - Maximum Content Keys in 1 Message":
let
historyNode1 = newHistoryNode(rng, 20302)
historyNode2 = newHistoryNode(rng, 20303)
check historyNode1.portalWireProtocol().addNode(historyNode2.localNodeInfo()) == Added
check historyNode2.portalWireProtocol().addNode(historyNode1.localNodeInfo()) == Added
check:
historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added
check (await historyNode1.portalWireProtocol().ping(historyNode2.localNodeInfo())).isOk()
check (await historyNode2.portalWireProtocol().ping(historyNode1.localNodeInfo())).isOk()
(await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
(await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
# Need to run start to get the processContentLoop running
historyNode1.start()
historyNode2.start()
let maxOfferedHistoryContent = getMaxOfferedContentKeys(
uint32(len(historyProtocolId)), maxContentKeySize)
# one header too many to fit offer message, talkReq with this amout of header will fail
# one header too many to fit an offer message, talkReq with this amount of
# headers must fail
let headers = createEmptyHeaders(0, maxOfferedHistoryContent)
let masterAccumulator = buildAccumulator(headers)
@ -153,33 +163,136 @@ procSuite "History Content Network":
# node 1 will offer content so it need to have it in its database
for ci in contentInfos:
let id = toContentId(ci.contentKey)
historyNode1.portalWireProtocol.storeContent(id, ci.content)
historyNode1.portalProtocol.storeContent(id, ci.content)
let offerResultTooMany = await historyNode1.portalWireProtocol.offer(
historyNode2.localNodeInfo(),
let offerResultTooMany = await historyNode1.portalProtocol.offer(
historyNode2.localNode(),
contentInfos
)
check:
# failing due timeout, as remote side won't respond to large discv5 packets
offerResultTooMany.isErr()
# failing due to a timeout, as the remote side must drop too-large discv5 packets
check offerResultTooMany.isErr()
for ci in contentInfos:
let id = toContentId(ci.contentKey)
check:
historyNode2.containsId(id) == false
check historyNode2.containsId(id) == false
# one contentkey less should make offer go through
# one content key less should make offer go through
let correctInfos = contentInfos[0..<len(contentInfos)-1]
let offerResultCorrect = await historyNode1.portalWireProtocol.offer(
historyNode2.localNodeInfo(),
let offerResultCorrect = await historyNode1.portalProtocol.offer(
historyNode2.localNode(),
correctInfos
)
check:
offerResultCorrect.isOk()
check offerResultCorrect.isOk()
for i, ci in contentInfos:
let id = toContentId(ci.contentKey)
if i < len(contentInfos) - 1:
check historyNode2.containsId(id) == true
else:
check historyNode2.containsId(id) == false
await historyNode1.stop()
await historyNode2.stop()
asyncTest "Offer - Headers with Historical Epochs":
# Offers headers from an accumulator that does contain historical epochs:
# node 2 must be able to verify offered headers against the stored epoch
# accumulators before accepting them.
const
lastBlockNumber = 9000
headersToTest = [0, epochSize - 1, lastBlockNumber]
let
headers = createEmptyHeaders(0, lastBlockNumber)
masterAccumulator = buildAccumulator(headers)
epochAccumulators = buildAccumulatorData(headers)
historyNode1 = newHistoryNode(rng, 20302)
historyNode2 = newHistoryNode(rng, 20303)
check:
historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added
(await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
(await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
# The epoch accumulators need to be stored, as without them the headers
# cannot be verified as currently being part of the canonical chain.
for (contentKey, epochAccumulator) in epochAccumulators:
let contentId = toContentId(contentKey)
historyNode1.portalProtocol.storeContent(
contentId, SSZ.encode(epochAccumulator))
# Need to run start to get the processContentLoop running
historyNode1.start()
historyNode2.start()
await historyNode1.historyNetwork.initMasterAccumulator(some(masterAccumulator))
await historyNode2.historyNetwork.initMasterAccumulator(some(masterAccumulator))
let contentInfos = headersToContentInfo(headers)
# Offer each selected header individually and check that node 2 accepted
# and stored it.
for header in headersToTest:
let id = toContentId(contentInfos[header].contentKey)
historyNode1.portalProtocol.storeContent(id, contentInfos[header].content)
let offerResult = await historyNode1.portalProtocol.offer(
historyNode2.localNode(),
contentInfos[header..header]
)
check offerResult.isOk()
for header in headersToTest:
let id = toContentId(contentInfos[header].contentKey)
check historyNode2.containsId(id) == true
await historyNode1.stop()
await historyNode2.stop()
asyncTest "Offer - Headers with No Historical Epochs":
# Offers headers from an accumulator without historical epochs: all
# headers are in the current epoch, so no epoch accumulators are stored
# and verification must succeed without proofs.
const
lastBlockNumber = epochSize - 1
headersToTest = [0, lastBlockNumber]
let
headers = createEmptyHeaders(0, lastBlockNumber)
masterAccumulator = buildAccumulator(headers)
historyNode1 = newHistoryNode(rng, 20302)
historyNode2 = newHistoryNode(rng, 20303)
check:
historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added
(await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
(await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
# Need to run start to get the processContentLoop running
historyNode1.start()
historyNode2.start()
await historyNode1.historyNetwork.initMasterAccumulator(some(masterAccumulator))
await historyNode2.historyNetwork.initMasterAccumulator(some(masterAccumulator))
let contentInfos = headersToContentInfo(headers)
# Offer each selected header individually and check that node 2 accepted
# and stored it.
for header in headersToTest:
let id = toContentId(contentInfos[header].contentKey)
historyNode1.portalProtocol.storeContent(id, contentInfos[header].content)
let offerResult = await historyNode1.portalProtocol.offer(
historyNode2.localNode(),
contentInfos[header..header]
)
check offerResult.isOk()
for header in headersToTest:
let id = toContentId(contentInfos[header].contentKey)
check historyNode2.containsId(id) == true
await historyNode1.stop()
await historyNode2.stop()

View File

@ -89,7 +89,7 @@ task test_portal_testnet, "Build test_portal_testnet":
task testfluffy, "Run fluffy tests":
# Need the nimbus_db_backend in state network tests as we need a Hexary to
# start from, even though it only uses the MemoryDb.
test "fluffy/tests", "all_fluffy_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false"
test "fluffy/tests", "all_fluffy_tests", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false -d:canonicalVerify=true"
task testlcproxy, "Run light proxy tests":
test "lc_proxy/tests", "test_proof_validation", "-d:chronicles_log_level=ERROR -d:chronosStrictException -d:nimbus_db_backend=sqlite -d:PREFER_BLST_SHA256=false"