# Nimbus - Portal Network
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  std/os,
  testutils/unittests, chronos,
  eth/p2p/discoveryv5/protocol as discv5_protocol, eth/p2p/discoveryv5/routing_table,
  eth/common/eth_types_rlp,
  eth/rlp,
  ../network/wire/[portal_protocol, portal_stream, portal_protocol_config],
  ../network/history/[history_network, accumulator, history_content],
  ../../nimbus/constants,
  ../content_db,
  ./test_helpers

type HistoryNode = ref object
|
|
|
|
discoveryProtocol*: discv5_protocol.Protocol
|
|
|
|
historyNetwork*: HistoryNetwork
|
|
|
|
|
|
|
|
proc newHistoryNode(rng: ref HmacDrbgContext, port: int): HistoryNode =
|
|
|
|
let
|
|
|
|
node = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(port))
|
|
|
|
db = ContentDB.new("", uint32.high, inMemory = true)
|
2022-08-17 07:32:06 +00:00
|
|
|
streamManager = StreamManager.new(node)
|
|
|
|
hn = HistoryNetwork.new(node, db, streamManager)
|
2022-08-04 06:34:53 +00:00
|
|
|
|
|
|
|
return HistoryNode(discoveryProtocol: node, historyNetwork: hn)
|
|
|
|
|
2022-09-09 11:12:09 +00:00
|
|
|
proc portalProtocol(hn: HistoryNode): PortalProtocol =
|
2022-08-04 06:34:53 +00:00
|
|
|
hn.historyNetwork.portalProtocol
|
|
|
|
|
2022-09-09 11:12:09 +00:00
|
|
|
proc localNode(hn: HistoryNode): Node =
|
2022-08-04 06:34:53 +00:00
|
|
|
hn.discoveryProtocol.localNode
|
|
|
|
|
|
|
|
proc start(hn: HistoryNode) =
|
|
|
|
hn.historyNetwork.start()
|
|
|
|
|
|
|
|
proc stop(hn: HistoryNode) {.async.} =
|
|
|
|
hn.historyNetwork.stop()
|
|
|
|
await hn.discoveryProtocol.closeWait()
|
|
|
|
|
2022-08-09 12:32:41 +00:00
|
|
|
proc containsId(hn: HistoryNode, contentId: ContentId): bool =
|
2022-09-10 19:00:27 +00:00
|
|
|
return hn.historyNetwork.contentDB.get(contentId).isSome()
|
2022-08-09 12:32:41 +00:00
|
|
|
|
|
|
|
proc createEmptyHeaders(fromNum: int, toNum: int): seq[BlockHeader] =
|
|
|
|
var headers: seq[BlockHeader]
|
|
|
|
for i in fromNum..toNum:
|
|
|
|
var bh = BlockHeader()
|
|
|
|
bh.blockNumber = u256(i)
|
|
|
|
bh.difficulty = u256(i)
|
|
|
|
# empty so that we won't care about creating fake block bodies
|
|
|
|
bh.ommersHash = EMPTY_UNCLE_HASH
|
2022-09-03 18:15:35 +00:00
|
|
|
bh.txRoot = EMPTY_ROOT_HASH
|
2022-08-09 12:32:41 +00:00
|
|
|
headers.add(bh)
|
|
|
|
return headers
|
|
|
|
|
|
|
|
proc headersToContentInfo(headers: seq[BlockHeader]): seq[ContentInfo] =
|
|
|
|
var contentInfos: seq[ContentInfo]
|
|
|
|
for h in headers:
|
2022-09-09 11:12:09 +00:00
|
|
|
let
|
|
|
|
headerHash = h.blockHash()
|
2022-09-28 07:09:38 +00:00
|
|
|
bk = BlockKey(blockHash: headerHash)
|
2022-09-09 11:12:09 +00:00
|
|
|
ck = encode(ContentKey(contentType: blockHeader, blockHeaderKey: bk))
|
|
|
|
headerEncoded = rlp.encode(h)
|
|
|
|
ci = ContentInfo(contentKey: ck, content: headerEncoded)
|
2022-08-09 12:32:41 +00:00
|
|
|
contentInfos.add(ci)
|
|
|
|
return contentInfos
|
|
|
|
|
2022-08-04 06:34:53 +00:00
|
|
|
procSuite "History Content Network":
|
|
|
|
let rng = newRng()
|
2022-09-09 11:12:09 +00:00
|
|
|
|
|
|
|
asyncTest "Get Block by Number":
|
2022-10-10 10:59:55 +00:00
|
|
|
const
|
|
|
|
lastBlockNumber = mergeBlockNumber - 1
|
|
|
|
|
|
|
|
headersToTest = [
|
|
|
|
0,
|
|
|
|
epochSize - 1,
|
|
|
|
epochSize,
|
|
|
|
epochSize*2 - 1,
|
|
|
|
epochSize*2,
|
|
|
|
epochSize*3 - 1,
|
|
|
|
epochSize*3,
|
|
|
|
epochSize*3 + 1,
|
|
|
|
int(lastBlockNumber)]
|
2022-09-09 11:12:09 +00:00
|
|
|
|
2022-08-04 06:34:53 +00:00
|
|
|
let
|
|
|
|
historyNode1 = newHistoryNode(rng, 20302)
|
|
|
|
historyNode2 = newHistoryNode(rng, 20303)
|
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
headers = createEmptyHeaders(0, int(lastBlockNumber))
|
2022-09-09 11:12:09 +00:00
|
|
|
masterAccumulator = buildAccumulator(headers)
|
|
|
|
epochAccumulators = buildAccumulatorData(headers)
|
2022-08-04 06:34:53 +00:00
|
|
|
|
2022-09-09 11:12:09 +00:00
|
|
|
# Note:
|
|
|
|
# Both nodes start with the same master accumulator, but only node 2 has all
|
|
|
|
# headers and all epoch accumulators.
|
|
|
|
# node 2 requires the master accumulator to do the block number to block
|
|
|
|
# hash mapping.
|
2022-08-04 06:34:53 +00:00
|
|
|
await historyNode1.historyNetwork.initMasterAccumulator(some(masterAccumulator))
|
|
|
|
await historyNode2.historyNetwork.initMasterAccumulator(some(masterAccumulator))
|
|
|
|
|
|
|
|
for h in headers:
|
2022-09-09 11:12:09 +00:00
|
|
|
let
|
|
|
|
headerHash = h.blockHash()
|
2022-09-28 07:09:38 +00:00
|
|
|
blockKey = BlockKey(blockHash: headerHash)
|
2022-09-09 11:12:09 +00:00
|
|
|
contentKey = ContentKey(
|
|
|
|
contentType: blockHeader, blockHeaderKey: blockKey)
|
|
|
|
contentId = toContentId(contentKey)
|
|
|
|
headerEncoded = rlp.encode(h)
|
|
|
|
historyNode2.portalProtocol().storeContent(contentId, headerEncoded)
|
|
|
|
|
|
|
|
for (contentKey, epochAccumulator) in epochAccumulators:
|
|
|
|
let contentId = toContentId(contentKey)
|
|
|
|
historyNode2.portalProtocol().storeContent(
|
|
|
|
contentId, SSZ.encode(epochAccumulator))
|
|
|
|
|
|
|
|
check:
|
|
|
|
historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
|
|
|
|
historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added
|
|
|
|
|
|
|
|
(await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
|
|
|
|
(await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
|
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
for i in headersToTest:
|
2022-09-28 07:09:38 +00:00
|
|
|
let blockResponse = await historyNode1.historyNetwork.getBlock(u256(i))
|
2022-08-04 06:34:53 +00:00
|
|
|
|
2022-09-09 11:12:09 +00:00
|
|
|
check blockResponse.isOk()
|
2022-08-04 06:34:53 +00:00
|
|
|
|
|
|
|
let blockOpt = blockResponse.get()
|
|
|
|
|
2022-09-09 11:12:09 +00:00
|
|
|
check blockOpt.isSome()
|
2022-08-04 06:34:53 +00:00
|
|
|
|
|
|
|
let (blockHeader, blockBody) = blockOpt.unsafeGet()
|
|
|
|
|
2022-09-09 11:12:09 +00:00
|
|
|
check blockHeader == headers[i]
|
2022-08-04 06:34:53 +00:00
|
|
|
|
|
|
|
await historyNode1.stop()
|
|
|
|
await historyNode2.stop()
|
2022-08-09 12:32:41 +00:00
|
|
|
|
2022-09-09 11:12:09 +00:00
|
|
|
asyncTest "Offer - Maximum Content Keys in 1 Message":
|
2022-10-10 10:59:55 +00:00
|
|
|
# Need to provide enough headers to have 1 epoch accumulator "finalized" as
|
|
|
|
# else no headers with proofs can be generated.
|
|
|
|
const lastBlockNumber = epochSize
|
|
|
|
|
2022-08-09 12:32:41 +00:00
|
|
|
let
|
2022-10-10 10:59:55 +00:00
|
|
|
headers = createEmptyHeaders(0, lastBlockNumber)
|
|
|
|
masterAccumulator = buildAccumulator(headers)
|
|
|
|
epochAccumulators = buildAccumulatorData(headers)
|
|
|
|
|
2022-08-09 12:32:41 +00:00
|
|
|
historyNode1 = newHistoryNode(rng, 20302)
|
|
|
|
historyNode2 = newHistoryNode(rng, 20303)
|
|
|
|
|
2022-09-09 11:12:09 +00:00
|
|
|
check:
|
|
|
|
historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
|
|
|
|
historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added
|
|
|
|
|
|
|
|
(await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
|
|
|
|
(await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
|
2022-08-09 12:32:41 +00:00
|
|
|
|
2022-09-09 11:12:09 +00:00
|
|
|
# Need to run start to get the processContentLoop running
|
|
|
|
historyNode1.start()
|
|
|
|
historyNode2.start()
|
2022-08-09 12:32:41 +00:00
|
|
|
|
|
|
|
let maxOfferedHistoryContent = getMaxOfferedContentKeys(
|
|
|
|
uint32(len(historyProtocolId)), maxContentKeySize)
|
|
|
|
|
|
|
|
await historyNode1.historyNetwork.initMasterAccumulator(some(masterAccumulator))
|
|
|
|
await historyNode2.historyNetwork.initMasterAccumulator(some(masterAccumulator))
|
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
# One of the nodes needs to have the epochAccumulator to build proofs from
|
|
|
|
# for the offered headers.
|
|
|
|
for (contentKey, epochAccumulator) in epochAccumulators:
|
|
|
|
let contentId = toContentId(contentKey)
|
|
|
|
historyNode2.portalProtocol().storeContent(
|
|
|
|
contentId, SSZ.encode(epochAccumulator))
|
2022-08-09 12:32:41 +00:00
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
# This is one header more than maxOfferedHistoryContent
|
|
|
|
let contentInfos = headersToContentInfo(headers[0..maxOfferedHistoryContent])
|
2022-08-09 12:32:41 +00:00
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
# node 1 will offer the content so it needs to have it in its database
|
|
|
|
for contentInfo in contentInfos:
|
|
|
|
let id = toContentId(contentInfo.contentKey)
|
|
|
|
historyNode1.portalProtocol.storeContent(id, contentInfo.content)
|
2022-08-09 12:32:41 +00:00
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
# Offering 1 content item too much which should result in a discv5 packet
|
|
|
|
# that is too large and thus not get any response.
|
|
|
|
block:
|
|
|
|
let offerResult = await historyNode1.portalProtocol.offer(
|
|
|
|
historyNode2.localNode(),
|
|
|
|
contentInfos
|
|
|
|
)
|
2022-08-09 12:32:41 +00:00
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
# Fail due timeout, as remote side must drop the too large discv5 packet
|
|
|
|
check offerResult.isErr()
|
2022-08-09 12:32:41 +00:00
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
for contentInfo in contentInfos:
|
|
|
|
let id = toContentId(contentInfo.contentKey)
|
|
|
|
check historyNode2.containsId(id) == false
|
2022-08-09 12:32:41 +00:00
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
# One content key less should make offer be succesful and should result
|
|
|
|
# in the content being transferred and stored on the other node.
|
|
|
|
block:
|
|
|
|
let offerResult = await historyNode1.portalProtocol.offer(
|
|
|
|
historyNode2.localNode(),
|
|
|
|
contentInfos[0..<maxOfferedHistoryContent]
|
|
|
|
)
|
2022-08-09 12:32:41 +00:00
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
check offerResult.isOk()
|
2022-09-09 11:12:09 +00:00
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
for i, contentInfo in contentInfos:
|
|
|
|
let id = toContentId(contentInfo.contentKey)
|
|
|
|
if i < len(contentInfos) - 1:
|
|
|
|
check historyNode2.containsId(id) == true
|
|
|
|
else:
|
|
|
|
check historyNode2.containsId(id) == false
|
2022-09-09 11:12:09 +00:00
|
|
|
|
|
|
|
await historyNode1.stop()
|
|
|
|
await historyNode2.stop()
|
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
asyncTest "Offer - Headers with Historical Epochs - Stopped at Epoch":
|
2022-09-09 11:12:09 +00:00
|
|
|
const
|
2022-10-10 10:59:55 +00:00
|
|
|
# Needs one extra header currently due to the way that updateAccumulator
|
|
|
|
# works
|
|
|
|
lastBlockNumber = epochSize
|
|
|
|
headersToTest = [0, 1, lastBlockNumber div 2, lastBlockNumber - 1]
|
2022-09-09 11:12:09 +00:00
|
|
|
|
|
|
|
let
|
|
|
|
headers = createEmptyHeaders(0, lastBlockNumber)
|
|
|
|
masterAccumulator = buildAccumulator(headers)
|
|
|
|
epochAccumulators = buildAccumulatorData(headers)
|
|
|
|
|
|
|
|
historyNode1 = newHistoryNode(rng, 20302)
|
|
|
|
historyNode2 = newHistoryNode(rng, 20303)
|
|
|
|
|
2022-08-09 12:32:41 +00:00
|
|
|
check:
|
2022-09-09 11:12:09 +00:00
|
|
|
historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
|
|
|
|
historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added
|
|
|
|
|
|
|
|
(await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
|
|
|
|
(await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
|
|
|
|
|
|
|
|
# Need to store the epochAccumulators, because else the headers can't be
|
|
|
|
# verified if being part of the canonical chain currently
|
|
|
|
for (contentKey, epochAccumulator) in epochAccumulators:
|
|
|
|
let contentId = toContentId(contentKey)
|
|
|
|
historyNode1.portalProtocol.storeContent(
|
|
|
|
contentId, SSZ.encode(epochAccumulator))
|
|
|
|
|
|
|
|
# Need to run start to get the processContentLoop running
|
|
|
|
historyNode1.start()
|
|
|
|
historyNode2.start()
|
|
|
|
|
|
|
|
await historyNode1.historyNetwork.initMasterAccumulator(some(masterAccumulator))
|
|
|
|
await historyNode2.historyNetwork.initMasterAccumulator(some(masterAccumulator))
|
|
|
|
|
|
|
|
let contentInfos = headersToContentInfo(headers)
|
|
|
|
|
|
|
|
for header in headersToTest:
|
|
|
|
let id = toContentId(contentInfos[header].contentKey)
|
|
|
|
historyNode1.portalProtocol.storeContent(id, contentInfos[header].content)
|
|
|
|
|
|
|
|
let offerResult = await historyNode1.portalProtocol.offer(
|
|
|
|
historyNode2.localNode(),
|
|
|
|
contentInfos[header..header]
|
|
|
|
)
|
|
|
|
|
|
|
|
check offerResult.isOk()
|
|
|
|
|
|
|
|
for header in headersToTest:
|
|
|
|
let id = toContentId(contentInfos[header].contentKey)
|
|
|
|
check historyNode2.containsId(id) == true
|
|
|
|
|
|
|
|
await historyNode1.stop()
|
|
|
|
await historyNode2.stop()
|
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
asyncTest "Offer - Headers with No Historical Epochs - Stopped at Merge Block":
|
2022-09-09 11:12:09 +00:00
|
|
|
const
|
2022-10-10 10:59:55 +00:00
|
|
|
lastBlockNumber = int(mergeBlockNumber - 1)
|
|
|
|
headersToTest = [
|
|
|
|
0,
|
|
|
|
1,
|
|
|
|
epochSize div 2,
|
|
|
|
epochSize - 1,
|
|
|
|
lastBlockNumber - 1,
|
|
|
|
lastBlockNumber]
|
2022-09-09 11:12:09 +00:00
|
|
|
|
|
|
|
let
|
|
|
|
headers = createEmptyHeaders(0, lastBlockNumber)
|
|
|
|
masterAccumulator = buildAccumulator(headers)
|
2022-10-10 10:59:55 +00:00
|
|
|
epochAccumulators = buildAccumulatorData(headers)
|
2022-09-09 11:12:09 +00:00
|
|
|
|
|
|
|
historyNode1 = newHistoryNode(rng, 20302)
|
|
|
|
historyNode2 = newHistoryNode(rng, 20303)
|
|
|
|
|
|
|
|
check:
|
|
|
|
historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
|
|
|
|
historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added
|
|
|
|
|
|
|
|
(await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
|
|
|
|
(await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
|
|
|
|
|
2022-10-10 10:59:55 +00:00
|
|
|
# Need to store the epochAccumulators, because else the headers can't be
|
|
|
|
# verified if being part of the canonical chain currently
|
|
|
|
for (contentKey, epochAccumulator) in epochAccumulators:
|
|
|
|
let contentId = toContentId(contentKey)
|
|
|
|
historyNode1.portalProtocol.storeContent(
|
|
|
|
contentId, SSZ.encode(epochAccumulator))
|
|
|
|
|
2022-09-09 11:12:09 +00:00
|
|
|
# Need to run start to get the processContentLoop running
|
|
|
|
historyNode1.start()
|
|
|
|
historyNode2.start()
|
|
|
|
|
|
|
|
await historyNode1.historyNetwork.initMasterAccumulator(some(masterAccumulator))
|
|
|
|
await historyNode2.historyNetwork.initMasterAccumulator(some(masterAccumulator))
|
|
|
|
|
|
|
|
let contentInfos = headersToContentInfo(headers)
|
|
|
|
|
|
|
|
for header in headersToTest:
|
|
|
|
let id = toContentId(contentInfos[header].contentKey)
|
|
|
|
historyNode1.portalProtocol.storeContent(id, contentInfos[header].content)
|
|
|
|
|
|
|
|
let offerResult = await historyNode1.portalProtocol.offer(
|
|
|
|
historyNode2.localNode(),
|
|
|
|
contentInfos[header..header]
|
|
|
|
)
|
|
|
|
|
|
|
|
check offerResult.isOk()
|
|
|
|
|
|
|
|
for header in headersToTest:
|
|
|
|
let id = toContentId(contentInfos[header].contentKey)
|
|
|
|
check historyNode2.containsId(id) == true
|
2022-08-09 12:32:41 +00:00
|
|
|
|
|
|
|
await historyNode1.stop()
|
|
|
|
await historyNode2.stop()
|