# Fluffy
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  testutils/unittests,
  chronos,
  eth/p2p/discoveryv5/protocol as discv5_protocol,
  eth/p2p/discoveryv5/routing_table,
  eth/common/eth_types_rlp,
  eth/rlp,
  ../network/wire/[portal_protocol, portal_stream, portal_protocol_config],
  ../network/history/[history_network, accumulator, history_content],
  ../database/content_db,
  ./test_helpers
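
# A HistoryNode bundles a discv5 protocol instance with the Portal history
# network that runs on top of it, as used by the tests below.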
type HistoryNode = ref object
  discoveryProtocol*: discv5_protocol.Protocol
  historyNetwork*: HistoryNetwork
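
# Sets up a fully in-memory test node: the ContentDB is created with
# inMemory = true, so the tests leave no state on disk.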
proc newHistoryNode(
    rng: ref HmacDrbgContext, port: int, accumulator: FinishedAccumulator
): HistoryNode =
  let
    node = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(port))
    db = ContentDB.new(
      "", uint32.high, RadiusConfig(kind: Dynamic), node.localNode.id, inMemory = true
    )
    streamManager = StreamManager.new(node)
    historyNetwork =
      HistoryNetwork.new(PortalNetwork.none, node, db, streamManager, accumulator)

  return HistoryNode(discoveryProtocol: node, historyNetwork: historyNetwork)

proc portalProtocol(hn: HistoryNode): PortalProtocol =
  hn.historyNetwork.portalProtocol

proc localNode(hn: HistoryNode): Node =
  hn.discoveryProtocol.localNode

proc start(hn: HistoryNode) =
  hn.historyNetwork.start()

proc stop(hn: HistoryNode) {.async.} =
  discard hn.historyNetwork.stop()
  await hn.discoveryProtocol.closeWait()

proc containsId(hn: HistoryNode, contentId: ContentId): bool =
  return hn.historyNetwork.contentDB.get(contentId).isSome()

proc createEmptyHeaders(fromNum: int, toNum: int): seq[BlockHeader] =
  var headers: seq[BlockHeader]
  for i in fromNum .. toNum:
    var bh = BlockHeader()
    bh.number = BlockNumber(i)
    bh.difficulty = u256(i)
    # Keep these empty so that no fake block bodies need to be created.
    bh.ommersHash = EMPTY_UNCLE_HASH
    bh.txRoot = EMPTY_ROOT_HASH
    headers.add(bh)
  return headers
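
# Converts headers with their accumulator proofs into Portal ContentKV pairs:
# the content key is the encoded block-header-by-hash key, the content is the
# SSZ encoded BlockHeaderWithProof.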
proc headersToContentKV(headersWithProof: seq[BlockHeaderWithProof]): seq[ContentKV] =
  var contentKVs: seq[ContentKV]
  for headerWithProof in headersWithProof:
    let
      # TODO: Decoding step could be avoided
      header = rlp.decode(headerWithProof.header.asSeq(), BlockHeader)
      headerHash = header.blockHash()
      blockKey = BlockKey(blockHash: headerHash)
      contentKey =
        encode(ContentKey(contentType: blockHeader, blockHeaderKey: blockKey))
      contentKV =
        ContentKV(contentKey: contentKey, content: SSZ.encode(headerWithProof))
    contentKVs.add(contentKV)
  return contentKVs

procSuite "History Content Network":
  let rng = newRng()

  asyncTest "Get Block by Number":
    const
      lastBlockNumber = mergeBlockNumber - 1
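
      # Headers at the epoch boundaries, plus the first and the last pre-merge
      # blocks.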
      headersToTest = [
        0,
        EPOCH_SIZE - 1,
        EPOCH_SIZE,
        EPOCH_SIZE * 2 - 1,
        EPOCH_SIZE * 2,
        EPOCH_SIZE * 3 - 1,
        EPOCH_SIZE * 3,
        EPOCH_SIZE * 3 + 1,
        int(lastBlockNumber),
      ]

    let headers = createEmptyHeaders(0, int(lastBlockNumber))
    let accumulatorRes = buildAccumulatorData(headers)
    check accumulatorRes.isOk()

    let
      (masterAccumulator, epochRecords) = accumulatorRes.get()
      historyNode1 = newHistoryNode(rng, 20302, masterAccumulator)
      historyNode2 = newHistoryNode(rng, 20303, masterAccumulator)

    var selectedHeaders: seq[BlockHeader]
    for i in headersToTest:
      selectedHeaders.add(headers[i])

    let headersWithProof = buildHeadersWithProof(selectedHeaders, epochRecords)
    check headersWithProof.isOk()

    # Only node 2 stores the headers (by number)
    for headerWithProof in headersWithProof.get():
      let
        header = rlp.decode(headerWithProof.header.asSeq(), BlockHeader)
        contentKey = blockHeaderContentKey(header.number)
        encKey = encode(contentKey)
        contentId = toContentId(contentKey)
      historyNode2.portalProtocol().storeContent(
        encKey, contentId, SSZ.encode(headerWithProof)
      )
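
    # Link the nodes in each other's routing tables and verify connectivity.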
    check:
      historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
      historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added

      (await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
      (await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()
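
    # Node 1 stores nothing locally, so each block must be retrieved over the
    # network from node 2.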
    for i in headersToTest:
      let blockResponse = await historyNode1.historyNetwork.getBlock(i.uint64)

      check blockResponse.isOk()

      let (blockHeader, blockBody) = blockResponse.value()

      check blockHeader == headers[i]

    await historyNode1.stop()
    await historyNode2.stop()

  asyncTest "Offer - Maximum Content Keys in 1 Message":
    # Need to provide enough headers to have the accumulator "finished".
    const lastBlockNumber = int(mergeBlockNumber - 1)

    let headers = createEmptyHeaders(0, lastBlockNumber)
    let accumulatorRes = buildAccumulatorData(headers)
    check accumulatorRes.isOk()

    let
      (masterAccumulator, epochRecords) = accumulatorRes.get()
      historyNode1 = newHistoryNode(rng, 20302, masterAccumulator)
      historyNode2 = newHistoryNode(rng, 20303, masterAccumulator)

    check:
      historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
      historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added

      (await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
      (await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()

    # Need to run start to get the processContentLoop running
    historyNode1.start()
    historyNode2.start()
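
    # getMaxOfferedContentKeys gives the number of content keys that should
    # still fit in a single discv5 talk request payload, given the protocol id
    # length and the maximum content key size.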
    let maxOfferedHistoryContent =
      getMaxOfferedContentKeys(uint32(len(PortalProtocolId)), maxContentKeySize)

    let headersWithProof =
      buildHeadersWithProof(headers[0 .. maxOfferedHistoryContent], epochRecords)
    check headersWithProof.isOk()

    # This is one header more than maxOfferedHistoryContent
    let contentKVs = headersToContentKV(headersWithProof.get())

    # Node 1 will offer the content, so it needs to have it in its database.
    for contentKV in contentKVs:
      let id = toContentId(contentKV.contentKey)
      historyNode1.portalProtocol.storeContent(
        contentKV.contentKey, id, contentKV.content
      )

    # Offering one content item too many should result in a discv5 packet
    # that is too large and thus does not get any response.
    block:
      let offerResult =
        await historyNode1.portalProtocol.offer(historyNode2.localNode(), contentKVs)

      # Fails due to timeout, as the remote side must drop the too large
      # discv5 packet.
      check offerResult.isErr()

      for contentKV in contentKVs:
        let id = toContentId(contentKV.contentKey)
        check historyNode2.containsId(id) == false

    # One content key less should make the offer successful and should result
    # in the content being transferred to and stored on the other node.
    block:
      let offerResult = await historyNode1.portalProtocol.offer(
        historyNode2.localNode(), contentKVs[0 ..< maxOfferedHistoryContent]
      )

      check offerResult.isOk()

      # Make sure the content got processed out of the content queue.
      while not historyNode2.historyNetwork.contentQueue.empty():
        await sleepAsync(1.milliseconds)

      # Note: Something seems to have changed in chronos, causing different
      # behavior. validateContent, called through processContentLoop, used to
      # run immediately in the case of a "non async shortpath". This is no
      # longer the case, so the content is not yet validated and stored at
      # this point. Add an await here so that the store can happen.
      await sleepAsync(100.milliseconds)

      for i, contentKV in contentKVs:
        let id = toContentId(contentKV.contentKey)
        if i < len(contentKVs) - 1:
          check historyNode2.containsId(id) == true
        else:
          check historyNode2.containsId(id) == false

    await historyNode1.stop()
    await historyNode2.stop()

  asyncTest "Offer - Headers with No Historical Epochs - Stopped at Merge Block":
    const
      lastBlockNumber = int(mergeBlockNumber - 1)
      headersToTest =
        [0, 1, EPOCH_SIZE div 2, EPOCH_SIZE - 1, lastBlockNumber - 1, lastBlockNumber]

    let headers = createEmptyHeaders(0, lastBlockNumber)
    let accumulatorRes = buildAccumulatorData(headers)
    check accumulatorRes.isOk()

    let
      (masterAccumulator, epochRecords) = accumulatorRes.get()
      historyNode1 = newHistoryNode(rng, 20302, masterAccumulator)
      historyNode2 = newHistoryNode(rng, 20303, masterAccumulator)

    check:
      historyNode1.portalProtocol().addNode(historyNode2.localNode()) == Added
      historyNode2.portalProtocol().addNode(historyNode1.localNode()) == Added

      (await historyNode1.portalProtocol().ping(historyNode2.localNode())).isOk()
      (await historyNode2.portalProtocol().ping(historyNode1.localNode())).isOk()

    # Need to run start to get the processContentLoop running
    historyNode1.start()
    historyNode2.start()

    var selectedHeaders: seq[BlockHeader]
    for i in headersToTest:
      selectedHeaders.add(headers[i])

    let headersWithProof = buildHeadersWithProof(selectedHeaders, epochRecords)
    check headersWithProof.isOk()

    let contentKVs = headersToContentKV(headersWithProof.get())
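
    # Store each header on node 1 and offer it to node 2 one by one; every
    # offer should be accepted.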
    for contentKV in contentKVs:
      let id = toContentId(contentKV.contentKey)
      historyNode1.portalProtocol.storeContent(
        contentKV.contentKey, id, contentKV.content
      )

      let offerResult =
        await historyNode1.portalProtocol.offer(historyNode2.localNode(), @[contentKV])

      check offerResult.isOk()

    # Make sure the content got processed out of the content queue.
    while not historyNode2.historyNetwork.contentQueue.empty():
      await sleepAsync(1.milliseconds)

    await sleepAsync(100.milliseconds)

    for contentKV in contentKVs:
      let id = toContentId(contentKV.contentKey)
      check historyNode2.containsId(id) == true

    await historyNode1.stop()
    await historyNode2.stop()