Mirror of https://github.com/status-im/nimbus-eth1.git (synced 2025-02-24 17:58:30 +00:00)
Add utpTransfer bool to recursiveFindContent JSON-RPC result (#1710)
Rename the portal protocol ContentInfo type to ContentKV in the process, as there are otherwise duplicate ContentInfo types and ContentKV is simply a better name for this object.
Commit: 7aead6151e
Parent: 820525d78c
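For illustration: after this change a RecursiveFindContent call (e.g. portal_historyRecursiveFindContent) returns a JSON object with content and utpTransfer fields instead of a bare hex string. Below is a minimal client-side sketch, using Nim's std/json, of reading such a result; the sample response values are made up for the example and are not output from this commit.

# Minimal sketch (not part of this commit): decoding the extended
# recursiveFindContent result on the client side.
import std/json

let response = parseJson("""
  {"jsonrpc": "2.0", "id": 1,
   "result": {"content": "0x0102abcd", "utpTransfer": true}}
""")

let res = response["result"]
# `content` holds the hex-encoded content bytes; `utpTransfer` reports whether
# the content was delivered over a uTP stream (used for larger payloads)
# rather than directly in the response.
echo "content:     ", res["content"].getStr()
echo "utpTransfer: ", res["utpTransfer"].getBool()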
@@ -63,7 +63,9 @@ proc depthContentPropagate*(
     seenOnly = true
   )

-  proc worker(p: PortalProtocol, db: SeedDb, node: Node, radius: UInt256): Future[void] {.async.} =
+  proc worker(
+      p: PortalProtocol, db: SeedDb, node: Node, radius: UInt256):
+      Future[void] {.async.} =
     var offset = 0
     while true:
       let content = db.getContentInRange(node.id, radius, batchSize, offset)
@@ -71,12 +73,13 @@ proc depthContentPropagate*(
       if len(content) == 0:
         break

-      var contentInfo: seq[ContentInfo]
+      var contentKV: seq[ContentKV]
       for e in content:
-        let info = ContentInfo(contentKey: ByteList.init(e.contentKey), content: e.content)
-        contentInfo.add(info)
+        let info = ContentKV(
+          contentKey: ByteList.init(e.contentKey), content: e.content)
+        contentKV.add(info)

-      let offerResult = await p.offer(node, contentInfo)
+      let offerResult = await p.offer(node, contentKV)

       if offerResult.isErr() or len(content) < batchSize:
         # peer failed or we reached end of database stop offering more content
@@ -89,7 +92,8 @@ proc depthContentPropagate*(

   var offset = 0
   while true:
-    let content = db.getContentInRange(p.localNode.id, p.dataRadius, localBatchSize, offset)
+    let content = db.getContentInRange(
+      p.localNode.id, p.dataRadius, localBatchSize, offset)

     if len(content) == 0:
       break
@@ -127,7 +131,8 @@ proc depthContentPropagate*(

   return ok()

-func contentDataToKeys(contentData: seq[ContentDataDist]): (ContentKeysList, seq[seq[byte]]) =
+func contentDataToKeys(
+    contentData: seq[ContentDataDist]): (ContentKeysList, seq[seq[byte]]) =
   var contentKeys: seq[ByteList]
   var content: seq[seq[byte]]
   for cd in contentData:
@@ -176,7 +181,8 @@ proc breadthContentPropagate*(
   while true:
     # Setting radius to `UInt256.high` and using batchSize and offset, means
     # we will iterate over whole database in batches of `maxItemsPerOffer` items
-    var contentData = db.getContentInRange(target, UInt256.high, batchSize, offset)
+    var contentData = db.getContentInRange(
+      target, UInt256.high, batchSize, offset)

     if len(contentData) == 0:
       break
@@ -237,16 +243,17 @@ proc offerContentInNodeRange*(
   let
     db = SeedDb.new(path = dbPath, name = dbName)
     (node, radius) = maybeNodeAndRadius.unsafeGet()
-    content = db.getContentInRange(node.id, radius, int64(numberToToOffer), int64(starting))
+    content = db.getContentInRange(
+      node.id, radius, int64(numberToToOffer), int64(starting))

   # We got all we wanted from seed_db, it can be closed now.
   db.close()

-  var ci: seq[ContentInfo]
+  var ci: seq[ContentKV]

   for cont in content:
     let k = ByteList.init(cont.contentKey)
-    let info = ContentInfo(contentKey: k, content: cont.content)
+    let info = ContentKV(contentKey: k, content: cont.content)
     ci.add(info)

   # waiting for offer result, by the end of this call remote node should
@@ -274,7 +281,8 @@ proc storeContentInNodeRange*(
     localRadius = p.dataRadius
     db = SeedDb.new(path = dbPath, name = dbName)
     localId = p.localNode.id
-    contentInRange = db.getContentInRange(localId, localRadius, int64(max), int64(starting))
+    contentInRange = db.getContentInRange(
+      localId, localRadius, int64(max), int64(starting))

   db.close()

@@ -138,7 +138,7 @@ type

   RadiusCache* = LRUCache[NodeId, UInt256]

-  ContentInfo* = object
+  ContentKV* = object
     contentKey*: ByteList
     content*: seq[byte]

@@ -149,7 +149,7 @@ type
     dst: Node
     case kind: OfferRequestType
     of Direct:
-      contentList: List[ContentInfo, contentKeysLimit]
+      contentList: List[ContentKV, contentKeysLimit]
     of Database:
       contentKeys: ContentKeysList

@@ -188,15 +188,16 @@ type

   ContentLookupResult* = object
     content*: seq[byte]
+    utpTransfer*: bool
     # List of nodes which do not have requested content, and for which
     # content is in their range
     nodesInterestedInContent*: seq[Node]

 proc init*(
-    T: type ContentInfo,
+    T: type ContentKV,
     contentKey: ByteList,
     content: seq[byte]): T =
-  ContentInfo(
+  ContentKV(
     contentKey: contentKey,
     content: content
   )
@@ -204,9 +205,11 @@ proc init*(
 proc init*(
     T: type ContentLookupResult,
     content: seq[byte],
+    utpTransfer: bool,
     nodesInterestedInContent: seq[Node]): T =
   ContentLookupResult(
     content: content,
+    utpTransfer: utpTransfer,
     nodesInterestedInContent: nodesInterestedInContent
   )

@@ -832,12 +835,12 @@ proc offer*(p: PortalProtocol, dst: Node, contentKeys: ContentKeysList):
   let req = OfferRequest(dst: dst, kind: Database, contentKeys: contentKeys)
   return await p.offer(req)

-proc offer*(p: PortalProtocol, dst: Node, content: seq[ContentInfo]):
+proc offer*(p: PortalProtocol, dst: Node, content: seq[ContentKV]):
     Future[PortalResult[ContentKeysBitList]] {.async.} =
   if len(content) > contentKeysLimit:
     return err("Cannot offer more than 64 content items")

-  let contentList = List[ContentInfo, contentKeysLimit].init(content)
+  let contentList = List[ContentKV, contentKeysLimit].init(content)
   let req = OfferRequest(dst: dst, kind: Direct, contentList: contentList)
   return await p.offer(req)

@@ -939,8 +942,8 @@ proc triggerPoke*(
     if not p.offerQueue.full():
       try:
         let
-          ci = ContentInfo(contentKey: contentKey, content: content)
-          list = List[ContentInfo, contentKeysLimit].init(@[ci])
+          contentKV = ContentKV(contentKey: contentKey, content: content)
+          list = List[ContentKV, contentKeysLimit].init(@[contentKV])
           req = OfferRequest(dst: node, kind: Direct, contentList: list)
         p.offerQueue.putNoWait(req)
       except AsyncQueueFullError as e:
@@ -1034,7 +1037,8 @@ proc contentLookup*(p: PortalProtocol, target: ByteList, targetId: UInt256):
       for f in pendingQueries:
         f.cancel()
       portal_lookup_content_requests.observe(requestAmount)
-      return Opt.some(ContentLookupResult.init(content.content, nodesWithoutContent))
+      return Opt.some(ContentLookupResult.init(
+        content.content, content.utpTransfer, nodesWithoutContent))
     else:
       # TODO: Should we do something with the node that failed responding our
       # query?
@@ -1120,11 +1124,11 @@ proc neighborhoodGossip*(
   if content.len() == 0:
     return 0

-  var contentList = List[ContentInfo, contentKeysLimit].init(@[])
+  var contentList = List[ContentKV, contentKeysLimit].init(@[])
   for i, contentItem in content:
-    let contentInfo =
-      ContentInfo(contentKey: contentKeys[i], content: contentItem)
-    discard contentList.add(contentInfo)
+    let contentKV =
+      ContentKV(contentKey: contentKeys[i], content: contentItem)
+    discard contentList.add(contentKV)

   # Just taking the first content item as target id.
   # TODO: come up with something better?
@@ -19,6 +19,11 @@ export rpcserver
 # Portal Network JSON-RPC impelentation as per specification:
 # https://github.com/ethereum/portal-network-specs/tree/master/jsonrpc

+type
+  ContentInfo = object
+    content: string
+    utpTransfer: bool
+
 # Note:
 # Using a string for the network parameter will give an error in the rpc macro:
 # Error: Invalid node kind nnkInfix for macros.`$`
@@ -114,10 +119,6 @@ proc installPortalApiHandlers*(

   rpcServer.rpc("portal_" & network & "FindContent") do(
       enr: Record, contentKey: string) -> JsonNode:
-    type ContentInfo = object
-      content: string
-      utpTransfer: bool
-
     let
       node = toNodeWithAddress(enr)
       foundContentResult = await p.findContent(
@@ -144,8 +145,8 @@ proc installPortalApiHandlers*(
       node = toNodeWithAddress(enr)
       key = hexToSeqByte(contentKey)
       content = hexToSeqByte(contentValue)
-      contentInfo = ContentInfo(contentKey: ByteList.init(key), content: content)
-      res = await p.offer(node, @[contentInfo])
+      contentKV = ContentKV(contentKey: ByteList.init(key), content: content)
+      res = await p.offer(node, @[contentKV])

     if res.isOk():
       return SSZ.encode(res.get()).to0xHex()
@@ -158,16 +159,19 @@ proc installPortalApiHandlers*(
     return discovered.map(proc(n: Node): Record = n.record)

   rpcServer.rpc("portal_" & network & "RecursiveFindContent") do(
-      contentKey: string) -> string:
+      contentKey: string) -> ContentInfo:
     let
       key = ByteList.init(hexToSeqByte(contentKey))
       contentId = p.toContentId(key).valueOr:
         raise newException(ValueError, "Invalid content key")

       contentResult = (await p.contentLookup(key, contentId)).valueOr:
-        return "0x"
+        return ContentInfo(content: "0x", utpTransfer: false)

-    return contentResult.content.to0xHex()
+    return ContentInfo(
+      content: contentResult.content.to0xHex(),
+      utpTransfer: contentResult.utpTransfer
+    )

   rpcServer.rpc("portal_" & network & "Store") do(
       contentKey: string, contentValue: string) -> bool:
@@ -59,9 +59,9 @@ proc createEmptyHeaders(fromNum: int, toNum: int): seq[BlockHeader] =
     headers.add(bh)
   return headers

-proc headersToContentInfo(
-    headersWithProof: seq[BlockHeaderWithProof]): seq[ContentInfo] =
-  var contentInfos: seq[ContentInfo]
+proc headersToContentKV(
+    headersWithProof: seq[BlockHeaderWithProof]): seq[ContentKV] =
+  var contentKVs: seq[ContentKV]
   for headerWithProof in headersWithProof:
     let
       # TODO: Decoding step could be avoided
@@ -70,10 +70,10 @@ proc headersToContentInfo(
       blockKey = BlockKey(blockHash: headerHash)
       contentKey = encode(ContentKey(
         contentType: blockHeader, blockHeaderKey: blockKey))
-      contentInfo = ContentInfo(
+      contentKV = ContentKV(
         contentKey: contentKey, content: SSZ.encode(headerWithProof))
-    contentInfos.add(contentInfo)
-  return contentInfos
+    contentKVs.add(contentKV)
+  return contentKVs

 procSuite "History Content Network":
   let rng = newRng()
@@ -198,15 +198,15 @@ procSuite "History Content Network":
      check headersWithProof.isOk()

      # This is one header more than maxOfferedHistoryContent
-      let contentInfos = headersToContentInfo(headersWithProof.get())
+      let contentKVs = headersToContentKV(headersWithProof.get())

      # node 1 will offer the content so it needs to have it in its database
-      for contentInfo in contentInfos:
-        let id = toContentId(contentInfo.contentKey)
+      for contentKV in contentKVs:
+        let id = toContentId(contentKV.contentKey)
        historyNode1.portalProtocol.storeContent(
-          contentInfo.contentKey,
+          contentKV.contentKey,
          id,
-          contentInfo.content
+          contentKV.content
        )

      # Offering 1 content item too much which should result in a discv5 packet
@@ -214,14 +214,14 @@ procSuite "History Content Network":
      block:
        let offerResult = await historyNode1.portalProtocol.offer(
          historyNode2.localNode(),
-          contentInfos
+          contentKVs
        )

        # Fail due timeout, as remote side must drop the too large discv5 packet
        check offerResult.isErr()

-        for contentInfo in contentInfos:
-          let id = toContentId(contentInfo.contentKey)
+        for contentKV in contentKVs:
+          let id = toContentId(contentKV.contentKey)
          check historyNode2.containsId(id) == false

      # One content key less should make offer be succesful and should result
@@ -229,14 +229,14 @@ procSuite "History Content Network":
      block:
        let offerResult = await historyNode1.portalProtocol.offer(
          historyNode2.localNode(),
-          contentInfos[0..<maxOfferedHistoryContent]
+          contentKVs[0..<maxOfferedHistoryContent]
        )

        check offerResult.isOk()

-        for i, contentInfo in contentInfos:
-          let id = toContentId(contentInfo.contentKey)
-          if i < len(contentInfos) - 1:
+        for i, contentKV in contentKVs:
+          let id = toContentId(contentKV.contentKey)
+          if i < len(contentKVs) - 1:
            check historyNode2.containsId(id) == true
          else:
            check historyNode2.containsId(id) == false
@@ -283,23 +283,23 @@ procSuite "History Content Network":
        selectedHeaders, epochAccumulators)
      check headersWithProof.isOk()

-      let contentInfos = headersToContentInfo(headersWithProof.get())
+      let contentKVs = headersToContentKV(headersWithProof.get())

-      for contentInfo in contentInfos:
-        let id = toContentId(contentInfo.contentKey)
+      for contentKV in contentKVs:
+        let id = toContentId(contentKV.contentKey)
        historyNode1.portalProtocol.storeContent(
-          contentInfo.contentKey,
+          contentKV.contentKey,
          id,
-          contentInfo.content
+          contentKV.content
        )

        let offerResult = await historyNode1.portalProtocol.offer(
-          historyNode2.localNode(), @[contentInfo])
+          historyNode2.localNode(), @[contentKV])

        check offerResult.isOk()

-      for contentInfo in contentInfos:
-        let id = toContentId(contentInfo.contentKey)
+      for contentKV in contentKVs:
+        let id = toContentId(contentKV.contentKey)
        check historyNode2.containsId(id) == true

      await historyNode1.stop()
@@ -165,11 +165,11 @@ procSuite "Portal Wire Protocol Tests":

  asyncTest "Offer/Accept/Stream":
    let (proto1, proto2) = defaultTestSetup(rng)
-    var content: seq[ContentInfo]
+    var content: seq[ContentKV]
    for i in 0..<contentKeysLimit:
-      let contentItem = ContentInfo(
+      let contentKV = ContentKV(
        contentKey: ByteList(@[byte i]), content: repeat(byte i, 5000))
-      content.add(contentItem)
+      content.add(contentKV)

    let res = await proto1.offer(proto2.baseProtocol.localNode, content)

@@ -181,10 +181,10 @@ procSuite "Portal Wire Protocol Tests":
    check contentItems.len() == content.len()

    for i, contentItem in contentItems:
-      let contentInfo = content[i]
+      let contentKV = content[i]
      check:
-        contentItem == contentInfo.content
-        contentKeys[i] == contentInfo.contentKey
+        contentItem == contentKV.content
+        contentKeys[i] == contentKV.contentKey

    await proto1.stopPortalProtocol()
    await proto2.stopPortalProtocol()