Refactor and clean-up of history network and eth rpc (#1357)

- Switch from using Option to Opt, which allows for smoother usage with the already existing Result types.
- With everything moved to Opt, make more use of valueOr to avoid deeply indented if/else clauses and instead have a clear error path at each step.
- Remove dead code, apply char limits, style guide, etc.
- Replace getEncodedKeyForContent with ContentKey.init and use ContentKey.init for each content type.
This commit is contained in:
parent 179b4adac3
commit c5ecba83cd
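For readers skimming the diff: the pattern this commit standardizes on is stew/results' `Opt` together with `valueOr`. A minimal, self-contained sketch of the idea (the proc names here are invented for illustration and are not part of the diff):

```nim
import stew/results

# Opt[T] is results' option-like type (a Result[T, void] underneath),
# so it composes directly with ok()/err()-style Result code.
proc findUser(id: int): Opt[string] =
  if id == 1: Opt.some("alice") else: Opt.none(string)

proc greet(id: int): string =
  # valueOr replaces the isNone()/unsafeGet() dance with a single,
  # clearly visible early-return error path.
  let name = findUser(id).valueOr:
    return "unknown user"
  "hello, " & name

echo greet(1)  # hello, alice
echo greet(2)  # unknown user
```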
@@ -8,7 +8,6 @@
 {.push raises: [Defect].}
 
 import
-  std/[options],
   chronicles,
   metrics,
   eth/db/kvstore,
@@ -143,27 +142,27 @@ proc new*(
 
 ## Private KvStoreRef Calls
 
-proc get(kv: KvStoreRef, key: openArray[byte]): Option[seq[byte]] =
-  var res: Option[seq[byte]]
-  proc onData(data: openArray[byte]) = res = some(@data)
+proc get(kv: KvStoreRef, key: openArray[byte]): Opt[seq[byte]] =
+  var res: Opt[seq[byte]]
+  proc onData(data: openArray[byte]) = res = Opt.some(@data)
 
   discard kv.get(key, onData).expectDb()
 
   return res
 
-proc getSszDecoded(kv: KvStoreRef, key: openArray[byte], T: type auto): Option[T] =
+proc getSszDecoded(kv: KvStoreRef, key: openArray[byte], T: type auto): Opt[T] =
   let res = kv.get(key)
   if res.isSome():
     try:
-      some(SSZ.decode(res.get(), T))
+      Opt.some(SSZ.decode(res.get(), T))
     except SszError:
       raiseAssert("Stored data should always be serialized correctly")
   else:
-    none(T)
+    Opt.none(T)
 
 ## Private ContentDB calls
 
-proc get(db: ContentDB, key: openArray[byte]): Option[seq[byte]] =
+proc get(db: ContentDB, key: openArray[byte]): Opt[seq[byte]] =
   db.kv.get(key)
 
 proc put(db: ContentDB, key, value: openArray[byte]) =
@@ -176,14 +175,14 @@ proc del(db: ContentDB, key: openArray[byte]) =
   db.kv.del(key).expectDb()
 
 proc getSszDecoded*(
-    db: ContentDB, key: openArray[byte], T: type auto): Option[T] =
+    db: ContentDB, key: openArray[byte], T: type auto): Opt[T] =
   db.kv.getSszDecoded(key, T)
 
 proc reclaimSpace*(db: ContentDB): void =
   ## Runs sqlite VACUUM commands which rebuilds the db, repacking it into a
   ## minimal amount of disk space.
   ## Ideal mode of operation, is to run it after several deletes.
-  ## Another options would be to run 'PRAGMA auto_vacuum = FULL;' statement at
+  ## Another option would be to run 'PRAGMA auto_vacuum = FULL;' statement at
   ## the start of db to leave it up to sqlite to clean up
   db.vacStmt.exec().expectDb()
@@ -230,7 +229,7 @@ proc contentSize(db: ContentDB): int64 =
 # checked with the Radius/distance of the node anyhow. So lets see how we end up
 # using this mostly in the code.
 
-proc get*(db: ContentDB, key: ContentId): Option[seq[byte]] =
+proc get*(db: ContentDB, key: ContentId): Opt[seq[byte]] =
   # TODO: Here it is unfortunate that ContentId is a uint256 instead of Digest256.
   db.get(key.toByteArrayBE())
@@ -243,7 +242,7 @@ proc contains*(db: ContentDB, key: ContentId): bool =
 proc del*(db: ContentDB, key: ContentId) =
   db.del(key.toByteArrayBE())
 
-proc getSszDecoded*(db: ContentDB, key: ContentId, T: type auto): Option[T] =
+proc getSszDecoded*(db: ContentDB, key: ContentId, T: type auto): Opt[T] =
   db.getSszDecoded(key.toByteArrayBE(), T)
 
 proc deleteContentFraction(
@@ -294,8 +293,8 @@ proc put*(
   # fragmented which may impact performance, so at some point in time `VACUUM`
   # will need to be run to defragment the db.
   # 2. Deal with the edge case where a user configures max db size lower than
-  # current db.size(). With such config the database would try to prune itself with
-  # each addition.
+  # current db.size(). With such config the database would try to prune itself
+  # with each addition.
   let dbSize = db.realSize()
 
   if dbSize < int64(db.maxSize):
@@ -354,25 +353,23 @@ proc adjustRadius(
 
 proc createGetHandler*(db: ContentDB): DbGetHandler =
   return (
-    proc(contentKey: ByteList, contentId: ContentId): results.Opt[seq[byte]] =
-      let
-        maybeContent = db.get(contentId)
-
-      if maybeContent.isNone():
+    proc(contentKey: ByteList, contentId: ContentId): Opt[seq[byte]] =
+      let content = db.get(contentId).valueOr:
         return Opt.none(seq[byte])
 
-      return ok(maybeContent.unsafeGet())
+      ok(content)
   )
 
-proc createStoreHandler*(db: ContentDB, cfg: RadiusConfig, p: PortalProtocol): DbStoreHandler =
+proc createStoreHandler*(
+    db: ContentDB, cfg: RadiusConfig, p: PortalProtocol): DbStoreHandler =
   return (proc(
       contentKey: ByteList,
       contentId: ContentId,
       content: seq[byte]) {.raises: [Defect], gcsafe.} =
-    # always re-check that key is in node range, to make sure that invariant that
-    # all keys in database are always in node range hold.
-    # TODO current silent assumption is that both contentDb and portalProtocol are
-    # using the same xor distance function
+    # always re-check that the key is in the node range to make sure only
+    # content in range is stored.
+    # TODO: current silent assumption is that both ContentDB and PortalProtocol
+    # are using the same xor distance function
    if p.inRange(contentId):
      case cfg.kind:
      of Dynamic:
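Because `db.get` now also returns `Opt`, the get handler above no longer needs an Option-to-Opt bridge, and `ok(content)` is valid since `Opt` is a `Result` underneath. A rough, self-contained rehearsal of the same shape (the `MockDb` type and procs are invented for the sketch):

```nim
import std/tables, stew/results

type MockDb = Table[uint64, seq[byte]]

proc get(db: MockDb, id: uint64): Opt[seq[byte]] =
  if id in db: Opt.some(db[id]) else: Opt.none(seq[byte])

proc getHandler(db: MockDb): proc(id: uint64): Opt[seq[byte]] =
  (proc(id: uint64): Opt[seq[byte]] =
    # The miss case stays a one-line early exit thanks to valueOr.
    let content = db.get(id).valueOr:
      return Opt.none(seq[byte])
    # ok() works here because Opt is a Result under the hood.
    ok(content))

var db = initTable[uint64, seq[byte]]()
db[1] = @[byte 0xab]
let handler = getHandler(db)
doAssert handler(1).isSome()
```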
@@ -10,12 +10,12 @@
 {.push raises: [Defect].}
 
 import
-  std/[options, math],
-  nimcrypto/[sha2, hash], stew/byteutils, stint,
+  std/math,
+  nimcrypto/[sha2, hash], stew/[byteutils, results], stint,
   ssz_serialization,
   ../../common/common_types
 
-export ssz_serialization, common_types, hash
+export ssz_serialization, common_types, hash, results
 
 ## Types and calls for history network content keys
@@ -53,14 +53,36 @@ type
     of blockHeaderWithProof:
       blockHeaderWithProofKey*: BlockKey
 
+func init*(
+    T: type ContentKey, contentType: ContentType,
+    hash: BlockHash | Digest): T =
+  case contentType
+  of blockHeader:
+    ContentKey(
+      contentType: contentType, blockHeaderKey: BlockKey(blockHash: hash))
+  of blockBody:
+    ContentKey(
+      contentType: contentType, blockBodyKey: BlockKey(blockHash: hash))
+  of receipts:
+    ContentKey(
+      contentType: contentType, receiptsKey: BlockKey(blockHash: hash))
+  of epochAccumulator:
+    ContentKey(
+      contentType: contentType,
+      epochAccumulatorKey: EpochAccumulatorKey(epochHash: hash))
+  of blockHeaderWithProof:
+    ContentKey(
+      contentType: contentType,
+      blockHeaderWithProofKey: BlockKey(blockHash: hash))
+
 func encode*(contentKey: ContentKey): ByteList =
   ByteList.init(SSZ.encode(contentKey))
 
-func decode*(contentKey: ByteList): Option[ContentKey] =
+func decode*(contentKey: ByteList): Opt[ContentKey] =
   try:
-    some(SSZ.decode(contentKey.asSeq(), ContentKey))
+    Opt.some(SSZ.decode(contentKey.asSeq(), ContentKey))
   except SszError:
-    return none[ContentKey]()
+    return Opt.none(ContentKey)
 
 func toContentId*(contentKey: ByteList): ContentId =
   # TODO: Should we try to parse the content key here for invalid ones?
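The new `ContentKey.init` centralizes the case-object construction that call sites previously hand-rolled. A trimmed, runnable rehearsal of that idea (the types are reduced to three variants and the SSZ step is omitted to keep the sketch self-contained):

```nim
type
  ContentType = enum
    blockHeader, blockBody, receipts
  BlockHash = array[32, byte]
  BlockKey = object
    blockHash: BlockHash
  ContentKey = object
    case contentType: ContentType
    of blockHeader: blockHeaderKey: BlockKey
    of blockBody: blockBodyKey: BlockKey
    of receipts: receiptsKey: BlockKey

func init(T: type ContentKey, contentType: ContentType, hash: BlockHash): T =
  # One constructor hides the variant dispatch from every call site.
  case contentType
  of blockHeader:
    ContentKey(contentType: contentType, blockHeaderKey: BlockKey(blockHash: hash))
  of blockBody:
    ContentKey(contentType: contentType, blockBodyKey: BlockKey(blockHash: hash))
  of receipts:
    ContentKey(contentType: contentType, receiptsKey: BlockKey(blockHash: hash))

var hash: BlockHash  # all-zero placeholder value
let key = ContentKey.init(blockBody, hash)
doAssert key.contentType == blockBody
```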
@@ -8,7 +8,6 @@
 {.push raises: [Defect].}
 
 import
-  std/[options, tables],
   stew/results, chronos, chronicles,
   eth/[common/eth_types_rlp, rlp, trie, trie/db],
   eth/p2p/discoveryv5/[protocol, enr],
@@ -38,30 +37,6 @@ type
 func toContentIdHandler(contentKey: ByteList): results.Opt[ContentId] =
   ok(toContentId(contentKey))
 
-func encodeKey(k: ContentKey): (ByteList, ContentId) =
-  let keyEncoded = encode(k)
-  return (keyEncoded, toContentId(keyEncoded))
-
-func getEncodedKeyForContent(
-    cType: ContentType, hash: BlockHash):
-    (ByteList, ContentId) =
-  let contentKeyType = BlockKey(blockHash: hash)
-
-  let contentKey =
-    case cType
-    of blockHeader:
-      ContentKey(contentType: cType, blockHeaderKey: contentKeyType)
-    of blockBody:
-      ContentKey(contentType: cType, blockBodyKey: contentKeyType)
-    of receipts:
-      ContentKey(contentType: cType, receiptsKey: contentKeyType)
-    of epochAccumulator:
-      raiseAssert("Not implemented")
-    of blockHeaderWithProof:
-      ContentKey(contentType: cType, blockHeaderWithProofKey: contentKeyType)
-
-  return encodeKey(contentKey)
-
 func decodeRlp*(input: openArray[byte], T: type): Result[T, string] =
   try:
     ok(rlp.decode(input, T))
@@ -210,7 +185,7 @@ proc validateReceiptsBytes*(
 
 ## ContentDB helper calls for specific history network types
 
-proc get(db: ContentDB, T: type BlockHeader, contentId: ContentId): Option[T] =
+proc get(db: ContentDB, T: type BlockHeader, contentId: ContentId): Opt[T] =
   let contentFromDB = db.get(contentId)
   if contentFromDB.isSome():
     let headerWithProof =
@@ -223,43 +198,42 @@ proc get(db: ContentDB, T: type BlockHeader, contentId: ContentId): Option[T] =
     if res.isErr():
       raiseAssert(res.error)
     else:
-      some(res.get())
+      Opt.some(res.get())
   else:
-    none(T)
+    Opt.none(T)
 
-proc get(db: ContentDB, T: type BlockBody, contentId: ContentId): Option[T] =
+proc get(db: ContentDB, T: type BlockBody, contentId: ContentId): Opt[T] =
   let contentFromDB = db.getSszDecoded(contentId, BlockBodySSZ)
   if contentFromDB.isSome():
     let res = T.fromPortalBlockBody(contentFromDB.get())
     if res.isErr():
       raiseAssert(res.error)
     else:
-      some(res.get())
+      Opt.some(res.get())
   else:
-    none(T)
+    Opt.none(T)
 
-proc get(db: ContentDB, T: type seq[Receipt], contentId: ContentId): Option[T] =
+proc get(db: ContentDB, T: type seq[Receipt], contentId: ContentId): Opt[T] =
   let contentFromDB = db.getSszDecoded(contentId, ReceiptsSSZ)
   if contentFromDB.isSome():
     let res = T.fromReceipts(contentFromDB.get())
     if res.isErr():
       raiseAssert(res.error)
     else:
-      some(res.get())
+      Opt.some(res.get())
   else:
-    none(T)
+    Opt.none(T)
 
 proc get(
-    db: ContentDB, T: type EpochAccumulator, contentId: ContentId): Option[T] =
+    db: ContentDB, T: type EpochAccumulator, contentId: ContentId): Opt[T] =
   db.getSszDecoded(contentId, T)
 
 proc getContentFromDb(
-    n: HistoryNetwork, T: type, contentId: ContentId): Option[T] =
+    n: HistoryNetwork, T: type, contentId: ContentId): Opt[T] =
   if n.portalProtocol.inRange(contentId):
     n.contentDB.get(T, contentId)
   else:
-    none(T)
+    Opt.none(T)
 
 ## Public API to get the history network specific types, either from database
@@ -281,302 +255,274 @@ func verifyHeader(
 
 proc getVerifiedBlockHeader*(
     n: HistoryNetwork, hash: BlockHash):
-    Future[Option[BlockHeader]] {.async.} =
-  let (keyEncoded, contentId) =
-    getEncodedKeyForContent(blockHeaderWithProof, hash)
+    Future[Opt[BlockHeader]] {.async.} =
+  let
+    contentKey = ContentKey.init(blockHeaderWithProof, hash).encode()
+    contentId = contentKey.toContentId()
+
+  logScope:
+    hash
+    contentKey
 
   # Note: This still requests a BlockHeaderWithProof from the database, as that
-  # is what is stored. But the proof doesn't need to be checked as everthing
-  # should get checked before storing.
+  # is what is stored. But the proof doesn't need to be verified as it
+  # gets verified before storing.
   let headerFromDb = n.getContentFromDb(BlockHeader, contentId)
 
   if headerFromDb.isSome():
-    info "Fetched block header from database", hash, contentKey = keyEncoded
+    info "Fetched block header from database"
     return headerFromDb
 
   for i in 0..<requestRetries:
-    let headerContentLookup =
-      await n.portalProtocol.contentLookup(keyEncoded, contentId)
-    if headerContentLookup.isNone():
-      warn "Failed fetching block header with proof from the network",
-        hash, contentKey = keyEncoded
-      return none(BlockHeader)
-
-    let headerContent = headerContentLookup.unsafeGet()
-
-    let headerWithProofRes = decodeSsz(headerContent.content, BlockHeaderWithProof)
-    if headerWithProofRes.isErr():
-      warn "Failed decoding header with proof", err = headerWithProofRes.error
-      return none(BlockHeader)
-
-    let headerWithProof = headerWithProofRes.get()
-
-    let res = validateBlockHeaderBytes(headerWithProof.header.asSeq(), hash)
-    if res.isOk():
-      let isCanonical = n.verifyHeader(res.get(), headerWithProof.proof)
-
-      if isCanonical.isOk():
-        info "Fetched block header from the network", hash, contentKey = keyEncoded
-        # Content is valid, it can be propagated to interested peers
-        n.portalProtocol.triggerPoke(
-          headerContent.nodesInterestedInContent,
-          keyEncoded,
-          headerContent.content
-        )
-
-        n.portalProtocol.storeContent(keyEncoded, contentId, headerContent.content)
-
-        return some(res.get())
-    else:
-      warn "Validation of block header failed", err = res.error, hash, contentKey = keyEncoded
+    let
+      headerContent = (await n.portalProtocol.contentLookup(
+          contentKey, contentId)).valueOr:
+        warn "Failed fetching block header with proof from the network"
+        return Opt.none(BlockHeader)
+
+      headerWithProof = decodeSsz(
+          headerContent.content, BlockHeaderWithProof).valueOr:
+        warn "Failed decoding header with proof", error
+        continue
+
+      header = validateBlockHeaderBytes(
+          headerWithProof.header.asSeq(), hash).valueOr:
+        warn "Validation of block header failed", error
+        continue
+
+    if (let r = n.verifyHeader(header, headerWithProof.proof); r.isErr):
+      warn "Verification of block header failed", error = r.error
+      continue
+
+    info "Fetched valid block header from the network"
+    # Content is valid, it can be stored and propagated to interested peers
+    n.portalProtocol.storeContent(contentKey, contentId, headerContent.content)
+    n.portalProtocol.triggerPoke(
+      headerContent.nodesInterestedInContent,
+      contentKey,
+      headerContent.content
+    )
+
+    return Opt.some(header)
 
   # Headers were requested `requestRetries` times and all failed on validation
-  return none(BlockHeader)
+  return Opt.none(BlockHeader)
 
 # TODO: To be deprecated or not? Should there be the case for requesting a
 # block header without proofs?
 proc getBlockHeader*(
     n: HistoryNetwork, hash: BlockHash):
-    Future[Option[BlockHeader]] {.async.} =
-  let (keyEncoded, contentId) =
-    getEncodedKeyForContent(blockHeader, hash)
+    Future[Opt[BlockHeader]] {.async.} =
+  let
+    contentKey = ContentKey.init(blockHeader, hash).encode()
+    contentId = contentKey.toContentId()
+
+  logScope:
+    hash
+    contentKey
 
   let headerFromDb = n.getContentFromDb(BlockHeader, contentId)
   if headerFromDb.isSome():
-    info "Fetched block header from database", hash, contentKey = keyEncoded
+    info "Fetched block header from database"
     return headerFromDb
 
   for i in 0..<requestRetries:
-    let headerContentLookup =
-      await n.portalProtocol.contentLookup(keyEncoded, contentId)
-    if headerContentLookup.isNone():
-      warn "Failed fetching block header from the network", hash, contentKey = keyEncoded
-      return none(BlockHeader)
-
-    let headerContent = headerContentLookup.unsafeGet()
-
-    let res = validateBlockHeaderBytes(headerContent.content, hash)
-    if res.isOk():
-      info "Fetched block header from the network", hash, contentKey = keyEncoded
-      # Content is valid we can propagate it to interested peers
-      n.portalProtocol.triggerPoke(
-        headerContent.nodesInterestedInContent,
-        keyEncoded,
-        headerContent.content
-      )
-
-      n.portalProtocol.storeContent(keyEncoded, contentId, headerContent.content)
-
-      return some(res.get())
-    else:
-      warn "Validation of block header failed", err = res.error, hash, contentKey = keyEncoded
+    let
+      headerContent = (await n.portalProtocol.contentLookup(
+          contentKey, contentId)).valueOr:
+        warn "Failed fetching block header from the network"
+        return Opt.none(BlockHeader)
+
+      header = validateBlockHeaderBytes(headerContent.content, hash).valueOr:
+        warn "Validation of block header failed", error
+        continue
+
+    info "Fetched valid block header from the network"
+    # Content is valid, it can be stored and propagated to interested peers
+    n.portalProtocol.storeContent(contentKey, contentId, headerContent.content)
+    n.portalProtocol.triggerPoke(
+      headerContent.nodesInterestedInContent,
+      contentKey,
+      headerContent.content
+    )
+
+    return Opt.some(header)
 
   # Headers were requested `requestRetries` times and all failed on validation
-  return none(BlockHeader)
+  return Opt.none(BlockHeader)
 
 proc getBlockBody*(
     n: HistoryNetwork, hash: BlockHash, header: BlockHeader):
-    Future[Option[BlockBody]] {.async.} =
-
-  # Got header with empty body, no need to make any db calls or network requests
+    Future[Opt[BlockBody]] {.async.} =
   if header.txRoot == EMPTY_ROOT_HASH and header.ommersHash == EMPTY_UNCLE_HASH:
-    return some(BlockBody(transactions: @[], uncles: @[]))
+    # Short path for empty body indicated by txRoot and ommersHash
+    return Opt.some(BlockBody(transactions: @[], uncles: @[]))
 
   let
-    (keyEncoded, contentId) = getEncodedKeyForContent(blockBody, hash)
-    bodyFromDb = n.getContentFromDb(BlockBody, contentId)
-
+    contentKey = ContentKey.init(blockBody, hash).encode()
+    contentId = contentKey.toContentId()
+
+  logScope:
+    hash
+    contentKey
+
+  let bodyFromDb = n.getContentFromDb(BlockBody, contentId)
   if bodyFromDb.isSome():
-    info "Fetched block body from database", hash, contentKey = keyEncoded
+    info "Fetched block body from database"
     return bodyFromDb
 
   for i in 0..<requestRetries:
-    let bodyContentLookup =
-      await n.portalProtocol.contentLookup(keyEncoded, contentId)
-
-    if bodyContentLookup.isNone():
-      warn "Failed fetching block body from the network", hash, contentKey = keyEncoded
-      return none(BlockBody)
-
-    let bodyContent = bodyContentLookup.unsafeGet()
-
-    let res = validateBlockBodyBytes(
-      bodyContent.content, header.txRoot, header.ommersHash)
-    if res.isOk():
-      info "Fetched block body from the network", hash, contentKey = keyEncoded
-
-      # body is valid, propagate it to interested peers
-      n.portalProtocol.triggerPoke(
-        bodyContent.nodesInterestedInContent,
-        keyEncoded,
-        bodyContent.content
-      )
-
-      n.portalProtocol.storeContent(keyEncoded, contentId, bodyContent.content)
-
-      return some(res.get())
-    else:
-      warn "Validation of block body failed", err = res.error, hash, contentKey = keyEncoded
-
-  return none(BlockBody)
+    let
+      bodyContent = (await n.portalProtocol.contentLookup(
+          contentKey, contentId)).valueOr:
+        warn "Failed fetching block body from the network"
+        return Opt.none(BlockBody)
+
+      body = validateBlockBodyBytes(
+          bodyContent.content, header.txRoot, header.ommersHash).valueOr:
+        warn "Validation of block body failed", error
+        continue
+
+    info "Fetched block body from the network"
+    # Content is valid, it can be stored and propagated to interested peers
+    n.portalProtocol.storeContent(contentKey, contentId, bodyContent.content)
+    n.portalProtocol.triggerPoke(
+      bodyContent.nodesInterestedInContent,
+      contentKey,
+      bodyContent.content
+    )
+
+    return Opt.some(body)
+
+  # Bodies were requested `requestRetries` times and all failed on validation
+  return Opt.none(BlockBody)
 
 proc getBlock*(
     n: HistoryNetwork, hash: BlockHash):
-    Future[Option[Block]] {.async.} =
+    Future[Opt[Block]] {.async.} =
   debug "Trying to retrieve block with hash", hash
 
   # Note: Using `getVerifiedBlockHeader` instead of getBlockHeader even though
   # proofs are not necessarily needed, in order to avoid having to inject
   # also the original type into the network.
-  let headerOpt = await n.getVerifiedBlockHeader(hash)
-  if headerOpt.isNone():
-    warn "Failed to get header when getting block with hash", hash
-    # Cannot validate block without header.
-    return none(Block)
-
-  let header = headerOpt.unsafeGet()
-
-  let bodyOpt = await n.getBlockBody(hash, header)
-
-  if bodyOpt.isNone():
-    warn "Failed to get body when gettin block with hash", hash
-    return none(Block)
-
-  let body = bodyOpt.unsafeGet()
-
-  return some((header, body))
+  let
+    header = (await n.getVerifiedBlockHeader(hash)).valueOr:
+      warn "Failed to get header when getting block", hash
+      return Opt.none(Block)
+    body = (await n.getBlockBody(hash, header)).valueOr:
+      warn "Failed to get body when getting block", hash
+      return Opt.none(Block)
+
+  return Opt.some((header, body))
 
 proc getReceipts*(
     n: HistoryNetwork,
     hash: BlockHash,
-    header: BlockHeader): Future[Option[seq[Receipt]]] {.async.} =
+    header: BlockHeader): Future[Opt[seq[Receipt]]] {.async.} =
   if header.receiptRoot == EMPTY_ROOT_HASH:
     # Short path for empty receipts indicated by receipts root
-    return some(newSeq[Receipt]())
+    return Opt.some(newSeq[Receipt]())
 
-  let (keyEncoded, contentId) = getEncodedKeyForContent(receipts, hash)
+  let
+    contentKey = ContentKey.init(receipts, hash).encode()
+    contentId = contentKey.toContentId()
+
+  logScope:
+    hash
+    contentKey
 
   let receiptsFromDb = n.getContentFromDb(seq[Receipt], contentId)
-
   if receiptsFromDb.isSome():
-    info "Fetched receipts from database", hash
+    info "Fetched receipts from database"
     return receiptsFromDb
 
   for i in 0..<requestRetries:
-    let receiptsContentLookup =
-      await n.portalProtocol.contentLookup(keyEncoded, contentId)
-    if receiptsContentLookup.isNone():
-      warn "Failed fetching receipts from the network", hash, contentKey = keyEncoded
-      return none(seq[Receipt])
-
-    let receiptsContent = receiptsContentLookup.unsafeGet()
-
-    let res = validateReceiptsBytes(receiptsContent.content, header.receiptRoot)
-    if res.isOk():
-      info "Fetched receipts from the network", hash, contentKey = keyEncoded
-
-      let receipts = res.get()
-
-      # receipts are valid, propagate it to interested peers
-      n.portalProtocol.triggerPoke(
-        receiptsContent.nodesInterestedInContent,
-        keyEncoded,
-        receiptsContent.content
-      )
-
-      n.portalProtocol.storeContent(keyEncoded, contentId, receiptsContent.content)
-
-      return some(res.get())
-    else:
-      warn "Validation of receipts failed", err = res.error, hash, contentKey = keyEncoded
-
-  return none(seq[Receipt])
+    let
+      receiptsContent = (await n.portalProtocol.contentLookup(
+          contentKey, contentId)).valueOr:
+        warn "Failed fetching receipts from the network"
+        return Opt.none(seq[Receipt])
+      receipts = validateReceiptsBytes(
+          receiptsContent.content, header.receiptRoot).valueOr:
+        warn "Validation of receipts failed", error
+        continue
+
+    info "Fetched receipts from the network"
+    # Content is valid, it can be stored and propagated to interested peers
+    n.portalProtocol.storeContent(contentKey, contentId, receiptsContent.content)
+    n.portalProtocol.triggerPoke(
+      receiptsContent.nodesInterestedInContent,
+      contentKey,
+      receiptsContent.content
+    )
+
+    return Opt.some(receipts)
 
 proc getEpochAccumulator(
     n: HistoryNetwork, epochHash: Digest):
-    Future[Option[EpochAccumulator]] {.async.} =
+    Future[Opt[EpochAccumulator]] {.async.} =
   let
-    contentKey = ContentKey(
-      contentType: epochAccumulator,
-      epochAccumulatorKey: EpochAccumulatorKey(epochHash: epochHash))
-
-    keyEncoded = encode(contentKey)
-    contentId = toContentId(keyEncoded)
-
-    accumulatorFromDb = n.getContentFromDb(EpochAccumulator, contentId)
-
+    contentKey = ContentKey.init(epochAccumulator, epochHash).encode()
+    contentId = contentKey.toContentId()
+
+  logScope:
+    epochHash
+    contentKey
+
+  let accumulatorFromDb = n.getContentFromDb(EpochAccumulator, contentId)
   if accumulatorFromDb.isSome():
-    info "Fetched epoch accumulator from database", epochHash
+    info "Fetched epoch accumulator from database"
     return accumulatorFromDb
 
   for i in 0..<requestRetries:
-    let contentLookup =
-      await n.portalProtocol.contentLookup(keyEncoded, contentId)
-    if contentLookup.isNone():
-      warn "Failed fetching epoch accumulator from the network", epochHash
-      return none(EpochAccumulator)
-
-    let accumulatorContent = contentLookup.unsafeGet()
-
-    let epochAccumulator =
-      try:
-        SSZ.decode(accumulatorContent.content, EpochAccumulator)
-      except SszError:
-        continue
-        # return none(EpochAccumulator)
+    let
+      accumulatorContent = (await n.portalProtocol.contentLookup(
+          contentKey, contentId)).valueOr:
+        warn "Failed fetching epoch accumulator from the network"
+        return Opt.none(EpochAccumulator)
+
+      epochAccumulator =
+        try:
+          SSZ.decode(accumulatorContent.content, EpochAccumulator)
+        except SszError:
+          continue
 
     let hash = hash_tree_root(epochAccumulator)
     if hash == epochHash:
-      info "Fetched epoch accumulator from the network", epochHash
-
+      info "Fetched epoch accumulator from the network"
+      n.portalProtocol.storeContent(contentKey, contentId, accumulatorContent.content)
       n.portalProtocol.triggerPoke(
         accumulatorContent.nodesInterestedInContent,
-        keyEncoded,
+        contentKey,
         accumulatorContent.content
       )
 
-      n.portalProtocol.storeContent(keyEncoded, contentId, accumulatorContent.content)
-
-      return some(epochAccumulator)
+      return Opt.some(epochAccumulator)
     else:
-      warn "Validation of epoch accumulator failed",
-        hash, expectedHash = epochHash
+      warn "Validation of epoch accumulator failed", resultedEpochHash = hash
 
-  return none(EpochAccumulator)
+  return Opt.none(EpochAccumulator)
 
 proc getBlock*(
     n: HistoryNetwork, bn: UInt256):
-    Future[Result[Option[Block], string]] {.async.} =
-  let epochDataRes = n.accumulator.getBlockEpochDataForBlockNumber(bn)
-  if epochDataRes.isOk():
-    let
-      epochData = epochDataRes.get()
-      digest = Digest(data: epochData.epochHash)
-
-      epochOpt = await n.getEpochAccumulator(digest)
-    if epochOpt.isNone():
-      return err("Cannot retrieve epoch accumulator for given block number")
-
-    let
-      epoch = epochOpt.unsafeGet()
-      blockHash = epoch[epochData.blockRelativeIndex].blockHash
-
-    let maybeBlock = await n.getBlock(blockHash)
-
-    return ok(maybeBlock)
-  else:
-    return err(epochDataRes.error)
+    Future[Result[Opt[Block], string]] {.async.} =
+  let
+    epochData = n.accumulator.getBlockEpochDataForBlockNumber(bn).valueOr:
+      return err(error)
+    digest = Digest(data: epochData.epochHash)
+    epoch = (await n.getEpochAccumulator(digest)).valueOr:
+      return err("Cannot retrieve epoch accumulator for given block number")
+    blockHash = epoch[epochData.blockRelativeIndex].blockHash
+
+    maybeBlock = await n.getBlock(blockHash)
+
+  return ok(maybeBlock)
 
 proc validateContent(
     n: HistoryNetwork, content: seq[byte], contentKey: ByteList):
     Future[bool] {.async.} =
-  let keyOpt = contentKey.decode()
-
-  if keyOpt.isNone():
+  let key = contentKey.decode().valueOr:
     return false
 
-  let key = keyOpt.get()
-
   case key.contentType:
   of blockHeader:
     # Note: For now we still accept regular block header type to remain
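The retry loops above lean on the fact that the `valueOr` block is ordinary code: a `return` aborts the whole proc (used for fetch failures), while a `continue` retries the loop (used for decode/validation failures). A minimal sketch of that control flow (the flaky `fetchOnce` is invented for illustration):

```nim
import stew/results

const requestRetries = 4

proc fetchOnce(attempt: int): Opt[string] =
  # Invented flaky source: succeeds on the third attempt.
  if attempt == 2: Opt.some("header-bytes") else: Opt.none(string)

proc fetchWithRetries(): Opt[string] =
  for i in 0 ..< requestRetries:
    let content = fetchOnce(i).valueOr:
      continue  # retry; a `return Opt.none(string)` here would abort instead
    return Opt.some(content)
  # all attempts failed
  Opt.none(string)

doAssert fetchWithRetries().isSome()
```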
@@ -584,14 +530,11 @@ proc validateContent(
     # basically requesting the header with proofs from somewhere else.
     # This all doesn't make much sense aside from compatibility and should
     # eventually be removed.
-    let validateResult =
-      validateBlockHeaderBytes(content, key.blockHeaderKey.blockHash)
-    if validateResult.isErr():
-      warn "Invalid block header offered", error = validateResult.error
+    let header = validateBlockHeaderBytes(
+        content, key.blockHeaderKey.blockHash).valueOr:
+      warn "Invalid block header offered", error
       return false
 
-    let header = validateResult.get()
-
     let res = await n.getVerifiedBlockHeader(key.blockHeaderKey.blockHash)
     if res.isNone():
       warn "Block header failed canonical verification"
@@ -600,33 +543,27 @@ proc validateContent(
       return true
 
   of blockBody:
-    let res = await n.getVerifiedBlockHeader(key.blockBodyKey.blockHash)
-    if res.isNone():
-      warn "Block body Failed canonical verification"
+    let header = (await n.getVerifiedBlockHeader(
+        key.blockBodyKey.blockHash)).valueOr:
+      warn "Failed getting canonical header for block"
       return false
 
-    let header = res.get()
-    let validationResult =
-      validateBlockBodyBytes(content, header.txRoot, header.ommersHash)
-
-    if validationResult.isErr():
-      warn "Failed validating block body", error = validationResult.error
+    let res = validateBlockBodyBytes(content, header.txRoot, header.ommersHash)
+    if res.isErr():
+      warn "Failed validating block body", error = res.error
       return false
     else:
       return true
 
   of receipts:
-    let res = await n.getVerifiedBlockHeader(key.receiptsKey.blockHash)
-    if res.isNone():
-      warn "Receipts failed canonical verification"
+    let header = (await n.getVerifiedBlockHeader(
+        key.receiptsKey.blockHash)).valueOr:
+      warn "Failed getting canonical header for receipts"
       return false
 
-    let header = res.get()
-    let validationResult =
-      validateReceiptsBytes(content, header.receiptRoot)
-
-    if validationResult.isErr():
-      warn "Failed validating receipts", error = validationResult.error
+    let res = validateReceiptsBytes(content, header.receiptRoot)
+    if res.isErr():
+      warn "Failed validating receipts", error = res.error
       return false
     else:
       return true
@@ -655,25 +592,20 @@ proc validateContent(
     return true
 
   of blockHeaderWithProof:
-    let headerWithProofRes = decodeSsz(content, BlockHeaderWithProof)
-    if headerWithProofRes.isErr():
-      warn "Failed decoding header with proof", err = headerWithProofRes.error
-      return false
+    let
+      headerWithProof = decodeSsz(content, BlockHeaderWithProof).valueOr:
+        warn "Failed decoding header with proof", error
+        return false
+      header = validateBlockHeaderBytes(
+          headerWithProof.header.asSeq(),
+          key.blockHeaderWithProofKey.blockHash).valueOr:
+        warn "Invalid block header offered", error
+        return false
 
-    let headerWithProof = headerWithProofRes.get()
-
-    let validateResult = validateBlockHeaderBytes(
-      headerWithProof.header.asSeq(), key.blockHeaderWithProofKey.blockHash)
-    if validateResult.isErr():
-      warn "Invalid block header offered", error = validateResult.error
-      return false
-
-    let header = validateResult.get()
-
-    let isCanonical = n.verifyHeader(header, headerWithProof.proof)
-    if isCanonical.isErr():
+    let res = n.verifyHeader(header, headerWithProof.proof)
+    if res.isErr():
       warn "Failed on check if header is part of canonical chain",
-        error = isCanonical.error
+        error = res.error
       return false
     else:
       return true
@@ -11,12 +11,11 @@
 {.push raises: [Defect].}
 
 import
-  std/options,
   nimcrypto/[hash, sha2, keccak], stew/[objects, results], stint,
   ssz_serialization,
   ../../common/common_types
 
-export ssz_serialization, common_types, hash
+export ssz_serialization, common_types, hash, results
 
 type
   NodeHash* = MDigest[32 * 8] # keccak256
@@ -70,11 +69,11 @@ type
 func encode*(contentKey: ContentKey): ByteList =
   ByteList.init(SSZ.encode(contentKey))
 
-func decode*(contentKey: ByteList): Option[ContentKey] =
+func decode*(contentKey: ByteList): Opt[ContentKey] =
   try:
-    some(SSZ.decode(contentKey.asSeq(), ContentKey))
+    Opt.some(SSZ.decode(contentKey.asSeq(), ContentKey))
   except SszError:
-    return none[ContentKey]()
+    return Opt.none(ContentKey)
 
 template computeContentId*(digestCtxType: type, body: untyped): ContentId =
   var h {.inject.}: digestCtxType
|
@ -6,7 +6,6 @@
|
|||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
import
|
||||
std/options,
|
||||
stew/results, chronos, chronicles,
|
||||
eth/p2p/discoveryv5/[protocol, enr],
|
||||
../../content_db,
|
||||
|
@@ -29,12 +28,8 @@ type StateNetwork* = ref object
 func toContentIdHandler(contentKey: ByteList): results.Opt[ContentId] =
   toContentId(contentKey)
 
-proc dbGetHandler(db: ContentDB, contentId: ContentId):
-    Option[seq[byte]] {.raises: [Defect], gcsafe.} =
-  db.get(contentId)
-
 proc getContent*(n: StateNetwork, key: ContentKey):
-    Future[Option[seq[byte]]] {.async.} =
+    Future[Opt[seq[byte]]] {.async.} =
   let
     keyEncoded = encode(key)
     contentId = toContentId(key)
@@ -49,7 +44,7 @@ proc getContent*(n: StateNetwork, key: ContentKey):
   let content = await n.portalProtocol.contentLookup(keyEncoded, contentId)
 
   if content.isNone():
-    return none[seq[byte]]()
+    return Opt.none(seq[byte])
 
   let contentResult = content.get()
@@ -62,7 +57,7 @@ proc getContent*(n: StateNetwork, key: ContentKey):
 
   # TODO: for now returning bytes, ultimately it would be nice to return proper
   # domain types.
-  return some(contentResult.content)
+  return Opt.some(contentResult.content)
 
 proc validateContent(content: openArray[byte], contentKey: ByteList): bool =
   true
@@ -11,7 +11,6 @@
 {.push raises: [Defect].}
 
 import
-  std/options,
   stint, stew/[results, objects],
   ssz_serialization,
   ../../common/common_types
@@ -151,29 +150,19 @@ func decodeMessage*(body: openArray[byte]): Result[Message, cstring] =
   except SszError:
     err("Invalid message encoding")
 
-template innerMessage[T: SomeMessage](message: Message, expected: MessageKind): Option[T] =
+template innerMessage[T: SomeMessage](
+    message: Message, expected: MessageKind): Result[T, cstring] =
   if (message.kind == expected):
-    some[T](message.expected)
+    ok(message.expected)
   else:
-    none[T]()
+    err("Invalid message response")
 
-# All our Message variants correspond to enum MessageKind, therefore we are able to
-# zoom in on inner structure of message by defining expected type T.
-# If expected variant is not active, return None
-func getInnnerMessage*[T: SomeMessage](m: Message): Option[T] =
+# Each `Message` variant corresponds to a MessageKind. Therefore, the inner
+# message can be extracted when providing the expected message type T.
+# If the message does not hold the expected variant, return an error.
+func getInnerMessage*[T: SomeMessage](m: Message): Result[T, cstring] =
   innerMessage[T](m, messageKind(T))
 
-# Simple conversion from Option to Result, looks like something which could live in
-# Result library.
-func optToResult*[T, E](opt: Option[T], e: E): Result[T, E] =
-  if (opt.isSome()):
-    ok(opt.unsafeGet())
-  else:
-    err(e)
-
-func getInnerMessageResult*[T: SomeMessage](m: Message, errMessage: cstring): Result[T, cstring] =
-  optToResult(getInnnerMessage[T](m), errMessage)
-
 func getTalkReqOverhead*(protocolIdLen: int): int =
   return (
     16 + # IV size
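With `innerMessage` returning `Result[T, cstring]` directly, the old Option-to-Result bridge (`optToResult`, `getInnerMessageResult`) becomes dead code. A reduced sketch of the new shape (the message types are cut down to two invented variants):

```nim
import stew/results

type
  MessageKind = enum ping, pong
  PingMessage = object
    enrSeq: uint64
  Message = object
    case kind: MessageKind
    of ping: ping: PingMessage
    of pong: discard

func getPing(m: Message): Result[PingMessage, cstring] =
  # Returning a Result here removes the later Option -> Result conversion.
  if m.kind == ping:
    ok(m.ping)
  else:
    err("Invalid message response")

doAssert getPing(Message(kind: ping, ping: PingMessage(enrSeq: 1))).isOk()
```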
@@ -496,9 +496,7 @@ proc reqResponse[Request: SomeMessage, Response: SomeMessage](
   let messageResponse = talkresp
     .flatMap(proc (x: seq[byte]): Result[Message, cstring] = decodeMessage(x))
     .flatMap(proc (m: Message): Result[Response, cstring] =
-      getInnerMessageResult[Response](
-        m, cstring"Invalid message response received")
-    )
+      getInnerMessage[Response](m))
 
   if messageResponse.isOk():
     trace "Received message response", srcId = dst.id,
@@ -960,7 +958,7 @@ proc triggerPoke*(
 # networks will probably be very similar. Extract lookup function to separate module
 # and make it more generic
 proc contentLookup*(p: PortalProtocol, target: ByteList, targetId: UInt256):
-    Future[Option[ContentLookupResult]] {.async.} =
+    Future[Opt[ContentLookupResult]] {.async.} =
   ## Perform a lookup for the given target, return the closest n nodes to the
   ## target. Maximum value for n is `BUCKET_SIZE`.
   # `closestNodes` holds the k closest nodes to target found, sorted by distance
@@ -1040,14 +1038,14 @@ proc contentLookup*(p: PortalProtocol, target: ByteList, targetId: UInt256):
       for f in pendingQueries:
         f.cancel()
       portal_lookup_content_requests.observe(requestAmount)
-      return some(ContentLookupResult.init(content.content, nodesWithoutContent))
+      return Opt.some(ContentLookupResult.init(content.content, nodesWithoutContent))
     else:
       # TODO: Should we do something with the node that failed responding our
       # query?
       discard
 
   portal_lookup_content_failures.inc()
-  return none[ContentLookupResult]()
+  return Opt.none(ContentLookupResult)
 
 proc query*(p: PortalProtocol, target: NodeId, k = BUCKET_SIZE): Future[seq[Node]]
     {.async.} =
@@ -204,13 +204,10 @@ proc installEthApiHandlers*(
     ## Returns BlockObject or nil when no block was found.
     let
       blockHash = data.toHash()
-      blockRes = await historyNetwork.getBlock(blockHash)
+      (header, body) = (await historyNetwork.getBlock(blockHash)).valueOr:
+        return none(BlockObject)
 
-    if blockRes.isNone():
-      return none(BlockObject)
-    else:
-      let (header, body) = blockRes.unsafeGet()
-      return some(BlockObject.init(header, body))
+    return some(BlockObject.init(header, body))
 
   # TODO: add test to local testnet, it requires activating accumulator
   # in testnet script
@@ -223,18 +220,14 @@ proc installEthApiHandlers*(
 
     let
       blockNumber = fromHex(UInt256, quantityTag)
-      blockResult = await historyNetwork.getBlock(blockNumber)
+      maybeBlock = (await historyNetwork.getBlock(blockNumber)).valueOr:
+        raise newException(ValueError, error)
 
-    if blockResult.isOk():
-      let maybeBlock = blockResult.get()
-
-      if maybeBlock.isNone():
-        return none(BlockObject)
-      else:
-        let (header, body) = maybeBlock.unsafeGet()
-        return some(BlockObject.init(header, body))
+    if maybeBlock.isNone():
+      return none(BlockObject)
     else:
-      raise newException(ValueError, blockResult.error)
+      let (header, body) = maybeBlock.get()
+      return some(BlockObject.init(header, body))
 
   rpcServerWithProxy.rpc("eth_getBlockTransactionCountByHash") do(
       data: EthHashStr) -> HexQuantityStr:
@@ -245,17 +238,14 @@ proc installEthApiHandlers*(
     ## Returns integer of the number of transactions in this block.
     let
       blockHash = data.toHash()
-      blockRes = await historyNetwork.getBlock(blockHash)
+      (_, body) = (await historyNetwork.getBlock(blockHash)).valueOr:
+        raise newException(ValueError, "Could not find block with requested hash")
 
-    if blockRes.isNone():
-      raise newException(ValueError, "Could not find block with requested hash")
-    else:
-      let (_, body) = blockRes.unsafeGet()
-      var txCount:uint = 0
-      for tx in body.transactions:
-        txCount.inc()
+    var txCount: uint = 0
+    for tx in body.transactions:
+      txCount.inc()
 
-      return encodeQuantity(txCount)
+    return encodeQuantity(txCount)
 
   # Note: can't implement this yet as the fluffy node doesn't know the relation
   # of tx hash -> block number -> block hash, in order to get the receipt
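In the RPC handlers the same `valueOr` block raises instead of returning, turning a missing block into the JSON-RPC error in a single step. A self-contained sketch of that shape (the types and the lookup proc are stand-ins, not the real eth API):

```nim
import stew/results

type BlockObject = object
  number: uint64

proc getBlock(hash: string): Opt[BlockObject] =
  # Stand-in for historyNetwork.getBlock.
  if hash == "known":
    Opt.some(BlockObject(number: 1))
  else:
    Opt.none(BlockObject)

proc ethGetBlockByHash(hash: string): BlockObject =
  # The valueOr body may raise; no isNone()/unsafeGet() pair is needed.
  let blk = getBlock(hash).valueOr:
    raise newException(ValueError, "Could not find block with requested hash")
  blk

doAssert ethGetBlockByHash("known").number == 1
```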
@@ -271,41 +261,30 @@ proc installEthApiHandlers*(
       # To support range queries the Indices network is required.
       raise newException(ValueError,
         "Unsupported query: Only `blockHash` queries are currently supported")
-    else:
-      let hash = filterOptions.blockHash.unsafeGet()
 
-      let headerOpt = await historyNetwork.getVerifiedBlockHeader(hash)
-      if headerOpt.isNone():
-        raise newException(ValueError,
-          "Could not find header with requested hash")
+    let hash = filterOptions.blockHash.unsafeGet()
 
-      let header = headerOpt.unsafeGet()
+    let header = (await historyNetwork.getVerifiedBlockHeader(hash)).valueOr:
+      raise newException(ValueError,
+        "Could not find header with requested hash")
 
-      if headerBloomFilter(header, filterOptions.address, filterOptions.topics):
-        # TODO: These queries could be done concurrently, investigate if there
-        # are no assumptions about usage of concurrent queries on portal
-        # wire protocol level
-        let
-          bodyOpt = await historyNetwork.getBlockBody(hash, header)
-          receiptsOpt = await historyNetwork.getReceipts(hash, header)
-
-        if bodyOpt.isSome() and receiptsOpt.isSome():
-          let
-            body = bodyOpt.unsafeGet()
-            receipts = receiptsOpt.unsafeGet()
-            logs = deriveLogs(header, body.transactions, receipts)
-            filteredLogs = filterLogs(
-              logs, filterOptions.address, filterOptions.topics)
-
-          return filteredLogs
-        else:
-          if bodyOpt.isNone():
-            raise newException(ValueError,
-              "Could not find block body for requested hash")
-          else:
-            raise newException(ValueError,
-              "Could not find receipts for requested hash")
-      else:
-        # bloomfilter returned false, we do know that there are no logs
-        # matching the given criteria
-        return @[]
+    if headerBloomFilter(header, filterOptions.address, filterOptions.topics):
+      # TODO: These queries could be done concurrently, investigate if there
+      # are no assumptions about usage of concurrent queries on portal
+      # wire protocol level
+      let
+        body = (await historyNetwork.getBlockBody(hash, header)).valueOr:
+          raise newException(ValueError,
+            "Could not find block body for requested hash")
+        receipts = (await historyNetwork.getReceipts(hash, header)).valueOr:
+          raise newException(ValueError,
+            "Could not find receipts for requested hash")
+
+        logs = deriveLogs(header, body.transactions, receipts)
+        filteredLogs = filterLogs(
+          logs, filterOptions.address, filterOptions.topics)
+
+      return filteredLogs
+    else:
+      # bloomfilter returned false, there are no logs matching the criteria
+      return @[]
@@ -26,9 +26,6 @@ proc toContentId(contentKey: ByteList): results.Opt[ContentId] =
   let idHash = sha256.digest(contentKey.asSeq())
   ok(readUintBE[256](idHash.data))
 
-proc dbGetHandler(db: ContentDB, contentId: ContentId): Option[seq[byte]] =
-  db.get(contentId)
-
 proc initPortalProtocol(
     rng: ref HmacDrbgContext,
     privKey: PrivateKey,
@@ -198,10 +198,6 @@ proc testContentIdHandler(contentKey: ByteList): results.Opt[ContentId] =
   let idHash = sha256.digest("test")
   ok(readUintBE[256](idHash.data))
 
-proc dbGetHandler(db: ContentDB, contentId: ContentId):
-    Option[seq[byte]] =
-  db.get(contentId)
-
 proc run(config: PortalCliConf) =
   let
     rng = newRng()