# Nimbus # Copyright (c) 2021-2023 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. {.push raises: [].} import stew/results, chronos, chronicles, eth/[common/eth_types_rlp, rlp, trie, trie/db], eth/p2p/discoveryv5/[protocol, enr], ../../content_db, ../../../nimbus/constants, ../wire/[portal_protocol, portal_stream, portal_protocol_config], "."/[history_content, accumulator] logScope: topics = "portal_hist" export accumulator # This looks like it makes no sense, because it makes no sense. It's a # workaround for what seems to be a compiler bug; see here: # # https://github.com/status-im/nimbus-eth1/pull/1465 # # Without this, the call `error` on a `Result` might give a compiler error for # the `Result[BlockHeader, string]` or `Result[seq[BlockHeader], string]` types. # The error is due to the `$` for `BlockHeader causing side effects, which # appears to be due to the timestamp field, which is of `times.Time` type. Its # `$` from the times module has side effects (Yes, silly times). In (my) theory # this `$` should not leak here, but it seems to do. To workaround this we # introduce this additional `$` call, which appears to work. # # Note that this also fixes the same error in another module, even when not # specifically exporting (no asterisk) the call. # # If you think this is unnecessary, feel free to try deleting it; if all the # tests still pass after deleting it, feel free to leave it out. In the # meantime, please just ignore it and go on with your life. 
# proc `$`(x: BlockHeader): string = $x

const
  historyProtocolId* = [byte 0x50, 0x0B]

type
  HistoryNetwork* = ref object
    portalProtocol*: PortalProtocol
    contentDB*: ContentDB
    # Incoming offered content (keys + payloads) awaiting validation/storage.
    contentQueue*: AsyncQueue[(ContentKeysList, seq[seq[byte]])]
    # Master accumulator used to verify block header proofs.
    accumulator*: FinishedAccumulator
    processContentLoop: Future[void]
    statusLogLoop: Future[void]

  Block* = (BlockHeader, BlockBody)

func toContentIdHandler(contentKey: ByteList): results.Opt[ContentId] =
  ## Map an encoded content key to its content id for the portal protocol.
  ok(toContentId(contentKey))

func decodeRlp*(input: openArray[byte], T: type): Result[T, string] =
  ## RLP-decode `input` as `T`, converting a raised `RlpError` into an error
  ## Result instead of an exception.
  try:
    ok(rlp.decode(input, T))
  except RlpError as e:
    err(e.msg)

func decodeSsz*(input: openArray[byte], T: type): Result[T, string] =
  ## SSZ-decode `input` as `T`, converting a raised `SszError` into an error
  ## Result instead of an exception.
  try:
    ok(SSZ.decode(input, T))
  except SszError as e:
    err(e.msg)

## Calls to go from SSZ decoded types to RLP fully decoded types

func fromPortalBlockBody*(
    T: type BlockBody, body: BlockBodySSZ): Result[T, string] =
  ## Get the full decoded BlockBody from the SSZ-decoded `PortalBlockBody`.
  ## Each transaction and the uncles list are individually RLP-encoded inside
  ## the SSZ container, hence the per-item decode here.
  try:
    var transactions: seq[Transaction]
    for tx in body.transactions:
      transactions.add(rlp.decode(tx.asSeq(), Transaction))

    let uncles = rlp.decode(body.uncles.asSeq(), seq[BlockHeader])

    ok(BlockBody(transactions: transactions, uncles: uncles))
  except RlpError as e:
    err("RLP decoding failed: " & e.msg)

func fromReceipts*(
    T: type seq[Receipt], receipts: ReceiptsSSZ): Result[T, string] =
  ## Get the full decoded seq[Receipt] from the SSZ-decoded `Receipts`.
  try:
    var res: seq[Receipt]
    for receipt in receipts:
      res.add(rlp.decode(receipt.asSeq(), Receipt))

    ok(res)
  except RlpError as e:
    err("RLP decoding failed: " & e.msg)

## Calls to encode Block types to the SSZ types.
func fromBlockBody(T: type BlockBodySSZ, body: BlockBody): T =
  ## Encode a fully decoded `BlockBody` into its SSZ wire representation:
  ## every transaction is RLP-encoded into an SSZ List element, the uncles
  ## are RLP-encoded as one blob.
  var transactions: Transactions
  for tx in body.transactions:
    # add returns false when the SSZ List limit is hit; a valid block body
    # fits, so the result is discarded here.
    discard transactions.add(TransactionByteList(rlp.encode(tx)))

  let uncles = Uncles(rlp.encode(body.uncles))

  BlockBodySSZ(transactions: transactions, uncles: uncles)

func fromReceipts*(T: type ReceiptsSSZ, receipts: seq[Receipt]): T =
  ## Encode a list of receipts into its SSZ wire representation, with each
  ## receipt individually RLP-encoded.
  var receiptsSSZ: ReceiptsSSZ
  for receipt in receipts:
    # See note in fromBlockBody regarding the discarded add result.
    discard receiptsSSZ.add(ReceiptByteList(rlp.encode(receipt)))

  receiptsSSZ

func encode*(blockBody: BlockBody): seq[byte] =
  ## SSZ-encode a block body for network transfer / storage.
  let portalBlockBody = BlockBodySSZ.fromBlockBody(blockBody)
  SSZ.encode(portalBlockBody)

func encode*(receipts: seq[Receipt]): seq[byte] =
  ## SSZ-encode receipts for network transfer / storage.
  let portalReceipts = ReceiptsSSZ.fromReceipts(receipts)
  SSZ.encode(portalReceipts)

## Calls and helper calls to do validation of block header, body and receipts
# TODO: Failures on validation and perhaps deserialisation should be punished
# for if/when peer scoring/banning is added.

proc calcRootHash(items: Transactions | ReceiptsSSZ): Hash256 =
  ## Build a hexary trie keyed by RLP-encoded item index and return its root,
  ## as used for the header's transactionsRoot / receiptsRoot.
  var tr = initHexaryTrie(newMemoryDB())
  for i, item in items:
    try:
      tr.put(rlp.encode(i), item.asSeq())
    except RlpError as e:
      # TODO: Investigate this RlpError as it doesn't sound like this is
      # something that can actually occur.
      raiseAssert(e.msg)

  return tr.rootHash

template calcTxsRoot*(transactions: Transactions): Hash256 =
  calcRootHash(transactions)

template calcReceiptsRoot*(receipts: ReceiptsSSZ): Hash256 =
  calcRootHash(receipts)

func validateBlockHeaderBytes*(
    bytes: openArray[byte], hash: BlockHash): Result[BlockHeader, string] =
  ## RLP-decode `bytes` as a block header, reject unsupported fork fields,
  ## and verify the header hashes to the expected `hash`.
  let header = ? decodeRlp(bytes, BlockHeader)

  if header.excessDataGas.isSome:
    return err("EIP-4844 not yet implemented")
  if header.withdrawalsRoot.isSome:
    return err("Withdrawals not yet implemented")

  if not (header.blockHash() == hash):
    err("Block header hash does not match")
  else:
    ok(header)

proc validateBlockBody(
    body: BlockBodySSZ, txsRoot, ommersHash: KeccakHash):
    Result[void, string] =
  ## Validate the block body against the txsRoot and ommersHash from the
  ## header.
  let calculatedOmmersHash = keccakHash(body.uncles.asSeq())
  if calculatedOmmersHash != ommersHash:
    return err("Invalid ommers hash")

  let calculatedTxsRoot = calcTxsRoot(body.transactions)
  if calculatedTxsRoot != txsRoot:
    return err("Invalid transactions root")

  ok()

proc validateBlockBodyBytes*(
    bytes: openArray[byte], txRoot, ommersHash: KeccakHash):
    Result[BlockBody, string] =
  ## Fully decode the SSZ Block Body and validate it against the header.
  let body = ? decodeSsz(bytes, BlockBodySSZ)
  ? validateBlockBody(body, txRoot, ommersHash)
  BlockBody.fromPortalBlockBody(body)

proc validateReceipts*(
    receipts: ReceiptsSSZ, receiptsRoot: KeccakHash): Result[void, string] =
  ## Verify the trie root of the given receipts against the header's
  ## receiptsRoot.
  let calculatedReceiptsRoot = calcReceiptsRoot(receipts)

  if calculatedReceiptsRoot != receiptsRoot:
    return err("Unexpected receipt root")
  else:
    return ok()

proc validateReceiptsBytes*(
    bytes: openArray[byte],
    receiptsRoot: KeccakHash): Result[seq[Receipt], string] =
  ## Fully decode the SSZ Block Body and validate it against the header.
  let receipts = ? decodeSsz(bytes, ReceiptsSSZ)

  ? validateReceipts(receipts, receiptsRoot)

  seq[Receipt].fromReceipts(receipts)

## ContentDB helper calls for specific history network types

proc get(db: ContentDB, T: type BlockHeader, contentId: ContentId): Opt[T] =
  ## Fetch a block header from the database. Stored content is a
  ## `BlockHeaderWithProof`; only the header part is decoded and returned.
  let contentFromDB = db.get(contentId)
  if contentFromDB.isSome():
    let headerWithProof =
      try:
        SSZ.decode(contentFromDB.get(), BlockHeaderWithProof)
      except SszError as e:
        # Content is validated before storage, so a decode failure here means
        # database corruption -> treat as a Defect.
        raiseAssert(e.msg)

    let res = decodeRlp(headerWithProof.header.asSeq(), T)
    if res.isErr():
      raiseAssert(res.error)
    else:
      Opt.some(res.get())
  else:
    Opt.none(T)

proc get(db: ContentDB, T: type BlockBody, contentId: ContentId): Opt[T] =
  ## Fetch and fully decode a block body from the database.
  let contentFromDB = db.getSszDecoded(contentId, BlockBodySSZ)
  if contentFromDB.isSome():
    let res = T.fromPortalBlockBody(contentFromDB.get())
    if res.isErr():
      # Stored content was validated before storage; failure implies
      # database corruption.
      raiseAssert(res.error)
    else:
      Opt.some(res.get())
  else:
    Opt.none(T)

proc get(db: ContentDB, T: type seq[Receipt], contentId: ContentId): Opt[T] =
  ## Fetch and fully decode receipts from the database.
  let contentFromDB = db.getSszDecoded(contentId, ReceiptsSSZ)
  if contentFromDB.isSome():
    let res = T.fromReceipts(contentFromDB.get())
    if res.isErr():
      # Stored content was validated before storage; failure implies
      # database corruption.
      raiseAssert(res.error)
    else:
      Opt.some(res.get())
  else:
    Opt.none(T)

proc get(
    db: ContentDB, T: type EpochAccumulator, contentId: ContentId): Opt[T] =
  ## Fetch an SSZ-decoded epoch accumulator from the database.
  db.getSszDecoded(contentId, T)

proc getContentFromDb(
    n: HistoryNetwork, T: type, contentId: ContentId): Opt[T] =
  ## Look up content locally, but only when the id falls within this node's
  ## storage radius; otherwise it cannot be stored here.
  if n.portalProtocol.inRange(contentId):
    n.contentDB.get(T, contentId)
  else:
    Opt.none(T)

## Public API to get the history network specific types, either from database
## or through a lookup on the Portal Network

const requestRetries = 4
# TODO: Currently doing 4 retries on lookups but only when the validation fails.
# This is to avoid nodes that provide garbage from blocking us with getting the
# requested data. Might want to also do that on a failed lookup, as perhaps this
# could occur when being really unlucky with nodes timing out on requests.
# Additionally, more improvements could be done with the lookup, as currently # ongoing requests are cancelled after the receival of the first response, # however that response is not yet validated at that moment. func verifyHeader( n: HistoryNetwork, header: BlockHeader, proof: BlockHeaderProof): Result[void, string] = verifyHeader(n.accumulator, header, proof) proc getVerifiedBlockHeader*( n: HistoryNetwork, hash: BlockHash): Future[Opt[BlockHeader]] {.async.} = let contentKey = ContentKey.init(blockHeader, hash).encode() contentId = contentKey.toContentId() logScope: hash contentKey # Note: This still requests a BlockHeaderWithProof from the database, as that # is what is stored. But the proof doesn't need to be verified as it gets # gets verified before storing. let headerFromDb = n.getContentFromDb(BlockHeader, contentId) if headerFromDb.isSome(): info "Fetched block header from database" return headerFromDb for i in 0..