diff --git a/fluffy/database/era1_db.nim b/fluffy/database/era1_db.nim
index 48a0dd24c..d2c5841bc 100644
--- a/fluffy/database/era1_db.nim
+++ b/fluffy/database/era1_db.nim
@@ -19,7 +19,7 @@ type Era1DB* = ref object
   ## a linear history of pre-merge execution chain data.
   path: string
   network: string
-  accumulator: FinishedAccumulator
+  accumulator: FinishedHistoricalHashesAccumulator
   files: seq[Era1File]
 
 proc getEra1File(db: Era1DB, era: Era1): Result[Era1File, string] =
@@ -51,7 +51,10 @@ proc getEra1File(db: Era1DB, era: Era1): Result[Era1File, string] =
   ok(f)
 
 proc new*(
-    T: type Era1DB, path: string, network: string, accumulator: FinishedAccumulator
+    T: type Era1DB,
+    path: string,
+    network: string,
+    accumulator: FinishedHistoricalHashesAccumulator,
 ): Era1DB =
   Era1DB(path: path, network: network, accumulator: accumulator)
 
diff --git a/fluffy/eth_data/history_data_ssz_e2s.nim b/fluffy/eth_data/history_data_ssz_e2s.nim
index 8b5265a2e..8d2421876 100644
--- a/fluffy/eth_data/history_data_ssz_e2s.nim
+++ b/fluffy/eth_data/history_data_ssz_e2s.nim
@@ -19,11 +19,13 @@ export results
 
 # Reading SSZ data from files
 
-proc readAccumulator*(file: string): Result[FinishedAccumulator, string] =
+proc readAccumulator*(
+    file: string
+): Result[FinishedHistoricalHashesAccumulator, string] =
   let encodedAccumulator = ?readAllFile(file).mapErr(toString)
 
   try:
-    ok(SSZ.decode(encodedAccumulator, FinishedAccumulator))
+    ok(SSZ.decode(encodedAccumulator, FinishedHistoricalHashesAccumulator))
   except SerializationError as e:
     err("Failed decoding accumulator: " & e.msg)
 
diff --git a/fluffy/network/history/content/content_deprecated.nim b/fluffy/network/history/content/content_deprecated.nim
index 66b3e188a..c024ef24b 100644
--- a/fluffy/network/history/content/content_deprecated.nim
+++ b/fluffy/network/history/content/content_deprecated.nim
@@ -52,7 +52,7 @@ func toContentId(contentKey: ContentKeyByteList): ContentId =
   readUintBE[256](idHash.data)
 
 proc pruneDeprecatedAccumulatorRecords*(
-    accumulator: FinishedAccumulator, contentDB: ContentDB
+    accumulator: FinishedHistoricalHashesAccumulator, contentDB: ContentDB
 ) =
   info "Pruning deprecated accumulator records"
 
diff --git a/fluffy/network/history/content/content_values.nim b/fluffy/network/history/content/content_values.nim
index f524e4294..ee44a14d0 100644
--- a/fluffy/network/history/content/content_values.nim
+++ b/fluffy/network/history/content/content_values.nim
@@ -27,18 +27,18 @@ const
 
 type
   ## BlockHeader types
-  AccumulatorProof* = array[15, Digest]
+  HistoricalHashesAccumulatorProof* = array[15, Digest]
 
   BlockHeaderProofType* = enum
     none = 0x00 # An SSZ Union None
-    accumulatorProof = 0x01
+    historicalHashesAccumulatorProof = 0x01
 
   BlockHeaderProof* = object
     case proofType*: BlockHeaderProofType
     of none:
       discard
-    of accumulatorProof:
-      accumulatorProof*: AccumulatorProof
+    of historicalHashesAccumulatorProof:
+      historicalHashesAccumulatorProof*: HistoricalHashesAccumulatorProof
 
   BlockHeaderWithProof* = object
     header*: ByteList[MAX_HEADER_LENGTH] # RLP data
@@ -68,8 +68,10 @@ type
   ReceiptByteList* = ByteList[MAX_RECEIPT_LENGTH] # RLP data
   PortalReceipts* = List[ReceiptByteList, MAX_TRANSACTION_COUNT]
 
-func init*(T: type BlockHeaderProof, proof: AccumulatorProof): T =
-  BlockHeaderProof(proofType: accumulatorProof, accumulatorProof: proof)
+func init*(T: type BlockHeaderProof, proof: HistoricalHashesAccumulatorProof): T =
+  BlockHeaderProof(
+    proofType: historicalHashesAccumulatorProof, historicalHashesAccumulatorProof: proof
+  )
 
 func init*(T: type BlockHeaderProof): T =
   BlockHeaderProof(proofType: none)
diff --git a/fluffy/network/history/history_network.nim b/fluffy/network/history/history_network.nim
index a7f83dd7e..109da6df3 100644
--- a/fluffy/network/history/history_network.nim
+++ b/fluffy/network/history/history_network.nim
@@ -31,7 +31,7 @@ type
     portalProtocol*: PortalProtocol
     contentDB*: ContentDB
    contentQueue*: AsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])]
-    accumulator*: FinishedAccumulator
+    accumulator*: FinishedHistoricalHashesAccumulator
     historicalRoots*: HistoricalRoots
     processContentLoop: Future[void]
     statusLogLoop: Future[void]
@@ -636,7 +636,7 @@ proc new*(
     baseProtocol: protocol.Protocol,
     contentDB: ContentDB,
     streamManager: StreamManager,
-    accumulator: FinishedAccumulator,
+    accumulator: FinishedHistoricalHashesAccumulator,
     historicalRoots: HistoricalRoots = loadHistoricalRoots(),
     bootstrapRecords: openArray[Record] = [],
     portalConfig: PortalProtocolConfig = defaultPortalProtocolConfig,
diff --git a/fluffy/network/history/validation/historical_hashes_accumulator.nim b/fluffy/network/history/validation/historical_hashes_accumulator.nim
index 1dd39cf14..b1cbc2cdc 100644
--- a/fluffy/network/history/validation/historical_hashes_accumulator.nim
+++ b/fluffy/network/history/validation/historical_hashes_accumulator.nim
@@ -17,9 +17,8 @@ import
 
 export ssz_serialization, merkleization, proofs, eth_types_rlp
 
-# Header Accumulator, as per specification:
-# https://github.com/ethereum/portal-network-specs/blob/master/history-network.md#the-header-accumulator
-# But with the adjustment to finish the accumulator at merge point.
+# HistoricalHashesAccumulator, as per specification:
+# https://github.com/ethereum/portal-network-specs/blob/master/history/history-network.md#the-historical-hashes-accumulator
 
 const
   EPOCH_SIZE* = 8192 # block roots per epoch record
@@ -60,18 +59,17 @@ type
   # obviously much faster, so this second type is added for this usage.
   EpochRecordCached* = HashList[HeaderRecord, EPOCH_SIZE]
 
-  # HistoricalHashesAccumulator
-  Accumulator* = object
+  HistoricalHashesAccumulator* = object
     historicalEpochs*: List[Bytes32, int(MAX_HISTORICAL_EPOCHS)]
     currentEpoch*: EpochRecord
 
   # HistoricalHashesAccumulator in its final state
-  FinishedAccumulator* = object
+  FinishedHistoricalHashesAccumulator* = object
     historicalEpochs*: List[Bytes32, int(MAX_HISTORICAL_EPOCHS)]
     currentEpoch*: EpochRecord
 
-func init*(T: type Accumulator): T =
-  Accumulator(
+func init*(T: type HistoricalHashesAccumulator): T =
+  HistoricalHashesAccumulator(
     historicalEpochs: List[Bytes32, int(MAX_HISTORICAL_EPOCHS)].init(@[]),
     currentEpoch: EpochRecord.init(@[]),
   )
@@ -81,7 +79,7 @@ func getEpochRecordRoot*(headerRecords: openArray[HeaderRecord]): Digest =
 
   hash_tree_root(epochRecord)
 
-func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
+func updateAccumulator*(a: var HistoricalHashesAccumulator, header: BlockHeader) =
   doAssert(
     header.number < mergeBlockNumber, "No post merge blocks for header accumulator"
   )
@@ -95,7 +93,7 @@ func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
   # TODO: It is a bit annoying to require an extra header + update call to
   # finish an epoch. However, if we were to move this after adding the
   # `HeaderRecord`, there would be no way to get the current total difficulty,
-  # unless another field is introduced in the `Accumulator` object.
+  # unless another field is introduced in the `HistoricalHashesAccumulator` object.
   if a.currentEpoch.len() == EPOCH_SIZE:
     let epochHash = hash_tree_root(a.currentEpoch)
 
@@ -110,17 +108,19 @@ func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
   let res = a.currentEpoch.add(headerRecord)
   doAssert(res, "Can't fail because of currentEpoch length check")
 
-func finishAccumulator*(a: var Accumulator): FinishedAccumulator =
+func finishAccumulator*(
+    a: var HistoricalHashesAccumulator
+): FinishedHistoricalHashesAccumulator =
   # doAssert(a.currentEpoch[^2].totalDifficulty < TERMINAL_TOTAL_DIFFICULTY)
   # doAssert(a.currentEpoch[^1].totalDifficulty >= TERMINAL_TOTAL_DIFFICULTY)
 
   let epochHash = hash_tree_root(a.currentEpoch)
   doAssert(a.historicalEpochs.add(epochHash.data))
 
-  FinishedAccumulator(historicalEpochs: a.historicalEpochs)
+  FinishedHistoricalHashesAccumulator(historicalEpochs: a.historicalEpochs)
 
 ## Calls and helper calls for building header proofs and verifying headers
-## against the Accumulator and the header proofs.
+## against the HistoricalHashesAccumulator and the header proofs.
 
 func getEpochIndex*(blockNumber: uint64): uint64 =
   blockNumber div EPOCH_SIZE
@@ -144,7 +144,9 @@ func isPreMerge*(header: BlockHeader): bool =
   isPreMerge(header.number)
 
 func verifyProof(
-    a: FinishedAccumulator, header: BlockHeader, proof: openArray[Digest]
+    a: FinishedHistoricalHashesAccumulator,
+    header: BlockHeader,
+    proof: openArray[Digest],
 ): bool =
   let
     epochIndex = getEpochIndex(header)
@@ -153,13 +155,14 @@ func verifyProof(
     leave = hash_tree_root(header.blockHash())
     headerRecordIndex = getHeaderRecordIndex(header, epochIndex)
 
-    # TODO: Implement more generalized `get_generalized_index`
     gIndex = GeneralizedIndex(EPOCH_SIZE * 2 * 2 + (headerRecordIndex * 2))
 
   verify_merkle_multiproof(@[leave], proof, @[gIndex], epochRecordHash)
 
 func verifyAccumulatorProof*(
-    a: FinishedAccumulator, header: BlockHeader, proof: AccumulatorProof
+    a: FinishedHistoricalHashesAccumulator,
+    header: BlockHeader,
+    proof: HistoricalHashesAccumulatorProof,
 ): Result[void, string] =
   if header.isPreMerge():
     # Note: The proof is typed with correct depth, so no check on this is
@@ -172,14 +175,14 @@ func verifyAccumulatorProof*(
     err("Cannot verify post merge header with accumulator proof")
 
 func verifyHeader*(
-    a: FinishedAccumulator, header: BlockHeader, proof: BlockHeaderProof
+    a: FinishedHistoricalHashesAccumulator, header: BlockHeader, proof: BlockHeaderProof
 ): Result[void, string] =
   case proof.proofType
-  of BlockHeaderProofType.accumulatorProof:
-    a.verifyAccumulatorProof(header, proof.accumulatorProof)
+  of BlockHeaderProofType.historicalHashesAccumulatorProof:
+    a.verifyAccumulatorProof(header, proof.historicalHashesAccumulatorProof)
   of BlockHeaderProofType.none:
     if header.isPreMerge():
-      err("Pre merge header requires AccumulatorProof")
+      err("Pre merge header requires HistoricalHashesAccumulatorProof")
     else:
       # TODO:
      # Currently there is no proof solution for verifying headers post-merge.
@@ -191,17 +194,16 @@ func verifyHeader*(
 
 func buildProof*(
     header: BlockHeader, epochRecord: EpochRecord | EpochRecordCached
-): Result[AccumulatorProof, string] =
+): Result[HistoricalHashesAccumulatorProof, string] =
   doAssert(header.isPreMerge(), "Must be pre merge header")
 
   let
     epochIndex = getEpochIndex(header)
     headerRecordIndex = getHeaderRecordIndex(header, epochIndex)
 
-    # TODO: Implement more generalized `get_generalized_index`
     gIndex = GeneralizedIndex(EPOCH_SIZE * 2 * 2 + (headerRecordIndex * 2))
 
-  var proof: AccumulatorProof
+  var proof: HistoricalHashesAccumulatorProof
   ?epochRecord.build_proof(gIndex, proof)
 
   ok(proof)
diff --git a/fluffy/network_metadata.nim b/fluffy/network_metadata.nim
index 08f7b9476..bb6012b05 100644
--- a/fluffy/network_metadata.nim
+++ b/fluffy/network_metadata.nim
@@ -52,9 +52,9 @@ const
 
   historicalRootsSSZ* = slurp(portalConfigDir / "historical_roots.ssz")
 
-func loadAccumulator*(): FinishedAccumulator =
+func loadAccumulator*(): FinishedHistoricalHashesAccumulator =
   try:
-    SSZ.decode(historicalHashesAccumulatorSSZ, FinishedAccumulator)
+    SSZ.decode(historicalHashesAccumulatorSSZ, FinishedHistoricalHashesAccumulator)
   except SerializationError as err:
     raiseAssert "Invalid baked-in accumulator: " & err.msg
 
diff --git a/fluffy/tests/history_network_tests/test_historical_hashes_accumulator.nim b/fluffy/tests/history_network_tests/test_historical_hashes_accumulator.nim
index 31eb7d820..7ac4b4a12 100644
--- a/fluffy/tests/history_network_tests/test_historical_hashes_accumulator.nim
+++ b/fluffy/tests/history_network_tests/test_historical_hashes_accumulator.nim
@@ -57,7 +57,7 @@ suite "Historical Hashes Accumulator":
 
     block: # Test invalid headers
       # Post merge block number must fail (> than latest header in accumulator)
-      var proof: AccumulatorProof
+      var proof: HistoricalHashesAccumulatorProof
       let header = BlockHeader(number: mergeBlockNumber)
 
       check verifyAccumulatorProof(accumulator, header, proof).isErr()
@@ -72,7 +72,7 @@ suite "Historical Hashes Accumulator":
       check verifyAccumulatorProof(accumulator, header, proof.get()).isErr()
 
     block: # Test invalid proofs
-      var proof: AccumulatorProof
+      var proof: HistoricalHashesAccumulatorProof
 
       for i in headersToTest:
         check verifyAccumulatorProof(accumulator, headers[i], proof).isErr()
diff --git a/fluffy/tests/history_network_tests/test_historical_hashes_accumulator_root.nim b/fluffy/tests/history_network_tests/test_historical_hashes_accumulator_root.nim
index 4f3b6b8e6..3583065bc 100644
--- a/fluffy/tests/history_network_tests/test_historical_hashes_accumulator_root.nim
+++ b/fluffy/tests/history_network_tests/test_historical_hashes_accumulator_root.nim
@@ -45,7 +45,7 @@ suite "Historical Hashes Accumulator Root":
       let header = res.get()
       headers[header.number] = header
 
-    var accumulator: Accumulator
+    var accumulator: HistoricalHashesAccumulator
     for i, hash in hashTreeRoots:
       updateAccumulator(accumulator, headers[i])
 
diff --git a/fluffy/tests/history_network_tests/test_history_network.nim b/fluffy/tests/history_network_tests/test_history_network.nim
index 8a60b5015..9b89d31df 100644
--- a/fluffy/tests/history_network_tests/test_history_network.nim
+++ b/fluffy/tests/history_network_tests/test_history_network.nim
@@ -24,7 +24,9 @@ type HistoryNode = ref object
   historyNetwork*: HistoryNetwork
 
 proc newHistoryNode(
-    rng: ref HmacDrbgContext, port: int, accumulator: FinishedAccumulator
+    rng: ref HmacDrbgContext,
+    port: int,
+    accumulator: FinishedHistoricalHashesAccumulator,
 ): HistoryNode =
   let
     node = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(port))
diff --git a/fluffy/tests/history_network_tests/test_history_util.nim b/fluffy/tests/history_network_tests/test_history_util.nim
index 00cd1fa7a..577deb122 100644
--- a/fluffy/tests/history_network_tests/test_history_util.nim
+++ b/fluffy/tests/history_network_tests/test_history_util.nim
@@ -31,8 +31,10 @@ proc buildHeadersWithProof*(
 
   ok(blockHeadersWithProof)
 
-func buildAccumulator*(headers: seq[BlockHeader]): Result[FinishedAccumulator, string] =
-  var accumulator: Accumulator
+func buildAccumulator*(
+    headers: seq[BlockHeader]
+): Result[FinishedHistoricalHashesAccumulator, string] =
+  var accumulator: HistoricalHashesAccumulator
   for header in headers:
     updateAccumulator(accumulator, header)
 
@@ -43,8 +45,8 @@ func buildAccumulator*(headers: seq[BlockHeader]): Result[FinishedAccumulator, s
 
 func buildAccumulatorData*(
     headers: seq[BlockHeader]
-): Result[(FinishedAccumulator, seq[EpochRecord]), string] =
-  var accumulator: Accumulator
+): Result[(FinishedHistoricalHashesAccumulator, seq[EpochRecord]), string] =
+  var accumulator: HistoricalHashesAccumulator
   var epochRecords: seq[EpochRecord]
   for header in headers:
     updateAccumulator(accumulator, header)
@@ -61,7 +63,7 @@ func buildAccumulatorData*(
 
 func buildProof*(
     header: BlockHeader, epochRecords: seq[EpochRecord]
-): Result[AccumulatorProof, string] =
+): Result[HistoricalHashesAccumulatorProof, string] =
   let epochIndex = getEpochIndex(header)
   doAssert(epochIndex < uint64(epochRecords.len()))
   let epochRecord = epochRecords[epochIndex]
diff --git a/fluffy/tests/state_network_tests/state_test_helpers.nim b/fluffy/tests/state_network_tests/state_test_helpers.nim
index 94fd2d907..be57c0618 100644
--- a/fluffy/tests/state_network_tests/state_test_helpers.nim
+++ b/fluffy/tests/state_network_tests/state_test_helpers.nim
@@ -119,7 +119,9 @@ proc newStateNode*(
       "", uint32.high, RadiusConfig(kind: Dynamic), node.localNode.id, inMemory = true
     )
     sm = StreamManager.new(node)
-    hn = HistoryNetwork.new(PortalNetwork.none, node, db, sm, FinishedAccumulator())
+    hn = HistoryNetwork.new(
+      PortalNetwork.none, node, db, sm, FinishedHistoricalHashesAccumulator()
+    )
     sn =
       StateNetwork.new(PortalNetwork.none, node, db, sm, historyNetwork = Opt.some(hn))
diff --git a/fluffy/tools/eth_data_exporter.nim b/fluffy/tools/eth_data_exporter.nim
index 050d08a1d..2a3afbbed 100644
--- a/fluffy/tools/eth_data_exporter.nim
+++ b/fluffy/tools/eth_data_exporter.nim
@@ -199,7 +199,7 @@ proc cmdExportEra1(config: ExporterConf) =
       # error cases are fatal. But maybe we could throw proper errors still.
      var group = Era1Group.init(e2, startNumber).get()
 
-      # Header records to build the accumulator root
+      # Header records to build the HistoricalHashesAccumulator root
      var headerRecords: seq[historical_hashes_accumulator.HeaderRecord]
      for blockNumber in startNumber .. endNumber:
        let blck =
@@ -234,7 +234,7 @@ proc cmdExportEra1(config: ExporterConf) =
    if completed:
      let name = era1FileName("mainnet", era, accumulatorRoot)
      # We cannot check for the exact file any earlier as we need to know the
-      # accumulator root.
+      # HistoricalHashesAccumulator root.
      # TODO: Could scan for file with era number in it.
      if isFile(name):
        info "Era1 file already exists", era, name
@@ -381,7 +381,8 @@ when isMainModule:
      # to build it.
      let accumulatorFile = dataDir / config.accumulatorFileName
      if isFile(accumulatorFile):
-        notice "Not building accumulator, file already exists", file = accumulatorFile
+        notice "Not building HistoricalHashesAccumulator, file already exists",
+          file = accumulatorFile
        quit 1
 
      # Lets verify if the necessary files exists before starting to build the
@@ -394,8 +395,8 @@ when isMainModule:
 
      proc buildAccumulator(
          dataDir: string, writeEpochRecords = false
-      ): Result[FinishedAccumulator, string] =
-        var accumulator: Accumulator
+      ): Result[FinishedHistoricalHashesAccumulator, string] =
+        var accumulator: HistoricalHashesAccumulator
        for i in 0 ..< preMergeEpochs:
          let file =
            try:
@@ -453,43 +454,45 @@ when isMainModule:
            if blockHeader.number == mergeBlockNumber - 1:
              let finishedAccumulator = finishAccumulator(accumulator)
 
-              info "Updated last epoch, finished building master accumulator",
+              info "Updated last epoch, finished building HistoricalHashesAccumulator",
                epoch = i
              return ok(finishedAccumulator)
          else:
            warn "Skipping record, not a block header", typ = toHex(header.typ)
 
-      err("Not enough headers provided to finish the accumulator")
+      err("Not enough headers provided to finish the HistoricalHashesAccumulator")
 
      let accumulatorRes = buildAccumulator(dataDir, config.writeEpochRecords)
      if accumulatorRes.isErr():
-        fatal "Could not build accumulator", error = accumulatorRes.error
+        fatal "Could not build HistoricalHashesAccumulator",
+          error = accumulatorRes.error
        quit 1
 
      let accumulator = accumulatorRes.get()
      let res = io2.writeFile(accumulatorFile, SSZ.encode(accumulator))
      if res.isErr():
-        error "Failed writing accumulator to file",
+        error "Failed writing HistoricalHashesAccumulator to file",
          file = accumulatorFile, error = res.error
        quit 1
      else:
-        notice "Succesfully wrote Historical Hashes Accumulator to file",
+        notice "Successfully wrote HistoricalHashesAccumulator to file",
          file = accumulatorFile
 
    of HistoryCmd.printAccumulatorData:
      let file = dataDir / config.accumulatorFileNamePrint
      let res = readAccumulator(file)
      if res.isErr():
-        fatal "Failed reading accumulator from file", error = res.error, file
+        fatal "Failed reading HistoricalHashesAccumulator from file",
+          error = res.error, file
        quit 1
 
      let
        accumulator = res.get()
        accumulatorRoot = hash_tree_root(accumulator)
 
-      info "Accumulator decoded successfully", root = accumulatorRoot
+      info "HistoricalHashesAccumulator decoded successfully", root = accumulatorRoot
 
-      echo "Master Accumulator:"
+      echo "HistoricalHashesAccumulator:"
      echo "-------------------"
      echo &"Root: {accumulatorRoot}"
      echo ""
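
For reference, a minimal sketch of how the renamed API fits together, modeled on `buildAccumulator` and `buildProof` from test_history_util.nim above. It is not part of the diff: the proc name `verifyFirstHeader`, the import paths, and the inputs `headers` (pre-merge headers ending at `mergeBlockNumber - 1`) and `epochRecord` are illustrative assumptions.

import results
import ./fluffy/network/history/validation/historical_hashes_accumulator
import ./fluffy/network/history/content/content_values

proc verifyFirstHeader(
    headers: seq[BlockHeader], epochRecord: EpochRecord
): Result[void, string] =
  # Accumulate the pre-merge headers, then freeze the accumulator into its
  # final FinishedHistoricalHashesAccumulator form.
  var accumulator = HistoricalHashesAccumulator.init()
  for header in headers:
    updateAccumulator(accumulator, header)
  let finished = finishAccumulator(accumulator)

  # Build a proof for one header against its epoch record and verify it,
  # both directly and wrapped in the BlockHeaderProof SSZ union.
  let proof = ?buildProof(headers[0], epochRecord)
  ?finished.verifyAccumulatorProof(headers[0], proof)
  finished.verifyHeader(headers[0], BlockHeaderProof.init(proof))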