2021-07-15 13:12:33 +00:00
|
|
|
# Nimbus - Portal Network
|
2024-01-24 15:28:03 +00:00
|
|
|
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
2021-07-15 13:12:33 +00:00
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
2024-01-09 15:09:02 +00:00
|
|
|
{.push raises: [].}
|
|
|
|
|
2021-07-15 13:12:33 +00:00
|
|
|
import
|
2024-01-24 15:28:03 +00:00
|
|
|
std/net,
|
2023-12-21 09:05:53 +00:00
|
|
|
eth/[common, keys, rlp, trie, trie/db],
|
2021-07-15 13:12:33 +00:00
|
|
|
eth/p2p/discoveryv5/[enr, node, routing_table],
|
2022-10-18 11:07:32 +00:00
|
|
|
eth/p2p/discoveryv5/protocol as discv5_protocol,
|
2023-12-21 09:05:53 +00:00
|
|
|
../network/history/[accumulator, history_content],
|
2024-01-09 15:09:02 +00:00
|
|
|
../network/state/experimental/state_proof_types,
|
2024-05-03 10:31:54 +00:00
|
|
|
../../nimbus/common/chain_config,
|
2023-12-21 09:05:53 +00:00
|
|
|
../database/content_db
|
2021-07-15 13:12:33 +00:00
|
|
|
|
2024-01-09 15:09:02 +00:00
|
|
|
proc localAddress*(port: int): Address {.raises: [ValueError].} =
  ## Build a loopback (127.0.0.1) `Address` on the given `port`.
  ## Raises `ValueError` if the hard-coded IP literal fails to parse.
  let loopbackIp = parseIpAddress("127.0.0.1")
  Address(ip: loopbackIp, port: Port(port))
|
2021-07-15 13:12:33 +00:00
|
|
|
|
2022-06-24 13:35:31 +00:00
|
|
|
proc initDiscoveryNode*(
    rng: ref HmacDrbgContext,
    privKey: PrivateKey,
    address: Address,
    bootstrapRecords: openArray[Record] = [],
    localEnrFields: openArray[(string, seq[byte])] = [],
    previousRecord = none[enr.Record](),
): discv5_protocol.Protocol {.raises: [CatchableError].} =
  ## Create a discv5 protocol instance bound to `address` and open its
  ## transport. The same port is advertised for both TCP and UDP in the ENR.
  # set bucketIpLimit to allow bucket split
  let discoveryConfig = DiscoveryConfig.init(1000, 24, 5)

  let node = newProtocol(
    privKey,
    some(address.ip),
    some(address.port),
    some(address.port),
    bindPort = address.port,
    bootstrapRecords = bootstrapRecords,
    localEnrFields = localEnrFields,
    previousRecord = previousRecord,
    config = discoveryConfig,
    rng = rng,
  )

  node.open()
  node
|
2022-05-12 16:04:37 +00:00
|
|
|
|
2022-07-04 07:38:02 +00:00
|
|
|
proc genByteSeq*(length: int): seq[byte] =
  ## Generate a deterministic test byte sequence of `length` bytes where
  ## element `i` equals `byte(i)` (wrapping modulo 256 for lengths > 256,
  ## since conversions to unsigned integers in Nim are non-checked).
  ## Returns an empty seq for `length <= 0` elements requested as 0.
  # Idiomatic rewrite: pre-sized `result` plus a range `for` loop instead of
  # the original manual `while` counter and explicit `return` of a local.
  result = newSeq[byte](length)
  for i in 0 ..< length:
    result[i] = byte(i)
|
2022-10-18 11:07:32 +00:00
|
|
|
|
2024-02-28 17:31:45 +00:00
|
|
|
func buildAccumulator*(headers: seq[BlockHeader]): Result[FinishedAccumulator, string] =
  ## Feed `headers` into a fresh accumulator and finish it once the header
  ## just before `mergeBlockNumber` has been processed.
  ## Errors if the headers run out before reaching that block.
  var acc: Accumulator
  for hdr in headers:
    updateAccumulator(acc, hdr)

    if hdr.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
      return ok(finishAccumulator(acc))

  err("Not enough headers provided to finish the accumulator")
|
|
|
|
|
2024-02-28 17:31:45 +00:00
|
|
|
func buildAccumulatorData*(
    headers: seq[BlockHeader]
): Result[(FinishedAccumulator, seq[EpochAccumulator]), string] =
  ## Like `buildAccumulator`, but also collect a snapshot of the current
  ## epoch accumulator each time it fills up, plus a final snapshot taken
  ## at the last pre-merge header. Errors if the headers run out before
  ## the block preceding `mergeBlockNumber` is seen.
  var acc: Accumulator
  var epochSnapshots: seq[EpochAccumulator]
  for hdr in headers:
    updateAccumulator(acc, hdr)

    # Epoch completed: record it before the accumulator rolls over.
    if acc.currentEpoch.len() == epochSize:
      epochSnapshots.add(acc.currentEpoch)

    if hdr.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
      # Record the epoch in progress as well, then finish.
      epochSnapshots.add(acc.currentEpoch)

      return ok((finishAccumulator(acc), epochSnapshots))

  err("Not enough headers provided to finish the accumulator")
|
2022-11-04 08:27:01 +00:00
|
|
|
|
|
|
|
func buildProof*(
    header: BlockHeader, epochAccumulators: seq[EpochAccumulator]
): Result[AccumulatorProof, string] =
  ## Select the epoch accumulator matching `header`'s epoch and delegate to
  ## the single-epoch `buildProof` overload.
  ## Asserts (Defect) when the epoch index is out of range of the provided
  ## accumulators.
  let idx = getEpochIndex(header)
  doAssert(idx < uint64(epochAccumulators.len()))
  buildProof(header, epochAccumulators[idx])
|
|
|
|
|
|
|
|
func buildHeaderWithProof*(
    header: BlockHeader, epochAccumulators: seq[EpochAccumulator]
): Result[BlockHeaderWithProof, string] =
  ## Construct the accumulator proof for a specific header.
  ## Returns the block header with the proof.
  ## Only pre-merge headers are supported; post-merge input yields an error.
  # Guard-clause form: reject post-merge headers up front.
  if not header.isPreMerge():
    return err("Cannot build accumulator proof for post merge header")

  let idx = getEpochIndex(header)
  doAssert(idx < uint64(epochAccumulators.len()))
  buildHeaderWithProof(header, epochAccumulators[idx])
|
|
|
|
|
|
|
|
func buildHeadersWithProof*(
    headers: seq[BlockHeader], epochAccumulators: seq[EpochAccumulator]
): Result[seq[BlockHeaderWithProof], string] =
  ## Build a header-with-proof for every header in `headers`, propagating
  ## the first failure via the `?` operator.
  var collected = newSeqOfCap[BlockHeaderWithProof](headers.len())
  for hdr in headers:
    collected.add(?buildHeaderWithProof(hdr, epochAccumulators))

  ok(collected)
|
2023-12-21 09:05:53 +00:00
|
|
|
|
|
|
|
proc getGenesisAlloc*(filePath: string): GenesisAlloc =
  ## Load network params from `filePath` and return its genesis allocation.
  ## NOTE: terminates the whole process (exit code 1, no message) when the
  ## file cannot be loaded — acceptable for test tooling only.
  var params: NetworkParams
  if not loadNetworkParams(filePath, params):
    quit(1)

  params.genesis.alloc
|
|
|
|
|
2024-02-28 17:31:45 +00:00
|
|
|
proc toState*(
    alloc: GenesisAlloc
): (AccountState, Table[EthAddress, StorageState]) {.raises: [RlpError].} =
  ## Build an in-memory account trie (plus per-account storage tries) from a
  ## genesis allocation, returning the account state and a table mapping each
  ## contract address to its storage state.
  ## Raises `RlpError` on RLP encoding failure.
  var accountTrie = initHexaryTrie(newMemoryDB())
  var storageStates = initTable[EthAddress, StorageState]()

  for address, genAccount in alloc:
    # Defaults for accounts without code: empty storage root and code hash.
    var storageRoot = EMPTY_ROOT_HASH
    var codeHash = EMPTY_CODE_HASH

    # NOTE(review): storage slots are only materialized for accounts that
    # have code; a code-less account's `storage` entries are ignored —
    # confirm this is intended for the genesis allocations used in tests.
    if genAccount.code.len() > 0:
      var storageTrie = initHexaryTrie(newMemoryDB())
      for slotKey, slotValue in genAccount.storage:
        # Trie keys are the keccak hash of the big-endian slot key;
        # values are RLP-encoded slot values.
        let key = keccakHash(toBytesBE(slotKey)).data
        let value = rlp.encode(slotValue)
        storageTrie.put(key, value)
      storageStates[address] = storageTrie.StorageState
      storageRoot = storageTrie.rootHash()
      codeHash = keccakHash(genAccount.code)

    let account = Account(
      nonce: genAccount.nonce,
      balance: genAccount.balance,
      storageRoot: storageRoot,
      codeHash: codeHash,
    )
    # Account trie: keyed by keccak(address), value is the RLP-encoded account.
    let key = keccakHash(address).data
    let value = rlp.encode(account)
    accountTrie.put(key, value)

  (accountTrie.AccountState, storageStates)
|