Rename to HistoricalHashesAccumulator as per Portal spec (#2663)
commit 438e183586
parent debb68b3a7
@@ -19,7 +19,7 @@ type Era1DB* = ref object
   ## a linear history of pre-merge execution chain data.
   path: string
   network: string
-  accumulator: FinishedAccumulator
+  accumulator: FinishedHistoricalHashesAccumulator
   files: seq[Era1File]

 proc getEra1File(db: Era1DB, era: Era1): Result[Era1File, string] =

@@ -51,7 +51,10 @@ proc getEra1File(db: Era1DB, era: Era1): Result[Era1File, string] =
   ok(f)

 proc new*(
-    T: type Era1DB, path: string, network: string, accumulator: FinishedAccumulator
+    T: type Era1DB,
+    path: string,
+    network: string,
+    accumulator: FinishedHistoricalHashesAccumulator,
 ): Era1DB =
   Era1DB(path: path, network: network, accumulator: accumulator)

@@ -19,11 +19,13 @@ export results

 # Reading SSZ data from files

-proc readAccumulator*(file: string): Result[FinishedAccumulator, string] =
+proc readAccumulator*(
+    file: string
+): Result[FinishedHistoricalHashesAccumulator, string] =
   let encodedAccumulator = ?readAllFile(file).mapErr(toString)

   try:
-    ok(SSZ.decode(encodedAccumulator, FinishedAccumulator))
+    ok(SSZ.decode(encodedAccumulator, FinishedHistoricalHashesAccumulator))
   except SerializationError as e:
     err("Failed decoding accumulator: " & e.msg)

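A minimal usage sketch of the renamed proc (illustrative only, not part of the commit; the file name is hypothetical):

  let res = readAccumulator("historical_hashes_accumulator.ssz")
  if res.isOk():
    let accumulator: FinishedHistoricalHashesAccumulator = res.get()
    echo "accumulator root: ", hash_tree_root(accumulator)
  else:
    echo "failed reading accumulator: ", res.error
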
@@ -52,7 +52,7 @@ func toContentId(contentKey: ContentKeyByteList): ContentId =
   readUintBE[256](idHash.data)

 proc pruneDeprecatedAccumulatorRecords*(
-    accumulator: FinishedAccumulator, contentDB: ContentDB
+    accumulator: FinishedHistoricalHashesAccumulator, contentDB: ContentDB
 ) =
   info "Pruning deprecated accumulator records"

@@ -27,18 +27,18 @@ const

 type
   ## BlockHeader types
-  AccumulatorProof* = array[15, Digest]
+  HistoricalHashesAccumulatorProof* = array[15, Digest]

   BlockHeaderProofType* = enum
     none = 0x00 # An SSZ Union None
-    accumulatorProof = 0x01
+    historicalHashesAccumulatorProof = 0x01

   BlockHeaderProof* = object
     case proofType*: BlockHeaderProofType
     of none:
       discard
-    of accumulatorProof:
-      accumulatorProof*: AccumulatorProof
+    of historicalHashesAccumulatorProof:
+      historicalHashesAccumulatorProof*: HistoricalHashesAccumulatorProof

   BlockHeaderWithProof* = object
     header*: ByteList[MAX_HEADER_LENGTH] # RLP data

@@ -68,8 +68,10 @@ type
   ReceiptByteList* = ByteList[MAX_RECEIPT_LENGTH] # RLP data
   PortalReceipts* = List[ReceiptByteList, MAX_TRANSACTION_COUNT]

-func init*(T: type BlockHeaderProof, proof: AccumulatorProof): T =
-  BlockHeaderProof(proofType: accumulatorProof, accumulatorProof: proof)
+func init*(T: type BlockHeaderProof, proof: HistoricalHashesAccumulatorProof): T =
+  BlockHeaderProof(
+    proofType: historicalHashesAccumulatorProof, historicalHashesAccumulatorProof: proof
+  )

 func init*(T: type BlockHeaderProof): T =
   BlockHeaderProof(proofType: none)

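How the renamed SSZ union branch is constructed and inspected, as a sketch (not part of the commit; the zeroed proof is for illustration only):

  var proof: HistoricalHashesAccumulatorProof # array[15, Digest], zero-initialized
  let withProof = BlockHeaderProof.init(proof)
  doAssert withProof.proofType == historicalHashesAccumulatorProof
  let noProof = BlockHeaderProof.init() # post-merge headers: proofType none
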
@@ -31,7 +31,7 @@ type
     portalProtocol*: PortalProtocol
     contentDB*: ContentDB
     contentQueue*: AsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])]
-    accumulator*: FinishedAccumulator
+    accumulator*: FinishedHistoricalHashesAccumulator
     historicalRoots*: HistoricalRoots
     processContentLoop: Future[void]
     statusLogLoop: Future[void]

@@ -636,7 +636,7 @@ proc new*(
     baseProtocol: protocol.Protocol,
     contentDB: ContentDB,
     streamManager: StreamManager,
-    accumulator: FinishedAccumulator,
+    accumulator: FinishedHistoricalHashesAccumulator,
     historicalRoots: HistoricalRoots = loadHistoricalRoots(),
     bootstrapRecords: openArray[Record] = [],
     portalConfig: PortalProtocolConfig = defaultPortalProtocolConfig,

@@ -17,9 +17,8 @@ import

 export ssz_serialization, merkleization, proofs, eth_types_rlp

-# Header Accumulator, as per specification:
-# https://github.com/ethereum/portal-network-specs/blob/master/history-network.md#the-header-accumulator
-# But with the adjustment to finish the accumulator at merge point.
+# HistoricalHashesAccumulator, as per specification:
+# https://github.com/ethereum/portal-network-specs/blob/master/history/history-network.md#the-historical-hashes-accumulator

 const
   EPOCH_SIZE* = 8192 # block roots per epoch record

@@ -60,18 +59,17 @@ type
   # obviously much faster, so this second type is added for this usage.
   EpochRecordCached* = HashList[HeaderRecord, EPOCH_SIZE]

-  # HistoricalHashesAccumulator
-  Accumulator* = object
+  HistoricalHashesAccumulator* = object
     historicalEpochs*: List[Bytes32, int(MAX_HISTORICAL_EPOCHS)]
     currentEpoch*: EpochRecord

   # HistoricalHashesAccumulator in its final state
-  FinishedAccumulator* = object
+  FinishedHistoricalHashesAccumulator* = object
     historicalEpochs*: List[Bytes32, int(MAX_HISTORICAL_EPOCHS)]
     currentEpoch*: EpochRecord

-func init*(T: type Accumulator): T =
-  Accumulator(
+func init*(T: type HistoricalHashesAccumulator): T =
+  HistoricalHashesAccumulator(
     historicalEpochs: List[Bytes32, int(MAX_HISTORICAL_EPOCHS)].init(@[]),
     currentEpoch: EpochRecord.init(@[]),
   )

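The renamed types keep the build flow unchanged; a sketch assuming a hypothetical `preMergeHeaders: seq[BlockHeader]` ending exactly at the merge boundary (not part of the commit):

  var accumulator = HistoricalHashesAccumulator.init()
  for header in preMergeHeaders:
    updateAccumulator(accumulator, header)
  # Finalize once all pre-merge headers have been added:
  let finished: FinishedHistoricalHashesAccumulator = finishAccumulator(accumulator)
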
@@ -81,7 +79,7 @@ func getEpochRecordRoot*(headerRecords: openArray[HeaderRecord]): Digest =

   hash_tree_root(epochRecord)

-func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
+func updateAccumulator*(a: var HistoricalHashesAccumulator, header: BlockHeader) =
   doAssert(
     header.number < mergeBlockNumber, "No post merge blocks for header accumulator"
   )

@@ -95,7 +93,7 @@ func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
   # TODO: It is a bit annoying to require an extra header + update call to
   # finish an epoch. However, if we were to move this after adding the
   # `HeaderRecord`, there would be no way to get the current total difficulty,
-  # unless another field is introduced in the `Accumulator` object.
+  # unless another field is introduced in the `HistoricalHashesAccumulator` object.
   if a.currentEpoch.len() == EPOCH_SIZE:
     let epochHash = hash_tree_root(a.currentEpoch)

@@ -110,17 +108,19 @@ func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
   let res = a.currentEpoch.add(headerRecord)
   doAssert(res, "Can't fail because of currentEpoch length check")

-func finishAccumulator*(a: var Accumulator): FinishedAccumulator =
+func finishAccumulator*(
+    a: var HistoricalHashesAccumulator
+): FinishedHistoricalHashesAccumulator =
   # doAssert(a.currentEpoch[^2].totalDifficulty < TERMINAL_TOTAL_DIFFICULTY)
   # doAssert(a.currentEpoch[^1].totalDifficulty >= TERMINAL_TOTAL_DIFFICULTY)
   let epochHash = hash_tree_root(a.currentEpoch)

   doAssert(a.historicalEpochs.add(epochHash.data))

-  FinishedAccumulator(historicalEpochs: a.historicalEpochs)
+  FinishedHistoricalHashesAccumulator(historicalEpochs: a.historicalEpochs)

 ## Calls and helper calls for building header proofs and verifying headers
-## against the Accumulator and the header proofs.
+## against the HistoricalHashesAccumulator and the header proofs.

 func getEpochIndex*(blockNumber: uint64): uint64 =
   blockNumber div EPOCH_SIZE

@@ -144,7 +144,9 @@ func isPreMerge*(header: BlockHeader): bool =
   isPreMerge(header.number)

 func verifyProof(
-    a: FinishedAccumulator, header: BlockHeader, proof: openArray[Digest]
+    a: FinishedHistoricalHashesAccumulator,
+    header: BlockHeader,
+    proof: openArray[Digest],
 ): bool =
   let
     epochIndex = getEpochIndex(header)

@@ -153,13 +155,14 @@ func verifyProof(
     leave = hash_tree_root(header.blockHash())
     headerRecordIndex = getHeaderRecordIndex(header, epochIndex)

-    # TODO: Implement more generalized `get_generalized_index`
     gIndex = GeneralizedIndex(EPOCH_SIZE * 2 * 2 + (headerRecordIndex * 2))

   verify_merkle_multiproof(@[leave], proof, @[gIndex], epochRecordHash)

 func verifyAccumulatorProof*(
-    a: FinishedAccumulator, header: BlockHeader, proof: AccumulatorProof
+    a: FinishedHistoricalHashesAccumulator,
+    header: BlockHeader,
+    proof: HistoricalHashesAccumulatorProof,
 ): Result[void, string] =
   if header.isPreMerge():
     # Note: The proof is typed with correct depth, so no check on this is

@@ -172,14 +175,14 @@ func verifyAccumulatorProof*(
     err("Cannot verify post merge header with accumulator proof")

 func verifyHeader*(
-    a: FinishedAccumulator, header: BlockHeader, proof: BlockHeaderProof
+    a: FinishedHistoricalHashesAccumulator, header: BlockHeader, proof: BlockHeaderProof
 ): Result[void, string] =
   case proof.proofType
-  of BlockHeaderProofType.accumulatorProof:
-    a.verifyAccumulatorProof(header, proof.accumulatorProof)
+  of BlockHeaderProofType.historicalHashesAccumulatorProof:
+    a.verifyAccumulatorProof(header, proof.historicalHashesAccumulatorProof)
   of BlockHeaderProofType.none:
     if header.isPreMerge():
-      err("Pre merge header requires AccumulatorProof")
+      err("Pre merge header requires HistoricalHashesAccumulatorProof")
     else:
       # TODO:
       # Currently there is no proof solution for verifying headers post-merge.

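Call-site sketch for the renamed branch handling (not part of the commit; `accumulator`, `header` and `headerProof` are assumed to be in scope):

  let res = accumulator.verifyHeader(header, headerProof)
  if res.isErr():
    echo "header verification failed: ", res.error
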
@@ -191,17 +194,16 @@ func verifyHeader*(

 func buildProof*(
     header: BlockHeader, epochRecord: EpochRecord | EpochRecordCached
-): Result[AccumulatorProof, string] =
+): Result[HistoricalHashesAccumulatorProof, string] =
   doAssert(header.isPreMerge(), "Must be pre merge header")

   let
     epochIndex = getEpochIndex(header)
     headerRecordIndex = getHeaderRecordIndex(header, epochIndex)

-    # TODO: Implement more generalized `get_generalized_index`
     gIndex = GeneralizedIndex(EPOCH_SIZE * 2 * 2 + (headerRecordIndex * 2))

-  var proof: AccumulatorProof
+  var proof: HistoricalHashesAccumulatorProof
   ?epochRecord.build_proof(gIndex, proof)

   ok(proof)

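A sketch connecting buildProof to the renamed proof type (not part of the commit; `header` and `epochRecord` are assumed to be in scope):

  let proofRes = buildProof(header, epochRecord)
  if proofRes.isOk():
    # Wrap the proof in the union type for use on the network:
    let headerProof = BlockHeaderProof.init(proofRes.get())
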
@@ -52,9 +52,9 @@ const

   historicalRootsSSZ* = slurp(portalConfigDir / "historical_roots.ssz")

-func loadAccumulator*(): FinishedAccumulator =
+func loadAccumulator*(): FinishedHistoricalHashesAccumulator =
   try:
-    SSZ.decode(historicalHashesAccumulatorSSZ, FinishedAccumulator)
+    SSZ.decode(historicalHashesAccumulatorSSZ, FinishedHistoricalHashesAccumulator)
   except SerializationError as err:
     raiseAssert "Invalid baked-in accumulator: " & err.msg

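loadAccumulator exposes the accumulator baked into the binary; a usage sketch (not part of the commit):

  let accumulator: FinishedHistoricalHashesAccumulator = loadAccumulator()
  echo "baked-in epoch roots: ", accumulator.historicalEpochs.len()
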
@@ -57,7 +57,7 @@ suite "Historical Hashes Accumulator":

     block: # Test invalid headers
       # Post merge block number must fail (> than latest header in accumulator)
-      var proof: AccumulatorProof
+      var proof: HistoricalHashesAccumulatorProof
       let header = BlockHeader(number: mergeBlockNumber)
       check verifyAccumulatorProof(accumulator, header, proof).isErr()

@@ -72,7 +72,7 @@ suite "Historical Hashes Accumulator":
      check verifyAccumulatorProof(accumulator, header, proof.get()).isErr()

    block: # Test invalid proofs
-      var proof: AccumulatorProof
+      var proof: HistoricalHashesAccumulatorProof

      for i in headersToTest:
        check verifyAccumulatorProof(accumulator, headers[i], proof).isErr()

@@ -45,7 +45,7 @@ suite "Historical Hashes Accumulator Root":
      let header = res.get()
      headers[header.number] = header

-    var accumulator: Accumulator
+    var accumulator: HistoricalHashesAccumulator

    for i, hash in hashTreeRoots:
      updateAccumulator(accumulator, headers[i])

@@ -24,7 +24,9 @@ type HistoryNode = ref object
   historyNetwork*: HistoryNetwork

 proc newHistoryNode(
-    rng: ref HmacDrbgContext, port: int, accumulator: FinishedAccumulator
+    rng: ref HmacDrbgContext,
+    port: int,
+    accumulator: FinishedHistoricalHashesAccumulator,
 ): HistoryNode =
   let
     node = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(port))

@@ -31,8 +31,10 @@ proc buildHeadersWithProof*(

   ok(blockHeadersWithProof)

-func buildAccumulator*(headers: seq[BlockHeader]): Result[FinishedAccumulator, string] =
-  var accumulator: Accumulator
+func buildAccumulator*(
+    headers: seq[BlockHeader]
+): Result[FinishedHistoricalHashesAccumulator, string] =
+  var accumulator: HistoricalHashesAccumulator
   for header in headers:
     updateAccumulator(accumulator, header)

@@ -43,8 +45,8 @@ func buildAccumulator*(headers: seq[BlockHeader]): Result[FinishedAccumulator, s

 func buildAccumulatorData*(
     headers: seq[BlockHeader]
-): Result[(FinishedAccumulator, seq[EpochRecord]), string] =
-  var accumulator: Accumulator
+): Result[(FinishedHistoricalHashesAccumulator, seq[EpochRecord]), string] =
+  var accumulator: HistoricalHashesAccumulator
   var epochRecords: seq[EpochRecord]
   for header in headers:
     updateAccumulator(accumulator, header)

@@ -61,7 +63,7 @@ func buildAccumulatorData*(

 func buildProof*(
     header: BlockHeader, epochRecords: seq[EpochRecord]
-): Result[AccumulatorProof, string] =
+): Result[HistoricalHashesAccumulatorProof, string] =
   let epochIndex = getEpochIndex(header)
   doAssert(epochIndex < uint64(epochRecords.len()))
   let epochRecord = epochRecords[epochIndex]

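The test helpers compose as before, only under the new names; a sketch assuming a hypothetical `headers: seq[BlockHeader]` (not part of the commit):

  let (accumulator, epochRecords) = buildAccumulatorData(headers).get()
  let proof = buildProof(headers[0], epochRecords).get()
  doAssert accumulator.verifyAccumulatorProof(headers[0], proof).isOk()
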
@@ -119,7 +119,9 @@ proc newStateNode*(
       "", uint32.high, RadiusConfig(kind: Dynamic), node.localNode.id, inMemory = true
     )
     sm = StreamManager.new(node)
-    hn = HistoryNetwork.new(PortalNetwork.none, node, db, sm, FinishedAccumulator())
+    hn = HistoryNetwork.new(
+      PortalNetwork.none, node, db, sm, FinishedHistoricalHashesAccumulator()
+    )
     sn =
       StateNetwork.new(PortalNetwork.none, node, db, sm, historyNetwork = Opt.some(hn))

@@ -199,7 +199,7 @@ proc cmdExportEra1(config: ExporterConf) =
   # error cases are fatal. But maybe we could throw proper errors still.
   var group = Era1Group.init(e2, startNumber).get()

-  # Header records to build the accumulator root
+  # Header records to build the HistoricalHashesAccumulator root
   var headerRecords: seq[historical_hashes_accumulator.HeaderRecord]
   for blockNumber in startNumber .. endNumber:
     let blck =

@@ -234,7 +234,7 @@ proc cmdExportEra1(config: ExporterConf) =
   if completed:
     let name = era1FileName("mainnet", era, accumulatorRoot)
     # We cannot check for the exact file any earlier as we need to know the
-    # accumulator root.
+    # HistoricalHashesAccumulator root.
     # TODO: Could scan for file with era number in it.
     if isFile(name):
       info "Era1 file already exists", era, name

@@ -381,7 +381,8 @@ when isMainModule:
     # to build it.
     let accumulatorFile = dataDir / config.accumulatorFileName
     if isFile(accumulatorFile):
-      notice "Not building accumulator, file already exists", file = accumulatorFile
+      notice "Not building HistoricalHashesAccumulator, file already exists",
+        file = accumulatorFile
       quit 1

     # Lets verify if the necessary files exists before starting to build the

@@ -394,8 +395,8 @@ when isMainModule:

     proc buildAccumulator(
         dataDir: string, writeEpochRecords = false
-    ): Result[FinishedAccumulator, string] =
-      var accumulator: Accumulator
+    ): Result[FinishedHistoricalHashesAccumulator, string] =
+      var accumulator: HistoricalHashesAccumulator
       for i in 0 ..< preMergeEpochs:
         let file =
           try:

@@ -453,43 +454,45 @@ when isMainModule:

          if blockHeader.number == mergeBlockNumber - 1:
            let finishedAccumulator = finishAccumulator(accumulator)
-            info "Updated last epoch, finished building master accumulator",
+            info "Updated last epoch, finished building HistoricalHashesAccumulator",
              epoch = i
            return ok(finishedAccumulator)
        else:
          warn "Skipping record, not a block header", typ = toHex(header.typ)

-      err("Not enough headers provided to finish the accumulator")
+      err("Not enough headers provided to finish the HistoricalHashesAccumulator")

    let accumulatorRes = buildAccumulator(dataDir, config.writeEpochRecords)
    if accumulatorRes.isErr():
-      fatal "Could not build accumulator", error = accumulatorRes.error
+      fatal "Could not build HistoricalHashesAccumulator",
+        error = accumulatorRes.error
      quit 1
    let accumulator = accumulatorRes.get()

    let res = io2.writeFile(accumulatorFile, SSZ.encode(accumulator))
    if res.isErr():
-      error "Failed writing accumulator to file",
+      error "Failed writing HistoricalHashesAccumulator to file",
        file = accumulatorFile, error = res.error
      quit 1
    else:
-      notice "Succesfully wrote Historical Hashes Accumulator to file",
+      notice "Succesfully wrote HistoricalHashesAccumulator to file",
        file = accumulatorFile
  of HistoryCmd.printAccumulatorData:
    let file = dataDir / config.accumulatorFileNamePrint

    let res = readAccumulator(file)
    if res.isErr():
-      fatal "Failed reading accumulator from file", error = res.error, file
+      fatal "Failed reading HistoricalHashesAccumulator from file",
+        error = res.error, file
      quit 1

    let
      accumulator = res.get()
      accumulatorRoot = hash_tree_root(accumulator)

-    info "Accumulator decoded successfully", root = accumulatorRoot
+    info "HistoricalHashesAccumulator decoded successfully", root = accumulatorRoot

-    echo "Master Accumulator:"
+    echo "HistoricalHashesAccumulator:"
    echo "-------------------"
    echo &"Root: {accumulatorRoot}"
    echo ""