rm beacon_chain/networking/network_metadata_downloads.nim
This commit is contained in:
parent 7d329f877f
commit 8a605ab9e1

@@ -13,60 +13,41 @@ import
     state_transition],
   ./spec/datatypes/[phase0, altair, bellatrix],
   "."/filepath

 from ./spec/datatypes/capella import BeaconState
 from ./spec/datatypes/deneb import TrustedSignedBeaconBlock

 export
-  phase0, altair, eth2_ssz_serialization, eth2_merkleization, kvstore,
+  eth2_ssz_serialization, eth2_merkleization, kvstore,
   kvstore_sqlite3, deposit_snapshots

 type
   DbSeq[T] = object
     insertStmt: SqliteStmt[openArray[byte], void]
     selectStmt: SqliteStmt[int64, openArray[byte]]
     recordCount: int64

   FinalizedBlocks = object
     insertStmt: SqliteStmt[(int64, array[32, byte]), void]
     selectStmt: SqliteStmt[int64, array[32, byte]]
     selectAllStmt: SqliteStmt[NoParams, (int64, array[32, byte])]

     low: Opt[Slot]
     high: Opt[Slot]

   DepositsSeq = DbSeq[DepositData]

   BeaconChainDBV0 = ref object
     backend: KvStoreRef # kvstore
     stateStore: KvStoreRef # state_no_validators

   BeaconChainDB* = ref object
     db: SqStoreRef

     v0: BeaconChainDBV0
     genesisDeposits: DepositsSeq

     immutableValidatorsDb: DbSeq[ImmutableValidatorDataDb2]
     immutableValidators: seq[ImmutableValidatorData2]

     checkpoint: proc() {.gcsafe, raises: [].}

     keyValues: KvStoreRef # Random stuff using DbKeyKind - suitable for small values mainly!
     blocks: array[ConsensusFork, KvStoreRef] # BlockRoot -> TrustedSignedBeaconBlock

     blobs: KvStoreRef # (BlockRoot -> BlobSidecar)

     stateRoots: KvStoreRef # (Slot, BlockRoot) -> StateRoot

     statesNoVal: array[ConsensusFork, KvStoreRef] # StateRoot -> ForkBeaconStateNoImmutableValidators

     stateDiffs: KvStoreRef ##\

     summaries: KvStoreRef

     finalizedBlocks: FinalizedBlocks

   DbKeyKind = enum
     kHashToState
     kHashToBlock

@@ -83,31 +64,23 @@ type
     kHashToStateOnlyMutableValidators
     kBackfillBlock # Obsolete, was in `unstable` for a while, but never released
     kDepositTreeSnapshot # EIP-4881-compatible deposit contract state snapshot

   BeaconBlockSummary = object
     slot: Slot
     parent_root: Eth2Digest

 func subkey(kind: DbKeyKind): array[1, byte] =
   result[0] = byte ord(kind)

 func subkey[N: static int](kind: DbKeyKind, key: array[N, byte]):
     array[N + 1, byte] =
   result[0] = byte ord(kind)
   result[1 .. ^1] = key

 func subkey(kind: type phase0.BeaconState, key: Eth2Digest): auto =
   subkey(kHashToState, key.data)

 func subkey(kind: type phase0.SignedBeaconBlock, key: Eth2Digest): auto =
   subkey(kHashToBlock, key.data)

 func subkey(kind: type BeaconBlockSummary, key: Eth2Digest): auto =
   subkey(kHashToBlockSummary, key.data)

 template expectDb(x: auto): untyped =
   x.expect("working database (disk broken/full?)")

 proc init[T](
     Seq: type DbSeq[T], db: SqStoreRef, name: string,
     readOnly = false): KvResult[Seq] =
@@ -126,60 +99,46 @@ proc init[T](
       insertStmt = db.prepareStmt(
         "INSERT INTO '" & name & "'(value) VALUES (?);",
         openArray[byte], void, managed = false).expect("this is a valid statement")

       selectStmt = db.prepareStmt(
         "SELECT value FROM '" & name & "' WHERE id = ?;",
         int64, openArray[byte], managed = false).expect("this is a valid statement")

       countStmt = db.prepareStmt(
         "SELECT COUNT(1) FROM '" & name & "';",
         NoParams, int64, managed = false).expect("this is a valid statement")

     var recordCount = int64 0
     let countQueryRes = countStmt.exec do (res: int64):
       recordCount = res

     let found = ? countQueryRes
     if not found:
       return err("Cannot count existing items")
     countStmt.dispose()

     ok(Seq(insertStmt: insertStmt,
            selectStmt: selectStmt,
            recordCount: recordCount))
   else:
     ok(Seq())

 proc close(s: var DbSeq) =
   s.insertStmt.dispose()
   s.selectStmt.dispose()

   reset(s)

 proc add[T](s: var DbSeq[T], val: T) =
   doAssert(distinctBase(s.insertStmt) != nil, "database closed or table not preset")
   let bytes = SSZ.encode(val)
   s.insertStmt.exec(bytes).expectDb()
   inc s.recordCount

 template len[T](s: DbSeq[T]): int64 =
   s.recordCount

 proc get[T](s: DbSeq[T], idx: int64): T =
   doAssert(distinctBase(s.selectStmt) != nil, $T & " table not present for read at " & $(idx))

   let resultAddr = addr result

   let queryRes = s.selectStmt.exec(idx + 1) do (recordBytes: openArray[byte]):
     try:
       resultAddr[] = decode(SSZ, recordBytes, T)
     except SerializationError as exc:
       raiseAssert "cannot decode " & $T & " at index " & $idx & ": " & exc.msg

   let found = queryRes.expectDb()
   if not found:
     raiseAssert $T & " not found at index " & $(idx)

 proc init(T: type FinalizedBlocks, db: SqStoreRef, name: string,
           readOnly = false): KvResult[T] =
   let hasTable = if db.readOnly or readOnly:
@@ -191,45 +150,36 @@ proc init(T: type FinalizedBlocks, db: SqStoreRef, name: string,
         value BLOB NOT NULL
       );""")
       true

   if hasTable:
     let
       insertStmt = db.prepareStmt(
         "REPLACE INTO '" & name & "'(id, value) VALUES (?, ?);",
         (int64, array[32, byte]), void, managed = false).expect("this is a valid statement")

       selectStmt = db.prepareStmt(
         "SELECT value FROM '" & name & "' WHERE id = ?;",
         int64, array[32, byte], managed = false).expect("this is a valid statement")
       selectAllStmt = db.prepareStmt(
         "SELECT id, value FROM '" & name & "' ORDER BY id;",
         NoParams, (int64, array[32, byte]), managed = false).expect("this is a valid statement")

       maxIdStmt = db.prepareStmt(
         "SELECT MAX(id) FROM '" & name & "';",
         NoParams, Option[int64], managed = false).expect("this is a valid statement")

       minIdStmt = db.prepareStmt(
         "SELECT MIN(id) FROM '" & name & "';",
         NoParams, Option[int64], managed = false).expect("this is a valid statement")

     var
       low, high: Opt[Slot]
       tmp: Option[int64]

     for rowRes in minIdStmt.exec(tmp):
       expectDb rowRes
       if tmp.isSome():
         low.ok(Slot(tmp.get()))

     for rowRes in maxIdStmt.exec(tmp):
       expectDb rowRes
       if tmp.isSome():
         high.ok(Slot(tmp.get()))

     maxIdStmt.dispose()
     minIdStmt.dispose()

     ok(T(insertStmt: insertStmt,
          selectStmt: selectStmt,
          selectAllStmt: selectAllStmt,
@@ -237,22 +187,18 @@ proc init(T: type FinalizedBlocks, db: SqStoreRef, name: string,
          high: high))
   else:
     ok(T())

 proc close(s: var FinalizedBlocks) =
   s.insertStmt.dispose()
   s.selectStmt.dispose()
   s.selectAllStmt.dispose()
   reset(s)

 proc get(s: FinalizedBlocks, idx: Slot): Opt[Eth2Digest] =
   if distinctBase(s.selectStmt) == nil: return Opt.none(Eth2Digest)
   var row: s.selectStmt.Result
   for rowRes in s.selectStmt.exec(int64(idx), row):
     expectDb rowRes
     return ok(Eth2Digest(data: row))

   return Opt.none(Eth2Digest)

 proc loadImmutableValidators(vals: DbSeq[ImmutableValidatorDataDb2]): seq[ImmutableValidatorData2] =
   result = newSeqOfCap[ImmutableValidatorData2](vals.len())
   for i in 0 ..< vals.len:
@@ -260,14 +206,12 @@ proc loadImmutableValidators(vals: DbSeq[ImmutableValidatorDataDb2]): seq[Immuta
     result.add ImmutableValidatorData2(
       pubkey: tmp.pubkey.loadValid(),
       withdrawal_credentials: tmp.withdrawal_credentials)

 proc new(T: type BeaconChainDBV0,
          db: SqStoreRef,
          readOnly = false
     ): BeaconChainDBV0 =
   BeaconChainDBV0(
   )

 proc new*(T: type BeaconChainDB,
           db: SqStoreRef,
           cfg: RuntimeConfig = defaultRuntimeConfig
@@ -275,13 +219,11 @@ proc new*(T: type BeaconChainDB,
   if not db.readOnly:
     discard db.exec("DROP TABLE IF EXISTS deposits;")
     discard db.exec("DROP TABLE IF EXISTS validatorIndexFromPubKey;")

   var
     genesisDepositsSeq =
       DbSeq[DepositData].init(db, "genesis_deposits").expectDb()
     immutableValidatorsDb =
       DbSeq[ImmutableValidatorDataDb2].init(db, "immutable_validators2").expectDb()

     keyValues = kvStore db.openKvStore("key_values", true).expectDb()
     blocks = [
       kvStore db.openKvStore("blocks").expectDb(),
@@ -290,9 +232,7 @@ proc new*(T: type BeaconChainDB,
       kvStore db.openKvStore("capella_blocks").expectDb(),
       kvStore db.openKvStore("deneb_blocks").expectDb(),
       kvStore db.openKvStore("electra_blocks").expectDb()]

     stateRoots = kvStore db.openKvStore("state_roots", true).expectDb()

     statesNoVal = [
       kvStore db.openKvStore("state_no_validators2").expectDb(),
       kvStore db.openKvStore("altair_state_no_validators").expectDb(),
@@ -300,19 +240,15 @@ proc new*(T: type BeaconChainDB,
       kvStore db.openKvStore("capella_state_no_validator_pubkeys").expectDb(),
       kvStore db.openKvStore("deneb_state_no_validator_pubkeys").expectDb(),
       kvStore db.openKvStore("electra_state_no_validator_pubkeys").expectDb()]

     stateDiffs = kvStore db.openKvStore("state_diffs").expectDb()
     summaries = kvStore db.openKvStore("beacon_block_summaries", true).expectDb()
     finalizedBlocks = FinalizedBlocks.init(db, "finalized_blocks").expectDb()

   var blobs : KvStoreRef
   if cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
     blobs = kvStore db.openKvStore("deneb_blobs").expectDb()

   block:
     var immutableValidatorsDb1 = DbSeq[ImmutableValidatorData].init(
       db, "immutable_validators", readOnly = true).expectDb()

     if immutableValidatorsDb.len() < immutableValidatorsDb1.len():
       while immutableValidatorsDb.len() < immutableValidatorsDb1.len():
         let val = immutableValidatorsDb1.get(immutableValidatorsDb.len())
@@ -321,10 +257,8 @@ proc new*(T: type BeaconChainDB,
           withdrawal_credentials: val.withdrawal_credentials
         ))
     immutableValidatorsDb1.close()

     if not db.readOnly:
       discard db.exec("DROP TABLE IF EXISTS immutable_validators;")

   T(
     db: db,
     v0: BeaconChainDBV0.new(db, readOnly = true),
@@ -341,7 +275,6 @@ proc new*(T: type BeaconChainDB,
     summaries: summaries,
     finalizedBlocks: finalizedBlocks
   )

 proc new*(T: type BeaconChainDB,
           dir: string,
           cfg: RuntimeConfig = defaultRuntimeConfig,
@@ -355,7 +288,6 @@ proc new*(T: type BeaconChainDB,
     else:
       if (let res = secureCreatePath(dir); res.isErr):
         quit 1

       SqStoreRef.init(
         dir, "nbc", readOnly = readOnly, manualCheckpoint = true).expectDb()
   BeaconChainDB.new(db, cfg)

@@ -1,12 +1,3 @@
-# beacon_chain
-# Copyright (c) 2018-2024 Status Research & Development GmbH
-# Licensed and distributed under either of
-# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
-# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
-# at your option. This file may not be copied, modified, or distributed except according to those terms.
-
-{.push raises: [].}
-
 import
   std/[sequtils, strutils, os],
   stew/[byteutils, objects], stew/shims/macros, nimcrypto/hash,
@@ -476,3 +467,17 @@ else:

 func bakedGenesisValidatorsRoot*(metadata: Eth2NetworkMetadata): Opt[Eth2Digest] =
   Opt.none Eth2Digest
+
+import stew/io2
+
+proc fetchGenesisBytes*(
+    metadata: Eth2NetworkMetadata): seq[byte] =
+  case metadata.genesis.kind
+  of NoGenesis:
+    raiseAssert "fetchGenesisBytes should be called only when metadata.hasGenesis is true"
+  of BakedIn:
+    result = @(metadata.genesis.bakedBytes)
+  of BakedInUrl:
+    raiseAssert "genesis state downloading unsupported"
+  of UserSuppliedFile:
+    result = readAllBytes(metadata.genesis.path).tryGet()
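For reference, a minimal sketch of how calling code might use the new synchronous accessor. This is not part of the commit; it only reuses names (getMetadataForNetwork, hasGenesis, fetchGenesisBytes) that appear elsewhere in this diff:

  # Hypothetical usage (not in this commit): load the baked-in genesis
  # state bytes for a known network and report their size.
  let metadata = getMetadataForNetwork("holesky")
  if metadata.hasGenesis:
    let genesisBytes = metadata.fetchGenesisBytes()
    echo "genesis state size: ", genesisBytes.len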

beacon_chain/networking/network_metadata_downloads.nim (deleted)
@@ -1,82 +0,0 @@
-# beacon_chain
-# Copyright (c) 2023-2024 Status Research & Development GmbH
-# Licensed and distributed under either of
-# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
-# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
-# at your option. This file may not be copied, modified, or distributed except according to those terms.
-
-import
-  std/uri,
-  stew/io2, chronos, chronos/apps/http/httpclient, snappy,
-  ../spec/[digest, forks], ../spec/datatypes/base
-
-import ./network_metadata
-export network_metadata
-
-type
-  HttpFetchError* = object of CatchableError
-    status*: int
-
-  DigestMismatchError* = object of CatchableError
-
-proc downloadFile(url: Uri): Future[seq[byte]] {.async.} =
-  var httpSession = HttpSessionRef.new()
-  let response = await httpSession.fetch(url)
-  if response[0] == 200:
-    return response[1]
-  else:
-    raise (ref HttpFetchError)(
-      msg: "Unexpected status code " & $response[0] & " when fetching " & $url,
-      status: response[0])
-
-proc fetchGenesisBytes*(
-    metadata: Eth2NetworkMetadata,
-    genesisStateUrlOverride = none(Uri)): Future[seq[byte]] {.async.} =
-  case metadata.genesis.kind
-  of NoGenesis:
-    raiseAssert "fetchGenesisBytes should be called only when metadata.hasGenesis is true"
-  of BakedIn:
-    result = @(metadata.genesis.bakedBytes)
-  of BakedInUrl:
-    result = await downloadFile(genesisStateUrlOverride.get(parseUri metadata.genesis.url))
-    # Under the built-in default URL, we serve a snappy-encoded BeaconState in order
-    # to reduce the size of the downloaded file with roughly 50% (this precise ratio
-    # depends on the number of validator recors). The user is still free to provide
-    # any URL which may serve an uncompressed state (e.g. a Beacon API endpoint)
-    #
-    # Since a SSZ-encoded BeaconState will start with a LittleEndian genesis time
-    # (64 bits) while a snappy framed stream will always start with a fixed header
-    # that will decoded as a timestamp with the value 5791996851603375871 (year 2153).
-    #
-    # TODO: A more complete solution will implement compression on the HTTP level,
-    # by relying on the Content-Encoding header to determine the compression
-    # algorithm. The detection method used here will not interfere with such
-    # an implementation and it may remain useful when dealing with misconfigured
-    # HTTP servers.
-    if result.isSnappyFramedStream:
-      result = decodeFramed(result)
-    let state = newClone(readSszForkedHashedBeaconState(metadata.cfg, result))
-    withState(state[]):
-      if forkyState.root != metadata.genesis.digest:
-        raise (ref DigestMismatchError)(
-          msg: "The downloaded genesis state cannot be verified (checksum mismatch)")
-  of UserSuppliedFile:
-    result = readAllBytes(metadata.genesis.path).tryGet()
-
-proc sourceDesc*(metadata: GenesisMetadata): string =
-  case metadata.kind
-  of NoGenesis:
-    "no genesis"
-  of BakedIn:
-    metadata.networkName
-  of BakedInUrl:
-    metadata.url
-  of UserSuppliedFile:
-    metadata.path
-
-when isMainModule:
-  let holeskyMetadata = getMetadataForNetwork("holesky")
-  io2.writeFile(
-    "holesky-genesis.ssz",
-    waitFor holeskyMetadata.fetchGenesisBytes()
-  ).expect("success")

@@ -4,7 +4,6 @@ import
   std/[os, times],
   chronos,
   stew/io2,
-  ./networking/network_metadata_downloads,
   ./spec/datatypes/[altair, bellatrix, phase0],
   ./spec/deposit_snapshots,
   ./validators/[keystore_management, beacon_validators],
@@ -76,7 +75,7 @@ proc init*(T: type BeaconNode,
       quit 1
   elif metadata.hasGenesis:
     try:
-      await metadata.fetchGenesisBytes(config.genesisStateUrl)
+      metadata.fetchGenesisBytes()
     except CatchableError as err:
       quit 1
   else:

@@ -49,6 +49,10 @@ switch("passC", "-fvisibility=hidden")
 switch("passC", "-fno-omit-frame-pointer")
 switch("passL", "-fno-omit-frame-pointer")

+when false:
+  --define:nimStackTraceOverride
+  switch("import", "libbacktrace")
+
 when false:
   switch("passC", "-fstack-protector-all")
   switch("passL", "-fstack-protector-all")
@@ -61,10 +65,6 @@ when false:
   switch("define", "nim_compiler_path=" & currentDir & "env.sh nim")
   switch("define", "withoutPCRE")

-when true:
-  --define:nimStackTraceOverride
-  switch("import", "libbacktrace")
-
 var canEnableDebuggingSymbols = true
 if defined(macosx):
   # The default open files limit is too low on macOS (512), breaking the