nimbus-eth2/beacon_chain/beacon_chain_db.nim

import
  json, tables, options,
  chronicles, serialization, json_serialization, eth/common/eth_types_json_serialization,
  spec/[datatypes, digest, crypto],
  eth/trie/db, ssz

type
  BeaconChainDB* = ref object
    ## Database storing resolved blocks and states - resolved blocks are
    ## blocks that form a chain back to the tail block.
    backend: TrieDatabaseRef

  DbKeyKind = enum
    kHashToState
    kHashToBlock
    kHeadBlock # Pointer to the most recent block selected by the fork choice
    kTailBlock ##\
    ## Pointer to the earliest finalized block - this is the genesis block when
    ## the chain starts, but might advance as the database gets pruned
    ## TODO: determine how aggressively the database should be pruned. For a
    ##       healthy network sync, we probably need to store blocks at least
    ##       past the weak subjectivity period.
    kBlockSlotStateRoot ## BlockSlot -> state_root mapping
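
# Every entry is stored under a key that starts with a one-byte `DbKeyKind`
# discriminator, followed by the payload: a block or state root, an
# SSZ-encoded (root, slot) pair, or nothing at all for the head/tail
# pointers. The `subkey` overloads below build these keys.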
func subkey(kind: DbKeyKind): array[1, byte] =
  result[0] = byte ord(kind)

func subkey[N: static int](kind: DbKeyKind, key: array[N, byte]):
    array[N + 1, byte] =
  result[0] = byte ord(kind)
  result[1 .. ^1] = key

func subkey(kind: DbKeyKind, key: uint64): array[sizeof(key) + 1, byte] =
  result[0] = byte ord(kind)
  copyMem(addr result[1], unsafeAddr key, sizeof(key))

func subkey(kind: type BeaconState, key: Eth2Digest): auto =
  subkey(kHashToState, key.data)

func subkey(kind: type BeaconBlock, key: Eth2Digest): auto =
  subkey(kHashToBlock, key.data)

func subkey(root: Eth2Digest, slot: Slot): auto =
  # TODO: Write the SSZ data straight into `ret` - we don't need two separate
  #       SSZ.encode calls; use memoryStream(ret) and SszWriter explicitly.
  var
    # takes care of endians..
    rootSSZ = SSZ.encode(root)
    slotSSZ = SSZ.encode(slot)

  var ret: array[1 + 32 + 8, byte]
  doAssert sizeof(ret) == 1 + rootSSZ.len + slotSSZ.len,
    "Can't sizeof this in VM"

  ret[0] = byte ord(kBlockSlotStateRoot)
  copyMem(addr ret[1], unsafeAddr rootSSZ[0], sizeof(root))
  copyMem(addr ret[1 + sizeof(root)], unsafeAddr slotSSZ[0], sizeof(slot))
  ret
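
# Resulting composite key layout (32-byte Eth2Digest, 8-byte Slot, both
# SSZ-encoded, so the slot ends up little-endian):
#   [ kind: 1 byte | root: 32 bytes | slot: 8 bytes ]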

proc init*(T: type BeaconChainDB, backend: TrieDatabaseRef): BeaconChainDB =
  T(backend: backend)

proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: BeaconBlock) =
  db.backend.put(subkey(type value, key), SSZ.encode(value))

proc putHead*(db: BeaconChainDB, key: Eth2Digest) =
  db.backend.put(subkey(kHeadBlock), key.data) # TODO head block?

proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
  # TODO prune old states - this is less easy than it seems as we never know
  #      when or if a particular state will become finalized.
  db.backend.put(subkey(type value, key), SSZ.encode(value))

proc putState*(db: BeaconChainDB, value: BeaconState) =
  db.putState(hash_tree_root(value), value)

proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
                   value: Eth2Digest) =
  db.backend.put(subkey(root, slot), value.data)

proc putBlock*(db: BeaconChainDB, value: BeaconBlock) =
  db.putBlock(signing_root(value), value)

proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.backend.del(subkey(BeaconBlock, key))

proc delState*(db: BeaconChainDB, key: Eth2Digest) =
  db.backend.del(subkey(BeaconState, key))

proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.backend.put(subkey(kHeadBlock), key.data) # TODO head block?

proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.backend.put(subkey(kTailBlock), key.data)

proc get(db: BeaconChainDB, key: auto, T: typedesc): Option[T] =
  ## Fetch `key` from the backend and SSZ-decode it as `T` - returns `none`
  ## both when the key is missing and when the stored value fails to decode.
  let res = db.backend.get(key)
  if res.len != 0:
    try:
      some(SSZ.decode(res, T))
    except SerializationError:
      none(T)
  else:
    none(T)

proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Option[BeaconBlock] =
  db.get(subkey(BeaconBlock, key), BeaconBlock)

proc getBlock*(db: BeaconChainDB, slot: Slot): Option[BeaconBlock] =
  # TODO implement this
  discard

proc getState*(db: BeaconChainDB, key: Eth2Digest): Option[BeaconState] =
  db.get(subkey(BeaconState, key), BeaconState)

proc getStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot):
    Option[Eth2Digest] =
  db.get(subkey(root, slot), Eth2Digest)

proc getHeadBlock*(db: BeaconChainDB): Option[Eth2Digest] =
  db.get(subkey(kHeadBlock), Eth2Digest)

proc getTailBlock*(db: BeaconChainDB): Option[Eth2Digest] =
  db.get(subkey(kTailBlock), Eth2Digest)

proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool =
  db.backend.contains(subkey(BeaconBlock, key))

proc containsState*(db: BeaconChainDB, key: Eth2Digest): bool =
  db.backend.contains(subkey(BeaconState, key))

iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
    tuple[root: Eth2Digest, blck: BeaconBlock] =
  ## Load the chain of ancestors of the block at `root` - blocks are yielded
  ## newest-first, starting with the block at `root` itself, so the oldest
  ## ancestor comes last.
  ##
  ## The iteration stops as soon as an ancestor cannot be found in the
  ## database.
  var root = root
  while (let blck = db.getBlock(root); blck.isSome()):
    yield (root, blck.get())
    root = blck.get().parent_root
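
when isMainModule:
  # Minimal usage sketch, not part of the module proper: store a
  # default-initialized block in an in-memory backend and read it back.
  # Assumes `newMemoryDB` from the already-imported eth/trie/db; the real
  # callers live in the beacon node and the test suite.
  let
    db = BeaconChainDB.init(newMemoryDB())
    blck = BeaconBlock()        # empty block, for illustration only
    root = signing_root(blck)

  db.putBlock(blck)
  db.putTailBlock(root)

  doAssert db.containsBlock(root)
  doAssert db.getTailBlock().get() == root

  # Walking ancestors stops as soon as a parent block is missing - here
  # right after the single block we just stored.
  for ancestorRoot, ancestor in db.getAncestors(root):
    echo "ancestor ", ancestorRoot, " at slot ", ancestor.slot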