import
  os, json, tables, options,
  chronicles, json_serialization, eth/common/eth_types_json_serialization,
  spec/[datatypes, digest, crypto],
  eth/trie/db, ssz

type
  BeaconChainDB* = ref object
    backend: TrieDatabaseRef

  DbKeyKind = enum
    kHashToState
    kHashToBlock
    kHeadBlock
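
# Key scheme used by this module (see the subkey helpers below): each entry is
# stored under a one-byte DbKeyKind tag, followed - for per-object entries -
# by the 32-byte root that identifies the block or state.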

func subkey(kind: DbKeyKind): array[1, byte] =
  result[0] = byte ord(kind)

func subkey[N: static int](kind: DbKeyKind, key: array[N, byte]):
    array[N + 1, byte] =
  result[0] = byte ord(kind)
  result[1 .. ^1] = key

func subkey(kind: type BeaconState, key: Eth2Digest): auto =
  subkey(kHashToState, key.data)

func subkey(kind: type BeaconBlock, key: Eth2Digest): auto =
  subkey(kHashToBlock, key.data)

proc init*(T: type BeaconChainDB, backend: TrieDatabaseRef): BeaconChainDB =
  ## Wrap a trie database backend in a BeaconChainDB handle.
  new result
  result.backend = backend

proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: BeaconBlock) =
  db.backend.put(subkey(type value, key), ssz.serialize(value))

proc putHead*(db: BeaconChainDB, key: Eth2Digest) =
  ## Record the root of the current head block.
  db.backend.put(subkey(kHeadBlock), key.data) # TODO head block?

proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
  # TODO: prune old states
  # TODO: it might be necessary to introduce the concept of a "last finalized
  #       state" to the storage, so that clients with limited storage have
  #       a natural state to start recovering from. One idea is to keep a
  #       special pointer to the state that has been finalized, and prune all
  #       other states.
  #       One issue is that which state will become finalized is revealed only
  #       long after that state has passed, meaning that we need to keep
  #       a history of "finalized state candidates" or possibly replay from
  #       the previous finalized state, if we have that stored. To consider
  #       here is that the gap between the finalized and the present state
  #       might be significant (days), meaning replay might be expensive.
  db.backend.put(subkey(type value, key), ssz.serialize(value))

proc putBlock*(db: BeaconChainDB, value: BeaconBlock) =
  db.putBlock(hash_tree_root_final(value), value)

proc putState*(db: BeaconChainDB, value: BeaconState) =
  db.putState(hash_tree_root_final(value), value)

proc get(db: BeaconChainDB, key: auto, T: typedesc): Option[T] =
  ## Fetch a raw value from the backend and SSZ-deserialize it, returning
  ## none(T) when the key is missing.
  let res = db.backend.get(key)
  if res.len != 0:
    ssz.deserialize(res, T)
  else:
    none(T)

proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Option[BeaconBlock] =
  db.get(subkey(BeaconBlock, key), BeaconBlock)

proc getState*(db: BeaconChainDB, key: Eth2Digest): Option[BeaconState] =
  db.get(subkey(BeaconState, key), BeaconState)

proc getHead*(db: BeaconChainDB): Option[BeaconBlock] =
  ## Look up the block whose root was last recorded with putHead.
  let key = db.backend.get(subkey(kHeadBlock))
  if key.len == sizeof(Eth2Digest):
    var tmp: Eth2Digest
    copyMem(addr tmp, unsafeAddr key[0], sizeof(tmp))

    db.getBlock(tmp)
  else:
    none(BeaconBlock)

proc containsBlock*(
    db: BeaconChainDB, key: Eth2Digest): bool =
  db.backend.contains(subkey(BeaconBlock, key))

proc containsState*(
    db: BeaconChainDB, key: Eth2Digest): bool =
  db.backend.contains(subkey(BeaconState, key))

proc getAncestors*(
    db: BeaconChainDB, blck: BeaconBlock,
    predicate: proc(blck: BeaconBlock): bool = nil): seq[BeaconBlock] =
  ## Load a chain of ancestors for blck - returns a list of blocks with the
  ## oldest block last (blck will be at result[0]).
  ##
  ## The search goes on until an ancestor cannot be found (or slot 0 is
  ## reached), or until the predicate returns true (you found what you were
  ## looking for) - the list will include that last block as well.
  ## TODO maybe turn into an iterator? or add an iterator as well?

  result = @[blck]

  while result[^1].slot > 0.Slot:
    let parent = db.getBlock(result[^1].parent_root)

    if parent.isNone(): break

    result.add parent.get()

    if predicate != nil and predicate(parent.get()): break
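
when isMainModule:
  # Minimal usage sketch: store a block, mark its root as head, and read both
  # back. Assumes an in-memory TrieDatabaseRef created with newMemoryDB from
  # eth/trie/db (already imported above); illustrative only.
  let
    db = BeaconChainDB.init(newMemoryDB())
    blck = BeaconBlock()
    root = hash_tree_root_final(blck)

  db.putBlock(blck)
  db.putHead(root)

  doAssert db.containsBlock(root)
  doAssert db.getBlock(root).isSome()
  doAssert db.getHead().isSome()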