import
  options,
  serialization,
  spec/[datatypes, digest, crypto],
  kvstore, ssz

type
  BeaconChainDB* = ref object
    ## Database storing resolved blocks and states - resolved blocks are
    ## blocks that form a chain back to the tail block.
    backend: KVStoreRef

  DbKeyKind = enum
    kHashToState
    kHashToBlock
    kHeadBlock # Pointer to the most recent block selected by the fork choice
    kTailBlock ##\
      ## Pointer to the earliest finalized block - this is the genesis block when
      ## the chain starts, but might advance as the database gets pruned
      ## TODO: determine how aggressively the database should be pruned. For a
      ##       healthy network sync, we probably need to store blocks at least
      ##       past the weak subjectivity period.
    kBlockSlotStateRoot ## BlockSlot -> state_root mapping
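
# Key layout (a short summary derived from the `subkey` helpers below): every
# key starts with a single byte holding the `DbKeyKind` ordinal, followed by
# the payload - a 32-byte root for block and state lookups, nothing for the
# singleton head/tail pointers, and root plus slot for the
# BlockSlot -> state_root mapping.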

func subkey(kind: DbKeyKind): array[1, byte] =
  result[0] = byte ord(kind)

func subkey[N: static int](kind: DbKeyKind, key: array[N, byte]):
    array[N + 1, byte] =
  result[0] = byte ord(kind)
  result[1 .. ^1] = key

func subkey(kind: DbKeyKind, key: uint64): array[sizeof(key) + 1, byte] =
  result[0] = byte ord(kind)
  copyMem(addr result[1], unsafeAddr key, sizeof(key))

func subkey(kind: type BeaconState, key: Eth2Digest): auto =
  subkey(kHashToState, key.data)

func subkey(kind: type SignedBeaconBlock, key: Eth2Digest): auto =
  subkey(kHashToBlock, key.data)

func subkey(root: Eth2Digest, slot: Slot): auto =
  # TODO: Copy the SSZ data to `ret` properly - there is no need for separate
  #       SSZ.encode calls; use memoryStream(ret) and SszWriter explicitly.
  var
    # SSZ encoding takes care of endianness..
    rootSSZ = SSZ.encode(root)
    slotSSZ = SSZ.encode(slot)

  var ret: array[1 + 32 + 8, byte]
  doAssert sizeof(ret) == 1 + rootSSZ.len + slotSSZ.len,
    "Can't sizeof this in VM"

  ret[0] = byte ord(kBlockSlotStateRoot)

  copyMem(addr ret[1], unsafeAddr root, sizeof(root))
  copyMem(addr ret[1 + sizeof(root)], unsafeAddr slot, sizeof(slot))

  ret

proc init*(T: type BeaconChainDB, backend: KVStoreRef): BeaconChainDB =
  T(backend: backend)

proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: SignedBeaconBlock) =
  db.backend.put(subkey(type value, key), SSZ.encode(value))

proc putHead*(db: BeaconChainDB, key: Eth2Digest) =
  db.backend.put(subkey(kHeadBlock), key.data) # TODO head block?

proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
  # TODO prune old states - this is less easy than it seems as we never know
  #      when or if a particular state will become finalized.

  db.backend.put(subkey(type value, key), SSZ.encode(value))

proc putState*(db: BeaconChainDB, value: BeaconState) =
  db.putState(hash_tree_root(value), value)

proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
    value: Eth2Digest) =
  db.backend.put(subkey(root, slot), value.data)

proc putBlock*(db: BeaconChainDB, value: SignedBeaconBlock) =
  db.putBlock(hash_tree_root(value.message), value)

proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.backend.del(subkey(SignedBeaconBlock, key))

proc delState*(db: BeaconChainDB, key: Eth2Digest) =
  db.backend.del(subkey(BeaconState, key))

proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.backend.put(subkey(kHeadBlock), key.data) # TODO head block?

proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.backend.put(subkey(kTailBlock), key.data)

proc get(db: BeaconChainDB, key: auto, T: typedesc): Option[T] =
  ## Read and decode a stored value - returns `none` when the key is missing
  ## or the stored bytes fail to decode as `T`.
  var res: Option[T]
  discard db.backend.get(key, proc (data: openArray[byte]) =
    try:
      res = some(SSZ.decode(data, T))
    except SerializationError:
      discard
  )
  res

proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Option[SignedBeaconBlock] =
  db.get(subkey(SignedBeaconBlock, key), SignedBeaconBlock)

proc getState*(db: BeaconChainDB, key: Eth2Digest): Option[BeaconState] =
  db.get(subkey(BeaconState, key), BeaconState)

proc getStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot):
    Option[Eth2Digest] =
  db.get(subkey(root, slot), Eth2Digest)

proc getHeadBlock*(db: BeaconChainDB): Option[Eth2Digest] =
  db.get(subkey(kHeadBlock), Eth2Digest)

proc getTailBlock*(db: BeaconChainDB): Option[Eth2Digest] =
  db.get(subkey(kTailBlock), Eth2Digest)

proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool =
  db.backend.contains(subkey(SignedBeaconBlock, key))

proc containsState*(db: BeaconChainDB, key: Eth2Digest): bool =
  db.backend.contains(subkey(BeaconState, key))

iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
    tuple[root: Eth2Digest, blck: SignedBeaconBlock] =
  ## Load a chain of ancestors starting from the block with the given root -
  ## the requested block is yielded first, followed by each parent in turn,
  ## walking back towards the tail block.
  ##
  ## The search will go on until an ancestor cannot be found in the database.

  var root = root
  while (let blck = db.getBlock(root); blck.isSome()):
    yield (root, blck.get())

    root = blck.get().message.parent_root
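
when isMainModule:
  # A minimal usage sketch, not part of the original module. It assumes an
  # in-memory `KVStoreRef` backend is available (here called `MemStoreRef`,
  # wrapped by `kvStore`); swap in whatever backend your `kvstore` module
  # actually provides.
  let db = BeaconChainDB.init(kvStore MemStoreRef.init())

  # Round-trip a (default-initialized) signed block keyed by its message root.
  let
    blck = SignedBeaconBlock()
    root = hash_tree_root(blck.message)
  db.putBlock(blck)
  doAssert db.containsBlock(root)
  doAssert db.getBlock(root).isSome()

  # Head and tail pointers store a bare root under a fixed one-byte key.
  db.putHeadBlock(root)
  doAssert db.getHeadBlock().get() == root

  # Walk ancestors from the stored block; iteration stops at the first
  # parent root that is not in the database.
  for r, b in db.getAncestors(root):
    doAssert r == hash_tree_root(b.message)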