nimbus-eth1/nimbus/db/storage_types.nim

import
  eth/common

type
  DBKeyKind* = enum
    genericHash
    blockNumberToHash
    blockHashToScore
    transactionHashToBlock
    canonicalHeadHash
    slotHashToSlot
    contractHash
    cliqueSnapshot
    transitionStatus
    safeHash
    finalizedHash
    skeletonProgress
    skeletonBlockHashToNumber
    skeletonBlock
    skeletonTransaction
    snapSyncAccount
    snapSyncStorageSlot
    snapSyncStateRoot

  DbKey* = object
    # The first byte stores the key type. The rest are key-specific values
    data*: array[33, byte]
    dataEndPos*: uint8 # the last populated position in the data
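
# Layout sketch: for a 32-byte hash payload the key bytes are
#   data = [byte ord(kind), payload[0], ..., payload[31]]
# with dataEndPos == 32, so `toOpenArray` below yields all 33 bytes.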

proc genericHashKey*(h: Hash256): DbKey {.inline.} =
  result.data[0] = byte ord(genericHash)
  result.data[1 .. 32] = h.data
  result.dataEndPos = uint8 32

proc blockHashToScoreKey*(h: Hash256): DbKey {.inline.} =
  result.data[0] = byte ord(blockHashToScore)
  result.data[1 .. 32] = h.data
  result.dataEndPos = uint8 32

proc transactionHashToBlockKey*(h: Hash256): DbKey {.inline.} =
  result.data[0] = byte ord(transactionHashToBlock)
  result.data[1 .. 32] = h.data
  result.dataEndPos = uint8 32

proc blockNumberToHashKey*(u: BlockNumber): DbKey {.inline.} =
  result.data[0] = byte ord(blockNumberToHash)
  doAssert sizeof(u) <= 32
  copyMem(addr result.data[1], unsafeAddr u, sizeof(u))
  result.dataEndPos = uint8 sizeof(u)
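
# Note: `blockNumberToHashKey` (and the skeleton block/transaction keys below)
# copy the block number's raw in-memory bytes, so the payload length is
# sizeof(BlockNumber) rather than the fixed 32 bytes of the hash-based keys.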

proc canonicalHeadHashKey*(): DbKey {.inline.} =
  result.data[0] = byte ord(canonicalHeadHash)
  result.dataEndPos = 1

proc slotHashToSlotKey*(h: openArray[byte]): DbKey {.inline.} =
  doAssert(h.len == 32)
  result.data[0] = byte ord(slotHashToSlot)
  result.data[1 .. 32] = h
  result.dataEndPos = uint8 32

proc contractHashKey*(h: Hash256): DbKey {.inline.} =
  result.data[0] = byte ord(contractHash)
  result.data[1 .. 32] = h.data
  result.dataEndPos = uint8 32

proc cliqueSnapshotKey*(h: Hash256): DbKey {.inline.} =
  result.data[0] = byte ord(cliqueSnapshot)
  result.data[1 .. 32] = h.data
  result.dataEndPos = uint8 32

proc transitionStatusKey*(): DbKey =
  # ETH-2 Transition Status
  result.data[0] = byte ord(transitionStatus)
  result.dataEndPos = uint8 1

proc safeHashKey*(): DbKey {.inline.} =
  result.data[0] = byte ord(safeHash)
  result.dataEndPos = uint8 1

proc finalizedHashKey*(): DbKey {.inline.} =
  result.data[0] = byte ord(finalizedHash)
  result.dataEndPos = uint8 1

proc skeletonProgressKey*(): DbKey {.inline.} =
  result.data[0] = byte ord(skeletonProgress)
  result.dataEndPos = 1

proc skeletonBlockHashToNumberKey*(h: Hash256): DbKey {.inline.} =
  result.data[0] = byte ord(skeletonBlockHashToNumber)
  result.data[1 .. 32] = h.data
  result.dataEndPos = uint8 32

proc skeletonBlockKey*(u: BlockNumber): DbKey {.inline.} =
  result.data[0] = byte ord(skeletonBlock)
  doAssert sizeof(u) <= 32
  copyMem(addr result.data[1], unsafeAddr u, sizeof(u))
  result.dataEndPos = uint8 sizeof(u)

proc skeletonTransactionKey*(u: BlockNumber): DbKey {.inline.} =
  result.data[0] = byte ord(skeletonTransaction)
  doAssert sizeof(u) <= 32
  copyMem(addr result.data[1], unsafeAddr u, sizeof(u))
  result.dataEndPos = uint8 sizeof(u)

proc snapSyncAccountKey*(h: openArray[byte]): DbKey {.inline.} =
  doAssert(h.len == 32)
  result.data[0] = byte ord(snapSyncAccount)
  result.data[1 .. 32] = h
  result.dataEndPos = uint8 32

proc snapSyncStorageSlotKey*(h: openArray[byte]): DbKey {.inline.} =
  doAssert(h.len == 32)
  result.data[0] = byte ord(snapSyncStorageSlot)
  result.data[1 .. 32] = h
  result.dataEndPos = uint8 32

proc snapSyncStateRootKey*(h: openArray[byte]): DbKey {.inline.} =
  doAssert(h.len == 32)
  result.data[0] = byte ord(snapSyncStateRoot)
  result.data[1 .. 32] = h
  result.dataEndPos = uint8 32

template toOpenArray*(k: DbKey): openArray[byte] =
  k.data.toOpenArray(0, int(k.dataEndPos))

proc `==`*(a, b: DbKey): bool {.inline.} =
  a.toOpenArray == b.toOpenArray
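
when isMainModule:
  # Minimal usage sketch (assumes `Hash256` from eth/common exposes a
  # 32-byte `data` array): build a key, then check its length and equality.
  var h: Hash256
  h.data[0] = 0xab
  let k = genericHashKey(h)
  doAssert k.toOpenArray.len == 33   # 1 kind byte + 32 payload bytes
  doAssert k == genericHashKey(h)    # equality compares the populated bytes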