pulled out cache logic into separate file

why:
  handy to re-use, e.g. for the upcoming clique implementation
Jordan Hrycaj 2021-05-20 13:07:01 +01:00 committed by Jordan Hrycaj
parent d6a5cecb98
commit 3663b1603f
3 changed files with 87 additions and 50 deletions

View File

@@ -17,6 +17,7 @@ import
   ../vm_state,
   ../vm_types,
   ../vm_types2,
+  ./validate/cache,
   chronicles,
   eth/[common, rlp, trie/trie_defs],
   ethash,
@@ -29,6 +30,8 @@ import
   times

 export
+  cache.EpochHashCache,
+  cache.initEpochHashCache,
   results

 type
@@ -49,11 +52,6 @@ type
   Hash512 = MDigest[512]

-  CacheByEpoch* = OrderedTableRef[uint64, seq[Hash512]]
-
-const
-  CACHE_MAX_ITEMS = 10
-
 # ------------------------------------------------------------------------------
 # Private Helpers
 # ------------------------------------------------------------------------------
@@ -85,40 +83,7 @@ func isGenesis(header: BlockHeader): bool =
 # Private cache management functions
 # ------------------------------------------------------------------------------

-proc mkCacheBytes(blockNumber: uint64): seq[Hash512] =
-  mkcache(getCacheSize(blockNumber), getSeedhash(blockNumber))
-
-proc findFirstIndex(tab: CacheByEpoch): uint64 =
-  # Kludge: OrderedTable[] still misses a proper API
-  for key in tab.keys:
-    result = key
-    break
-
-proc getCache(cacheByEpoch: CacheByEpoch; blockNumber: uint64): seq[Hash512] =
-  # TODO: this is very inefficient
-  let epochIndex = blockNumber div EPOCH_LENGTH
-
-  # Get the cache if already generated, marking it as recently used
-  if epochIndex in cacheByEpoch:
-    let c = cacheByEpoch[epochIndex]
-    cacheByEpoch.del(epochIndex)      # pop and append at end
-    cacheByEpoch[epochIndex] = c
-    return c
-
-  # Limit memory usage for cache
-  if cacheByEpoch.len >= CACHE_MAX_ITEMS:
-    cacheByEpoch.del(cacheByEpoch.findFirstIndex)
-
-  # Generate the cache if it was not already in memory
-  # Simulate requesting mkcache by block number: multiply index by epoch length
-  var c = mkCacheBytes(epochIndex * EPOCH_LENGTH)
-  cacheByEpoch[epochIndex] = c
-
-  result = system.move(c)
-
-func cacheHash(x: openArray[Hash512]): Hash256 =
+func cacheHash(x: EpochHashDigest): Hash256 =
   var ctx: keccak256
   ctx.init()
@@ -134,10 +99,10 @@ func cacheHash(x: openArray[Hash512]): Hash256 =
 proc checkPOW(blockNumber: Uint256; miningHash, mixHash: Hash256;
               nonce: BlockNonce; difficulty: DifficultyInt;
-              cacheByEpoch: CacheByEpoch): Result[void,string] =
+              cacheByEpoch: var EpochHashCache): Result[void,string] =
   let
     blockNumber = blockNumber.truncate(uint64)
-    cache = cacheByEpoch.getCache(blockNumber)
+    cache = cacheByEpoch.getEpochCacheHash(blockNumber)
     size = getDataSize(blockNumber)
     miningOutput = hashimotoLight(
       size, cache, miningHash, uint64.fromBytesBE(nonce))
@@ -161,7 +126,7 @@ proc checkPOW(blockNumber: Uint256; miningHash, mixHash: Hash256;
   result = ok()

-proc validateSeal(cacheByEpoch: CacheByEpoch;
+proc validateSeal(cacheByEpoch: var EpochHashCache;
                   header: BlockHeader): Result[void,string] =
   let miningHeader = header.toMiningHeader
   let miningHash = miningHeader.hash
@@ -196,7 +161,7 @@ func validateGasLimit(gasLimit, parentGasLimit: GasInt): Result[void,string] =
   result = ok()

 proc validateHeader(header, parentHeader: BlockHeader; checkSealOK: bool;
-                    cacheByEpoch: CacheByEpoch): Result[void,string] =
+                    cacheByEpoch: var EpochHashCache): Result[void,string] =
   if header.extraData.len > 32:
     return err("BlockHeader.extraData larger than 32 bytes")
@@ -235,7 +200,7 @@ func validateUncle(currBlock, uncle, uncleParent: BlockHeader):
 proc validateUncles(chainDB: BaseChainDB; header: BlockHeader;
                     uncles: seq[BlockHeader]; checkSealOK: bool;
-                    cacheByEpoch: CacheByEpoch): Result[void,string] =
+                    cacheByEpoch: var EpochHashCache): Result[void,string] =
   let hasUncles = uncles.len > 0
   let shouldHaveUncles = header.ommersHash != EMPTY_UNCLE_HASH
@@ -343,13 +308,9 @@ proc validateTransaction*(vmState: BaseVMState, tx: Transaction,
 # Public functions, extracted from test_blockchain_json
 # ------------------------------------------------------------------------------

-proc newCacheByEpoch*(): CacheByEpoch =
-  newOrderedTable[uint64, seq[Hash512]]()
-
 proc validateKinship*(chainDB: BaseChainDB; header: BlockHeader;
                       uncles: seq[BlockHeader]; checkSealOK: bool;
-                      cacheByEpoch: CacheByEpoch): Result[void,string] =
+                      cacheByEpoch: var EpochHashCache): Result[void,string] =
   if header.isGenesis:
     if header.extraData.len > 32:
       return err("BlockHeader.extraData larger than 32 bytes")
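
With these signature changes, the validation entry points no longer take the old CacheByEpoch table but an EpochHashCache passed as a `var` parameter. A minimal caller-side sketch, assuming a `chainDB`, `header` and `uncles` are already at hand (only the identifiers taken from this diff are real; the surrounding setup is a placeholder):

  var hashCache: EpochHashCache
  hashCache.initEpochHashCache()   # default: keep at most 10 epoch caches

  # checkSealOK = true; the cache is `var` so lookups may insert or evict entries
  let rc = chainDB.validateKinship(header, uncles, true, hashCache)
  if rc.isErr:
    echo "block rejected: ", rc.error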

View File

@@ -0,0 +1,75 @@
+# Nimbus
+# Copyright (c) 2018 Status Research & Development GmbH
+# Licensed under either of
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+#    http://www.apache.org/licenses/LICENSE-2.0)
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+#    http://opensource.org/licenses/MIT)
+# at your option. This file may not be copied, modified, or distributed except
+# according to those terms.
+
+## Hash as hash can
+## ================
+##
+## provide hash lists, indexed by epoch
+
+import
+  ethash,
+  nimcrypto,
+  tables
+
+type
+  EpochHashDigest* = seq[MDigest[512]]
+
+  EpochHashCache* = object
+    maxItems: int                              ## max number of entries
+    tab: OrderedTable[uint64,EpochHashDigest]  ## cache data table
+
+# ------------------------------------------------------------------------------
+# Private cache management functions
+# ------------------------------------------------------------------------------
+
+proc mkCacheBytes(blockNumber: uint64): seq[MDigest[512]] {.inline.} =
+  mkcache(getCacheSize(blockNumber), getSeedhash(blockNumber))
+
+# ------------------------------------------------------------------------------
+# Public functions
+# ------------------------------------------------------------------------------
+
+proc initEpochHashCache*(cache: var EpochHashCache; cacheMaxItems = 10) =
+  ## Initialise a new cache indexed by block epoch
+  cache.maxItems = cacheMaxItems
+  # note: Starting from Nim v0.20, tables are initialized by default and it
+  # is not necessary to call initOrderedTable() function explicitly.
+
+proc getEpochCacheHash*(cache: var EpochHashCache;
+                        blockNumber: uint64): EpochHashDigest =
+  ## Return hash list, indexed by epoch of argument `blockNumber`
+  let epochIndex = blockNumber div EPOCH_LENGTH
+
+  # Get the cache if already generated, marking it as recently used
+  if epochIndex in cache.tab:
+    let value = cache.tab[epochIndex]
+    cache.tab.del(epochIndex)      # pop and append at end
+    cache.tab[epochIndex] = value
+    return value
+
+  # Limit memory usage for cache
+  if cache.maxItems <= cache.tab.len:
+    # Delete oldest entry
+    for key in cache.tab.keys:
+      # Kludge: OrderedTable[] still misses a proper API
+      cache.tab.del(key)
+      break
+
+  # Simulate requesting mkcache by block number: multiply index by epoch length
+  var data = mkCacheBytes(epochIndex * EPOCH_LENGTH)
+  cache.tab[epochIndex] = data
+
+  result = system.move(data)
+
+# ------------------------------------------------------------------------------
+# End
+# ------------------------------------------------------------------------------
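
The new module uses an OrderedTable as a small LRU: a cache hit re-inserts the entry at the end, and once `maxItems` entries exist the first (oldest) key is dropped before a fresh epoch cache is generated. A short usage sketch of the public API (the import path is an assumption based on the `./validate/cache` import above; generating a real ethash light cache takes noticeable time and memory):

  import ./validate/cache    # assumed path of the module shown above

  var hashCache: EpochHashCache
  hashCache.initEpochHashCache(cacheMaxItems = 2)   # keep at most two epochs

  # first call for an epoch runs mkcache() (slow); repeated calls hit the table
  let epoch0 = hashCache.getEpochCacheHash(blockNumber = 0)
  let epoch1 = hashCache.getEpochCacheHash(blockNumber = 30_000)  # ethash EPOCH_LENGTH is 30000 blocks
  doAssert epoch0.len > 0 and epoch1.len > 0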

View File

@@ -40,7 +40,8 @@ type
     debugData : JsonNode
     network   : string

-var cacheByEpoch = newCacheByEpoch()
+var cacheByEpoch: EpochHashCache
+cacheByEpoch.initEpochHashCache

 proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus, debugMode = false, trace = false)