update ec_recover

why:
  Previous version was based on lru_cache which is ugly. This module is
  based on the stew/keyed_queue library module.

other:
  There are still some other modules that rely on lru_cache; these
  should be removed.
This commit is contained in:
Jordan Hrycaj 2022-01-18 13:05:00 +00:00 committed by zah
parent d2f56463ce
commit 9545767c53
9 changed files with 463 additions and 61 deletions

View File

@ -102,7 +102,7 @@ proc newCliqueCfg*(db: BaseChainDB): CliqueCfg =
ckpInterval: CHECKPOINT_INTERVAL,
roThreshold: FULL_IMMUTABILITY_THRESHOLD,
logInterval: SNAPS_LOG_INTERVAL_MICSECS,
signatures: initEcRecover(),
signatures: EcRecover.init(),
prng: initRand(prngSeed),
prettyPrint: PrettyPrinters(
nonce: proc(v:BlockNonce): string = $v,

View File

@ -79,7 +79,7 @@ proc procBlkPreamble(vmState: BaseVMState;
debug "Uncle hash mismatch"
return false
return true
true
proc procBlkEpilogue(vmState: BaseVMState;
@ -114,7 +114,7 @@ proc procBlkEpilogue(vmState: BaseVMState;
expected = header.receiptRoot
return false
return true
true
# ------------------------------------------------------------------------------
# Public functions

View File

@ -215,11 +215,11 @@ proc validateUncles(chainDB: BaseChainDB; header: BlockHeader;
# ------------------------------------------------------------------------------
proc validateTransaction*(
roDB: ReadOnlyStateDB; ## Accounts environment descriptor
roDB: ReadOnlyStateDB; ## Parent accounts environment for transaction
tx: Transaction; ## tx to validate
sender: EthAddress; ## tx.getSender or tx.ecRecover
maxLimit: GasInt; ## gasLimit from block header (for tx)
baseFee: Uint256; ## baseFee from block header (for tx)
maxLimit: GasInt; ## gasLimit from block header
baseFee: Uint256; ## baseFee from block header
fork: Fork): bool =
let
balance = roDB.getBalance(sender)
@ -302,10 +302,10 @@ proc validateTransaction*(
true
proc validateTransaction*(
vmState: BaseVMState; ## Accounts environment descriptor
vmState: BaseVMState; ## Parent accounts environment for transaction
tx: Transaction; ## tx to validate
sender: EthAddress; ## tx.getSender or tx.ecRecover
header: BlockHeader; ## Header of blok containing tx
header: BlockHeader; ## Header for the block containing the current tx
fork: Fork): bool =
## Variant of `validateTransaction()`
let

View File

@ -17,123 +17,198 @@
## calculation time for the price of maintaining it in a LRU cache.
import
./utils_defs,
./lru_cache,
../constants,
eth/[common, keys, rlp],
./keyed_queue/kq_rlp,
./utils_defs,
eth/[common, common/transaction, keys, rlp],
nimcrypto,
stew/results,
stew/[keyed_queue, results],
stint
export
utils_defs
{.push raises: [Defect].}
const
INMEMORY_SIGNATURES* = ##\
## Number of recent block signatures to keep in memory
## Default number of recent block signatures to keep in memory
4096
type
# simplify Hash256 for rlp serialisation
EcKey32 = array[32, byte]
EcKey* = ##\
## Internal key used for the LRU cache (derived from Hash256).
array[32,byte]
EcRecover* = LruCache[BlockHeader,EcKey32,EthAddress,UtilsError]
EcAddrResult* = ##\
## Typical `EthAddress` result as returned by `ecRecover()` functions.
Result[EthAddress,UtilsError]
{.push raises: [Defect].}
EcRecover* = object
size: uint
q: KeyedQueue[EcKey,EthAddress]
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc encodePreSealed(header: BlockHeader): seq[byte] {.inline.} =
## Cut sigature off `extraData` header field and consider new `baseFee`
## field for Eip1559.
doAssert EXTRA_SEAL < header.extraData.len
proc vrsSerialised(tx: Transaction): Result[array[65,byte],UtilsError] =
  ## Serialise the transaction signature as 65 raw bytes
  ## `R(32) & S(32) & V(1)` with `V` normalised to a 0/1 recovery ID.
  ## Parts copied from `transaction.getSignature`.
  var data: array[65,byte]
  data[0..31] = tx.R.toByteArrayBE
  data[32..63] = tx.S.toByteArrayBE
  if tx.txType != TxLegacy:
    # typed (non-legacy) transactions carry the recovery ID directly in V
    data[64] = tx.V.byte
  elif tx.V >= EIP155_CHAIN_ID_OFFSET:
    # EIP-155 legacy encoding: parity of V yields the recovery ID
    data[64] = byte(1 - (tx.V and 1))
  elif tx.V == 27 or tx.V == 28:
    # pre-EIP-155 legacy encoding
    data[64] = byte(tx.V - 27)
  else:
    return err((errSigPrefixError,"")) # unsupported legacy V value
  ok(data)
proc encodePreSealed(header: BlockHeader): seq[byte] =
  ## RLP-encode the argument header with the trailing `EXTRA_SEAL` bytes
  ## (the signature) cut off the `extraData` field. If `extraData` is too
  ## short to hold a signature, the header is encoded unmodified.
  if header.extraData.len < EXTRA_SEAL:
    return rlp.encode(header)

  var rlpHeader = header
  rlpHeader.extraData.setLen(header.extraData.len - EXTRA_SEAL)
  rlp.encode(rlpHeader)
proc hashPreSealed(header: BlockHeader): Hash256 {.inline.} =
proc hashPreSealed(header: BlockHeader): Hash256 =
  ## Returns the hash of a block prior to it being sealed, i.e. the
  ## keccak-256 digest of the header with the signature stripped off
  ## the `extraData` field.
  keccak256.digest header.encodePreSealed
proc ecRecover*(extraData: openArray[byte];
hash: Hash256): Result[EthAddress,UtilsError] {.inline.} =
proc recoverImpl(rawSig: openArray[byte]; msg: Hash256): EcAddrResult =
## Extract account address from the last 65 bytes of the `extraData` argument
## (which is typically the block header field with the same name.) The second
## argument `hash` is used to extract the intermediate public key. Typically,
## this would be the hash of the block header without the last 65 bytes of
## the `extraData` field reserved for the signature.
if extraData.len < EXTRA_SEAL:
if rawSig.len < EXTRA_SEAL:
return err((errMissingSignature,""))
let sig = Signature.fromRaw(
extraData.toOpenArray(extraData.len - EXTRA_SEAL, extraData.high))
rawSig.toOpenArray(rawSig.len - EXTRA_SEAL, rawSig.high))
if sig.isErr:
return err((errSkSigResult,$sig.error))
# Recover the public key from signature and seal hash
let pubKey = recover(sig.value, SKMessage(hash.data))
let pubKey = recover(sig.value, SKMessage(msg.data))
if pubKey.isErr:
return err((errSkPubKeyResult,$pubKey.error))
# Convert public key to address.
return ok(pubKey.value.toCanonicalAddress)
ok(pubKey.value.toCanonicalAddress)
# ------------------------------------------------------------------------------
# Public function: straight ecRecover version
# Public function: straight ecRecover versions
# ------------------------------------------------------------------------------
proc ecRecover*(header: BlockHeader): Result[EthAddress,UtilsError] =
## Extract account address from the `extraData` field (last 65 bytes) of the
## argument header.
header.extraData.ecRecover(header.hashPreSealed)
proc ecRecover*(header: BlockHeader): EcAddrResult =
## Extracts account address from the `extraData` field (last 65 bytes) of
## the argument header.
header.extraData.recoverImpl(header.hashPreSealed)
proc ecRecover*(tx: var Transaction): EcAddrResult =
  ## Recover the sender address from the transaction signature. Similar in
  ## effect to `transaction.getSender()`.
  let rawSig = tx.vrsSerialised
  if rawSig.isOk:
    rawSig.value.recoverImpl(tx.txHashNoSignature)
  else:
    err(rawSig.error)
proc ecRecover*(tx: Transaction): EcAddrResult =
  ## Variant of `ecRecover()` for a call-by-value transaction.
  # work on a mutable copy so the `var Transaction` variant can be re-used
  var ty = tx
  ty.ecRecover
# ------------------------------------------------------------------------------
# Public constructor for caching ecRecover version
# ------------------------------------------------------------------------------
proc initEcRecover*(cache: var EcRecover; cacheSize = INMEMORY_SIGNATURES) =
proc init*(er: var EcRecover; cacheSize = INMEMORY_SIGNATURES; initSize = 10) =
  ## Initialise recover cache: `cacheSize` is the LRU size limit applied on
  ## append, `initSize` the initial allocation of the underlying queue.
  er.size = cacheSize.uint
  er.q.init(initSize)
var toKey: LruKey[BlockHeader,EcKey32] =
proc(header:BlockHeader): EcKey32 =
header.blockHash.data
cache.initCache(toKey, ecRecover, cacheSize)
proc initEcRecover*: EcRecover {.gcsafe, raises: [Defect].} =
result.initEcRecover
proc init*(T: type EcRecover;
           cacheSize = INMEMORY_SIGNATURES; initSize = 10): T =
  ## Initialise recover cache, constructor variant returning a new object.
  result.init(cacheSize, initSize)
# ------------------------------------------------------------------------------
# Public function: caching ecRecover version
# Public functions: miscellaneous
# ------------------------------------------------------------------------------
proc ecRecover*(addrCache: var EcRecover;
header: BlockHeader): Result[EthAddress,UtilsError]
{.gcsafe, raises: [Defect,CatchableError].} =
proc len*(er: var EcRecover): int =
  ## Returns the current number of entries in the LRU cache.
  # NOTE(review): the `var` parameter presumably mirrors the underlying
  # `KeyedQueue` API -- the queue is not modified here; confirm whether a
  # non-var parameter would compile.
  er.q.len
# ------------------------------------------------------------------------------
# Public functions: caching ecRecover version
# ------------------------------------------------------------------------------
proc ecRecover*(er: var EcRecover; header: var BlockHeader): EcAddrResult
{.gcsafe, raises: [Defect,CatchableError].} =
## Extract account address from `extraData` field (last 65 bytes) of the
## argument header. The result is kept in a LRU cache to re-purposed for
## improved result delivery avoiding calculations.
addrCache.getItem(header)
let key = header.blockHash.data
block:
let rc = er.q.lruFetch(key)
if rc.isOK:
return ok(rc.value)
block:
let rc = header.extraData.recoverImpl(header.hashPreSealed)
if rc.isOK:
return ok(er.q.lruAppend(key, rc.value, er.size.int))
err(rc.error)
proc ecRecover*(er: var EcRecover; header: BlockHeader): EcAddrResult
    {.gcsafe, raises: [Defect,CatchableError].} =
  ## Variant of `ecRecover()` for call-by-value header.
  # copy to an lvalue for the `var BlockHeader` variant
  var hdr = header
  er.ecRecover(hdr)
proc ecRecover*(er: var EcRecover; hash: Hash256): EcAddrResult
    {.gcsafe, raises: [Defect,CatchableError].} =
  ## Variant of `ecRecover()` for hash only. Will only succeed if the
  ## argument hash is already in the LRU queue, i.e. a previous full
  ## `ecRecover()` call cached an address for this block hash.
  let rc = er.q.lruFetch(hash.data)
  if rc.isOK:
    return ok(rc.value)
  err((errItemNotFound,""))
# ------------------------------------------------------------------------------
# Public PLP mixin functions for caching version
# Public RLP mixin functions for caching version
# ------------------------------------------------------------------------------
proc append*(rw: var RlpWriter; ecRec: EcRecover) {.
inline, raises: [Defect,KeyError].} =
## Generic support for `rlp.encode(ecRec)`
rw.append(ecRec.data)
proc append*(rw: var RlpWriter; data: EcRecover)
    {.raises: [Defect,KeyError].} =
  ## Generic support for `rlp.encode()`: serialises the cache size limit
  ## together with the queue contents.
  rw.append((data.size,data.q))
proc read*(rlp: var Rlp; Q: type EcRecover): Q {.
inline, raises: [Defect,KeyError].} =
## Generic support for `rlp.decode(bytes)` for loading the cache from a
proc read*(rlp: var Rlp; Q: type EcRecover): Q
{.raises: [Defect,KeyError].} =
## Generic support for `rlp.decode()` for loading the cache from a
## serialised data stream.
result.initEcRecover
result.data = rlp.read(type result.data)
(result.size, result.q) = rlp.read((type result.size, type result.q))
# ------------------------------------------------------------------------------
# Debugging
# ------------------------------------------------------------------------------
iterator keyItemPairs*(er: var EcRecover): (EcKey,EthAddress)
    {.gcsafe, raises: [Defect,CatchableError].} =
  ## Walk the LRU queue from its first entry, yielding `(key,address)`
  ## pairs (debugging aid.)
  var rc = er.q.first
  while rc.isOK:
    yield (rc.value.key, rc.value.data)
    rc = er.q.next(rc.value.key)
# ------------------------------------------------------------------------------
# End

View File

@ -0,0 +1,63 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Keyed Queue, RLP Support
## ========================
##
## Note that the underlying RLP driver does not support negative integers
## which causes problems when reading back. So these values should neither
## appear in any of the `K` (for key) or `V` (for value) data types (best
## to avoid `int` altogether for `KeyedQueue` if serialisation is needed.)
import
std/tables,
eth/rlp,
stew/keyed_queue
# ------------------------------------------------------------------------------
# Public functions, RLP support
# ------------------------------------------------------------------------------
proc append*[K,V](rw: var RlpWriter; kq: KeyedQueue[K,V])
    {.raises: [Defect,KeyError].} =
  ## Generic support for `rlp.encode(kq)` for serialising a queue.
  ##
  ## :CAVEAT:
  ##   The underlying *RLP* driver has a problem with negative integers
  ##   when reading. So it should neither appear in any of the `K` or `V`
  ##   data types.
  # walk the queue links in insertion order, emitting (key,value) pairs
  var data = kq
  rw.startList(data.tab.len)
  if 0 < data.tab.len:
    var key = data.kFirst
    for _ in 1 .. data.tab.len:
      var item = data.tab[key]
      rw.append((key,item.data))
      key = item.kNxt
    # sanity check on the terminal link after the walk -- presumably the
    # last entry links to itself so `key == kLast` here; TODO confirm
    # against the `KeyedQueue` linkage invariants
    if data.tab[key].kNxt != data.kLast:
      raiseAssert "Garbled queue next/prv references"
proc read*[K,V](rlp: var Rlp; Q: type KeyedQueue[K,V]): Q
    {.raises: [Defect,RlpError,KeyError].} =
  ## Generic support for `rlp.decode(bytes)` for loading a queue
  ## from a serialised data stream.
  ##
  ## :CAVEAT:
  ##   The underlying *RLP* driver has a problem with negative integers
  ##   when reading. So it should neither appear in any of the `K` or `V`
  ##   data types.
  # re-append in stored order, which re-creates the original queue order
  for w in rlp.items:
    let (key,value) = w.read((K,V))
    result[key] = value
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -22,7 +22,11 @@ type
errMissingSignature = ##\
## is returned if the `extraData` header field does not seem to contain
## a 65 byte secp256k1 signature.
"extraData 65 byte signature suffix missing"
"extraData 65 byte signature suffix missing"
errSigPrefixError = ##\
## Unsupported value of the (R,S) signature prefix V.
"unsupported (R,S) signature prefix V value"
errSkSigResult = ##\
## eth/keys subsystem error: signature
@ -32,6 +36,10 @@ type
## eth/keys subsystem error: public key
"public key error"
errItemNotFound = ##\
## database lookup failed
"not found"
UtilsError* = ##\
## Error message, tinned component + explanatory text (if any)
(UtilsErrorType,string)

View File

@ -40,4 +40,5 @@ cliBuilder:
./test_lru_cache,
./test_clique,
./test_pow,
./test_configuration
./test_configuration,
./test_keyed_queue_rlp

View File

@ -9,7 +9,7 @@
# according to those terms.
import
std/[sequtils, strformat, strutils],
std/[os, sequtils, strformat, strutils],
../../nimbus/db/db_chain,
./gunzip,
eth/[common, rlp],
@ -95,6 +95,9 @@ iterator undumpNextGroup*(gzFile: string): (seq[BlockHeader],seq[BlockBody]) =
top = 0u
waitFor = "transaction"
if not gzFile.fileExists:
raiseAssert &"No such file: \"{gzFile}\""
for lno,line in gzFile.gunzipLines:
if line.len == 0 or line[0] == '#':
continue

View File

@ -0,0 +1,252 @@
# Nimbus
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
std/[sequtils, strformat, strutils, tables],
../nimbus/utils/keyed_queue/kq_rlp,
eth/rlp,
stew/[keyed_queue, keyed_queue/kq_debug],
unittest2
const
usedStrutils = newSeq[string]().join(" ")
lruCacheLimit = 10
lruCacheModulo = 13
keyList = [
185, 208, 53, 54, 196, 189, 187, 117, 94, 29, 6, 173, 207, 45, 31,
208, 127, 106, 117, 49, 40, 171, 6, 94, 84, 60, 125, 87, 168, 183,
200, 155, 34, 27, 67, 107, 108, 223, 249, 4, 113, 9, 205, 100, 77,
224, 19, 196, 14, 83, 145, 154, 95, 56, 236, 97, 115, 140, 134, 97,
153, 167, 23, 17, 182, 116, 253, 32, 108, 148, 135, 169, 178, 124, 147,
231, 236, 174, 211, 247, 22, 118, 144, 224, 68, 124, 200, 92, 63, 183,
56, 107, 45, 180, 113, 233, 59, 246, 29, 212, 172, 161, 183, 207, 189,
56, 198, 130, 62, 28, 53, 122]
type
  KUQueue = # mind the kqueue module from the nim standard lib
    KeyedQueue[uint,uint]

  LruCache = object
    size: int  # cache size limit (passed to `lruAppend` by `lruValue`)
    q: KUQueue # keyed queue holding the cached (key,value) pairs
# ------------------------------------------------------------------------------
# Debugging
# ------------------------------------------------------------------------------
proc `$`(rc: KeyedQueuePair[uint,uint]): string =
  ## Pretty print a queue entry as `(key,data)`.
  "(" & $rc.key & "," & $rc.data & ")"

proc `$`(rc: Result[KeyedQueuePair[uint,uint],void]): string =
  ## Pretty print an optional queue entry, `<>` when empty.
  result = "<"
  if rc.isOK:
    result &= $rc.value.key & "," & $rc.value.data
  result &= ">"

proc `$`(rc: Result[uint,void]): string =
  ## Pretty print an optional value, `<>` when empty.
  result = "<"
  if rc.isOK:
    result &= $rc.value
  result &= ">"
proc say(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  ## Conditionally echo a prefixed message (debugging/logging aid.)
  if not noisy:
    return
  if args.len == 0:
    echo "*** ", pfx
  else:
    let text = args.toSeq.join
    # insert a separating blank unless the prefix already ends in one
    if pfx.len > 0 and pfx[^1] != ' ':
      echo pfx, " ", text
    else:
      echo pfx, text
# ------------------------------------------------------------------------------
# Converters
# ------------------------------------------------------------------------------
proc toValue(n: int): uint =
  ## Map a test key to its associated cache value (offset by 1000.)
  uint(n + 1000)

proc fromValue(n: uint): int =
  ## Inverse of `toValue()`.
  int(n - 1000)

proc toKey(n: int): uint =
  ## Map a test integer to a queue key.
  uint(n)

proc fromKey(n: uint): int =
  ## Inverse of `toKey()`.
  int(n)
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------
proc lruValue(lru: var LruCache; n: int): uint =
  ## Fetch the cached value for key `n`; on a miss, derive the value and
  ## append it to the queue (the `lru.size` argument bounds the queue.)
  let
    key = n.toKey
    rc = lru.q.lruFetch(key)
  if rc.isOK:
    return rc.value
  lru.q.lruAppend(key, key.fromKey.toValue, lru.size)
proc toLruCache(a: openArray[int]): LruCache =
  ## Build an `LruCache` by feeding the argument keys (reduced modulo
  ## `lruCacheModulo`) through `lruValue()`.
  result.size = lruCacheLimit
  for n in a.toSeq.mapIt(it mod lruCacheModulo):
    doAssert result.lruValue(n) == n.toValue
proc toQueue(a: openArray[int]): KUQueue =
  ## Load the argument keys (with their derived values) into a keyed queue.
  for n in a:
    result[n.toKey] = n.toValue

proc toUnique(a: openArray[int]): seq[uint] =
  ## Return the argument keys de-duplicated, keeping queue insertion order.
  var q = a.toQueue
  toSeq(q.nextKeys)
proc addOrFlushGroupwise(rq: var KUQueue;
                         grpLen: int; seen: var seq[int]; n: int;
                         noisy = true) =
  ## Collect `n` into `seen`; once `grpLen` keys have been collected,
  ## delete them all from the queue (verifying each deletion) and reset
  ## `seen`.
  seen.add n
  if seen.len < grpLen:
    return

  # flush group-wise
  let rqLen = rq.len
  noisy.say "updateSeen: deleting ", seen.mapIt($it).join(" ")
  for a in seen:
    doAssert rq.delete(a.toKey).value.data == a.toValue
  doAssert rqLen == seen.len + rq.len
  seen.setLen(0)
proc compileGenericFunctions(rq: var KUQueue) =
  ## Verifies that functions compile, at all
  rq.del(0)
  rq[0] = 0 # so `rq[0]` works
  discard rq[0]

  # binding this tuple instantiates every query/mutator overload once;
  # the value itself is intentionally unused
  let ignoreValues = (
    (rq.append(0,0), rq.push(0,0),
     rq.replace(0,0),
     rq.prepend(0,0), rq.unshift(0,0),
     rq.shift, rq.shiftKey, rq.shiftValue,
     rq.pop, rq.popKey, rq.popValue,
     rq.delete(0)),

    (rq.hasKey(0), rq.eq(0)),

    (rq.firstKey, rq.secondKey, rq.beforeLastKey, rq.lastKey,
     rq.nextKey(0), rq.prevKey(0)),

    (rq.first, rq.second, rq.beforeLast, rq.last,
     rq.next(0), rq.prev(0)),

    (rq.firstValue, rq.secondValue, rq.beforeLastValue, rq.lastValue),

    (rq == rq, rq.len),

    (toSeq(rq.nextKeys), toSeq(rq.nextValues), toSeq(rq.nextPairs),
     toSeq(rq.prevKeys), toSeq(rq.prevValues), toSeq(rq.prevPairs)))
# ------------------------------------------------------------------------------
# Test Runners
# ------------------------------------------------------------------------------
proc runKeyedQueueRlp(noisy = true) =
  ## Test suite: RLP serialise/reload round trips for `KeyedQueue` and a
  ## composite `LruCache` object.
  let
    # NOTE(review): these three bindings are unused in this suite --
    # presumably kept for symmetry with sibling test modules; confirm.
    uniqueKeys = keyList.toUnique
    numUniqeKeys = keyList.toSeq.mapIt((it,false)).toTable.len
    numKeyDups = keyList.len - numUniqeKeys

  suite "KeyedQueue: RLP stuff":

    test &"Simple rlp serialise + reload":
      var
        rp = [1, 2, 3].toQueue # keyList.toQueue
        rq = rp
      check rp == rq

      var
        sp = rlp.encode(rp)
        sq = rlp.encode(rq)
      check sp == sq

      var
        pr = sp.decode(type rp)
        qr = sq.decode(type rq)
      check pr.verify.isOK
      check qr.verify.isOK
      check pr == qr

    block:
      # local RLP mixins for the composite `LruCache` object
      proc append(rw: var RlpWriter; lru: LruCache)
          {.inline, raises: [Defect,KeyError].} =
        rw.append((lru.size,lru.q))
      proc read(rlp: var Rlp; Q: type LruCache): Q
          {.inline, raises: [Defect,KeyError,RlpError].} =
        (result.size, result.q) = rlp.read((type result.size, type result.q))

      test "Rlp serialise & load, append":
        block:
          # round trip: cache built from `keyList` vs. reloaded copy
          var
            c1 = keyList.toLruCache
            s1 = rlp.encode(c1)
            c2 = newSeq[int]().toLruCache
          noisy.say &"serialised[{s1.len}]: {s1}"

          c2.q.clear
          check c1 != c2
          check c1.q.verify.isOK
          check c2.q.verify.isOK

          c2 = s1.decode(type c2)
          check c1 == c2
          check c2.q.verify.isOK
          noisy.say &"c2Specs: {c2.size} {c2.q.firstKey} {c2.q.lastKey} ..."
          check s1 == rlp.encode(c2)

        block:
          # round trip after a cache access re-ordered the queue
          var
            c1 = keyList.toLruCache
            value = c1.lruValue(77)
            queue = toSeq(c1.q.nextPairs).mapIt(it.key)
            values = toSeq(c1.q.nextPairs).mapIt(it.data)
          noisy.say &"c1: append {value} => {queue}"
          var
            s1 = rlp.encode(c1)
            c2 = keyList.toLruCache
          noisy.say &"serialised[{s1.len}]: {s1}"

          c2.q.clear
          check c1 != c2
          check c1.q.verify.isOK
          check c2.q.verify.isOK

          c2 = s1.decode(type c2)
          check c1 == c2
          noisy.say &"c2Specs: {c2.size} {c2.q.firstKey} {c2.q.lastKey} ..."
          check s1 == rlp.encode(c2)
          check c2.q.verify.isOK
# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------
proc keyedQueueRlpMain*(noisy = defined(debug)) =
  ## Externally callable entry point for this test suite.
  runKeyedQueueRlp(noisy)
when isMainModule:
  # run the suite directly; verbose only for debug builds
  let noisy = defined(debug)
  noisy.runKeyedQueueRlp
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------