Remove keyed_queue rlp support (#2300)
It is unused and causes trouble because of unhandled exception effects. If we were to use it, it would need to be re-implemented so that it does not reallocate the whole queue on writing.
parent e9eae4df70
commit 32c7fe74be
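Should serialisation ever be needed again, it can be done at the call site rather than via `rlp.append`/`rlp.read` mixins. Below is a minimal sketch (not part of this commit) of that idea, using the `eth/rlp` writer API and the public `stew/keyed_queue` iterators that also appear elsewhere in this diff; `encodeQueue` and `decodeQueue` are hypothetical helper names. Passing the queue as `var` is what avoids the whole-queue copy the removed mixin had to make, and wrapping the decode step in a `Result` (not shown) would likewise contain the `RlpError` effect mentioned above.

import
  eth/rlp,
  stew/keyed_queue

proc encodeQueue[K,V](kq: var KeyedQueue[K,V]): seq[byte] =
  ## Serialise the queue as an RLP list of (key,value) pairs, oldest first.
  ## Taking the argument as `var` sidesteps the full copy the mixin needed.
  var rw = initRlpWriter()
  rw.startList(kq.len)
  for w in kq.nextPairs:
    rw.append((w.key, w.data))
  rw.finish

proc decodeQueue[K,V](bytes: seq[byte]; Q: type KeyedQueue[K,V]): Q
    {.raises: [RlpError].} =
  ## Rebuild a queue from the pair list, preserving insertion order.
  var rlp = rlpFromBytes(bytes)
  for w in rlp.items:
    let (key, value) = w.read((K, V))
    result[key] = value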
@@ -18,7 +18,6 @@
 import
   ../constants,
-  ./keyed_queue/kq_rlp,
   ./utils_defs,
   eth/[common, common/transaction, keys, rlp],
   stew/keyed_queue,

@@ -156,8 +155,7 @@ proc len*(er: var EcRecover): int =
 # Public functions: caching ecRecover version
 # ------------------------------------------------------------------------------

-proc ecRecover*(er: var EcRecover; header: var BlockHeader): EcAddrResult
-    =
+proc ecRecover*(er: var EcRecover; header: var BlockHeader): EcAddrResult =
   ## Extract account address from `extraData` field (last 65 bytes) of the
   ## argument header. The result is kept in a LRU cache to re-purposed for
   ## improved result delivery avoiding calculations.

@@ -172,14 +170,12 @@ proc ecRecover*(er: var EcRecover; header: var BlockHeader): EcAddrResult
     return ok(er.q.lruAppend(key, rc.value, er.size.int))
   err(rc.error)

-proc ecRecover*(er: var EcRecover; header: BlockHeader): EcAddrResult
-    =
+proc ecRecover*(er: var EcRecover; header: BlockHeader): EcAddrResult =
   ## Variant of `ecRecover()` for call-by-value header
   var hdr = header
   er.ecRecover(hdr)

-proc ecRecover*(er: var EcRecover; hash: Hash256): EcAddrResult
-    =
+proc ecRecover*(er: var EcRecover; hash: Hash256): EcAddrResult =
   ## Variant of `ecRecover()` for hash only. Will only succeed it the
   ## argument hash is uk the LRU queue.
   let rc = er.q.lruFetch(hash.data)

@@ -187,21 +183,6 @@ proc ecRecover*(er: var EcRecover; hash: Hash256): EcAddrResult
     return ok(rc.value)
   err((errItemNotFound,""))

-# ------------------------------------------------------------------------------
-# Public RLP mixin functions for caching version
-# ------------------------------------------------------------------------------
-
-proc append*(rw: var RlpWriter; data: EcRecover)
-    {.raises: [KeyError].} =
-  ## Generic support for `rlp.encode()`
-  rw.append((data.size,data.q))
-
-proc read*(rlp: var Rlp; Q: type EcRecover): Q
-    {.raises: [KeyError].} =
-  ## Generic support for `rlp.decode()` for loading the cache from a
-  ## serialised data stream.
-  (result.size, result.q) = rlp.read((type result.size, type result.q))
-
 # ------------------------------------------------------------------------------
 # Debugging
 # ------------------------------------------------------------------------------
@@ -1,65 +0,0 @@
-# Nimbus
-# Copyright (c) 2018-2024 Status Research & Development GmbH
-# Licensed under either of
-#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
-#    http://www.apache.org/licenses/LICENSE-2.0)
-#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
-#    http://opensource.org/licenses/MIT)
-# at your option. This file may not be copied, modified, or distributed except
-# according to those terms.
-
-## Keyed Queue, RLP Support
-## ========================
-##
-## Note that the underlying RLP driver does not support negative integers
-## which causes problems when reading back. So these values should neither
-## appear in any of the `K` (for key) or `V` (for value) data types (best
-## to avoid `int` altogether for `KeyedQueue` if serialisation is needed.)
-
-import
-  std/tables,
-  eth/rlp,
-  stew/keyed_queue
-
-{.push raises: [].}
-
-# ------------------------------------------------------------------------------
-# Public functions, RLP support
-# ------------------------------------------------------------------------------
-
-proc append*[K,V](rw: var RlpWriter; kq: KeyedQueue[K,V])
-    {.raises: [KeyError].} =
-  ## Generic support for `rlp.encode(kq)` for serialising a queue.
-  ##
-  ## :CAVEAT:
-  ##   The underlying *RLP* driver has a problem with negative integers
-  ##   when reading. So it should neither appear in any of the `K` or `V`
-  ##   data types.
-  # store keys in increasing order
-  var data = kq
-  rw.startList(data.tab.len)
-  if 0 < data.tab.len:
-    var key = data.kFirst
-    for _ in 1 .. data.tab.len:
-      var item = data.tab[key]
-      rw.append((key,item.data))
-      key = item.kNxt
-    if data.tab[key].kNxt != data.kLast:
-      raiseAssert "Garbled queue next/prv references"
-
-proc read*[K,V](rlp: var Rlp; Q: type KeyedQueue[K,V]): Q
-    {.raises: [RlpError].} =
-  ## Generic support for `rlp.decode(bytes)` for loading a queue
-  ## from a serialised data stream.
-  ##
-  ## :CAVEAT:
-  ##   The underlying *RLP* driver has a problem with negative integers
-  ##   when reading. So it should neither appear in any of the `K` or `V`
-  ##   data types.
-  for w in rlp.items:
-    let (key,value) = w.read((K,V))
-    result[key] = value
-
-# ------------------------------------------------------------------------------
-# End
-# ------------------------------------------------------------------------------
@@ -44,7 +44,6 @@ cliBuilder:
    #./test_graphql, -- fails
    ./test_pow,
    ./test_configuration,
-   ./test_keyed_queue_rlp,
    #./test_txpool, -- fails
    ./test_txpool2,
    #./test_merge, -- fails
@@ -1,244 +0,0 @@
-# Nimbus
-# Copyright (c) 2021-2024 Status Research & Development GmbH
-# Licensed under either of
-#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
-#    http://www.apache.org/licenses/LICENSE-2.0)
-#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
-#    http://opensource.org/licenses/MIT)
-# at your option. This file may not be copied, modified, or distributed except
-# according to those terms.
-
-import
-  std/[sequtils, strformat, strutils, tables],
-  ../nimbus/utils/keyed_queue/kq_rlp,
-  eth/rlp,
-  stew/[keyed_queue, keyed_queue/kq_debug],
-  unittest2
-
-const
-  lruCacheLimit = 10
-  lruCacheModulo = 13
-
-  keyList = [
-    185, 208, 53, 54, 196, 189, 187, 117, 94, 29, 6, 173, 207, 45, 31,
-    208, 127, 106, 117, 49, 40, 171, 6, 94, 84, 60, 125, 87, 168, 183,
-    200, 155, 34, 27, 67, 107, 108, 223, 249, 4, 113, 9, 205, 100, 77,
-    224, 19, 196, 14, 83, 145, 154, 95, 56, 236, 97, 115, 140, 134, 97,
-    153, 167, 23, 17, 182, 116, 253, 32, 108, 148, 135, 169, 178, 124, 147,
-    231, 236, 174, 211, 247, 22, 118, 144, 224, 68, 124, 200, 92, 63, 183,
-    56, 107, 45, 180, 113, 233, 59, 246, 29, 212, 172, 161, 183, 207, 189,
-    56, 198, 130, 62, 28, 53, 122]
-
-type
-  KUQueue = # mind the kqueue module from the nim standard lib
-    KeyedQueue[uint,uint]
-
-  LruCache = object
-    size: int
-    q: KUQueue
-
-# ------------------------------------------------------------------------------
-# Debugging
-# ------------------------------------------------------------------------------
-
-when false:
-  proc `$`(rc: KeyedQueuePair[uint,uint]): string =
-    "(" & $rc.key & "," & $rc.data & ")"
-
-  proc `$`(rc: Result[KeyedQueuePair[uint,uint],void]): string =
-    result = "<"
-    if rc.isOK:
-      result &= $rc.value.key & "," & $rc.value.data
-    result &= ">"
-
-  proc `$`(rc: Result[uint,void]): string =
-    result = "<"
-    if rc.isOK:
-      result &= $rc.value
-    result &= ">"
-
-proc say(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
-  if noisy:
-    if args.len == 0:
-      echo "*** ", pfx
-    elif 0 < pfx.len and pfx[^1] != ' ':
-      echo pfx, " ", args.toSeq.join
-    else:
-      echo pfx, args.toSeq.join
-
-# ------------------------------------------------------------------------------
-# Converters
-# ------------------------------------------------------------------------------
-
-proc toValue(n: int): uint =
-  (n + 1000).uint
-
-when false:
-  proc fromValue(n: uint): int =
-    (n - 1000).int
-
-proc toKey(n: int): uint =
-  n.uint
-
-proc fromKey(n: uint): int =
-  n.int
-
-# ------------------------------------------------------------------------------
-# Helpers
-# ------------------------------------------------------------------------------
-
-proc lruValue(lru: var LruCache; n: int): uint =
-  let
-    key = n.toKey
-    rc = lru.q.lruFetch(key)
-  if rc.isOK:
-    return rc.value
-  lru.q.lruAppend(key, key.fromKey.toValue, lru.size)
-
-proc toLruCache(a: openArray[int]): LruCache =
-  result.size = lruCacheLimit
-  for n in a.toSeq.mapIt(it mod lruCacheModulo):
-    doAssert result.lruValue(n) == n.toValue
-
-proc toQueue(a: openArray[int]): KUQueue =
-  for n in a:
-    result[n.toKey] = n.toValue
-
-when false:
-  proc addOrFlushGroupwise(rq: var KUQueue;
-                           grpLen: int; seen: var seq[int]; n: int;
-                           noisy = true) =
-    seen.add n
-    if seen.len < grpLen:
-      return
-
-    # flush group-wise
-    let rqLen = rq.len
-    noisy.say "updateSeen: deleting ", seen.mapIt($it).join(" ")
-    for a in seen:
-      doAssert rq.delete(a.toKey).value.data == a.toValue
-    doAssert rqLen == seen.len + rq.len
-    seen.setLen(0)
-
-proc compileGenericFunctions(rq: var KUQueue) {.used.} =
-  ## Verifies that functions compile, at all
-  rq.del(0)
-  rq[0] = 0 # so `rq[0]` works
-  discard rq[0]
-
-  let ignoreValues {.used.} = (
-    (rq.append(0,0), rq.push(0,0),
-     rq.replace(0,0),
-     rq.prepend(0,0), rq.unshift(0,0),
-     rq.shift, rq.shiftKey, rq.shiftValue,
-     rq.pop, rq.popKey, rq.popValue,
-     rq.delete(0)),
-
-    (rq.hasKey(0), rq.eq(0)),
-
-    (rq.firstKey, rq.secondKey, rq.beforeLastKey, rq.lastKey,
-     rq.nextKey(0), rq.prevKey(0)),
-
-    (rq.first, rq.second, rq.beforeLast, rq.last,
-     rq.next(0), rq.prev(0)),
-
-    (rq.firstValue, rq.secondValue, rq.beforeLastValue, rq.lastValue),
-
-    (rq == rq, rq.len),
-
-    (toSeq(rq.nextKeys), toSeq(rq.nextValues), toSeq(rq.nextPairs),
-     toSeq(rq.prevKeys), toSeq(rq.prevValues), toSeq(rq.prevPairs)))
-
-# ------------------------------------------------------------------------------
-# Test Runners
-# ------------------------------------------------------------------------------
-
-proc runKeyedQueueRlp(noisy = true) =
-  suite "KeyedQueue: RLP stuff":
-
-    test &"Simple rlp serialise + reload":
-      var
-        rp = [1, 2, 3].toQueue # keyList.toQueue
-        rq = rp
-      check rp == rq
-
-      var
-        sp = rlp.encode(rp)
-        sq = rlp.encode(rq)
-      check sp == sq
-
-      var
-        pr = sp.decode(type rp)
-        qr = sq.decode(type rq)
-
-      check pr.verify.isOK
-      check qr.verify.isOK
-
-      check pr == qr
-
-    block:
-      proc append(rw: var RlpWriter; lru: LruCache)
-          {.used, inline, raises: [KeyError].} =
-        rw.append((lru.size,lru.q))
-      proc read(rlp: var Rlp; Q: type LruCache): Q
-          {.inline, raises: [RlpError].} =
-        (result.size, result.q) = rlp.read((type result.size, type result.q))
-
-      test "Rlp serialise & load, append":
-        block:
-          var
-            c1 = keyList.toLruCache
-            s1 = rlp.encode(c1)
-            c2 = newSeq[int]().toLruCache
-
-          noisy.say &"serialised[{s1.len}]: {s1}"
-          c2.q.clear
-          check c1 != c2
-          check c1.q.verify.isOK
-          check c2.q.verify.isOK
-
-          c2 = s1.decode(type c2)
-          check c1 == c2
-          check c2.q.verify.isOK
-
-          noisy.say &"c2Specs: {c2.size} {c2.q.firstKey} {c2.q.lastKey} ..."
-          check s1 == rlp.encode(c2)
-
-        block:
-          var
-            c1 = keyList.toLruCache
-            value = c1.lruValue(77)
-            queue = toSeq(c1.q.nextPairs).mapIt(it.key)
-            values {.used.} = toSeq(c1.q.nextPairs).mapIt(it.data)
-
-          noisy.say &"c1: append {value} => {queue}"
-          var
-            s1 = rlp.encode(c1)
-            c2 = keyList.toLruCache
-
-          noisy.say &"serialised[{s1.len}]: {s1}"
-          c2.q.clear
-          check c1 != c2
-          check c1.q.verify.isOK
-          check c2.q.verify.isOK
-
-          c2 = s1.decode(type c2)
-          check c1 == c2
-          noisy.say &"c2Specs: {c2.size} {c2.q.firstKey} {c2.q.lastKey} ..."
-          check s1 == rlp.encode(c2)
-          check c2.q.verify.isOK
-
-# ------------------------------------------------------------------------------
-# Main function(s)
-# ------------------------------------------------------------------------------
-
-proc keyedQueueRlpMain*(noisy = defined(debug)) =
-  noisy.runKeyedQueueRlp
-
-when isMainModule:
-  let noisy = defined(debug)
-  noisy.runKeyedQueueRlp
-
-# ------------------------------------------------------------------------------
-# End
-# ------------------------------------------------------------------------------
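With the RLP round trip gone, the only queue behaviour the code base still depends on here is the LRU pattern used by `ecRecover` above. A minimal sketch (not part of this commit) of a smoke test for just that pattern, using only the `lruFetch`/`lruAppend` calls visible in the diffs; the module name and test wording are hypothetical:

# test_keyed_queue_lru.nim (hypothetical) -- exercises the LRU pattern only.
import
  stew/keyed_queue,
  unittest2

type LruCache = object
  size: int
  q: KeyedQueue[uint,uint]

proc lruValue(lru: var LruCache; n: int): uint =
  ## Return the cached value for `n`, computing `n + 1000` on a miss
  ## (same convention as the deleted test module above).
  let key = n.uint
  let rc = lru.q.lruFetch(key)
  if rc.isOk:
    return rc.value
  lru.q.lruAppend(key, (n + 1000).uint, lru.size)

suite "KeyedQueue: LRU only":
  test "Cache stays bounded while values round-trip":
    var lru = LruCache(size: 10)
    for n in 0 ..< 100:
      check lru.lruValue(n) == (n + 1000).uint
    # lruAppend is expected to trim the queue to at most `size` entries
    check lru.q.len == 10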