avoid unnecessary memory allocations and lookups (#2334)

* use `withValue` instead of `hasKey` + `[]`
* avoid `@` et al
* parse database data inside `onData` instead of making a seq, then parsing
Parent: 5fca57a6d1
Commit: eb041abba7
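The first bullet targets double lookups in `std/tables`: `hasKey` followed by `[]` or `getOrVoid` hashes and probes the table twice, while `withValue` probes once and hands its body a pointer to the found slot. A minimal, self-contained sketch of the difference (the table and values here are hypothetical, not from the patch):

import std/tables

var cache: Table[int, string]
cache[1] = "one"

proc lookupTwice(vid: int): string =
  # two hash probes: one for the membership test, one for the fetch
  if cache.hasKey vid:
    return cache[vid]
  ""

proc lookupOnce(vid: int): string =
  # one hash probe; `item` is a `ptr string` into the table slot
  cache.withValue(vid, item):
    return item[]
  ""

assert lookupTwice(1) == lookupOnce(1)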
@@ -453,7 +453,7 @@ proc buildAccumulator*(f: Era1File): Result[EpochAccumulatorCached, string] =
       HeaderRecord(blockHash: blockHeader.blockHash(), totalDifficulty: totalDifficulty)
     )

-  ok(EpochAccumulatorCached.init(@headerRecords))
+  ok(EpochAccumulatorCached.init(headerRecords))

 proc verify*(f: Era1File): Result[Digest, string] =
   let
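The `@headerRecords` change is the "avoid `@` et al" bullet in action: `@` builds a fresh heap-allocated `seq` from an array or openArray, which is wasted work when the callee only needs to read the elements. A small sketch under that assumption (the `digestLen` proc is hypothetical, standing in for readers such as `init` or `digestTo`):

proc digestLen(data: openArray[byte]): int =
  # stand-in for a read-only consumer of the bytes
  data.len

let fixed = [1'u8, 2, 3, 4]

let viaCopy = digestLen(@fixed)  # allocates a 4-item seq, then discards it
let viaView = digestLen(fixed)   # openArray borrows the array: no allocation

assert viaCopy == viaView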
@@ -30,7 +30,6 @@ import
   eth/common,
   rocksdb,
   results,
-  ../aristo_constants,
   ../aristo_desc,
   ../aristo_desc/desc_backend,
   ../aristo_blobify,
@@ -32,9 +32,6 @@ type
     basePath*: string            ## Database directory
     noFq*: bool                  ## No filter queues available

-  RdbKey* = array[1 + sizeof VertexID, byte]
-    ## Sub-table key, <pfx> + VertexID
-
   # Alien interface
   RdbGuest* = enum
     ## The guest CF was worth a try, but there are better solutions and this
@@ -64,9 +64,10 @@ proc getKey*(
     return ok(move(rc.value))

   # Otherwise fetch from backend database
-  var res: Blob
+  var res: Result[HashKey,(AristoError,string)]
   let onData = proc(data: openArray[byte]) =
-    res = @data
+    res = HashKey.fromBytes(data).mapErr(proc(): auto =
+      (RdbHashKeyExpected,""))

   let gotData = rdb.keyCol.get(vid.toOpenArray, onData).valueOr:
     const errSym = RdbBeDriverGetKeyError
@@ -75,16 +76,13 @@ proc getKey*(
     return err((errSym,error))

   # Correct result if needed
-  let key = block:
-    if gotData:
-      HashKey.fromBytes(res).valueOr:
-        return err((RdbHashKeyExpected,""))
-    else:
-      VOID_HASH_KEY
-
+  if not gotData:
+    res = ok(VOID_HASH_KEY)
+  elif res.isErr():
+    return res # Parsing failed

   # Update cache and return
-  ok rdb.rdKeyLru.lruAppend(vid, key, RdKeyLruMaxSize)
+  ok rdb.rdKeyLru.lruAppend(vid, res.value(), RdKeyLruMaxSize)

 proc getVtx*(
     rdb: var RdbInst;
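Both `getKey` and `getVtx` now follow the third bullet: the RocksDB driver hands `onData` a borrowed view of the value, and parsing happens inside that callback, so the bytes are never copied into an intermediate `Blob`. A minimal sketch of the shape of this pattern, using a `Table`-backed store and `Option` in place of the patch's `Result` types (all names here are hypothetical):

import std/[options, parseutils, tables]

var store = {"vid1": "42"}.toTable

proc get(key: string; onData: proc(data: openArray[char])): bool =
  # stand-in for `rdb.keyCol.get`: invokes the callback with a borrowed view
  store.withValue(key, item):
    onData(item[])
    return true
  false

proc getParsed(key: string): Option[int] =
  var res: Option[int]
  let onData = proc(data: openArray[char]) =
    # parse in place; only the parsed value outlives the callback
    var v: int
    if data.len > 0 and parseInt(data, v) == data.len:
      res = some(v)
  if not get(key, onData):
    return none(int)  # missing key, analogous to `VOID_HASH_KEY`
  res

assert getParsed("vid1") == some(42)
assert getParsed("nope") == none(int)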
@@ -96,9 +94,10 @@ proc getVtx*(
     return ok(move(rc.value))

   # Otherwise fetch from backend database
-  var res: Blob
+  var res: Result[VertexRef,(AristoError,string)]
   let onData = proc(data: openArray[byte]) =
-    res = @data
+    res = data.deblobify(VertexRef).mapErr(proc(error: AristoError): auto =
+      (error,""))

   let gotData = rdb.vtxCol.get(vid.toOpenArray, onData).valueOr:
     const errSym = RdbBeDriverGetVtxError
@@ -106,15 +105,13 @@ proc getVtx*(
     trace logTxt "getVtx", vid, error=errSym, info=error
     return err((errSym,error))

-  var vtx = VertexRef(nil)
-  if gotData:
-    let rc = res.deblobify VertexRef
-    if rc.isErr:
-      return err((rc.error,""))
-    vtx = rc.value
+  if not gotData:
+    res = ok(VertexRef(nil))
+  elif res.isErr():
+    return res # Parsing failed

   # Update cache and return
-  ok rdb.rdVtxLru.lruAppend(vid, vtx, RdVtxLruMaxSize)
+  ok rdb.rdVtxLru.lruAppend(vid, res.value(), RdVtxLruMaxSize)

 # ------------------------------------------------------------------------------
 # End
@@ -62,39 +62,39 @@ func nLayersKey*(db: AristoDbRef): int =
 # Public functions: getter variants
 # ------------------------------------------------------------------------------

-func layersGetVtx*(db: AristoDbRef; vid: VertexID): Result[VertexRef,void] =
+func layersGetVtx*(db: AristoDbRef; vid: VertexID): Opt[VertexRef] =
   ## Find a vertex on the cache layers. An `ok()` result might contain a
   ## `nil` vertex if it is stored on the cache that way.
   ##
-  if db.top.delta.sTab.hasKey vid:
-    return ok(db.top.delta.sTab.getOrVoid vid)
+  db.top.delta.sTab.withValue(vid, item):
+    return Opt.some(item[])

   for w in db.rstack:
-    if w.delta.sTab.hasKey vid:
-      return ok(w.delta.sTab.getOrVoid vid)
+    w.delta.sTab.withValue(vid, item):
+      return Opt.some(item[])

-  err()
+  Opt.none(VertexRef)

 func layersGetVtxOrVoid*(db: AristoDbRef; vid: VertexID): VertexRef =
   ## Simplified version of `layersGetVtx()`
   db.layersGetVtx(vid).valueOr: VertexRef(nil)


-func layersGetKey*(db: AristoDbRef; vid: VertexID): Result[HashKey,void] =
+func layersGetKey*(db: AristoDbRef; vid: VertexID): Opt[HashKey] =
   ## Find a hash key on the cache layers. An `ok()` result might contain a void
   ## hash key if it is stored on the cache that way.
   ##
-  if db.top.delta.kMap.hasKey vid:
+  db.top.delta.kMap.withValue(vid, item):
     # This is ok regardless of the `dirty` flag. If this vertex has become
     # dirty, there is an empty `kMap[]` entry on this layer.
-    return ok(db.top.delta.kMap.getOrVoid vid)
+    return Opt.some(item[])

   for w in db.rstack:
-    if w.delta.kMap.hasKey vid:
+    w.delta.kMap.withValue(vid, item):
       # Same reasoning as above regarding the `dirty` flag.
-      return ok(w.delta.kMap.getOrVoid vid)
+      return ok(item[])

-  err()
+  Opt.none(HashKey)

 func layersGetKeyOrVoid*(db: AristoDbRef; vid: VertexID): HashKey =
   ## Simplified version of `layersGetkey()`
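In nim-results, `Opt[T]` is an alias for `Result[T, void]`, so the return-type switch is a rename-level change that makes the "value or nothing, no error detail" contract explicit; it is also why the one remaining `return ok(item[])` in `layersGetKey` still compiles, and why `valueOr` keeps working in the `...OrVoid` helpers. A tiny sketch, assuming the nim-results package (`findEven` is hypothetical):

import results

proc findEven(xs: openArray[int]): Opt[int] =
  # return the first even element, or nothing
  for x in xs:
    if x mod 2 == 0:
      return Opt.some(x)
  Opt.none(int)

let hit = findEven([1, 3, 4]).valueOr: 0   # -> 4
let miss = findEven([1, 3, 5]).valueOr: 0  # -> 0
assert hit == 4 and miss == 0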
@@ -162,9 +162,9 @@ proc mergeProof*(
     ## Check for embedded nodes, i.e. fully encoded node instead of a hash.
     ## They need to be treated as full nodes, here.
     if key.isValid and key.len < 32:
-      let lid = @(key.data).digestTo(HashKey)
+      let lid = key.data.digestTo(HashKey)
       if not seen.hasKey lid:
-        let node = @(key.data).decode(NodeRef)
+        let node = key.data.decode(NodeRef)
         discard todo.append node
         seen[lid] = node
@@ -31,29 +31,34 @@ func nLayersKeys*(db: KvtDbRef): int =
 # Public functions: get function
 # ------------------------------------------------------------------------------

-func layersHasKey*(db: KvtDbRef; key: openArray[byte]): bool =
+func layersHasKey*(db: KvtDbRef; key: openArray[byte]|seq[byte]): bool =
   ## Return `true` id the argument key is cached.
   ##
-  if db.top.delta.sTab.hasKey @key:
+  when key isnot seq[byte]:
+    let key = @key
+
+  if db.top.delta.sTab.hasKey key:
     return true

   for w in db.rstack:
-    if w.delta.sTab.hasKey @key:
+    if w.delta.sTab.hasKey key:
       return true


-func layersGet*(db: KvtDbRef; key: openArray[byte]): Result[Blob,void] =
+func layersGet*(db: KvtDbRef; key: openArray[byte]|seq[byte]): Opt[Blob] =
   ## Find an item on the cache layers. An `ok()` result might contain an
   ## empty value if it is stored on the cache that way.
   ##
-  if db.top.delta.sTab.hasKey @key:
-    return ok(db.top.delta.sTab.getOrVoid @key)
+  when key isnot seq[byte]:
+    let key = @key
+
+  db.top.delta.sTab.withValue(key, item):
+    return Opt.some(item[])

   for w in db.rstack:
-    if w.delta.sTab.hasKey @key:
-      return ok(w.delta.sTab.getOrVoid @key)
+    w.delta.sTab.withValue(key, item):
+      return Opt.some(item[])

-  err()
+  Opt.none(Blob)

 # ------------------------------------------------------------------------------
 # Public functions: put function
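The `openArray[byte]|seq[byte]` signature plus the `when key isnot seq[byte]` shim is what removes the `@key` copies here: the table keys are `seq[byte]`, so a raw `openArray` caller still pays for one conversion, but a caller that already owns a `seq[byte]` now reaches the lookup with no copy at all, and the `when` branch is resolved at compile time per instantiation. A self-contained sketch of the trick (table and payload hypothetical):

import std/tables

var sTab: Table[seq[byte], string]
sTab[@[1'u8, 2, 3]] = "payload"

proc fetch(key: openArray[byte] | seq[byte]): string =
  when key isnot seq[byte]:
    let key = @key  # one unavoidable copy for openArray callers
  # seq[byte] callers reach this lookup with zero copies
  sTab.withValue(key, item):
    return item[]
  ""

let owned = @[1'u8, 2, 3]
assert fetch(owned) == "payload"         # seq: no copy
assert fetch([1'u8, 2, 3]) == "payload"  # array via openArray: one copy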
@@ -106,7 +106,7 @@ proc hasKey*(
   if key.len == 0:
     return err(KeyInvalid)

-  if db.layersHasKey @key:
+  if db.layersHasKey key:
     return ok(true)

   let rc = db.getBe key
@@ -12,16 +12,10 @@ from pandas.plotting import register_matplotlib_converters
 register_matplotlib_converters()


-def readStats(name: str, min_block_number: int):
+def readStats(name: str):
     df = pd.read_csv(name).convert_dtypes()
     # at least one item - let it lag in the beginning until we reach the min
     # block number or the table will be empty
-    if df.block_number.iloc[-1] > min_block_number + df.block_number.iloc[0]:
-        cutoff = min(
-            df.block_number.iloc[-1] - min_block_number,
-            min_block_number,
-        )
-        df = df[df.block_number >= cutoff]
     df.set_index("block_number", inplace=True)
     df.time /= 1000000000
     df.drop(columns=["gas"], inplace=True)
@@ -72,19 +66,27 @@ parser.add_argument(
     help="Skip block blocks below the given number",
 )
 args = parser.parse_args()
+min_block_number = args.min_block_number

-baseline = readStats(args.baseline, 0)
-contender = readStats(args.contender, args.min_block_number)
+baseline = readStats(args.baseline)
+contender = readStats(args.contender)

 # Pick out the rows to match - a more sophisticated version of this would
 # interpolate, perhaps - also, maybe should check for non-matching block/tx counts
 df = baseline.merge(contender, on=("block_number", "blocks", "txs"))
+df.reset_index(inplace=True)
+
+if df.block_number.iloc[-1] > min_block_number + df.block_number.iloc[0]:
+    cutoff = min(
+        df.block_number.iloc[-1] - min_block_number,
+        min_block_number,
+    )
+    df = df[df.block_number >= cutoff]

 df["bpsd"] = ((df.bps_y - df.bps_x) / df.bps_x)
 df["tpsd"] = ((df.tps_y - df.tps_x) / df.tps_x.replace(0, 1))
 df["timed"] = (df.time_y - df.time_x) / df.time_x

-df.reset_index(inplace=True)
-
 if args.plot:
     plt.rcParams["axes.grid"] = True
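A note on the stats-script change: the min-block cutoff used to be applied inside `readStats`, i.e. to each CSV independently; it now runs once, after the baseline/contender merge, so the window is computed over the joined rows and both series are trimmed to the same block range (with `reset_index` moved ahead of the cutoff so `block_number` is available as a column at that point).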
@@ -1 +1 @@
-Subproject commit 104132fd0217e846b04dd26a5fbe3e43a4929a05
+Subproject commit 28743363fff5d751b9dd1809b0c7ccf332eb8d79