# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Rocksdb backend for Aristo DB
## =============================
##
## The iterators provided here are currently available only by direct
## backend access
## ::
##   import
##     aristo/aristo_init,
##     aristo/aristo_init/aristo_rocksdb
##
##   let rc = AristoDb.init(BackendRocksDB, "/var/tmp")
##   if rc.isOk:
##     let be = rc.value.to(RdbBackendRef)
##     for (vid, vtx) in be.walkVtx:
##       ...
##
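## The raw `walk()` iterator below follows the same pattern. A minimal sketch,
## assuming the same setup as above:
## ::
##   for (pfx, xid, data) in be.walk:
##     discard (pfx, xid, data)   # records in serialised form, per sub-table
##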
{.push raises: [].}

import
  eth/common,
  rocksdb,
  results,
  ../aristo_desc,
  ../aristo_desc/desc_backend,
  ../aristo_blobify,
  ./init_common,
  ./rocks_db/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk],
  ../../opts

const
  extraTraceMessages = false
    ## Enables additional logging noise

type
  RdbBackendRef* = ref object of TypedBackendRef
    rdb: RdbInst                  ## Allows low level access to database

  RdbPutHdlRef = ref object of TypedPutHdlRef

when extraTraceMessages:
  import chronicles

  logScope:
    topics = "aristo-backend"

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc newSession(db: RdbBackendRef): RdbPutHdlRef =
  new result
  result.TypedPutHdlRef.beginSession db

proc getSession(hdl: PutHdlRef; db: RdbBackendRef): RdbPutHdlRef =
  hdl.TypedPutHdlRef.verifySession db
  hdl.RdbPutHdlRef

proc endSession(hdl: PutHdlRef; db: RdbBackendRef): RdbPutHdlRef =
  hdl.TypedPutHdlRef.finishSession db
  hdl.RdbPutHdlRef
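
# The three helpers above only forward the session bookkeeping of the typed
# backend layer (`beginSession`, `verifySession`, `finishSession` on
# `TypedPutHdlRef`); all RocksDB specific state is kept in `db.rdb`.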

# ------------------------------------------------------------------------------
# Private functions: interface
# ------------------------------------------------------------------------------

proc getVtxFn(db: RdbBackendRef): GetVtxFn =
  result =
    proc(vid: VertexID): Result[VertexRef,AristoError] =

      # Fetch serialised data record
      let vtx = db.rdb.getVtx(vid).valueOr:
        when extraTraceMessages:
          trace logTxt "getVtxFn: failed", vid, error=error[0], info=error[1]
        return err(error[0])

      if vtx.isValid:
        return ok(vtx)

      err(GetVtxNotFound)

proc getKeyFn(db: RdbBackendRef): GetKeyFn =
  result =
    proc(vid: VertexID): Result[HashKey,AristoError] =

      # Fetch serialised data record
      let key = db.rdb.getKey(vid).valueOr:
        when extraTraceMessages:
          trace logTxt "getKeyFn: failed", vid, error=error[0], info=error[1]
        return err(error[0])

      if key.isValid:
        return ok(key)

      err(GetKeyNotFound)
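
# The getter closures above are reached via the backend function table rather
# than called directly. A minimal sketch, assuming `be` is a `BackendRef`
# created by `rocksDbBackend()` below:
#
#   let getVtx = be.getVtxFn       # closure stored in the function table
#   let rc = getVtx(VertexID(1))
#   if rc.isOk:
#     discard rc.value             # decoded `VertexRef`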

proc getTuvFn(db: RdbBackendRef): GetTuvFn =
  result =
    proc(): Result[VertexID,AristoError] =

      # Fetch serialised data record.
      let data = db.rdb.getAdm(AdmTabIdTuv).valueOr:
        when extraTraceMessages:
          trace logTxt "getTuvFn: failed", error=error[0], info=error[1]
        return err(error[0])

      # An empty record defaults to `VertexID(0)`
      if data.len == 0:
        return ok VertexID(0)

      # Decode data record
      result = data.deblobify VertexID

proc getLstFn(db: RdbBackendRef): GetLstFn =
  result =
    proc(): Result[SavedState,AristoError] =

      # Fetch serialised data record.
      let data = db.rdb.getAdm(AdmTabIdLst).valueOr:
        when extraTraceMessages:
          trace logTxt "getLstFn: failed", error=error[0], info=error[1]
        return err(error[0])

      # Decode data record
      data.deblobify SavedState
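
# Both admin getters read one fixed record each from the admin sub-table:
# judging by the return types, `AdmTabIdTuv` holds a serialised `VertexID`
# (top used vertex ID) and `AdmTabIdLst` a serialised `SavedState`.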

# -------------

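# Write path: `putBegFn` opens a RocksDB transaction and hands out a session
# handle, the `put*Fn` closures below stage data against that handle, and
# `putEndFn` commits (or rolls back if an error was stashed meanwhile).
# A minimal sketch, assuming `be: BackendRef` from `rocksDbBackend()` and an
# illustrative vertex `myVtx`:
#
#   let hdl = be.putBegFn().expect "session"
#   be.putVtxFn(hdl, VertexID(1), myVtx)
#   doAssert be.putEndFn(hdl).isOk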
proc putBegFn(db: RdbBackendRef): PutBegFn =
  result =
    proc(): Result[PutHdlRef,AristoError] =
      db.rdb.begin()
      ok db.newSession()

proc putVtxFn(db: RdbBackendRef): PutVtxFn =
  result =
    proc(hdl: PutHdlRef; vid: VertexID; vtx: VertexRef) =
      let hdl = hdl.getSession db
      if hdl.error.isNil:
        db.rdb.putVtx(vid, vtx).isOkOr:
          hdl.error = TypedPutHdlErrRef(
            pfx:  VtxPfx,
            vid:  error[0],
            code: error[1],
            info: error[2])

proc putKeyFn(db: RdbBackendRef): PutKeyFn =
  result =
    proc(hdl: PutHdlRef; vid: VertexID; key: HashKey) =
      let hdl = hdl.getSession db
      if hdl.error.isNil:
        db.rdb.putKey(vid, key).isOkOr:
          hdl.error = TypedPutHdlErrRef(
            pfx:  KeyPfx,
            vid:  error[0],
            code: error[1],
            info: error[2])
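
# Note that the first error wins: once `hdl.error` is set, the remaining
# `put*Fn` calls for this session become no-ops and `putEndFn` rolls the
# transaction back, returning the stashed error code.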

proc putTuvFn(db: RdbBackendRef): PutTuvFn =
  result =
    proc(hdl: PutHdlRef; vs: VertexID) =
      let hdl = hdl.getSession db
      if hdl.error.isNil:
        if vs.isValid:
          db.rdb.putAdm(AdmTabIdTuv, vs.blobify).isOkOr:
            hdl.error = TypedPutHdlErrRef(
              pfx:  AdmPfx,
              aid:  AdmTabIdTuv,
              code: error[1],
              info: error[2])
            return

proc putLstFn(db: RdbBackendRef): PutLstFn =
  result =
    proc(hdl: PutHdlRef; lst: SavedState) =
      let hdl = hdl.getSession db
      if hdl.error.isNil:
        let data = lst.blobify.valueOr:
          hdl.error = TypedPutHdlErrRef(
            pfx:  AdmPfx,
            aid:  AdmTabIdLst,
            code: error)
          return
        db.rdb.putAdm(AdmTabIdLst, data).isOkOr:
          hdl.error = TypedPutHdlErrRef(
            pfx:  AdmPfx,
            aid:  AdmTabIdLst,
            code: error[1],
            info: error[2])
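
# Unlike the vertex ID encoding in `putTuvFn`, `lst.blobify` returns a
# `Result`, so a failed encoding is recorded against `AdmTabIdLst` before
# anything is written.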

proc putEndFn(db: RdbBackendRef): PutEndFn =
  result =
    proc(hdl: PutHdlRef): Result[void,AristoError] =
      let hdl = hdl.endSession db
      if not hdl.error.isNil:
        when extraTraceMessages:
          case hdl.error.pfx:
          of VtxPfx, KeyPfx: trace logTxt "putEndFn: vtx/key failed",
            pfx=hdl.error.pfx, vid=hdl.error.vid, error=hdl.error.code
          of FilPfx: trace logTxt "putEndFn: filter failed",
            pfx=FilPfx, qid=hdl.error.qid, error=hdl.error.code
          of AdmPfx: trace logTxt "putEndFn: admin failed",
            pfx=AdmPfx, aid=hdl.error.aid.uint64, error=hdl.error.code
          of Oops: trace logTxt "putEndFn: oops",
            pfx=hdl.error.pfx, error=hdl.error.code
        db.rdb.rollback()
        return err(hdl.error.code)

      # Commit session
      db.rdb.commit().isOkOr:
        when extraTraceMessages:
          trace logTxt "putEndFn: failed", error=($error[0]), info=error[1]
        return err(error[0])
      ok()

proc closeFn(db: RdbBackendRef): CloseFn =
  result =
    proc(eradicate: bool) =
      db.rdb.destroy(eradicate)

# ------------------------------------------------------------------------------
# Private functions: hosting interface changes
# ------------------------------------------------------------------------------

proc putBegHostingFn(db: RdbBackendRef): PutBegFn =
  result =
    proc(): Result[PutHdlRef,AristoError] =
      db.rdb.begin()
      if db.rdb.trgWriteEvent(db.rdb.session):
        ok db.newSession()
      else:
        when extraTraceMessages:
          trace logTxt "putBegFn: guest trigger aborted session"
        db.rdb.rollback()
        err(RdbGuestInstanceAborted)

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc rocksDbBackend*(
    path: string;
    dbOpts: DbOptionsRef;
    cfOpts: ColFamilyOptionsRef;
    guestCFs: openArray[ColFamilyDescriptor];
      ): Result[(BackendRef, seq[ColFamilyReadWrite]),AristoError] =
  let db = RdbBackendRef(
    beKind: BackendRocksDB)

  # Initialise RocksDB
  let oCfs = block:
    let rc = db.rdb.init(path, dbOpts, cfOpts, guestCFs)
    if rc.isErr:
      when extraTraceMessages:
        trace logTxt "constructor failed",
          error=rc.error[0], info=rc.error[1]
      return err(rc.error[0])
    rc.value()

  db.getVtxFn = getVtxFn db
  db.getKeyFn = getKeyFn db
  db.getTuvFn = getTuvFn db
  db.getLstFn = getLstFn db

  db.putBegFn = putBegFn db
  db.putVtxFn = putVtxFn db
  db.putKeyFn = putKeyFn db
  db.putTuvFn = putTuvFn db
  db.putLstFn = putLstFn db
  db.putEndFn = putEndFn db

  db.closeFn = closeFn db
  ok((db, oCfs))
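
# A minimal construction sketch; `myDbOpts`, `myCfOpts` and `myGuestCFs` are
# placeholders to be prepared with the `rocksdb` package:
#
#   let rc = rocksDbBackend("/var/tmp/aristo", myDbOpts, myCfOpts, myGuestCFs)
#   if rc.isOk:
#     let (be, oCfs) = rc.value   # backend plus column family handles from `init`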

proc rocksDbSetEventTrigger*(
    be: BackendRef;
    hdl: RdbWriteEventCb;
      ): Result[void,AristoError] =
  ## Store event trigger. This also changes the backend type.
  if hdl.isNil:
    err(RdbBeWrTriggerNilFn)
  else:
    let db = RdbBackendRef(be)
    db.rdb.trgWriteEvent = hdl
    db.beKind = BackendRdbHosting
    db.putBegFn = putBegHostingFn db
    ok()
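
# Installing a write-event trigger switches the backend to the hosting
# variant: `beKind` becomes `BackendRdbHosting` and `putBegFn` is replaced by
# `putBegHostingFn`, which consults the trigger before opening a session.
# Sketch, with `myEventCb` an illustrative `RdbWriteEventCb`:
#
#   doAssert be.rocksDbSetEventTrigger(myEventCb).isOk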

proc dup*(db: RdbBackendRef): RdbBackendRef =
  ## Duplicate descriptor shell as needed for API debugging
  new result
  init_common.init(result[], db[])
  result.rdb = db.rdb

# ------------------------------------------------------------------------------
# Public iterators (need direct backend access)
# ------------------------------------------------------------------------------

iterator walk*(
    be: RdbBackendRef;
      ): tuple[pfx: StorageType, xid: uint64, data: Blob] =
  ## Walk over all key-value pairs of the database.
  ##
  ## Non-decodable entries are ignored.
  ##
  for (xid, data) in be.rdb.walkAdm:
    yield (AdmPfx, xid, data)
  for (vid, data) in be.rdb.walkVtx:
    yield (VtxPfx, vid, data)
  for (vid, data) in be.rdb.walkKey:
    yield (KeyPfx, vid, data)

iterator walkVtx*(
    be: RdbBackendRef;
      ): tuple[vid: VertexID, vtx: VertexRef] =
  ## Variant of `walk()` iteration over the vertex sub-table.
  for (vid, data) in be.rdb.walkVtx:
    let rc = data.deblobify VertexRef
    if rc.isOk:
      yield (VertexID(vid), rc.value)

iterator walkKey*(
    be: RdbBackendRef;
      ): tuple[vid: VertexID, key: HashKey] =
  ## Variant of `walk()` iteration over the Merkle hash sub-table.
  for (vid, data) in be.rdb.walkKey:
    let lid = HashKey.fromBytes(data).valueOr:
      continue
    yield (VertexID(vid), lid)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------