mirror of
https://github.com/status-im/nimbus-eth1.git
synced 2025-01-13 13:55:45 +00:00
c47f021596
* Disable `TransactionID` related functions from `state_db.nim` why: Functions `getCommittedStorage()` and `updateOriginalRoot()` from the `state_db` module are nowhere used. The emulation of a legacy `TransactionID` type functionality is administratively expensive to provide by `Aristo` (the legacy DB version is only partially implemented, anyway). As there is no other place where `TransactionID`s are used, they will not be provided by the `Aristo` variant of the `CoreDb`. For the legacy DB API, nothing will change. * Fix copyright headers in source code * Get rid of compiler warning * Update Aristo code, remove unused `merge()` variant, export `hashify()` why: Adapt to upcoming `CoreDb` wrapper * Remove synced tx feature from `Aristo` why: + This feature allowed to synchronise transaction methods like begin, commit, and rollback for a group of descriptors. + The feature is over engineered and not needed for `CoreDb`, neither is it complete (some convergence features missing.) * Add debugging helpers to `Kvt` also: Update database iterator, add count variable yield argument similar to `Aristo`. * Provide optional destructors for `CoreDb` API why; For the upcoming Aristo wrapper, this allows to control when certain smart destruction and update can take place. The auto destructor works fine in general when the storage/cache strategy is known and acceptable when creating descriptors. * Add update option for `CoreDb` API function `hash()` why; The hash function is typically used to get the state root of the MPT. Due to lazy hashing, this might be not available on the `Aristo` DB. So the `update` function asks for re-hashing the gurrent state changes if needed. * Update API tracking log mode: `info` => `debug * Use shared `Kvt` descriptor in new Ledger API why: No need to create a new descriptor all the time
181 lines
4.9 KiB
Nim
181 lines
4.9 KiB
Nim
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
  std/[json, os, strutils, typetraits],
  unittest2,
  json_rpc/[rpcserver, rpcclient],
  web3/[engine_api_types],
  ../nimbus/sync/protocol,
  ../nimbus/rpc,
  ../nimbus/common,
  ../nimbus/config,
  ../nimbus/core/[sealer, tx_pool, chain],
  ../nimbus/beacon/[beacon_engine, payload_queue],
  ./test_helpers
const
  # JSON fixtures that drive the scripted merge/engine-API test below.
  baseDir = "tests" / "merge"
  paramsFile = baseDir / "params.json"  # custom network definition
  stepsFile = baseDir / "steps.json"    # scripted RPC request/response steps
type
  Step = ref object
    ## One scripted engine-API call: the JSON-RPC method to invoke, its
    ## parameters, and either the expected response or an error flag.
    name: string      # human-readable label shown in the test name
    meth: string      # JSON-RPC method, e.g. "engine_newPayloadV1"
    params: JsonNode  # positional JSON-RPC parameters
    expect: JsonNode  # expected response (compared case-insensitively)
    error: bool       # true when the call is expected to fail

  Steps = ref object
    ## Ordered list of steps parsed from `stepsFile`.
    list: seq[Step]
proc parseStep(s: Step, node: JsonNode) =
  ## Populate `s` from a single JSON step description.
  ## Any unrecognised key aborts the test run outright.
  for key, val in node:
    case key
    of "name":
      s.name = val.getStr()
    of "method":
      s.meth = val.getStr()
    of "params":
      s.params = val
    of "expect":
      s.expect = val
    of "error":
      # The mere presence of the key marks the step as error-expecting;
      # the attached value is ignored.
      s.error = true
    else:
      doAssert(false, "unknown key: " & key)
proc parseSteps(node: JsonNode): Steps =
  ## Parse every element of the JSON array `node` into a `Step` and
  ## collect them into a fresh `Steps` container.
  result = Steps(list: @[])
  for item in node:
    let step = Step()
    step.parseStep(item)
    result.list.add step
proc forkChoiceUpdate(step: Step, client: RpcClient, testStatusIMPL: var TestStatus) =
  ## Send a forkchoiceUpdated request and compare the case-normalised
  ## JSON response with the expectation. A JSON `null` second parameter
  ## (the optional payload attributes) is stripped before sending.
  if step.params[1].kind == JNull:
    step.params.elems.setLen(1)

  let response = waitFor client.call(step.meth, step.params)
  check toLowerAscii($response) == toLowerAscii($step.expect)
proc getPayload(step: Step, client: RpcClient, testStatusIMPL: var TestStatus) =
  ## Request a payload and compare against the expectation. When the
  ## server raises instead, the step must carry the `error` flag.
  try:
    let response = waitFor client.call(step.meth, step.params)
    check toLowerAscii($response) == toLowerAscii($step.expect)
  except CatchableError:
    check step.error == true
proc newPayload(step: Step, client: RpcClient, testStatusIMPL: var TestStatus) =
  ## Submit a new payload and compare the case-normalised JSON response
  ## with the scripted expectation.
  let response = waitFor client.call(step.meth, step.params)
  check toLowerAscii($response) == toLowerAscii($step.expect)
proc runTest(steps: Steps) =
  ## Boot a complete in-memory node -- legacy core DB, tx pool, sealing
  ## engine and a socket-based JSON-RPC server -- then replay each
  ## scripted step against it.
  let
    config = makeConfig(@["--custom-network:" & paramsFile])
    ctx = newEthContext()
    ethNode = setupEthNode(config, ctx, eth)
    com = CommonRef.new(
      newCoreDbRef LegacyDbMemory,
      config.pruneMode == PruneMode.Full,
      config.networkId,
      config.networkParams
    )
    chainRef = newChain(com)

  com.initializeEmptyDb()

  var
    rpcServer = newRpcSocketServer(["127.0.0.1:" & $config.rpcPort])
    client = newRpcSocketClient()
    txPool = TxPoolRef.new(com, config.engineSigner)
    sealingEngine = SealingEngineRef.new(
      chainRef, ctx, config.engineSigner,
      txPool, EnginePostMerge
    )
    beaconEngine = BeaconEngineRef.new(txPool, chainRef)

  # Expose both the regular eth API and the engine API on the server.
  setupEthRpc(ethNode, ctx, com, txPool, rpcServer)
  setupEngineAPI(beaconEngine, rpcServer)

  sealingEngine.start()
  rpcServer.start()
  waitFor client.connect("127.0.0.1", config.rpcPort)

  suite "Engine API tests":
    for idx, s in steps.list:
      test $idx & " " & s.name:
        # Dispatch on the JSON-RPC method named in the step script.
        case s.meth
        of "engine_forkchoiceUpdatedV1":
          forkChoiceUpdate(s, client, testStatusIMPL)
        of "engine_getPayloadV1":
          getPayload(s, client, testStatusIMPL)
        of "engine_newPayloadV1":
          newPayload(s, client, testStatusIMPL)
        else:
          doAssert(false, "unknown method: " & s.meth)

  # Tear everything down in reverse order of construction.
  waitFor client.close()
  waitFor sealingEngine.stop()
  rpcServer.stop()
  waitFor rpcServer.closeWait()
proc testEngineAPI() =
  ## Load the scripted steps from disk and replay them against a
  ## freshly booted in-memory node.
  let steps = parseSteps(parseJSON(readFile(stepsFile)))
  runTest(steps)
proc toId(x: int): PayloadId =
  ## Build a `PayloadId` whose last byte is the (truncated) value of
  ## `x`; all other bytes are zero.
  var raw: distinctBase PayloadId
  raw[^1] = x.byte
  PayloadId(raw)
proc `==`(a, b: Quantity): bool =
  ## Compare two JSON-RPC quantities by their underlying numeric value.
  a.uint64 == b.uint64
proc testEngineApiSupport() =
  ## Unit tests for the payload/header queue backing the engine API.
  var queue = PayloadQueue()
  let
    id1 = toId(1)
    id2 = toId(2)
    payload1 = ExecutionPayloadV1(gasLimit: Quantity 100)
    payload2 = ExecutionPayloadV1(gasLimit: Quantity 101)
    header1 = common.BlockHeader(gasLimit: 100)
    header2 = common.BlockHeader(gasLimit: 101)
    hash1 = header1.blockHash
    hash2 = header2.blockHash

  suite "Test engine api support":
    test "test payload queue":
      # Store two payloads, read them back, and verify that both the
      # payloads and their block values round-trip intact.
      queue.put(id1, 123.u256, payload1)
      queue.put(id2, 456.u256, payload2)
      var gotPayload1, gotPayload2: ExecutionPayloadV1
      var blockValue1, blockValue2: UInt256
      check queue.get(id1, blockValue1, gotPayload1)
      check queue.get(id2, blockValue2, gotPayload2)
      check gotPayload1.gasLimit == payload1.gasLimit
      check gotPayload2.gasLimit == payload2.gasLimit
      check blockValue1 == 123.u256
      check blockValue2 == 456.u256

    test "test header queue":
      # Same round-trip check for headers keyed by block hash.
      queue.put(hash1, header1)
      queue.put(hash2, header2)
      var gotHeader1, gotHeader2: common.BlockHeader
      check queue.get(hash1, gotHeader1)
      check queue.get(hash2, gotHeader2)
      check gotHeader1.gasLimit == header1.gasLimit
      check gotHeader2.gasLimit == header2.gasLimit
proc mergeMain*() =
  ## Entry point: run the scripted engine-API tests, then the
  ## payload-queue unit tests.
  testEngineAPI()
  testEngineApiSupport()
# Allow running this test module standalone.
when isMainModule:
  mergeMain()