mirror of
https://github.com/status-im/nimbus-eth1.git
synced 2025-01-09 11:55:57 +00:00
221e6c9e2f
* Nimbus folder environment update details: * Integrated `CoreDbRef` for the sources in the `nimbus` sub-folder. * The `nimbus` program does not compile yet as it needs the updates in the parallel `stateless` sub-folder. * Stateless environment update details: * Integrated `CoreDbRef` for the sources in the `stateless` sub-folder. * The `nimbus` program compiles now. * Premix environment update details: * Integrated `CoreDbRef` for the sources in the `premix` sub-folder. * Fluffy environment update details: * Integrated `CoreDbRef` for the sources in the `fluffy` sub-folder. * Tools environment update details: * Integrated `CoreDbRef` for the sources in the `tools` sub-folder. * Nodocker environment update details: * Integrated `CoreDbRef` for the sources in the `hive_integration/nodocker` sub-folder. * Tests environment update details: * Integrated `CoreDbRef` for the sources in the `tests` sub-folder. * The unit tests compile and run cleanly now. * Generalise `CoreDbRef` to any `select_backend` supported database why: Generalisation was just missed due to overcoming some compiler oddity which was tied to rocksdb for testing. * Suppress compiler warning for `newChainDB()` why: Warning was added to this function which must be wrapped so that any `CatchableError` is re-raised as `Defect`. * Split off persistent `CoreDbRef` constructor into separate file why: This allows to compile a memory only database version without linking the backend library. * Use memory `CoreDbRef` database by default detail: Persistent DB constructor needs to import `db/core_db/persistent why: Most tests use memory DB anyway. This avoids linking `-lrocksdb` or any other backend by default. * fix `toLegacyBackend()` availability check why: got garbled after memory/persistent split. * Clarify raw access to MPT for snap sync handler why: Logically, `kvt` is not the raw access for the hexary trie (although this holds for the legacy database)
118 lines · 3.1 KiB · Nim
# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import
  std/[os, json, times],
  eth/p2p,
  ../../../nimbus/sync/protocol,
  ../../../nimbus/config,
  ../../../nimbus/graphql/ethapi,
  ../../../tests/test_helpers,
  ../../../nimbus/core/[tx_pool, block_import],
  ../../../nimbus/common,
  graphql, ../sim_utils

const
  # Locations of the hive GraphQL fixtures, relative to the repo root.
  baseFolder = "hive_integration" / "nodocker" / "graphql"
  blocksFile = baseFolder / "init" / "blocks.rlp"    # RLP-encoded chain to import
  genesisFile = baseFolder / "init" / "genesis.json" # custom network genesis
  caseFolder = baseFolder / "testcases"              # JSON test case files

template testCond(expr: untyped) =
  ## Mark the enclosing test proc as failed when `expr` does not hold,
  ## by assigning to the caller's implicit `result`. Execution continues
  ## after the check (no early return), so later checks still run.
  if not (expr):
    result = TestStatus.Failed

proc processNode(ctx: GraphqlRef, node: JsonNode, fileName: string): TestStatus =
  ## Run one hive GraphQL test case described by `node`: parse and execute
  ## the query in `node["request"]`, then compare the outcome against the
  ## expected HTTP `statusCode` and the list of acceptable `responses`.
  ## `fileName` identifies the fixture (currently unused in the body).
  let request = node["request"]
  let responses = node["responses"]
  let statusCode = node["statusCode"].getInt()

  # Remember the name counter so any names registered while handling this
  # query can be rolled back afterwards, keeping `ctx` reusable.
  let savePoint = ctx.getNameCounter()
  let res = ctx.parseQuery(request.getStr())

  result = TestStatus.OK
  block:
    if res.isErr:
      # A parse failure is only acceptable when the fixture expects a
      # non-200 status code.
      if statusCode == 200:
        debugEcho res.error
      testCond statusCode != 200
      break

    let resp = JsonRespStream.new()
    let r = ctx.executeRequest(respStream(resp))
    if r.isErr:
      # Likewise, an execution failure must have been expected.
      if statusCode == 200:
        debugEcho r.error
      testCond statusCode != 200
      break

    testCond statusCode == 200
    testCond r.isOk

    # The produced response must match at least one expected candidate.
    let nimbus = resp.getString()
    var resultOK = false
    for x in responses:
      let hive = $(x["data"])
      if nimbus == hive:
        resultOK = true
        break

    testCond resultOK
    if not resultOK:
      # Dump both sides to ease debugging a mismatch.
      debugEcho "NIMBUS RESULT: ", nimbus
      for x in responses:
        let hive = $(x["data"])
        debugEcho "HIVE RESULT: ", hive

  # Clean up per-query state so the shared context stays pristine for the
  # next test case.
  ctx.purgeQueries()
  ctx.purgeNames(savePoint)

proc main() =
  ## Build an in-memory Nimbus chain from the hive genesis and block
  ## fixtures, then run every JSON test case in `caseFolder` against the
  ## GraphQL API and print a summary of pass/fail counts.
  let
    conf = makeConfig(@["--custom-network:" & genesisFile])
    ethCtx = newEthContext()
    ethNode = setupEthNode(conf, ethCtx, eth)
    com = CommonRef.new(newCoreDbRef LegacyDbMemory,
      pruneTrie = false,
      conf.networkId,
      conf.networkParams
    )

  com.initializeEmptyDb()
  let txPool = TxPoolRef.new(com, conf.engineSigner)
  discard importRlpBlock(blocksFile, com)
  let ctx = setupGraphqlContext(com, ethNode, txPool)

  var stat: SimStat
  let start = getTime()

  # txPool must be informed of active head
  # so it can know the latest account state
  # e.g. "sendRawTransaction Nonce too low" case
  let head = com.db.getCanonicalHead()
  doAssert txPool.smartHead(head)

  for fileName in walkDirRec(
      caseFolder, yieldFilter = {pcFile,pcLinkToFile}):
    if not fileName.endsWith(".json"):
      continue

    # Only the base name is used when reporting results.
    let (_, name) = fileName.splitPath()
    let node = parseFile(fileName)
    let status = ctx.processNode(node, fileName)
    stat.inc(name, status)

  # simulate the real simulator
  txPool.disposeAll()

  let elpd = getTime() - start
  print(stat, elpd, "graphql")

# Script entry point: run the full simulator.
main()