Wiring ForkedChainRef to other components
- Disable majority of hive simulators
- Only enable pyspec_sim for the moment
- The pyspec_sim is using a smaller RPC service wired to ForkedChainRef (see the wiring sketch below)
- The RPC service will gradually grow
parent 3d3831dde8
commit ef8a065fb8
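The hunks below swap the old ChainRef plumbing for ForkedChainRef. For orientation, the new wiring in the pyspec simulator looks roughly like this (condensed from the setupELClient and server_api hunks further down; `conf` and `genesisHeader` are assumed to be in scope, so this is a sketch rather than a drop-in file):

```nim
# Sketch only: condensed from the test_env.nim changes in this commit.
let
  memDB  = newCoreDbRef DefaultDbMemory
  com    = CommonRef.new(memDB, conf)
  chain  = newForkedChain(com, genesisHeader)        # ForkedChainRef instead of ChainRef
  txPool = TxPoolRef.new(com)
  beaconEngine = BeaconEngineRef.new(txPool, chain)  # engine now holds a ForkedChainRef
  serverApi    = newServerAPI(chain)                 # the smaller RPC service
  rpcServer    = newRpcHttpServer(["127.0.0.1:0"])
  rpcClient    = newRpcHttpClient()

setupServerAPI(serverApi, rpcServer)   # eth_getBalance, eth_getStorageAt, eth_getTransactionCount
setupEngineAPI(beaconEngine, rpcServer)
rpcServer.start()
waitFor rpcClient.connect("127.0.0.1", rpcServer.localAddress[0].port, false)
```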
@@ -19,17 +19,18 @@ ENV_SCRIPT="vendor/nimbus-build-system/scripts/env.sh"
 # nimbus_db_backend:none -> we only use memory db in simulators
 NIM_FLAGS="c -d:release"

-${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/engine/engine_sim
-${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/consensus/consensus_sim
-${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/graphql/graphql_sim
-${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/rpc/rpc_sim
+# ${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/engine/engine_sim
+# ${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/consensus/consensus_sim
+# ${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/graphql/graphql_sim
+# ${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/rpc/rpc_sim
 ${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/pyspec/pyspec_sim

-${SIM_DIR}/engine/engine_sim
-${SIM_DIR}/consensus/consensus_sim
-${SIM_DIR}/graphql/graphql_sim
-${SIM_DIR}/rpc/rpc_sim
+# ${SIM_DIR}/engine/engine_sim
+# ${SIM_DIR}/consensus/consensus_sim
+# ${SIM_DIR}/graphql/graphql_sim
+# ${SIM_DIR}/rpc/rpc_sim
 ${SIM_DIR}/pyspec/pyspec_sim

 echo "## ${1}" > simulators.md
-cat engine.md consensus.md graphql.md rpc.md pyspec.md >> simulators.md
+# cat engine.md consensus.md graphql.md rpc.md pyspec.md >> simulators.md
+cat pyspec.md >> simulators.md
@@ -116,8 +116,7 @@ proc validatePostState(node: JsonNode, t: TestEnv): bool =

proc runTest(node: JsonNode, network: string): TestStatus =
  let conf = getChainConfig(network)
-  var t = TestEnv(conf: makeTestConfig())
-  t.setupELClient(conf, node)
+  var env = setupELClient(conf, node)

  let blks = node["blocks"]
  var
@@ -143,7 +142,7 @@ proc runTest(node: JsonNode, network: string): TestStatus =

    latestVersion = payload.payload.version

-    let res = t.rpcClient.newPayload(payload.payload, payload.beaconRoot)
+    let res = env.rpcClient.newPayload(payload.payload, payload.beaconRoot)
    if res.isErr:
      result = TestStatus.Failed
      echo "unable to send block ",
@@ -164,22 +163,22 @@ proc runTest(node: JsonNode, network: string): TestStatus =
        echo pStatus.validationError.get
      break

-  block:
+  block blockOne:
    # only update head of beacon chain if valid response occurred
    if latestValidHash != common.Hash256():
      # update with latest valid response
      let fcState = ForkchoiceStateV1(headBlockHash: BlockHash latestValidHash.data)
-      let res = t.rpcClient.forkchoiceUpdated(latestVersion, fcState)
+      let res = env.rpcClient.forkchoiceUpdated(latestVersion, fcState)
      if res.isErr:
        result = TestStatus.Failed
        echo "unable to update head of beacon chain: ", res.error
-        break
+        break blockOne

-    if not validatePostState(node, t):
+    if not validatePostState(node, env):
      result = TestStatus.Failed
-      break
+      break blockOne

-  t.stopELClient()
+  env.stopELClient()

const
  skipName = [
@@ -10,8 +10,6 @@

import
  std/[json],
-  eth/p2p as eth_p2p,
-  eth/trie/trie_defs,
  stew/[byteutils],
  json_rpc/[rpcserver, rpcclient],
  ../../../nimbus/[
@@ -22,62 +20,57 @@ import
    core/chain,
    core/tx_pool,
    rpc,
-    sync/protocol,
    beacon/beacon_engine,
    common
  ],
-  ../../../tests/test_helpers,
  ../../../tools/evmstate/helpers

type
  TestEnv* = ref object
-    conf*: NimbusConf
-    ctx: EthContext
-    ethNode: EthereumNode
-    com: CommonRef
-    chainRef: ChainRef
-    rpcServer: RpcHttpServer
+    chain : ForkedChainRef
+    rpcServer : RpcHttpServer
    rpcClient*: RpcHttpClient

proc genesisHeader(node: JsonNode): BlockHeader =
  let genesisRLP = hexToSeqByte(node["genesisRLP"].getStr)
  rlp.decode(genesisRLP, EthBlock).header

-proc setupELClient*(t: TestEnv, conf: ChainConfig, node: JsonNode) =
-  let memDB = newCoreDbRef DefaultDbMemory
-  t.ctx = newEthContext()
-  t.ethNode = setupEthNode(t.conf, t.ctx, eth)
-  t.com = CommonRef.new(
-    memDB,
-    conf
-  )
-  t.chainRef = newChain(t.com, extraValidation = true)
+proc setupELClient*(conf: ChainConfig, node: JsonNode): TestEnv =
  let
-    stateDB = LedgerRef.init(memDB, emptyRlpHash)
+    memDB = newCoreDbRef DefaultDbMemory
    genesisHeader = node.genesisHeader
+    com = CommonRef.new(memDB, conf)
+    stateDB = LedgerRef.init(memDB, EMPTY_ROOT_HASH)
+    chain = newForkedChain(com, genesisHeader)

  setupStateDB(node["pre"], stateDB)
  stateDB.persist()

  doAssert stateDB.rootHash == genesisHeader.stateRoot

-  doAssert t.com.db.persistHeader(genesisHeader,
-    t.com.consensus == ConsensusType.POS)
-  doAssert(t.com.db.getCanonicalHead().blockHash == genesisHeader.blockHash)
+  doAssert com.db.persistHeader(genesisHeader,
+    com.consensus == ConsensusType.POS)
+  doAssert(com.db.getCanonicalHead().blockHash ==
+    genesisHeader.blockHash)

-  let txPool = TxPoolRef.new(t.com)
-  t.rpcServer = newRpcHttpServer(["127.0.0.1:8545"])
+  let
+    txPool = TxPoolRef.new(com)
+    beaconEngine = BeaconEngineRef.new(txPool, chain)
+    serverApi = newServerAPI(chain)
+    rpcServer = newRpcHttpServer(["127.0.0.1:0"])
+    rpcClient = newRpcHttpClient()

-  let beaconEngine = BeaconEngineRef.new(txPool, t.chainRef)
-  let oracle = Oracle.new(t.com)
-  setupEthRpc(t.ethNode, t.ctx, t.com, txPool, oracle, t.rpcServer)
-  setupEngineAPI(beaconEngine, t.rpcServer)
+  setupServerAPI(serverApi, rpcServer)
+  setupEngineAPI(beaconEngine, rpcServer)

-  t.rpcServer.start()
+  rpcServer.start()
+  waitFor rpcClient.connect("127.0.0.1", rpcServer.localAddress[0].port, false)

-  t.rpcClient = newRpcHttpClient()
-  waitFor t.rpcClient.connect("127.0.0.1", 8545.Port, false)
+  TestEnv(
+    chain: chain,
+    rpcServer: rpcServer,
+    rpcClient: rpcClient,
+  )

-proc stopELClient*(t: TestEnv) =
-  waitFor t.rpcClient.close()
-  waitFor t.rpcServer.closeWait()
+proc stopELClient*(env: TestEnv) =
+  waitFor env.rpcClient.close()
+  waitFor env.rpcServer.closeWait()
@@ -161,10 +161,7 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
      blockHash=blockHash.short,
      blockNumber=header.number
    return validFCU(Opt.none(PayloadID), blockHash)

-  chain.setCanonical(header).isOkOr:
-    return invalidFCU(error, com, header)
-
  # If the beacon client also advertised a finalized block, mark the local
  # chain final and completely in PoS mode.
  let finalizedBlockHash = ethHash update.finalizedBlockHash
@@ -212,6 +209,9 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
      raise invalidForkChoiceState("safe head not canonical")
    db.safeHeaderHash(safeBlockHash)

+  chain.forkChoice(finalizedBlockHash, blockHash).isOkOr:
+    return invalidFCU(error, com, header)
+
  # If payload generation was requested, create a new block to be potentially
  # sealed by the beacon client. The payload will be requested later, and we
  # might replace it arbitrarilly many times in between.
@@ -177,7 +177,7 @@ proc newPayload*(ben: BeaconEngineRef,
  if api.eth.SyncMode() != downloader.FullSync:
    return api.delayPayloadImport(header)

-  if not db.haveBlockAndState(header.parentHash):
+  if not ben.chain.haveBlockAndState(header.parentHash):
    ben.put(blockHash, header)
    warn "State not available, ignoring new payload",
      hash = blockHash,
@@ -187,7 +187,7 @@ proc newPayload*(ben: BeaconEngineRef,

  trace "Inserting block without sethead",
    hash = blockHash, number = header.number
-  let vres = ben.chain.insertBlockWithoutSetHead(blk)
+  let vres = ben.chain.importBlock(blk)
  if vres.isErr:
    ben.setInvalidAncestor(header, blockHash)
    let blockHash = latestValidHash(db, parent, ttd)
@@ -29,7 +29,7 @@ type
    txPool: TxPoolRef
    merge : MergeTracker
    queue : PayloadQueue
-    chain : ChainRef
+    chain : ForkedChainRef

    # The forkchoice update and new payload method require us to return the
    # latest valid hash in an invalid chain. To support that return, we need
@@ -98,7 +98,7 @@ proc setInvalidAncestor(ben: BeaconEngineRef,

proc new*(_: type BeaconEngineRef,
          txPool: TxPoolRef,
-          chain: ChainRef): BeaconEngineRef =
+          chain: ForkedChainRef): BeaconEngineRef =
  let ben = BeaconEngineRef(
    txPool: txPool,
    merge : MergeTracker.init(txPool.com.db),
@@ -154,7 +154,7 @@ proc put*(ben: BeaconEngineRef, id: PayloadID,
func com*(ben: BeaconEngineRef): CommonRef =
  ben.txPool.com

-func chain*(ben: BeaconEngineRef): ChainRef =
+func chain*(ben: BeaconEngineRef): ForkedChainRef =
  ben.chain

func ttdReached*(ben: BeaconEngineRef): bool =
@@ -215,9 +215,8 @@ proc generatePayload*(ben: BeaconEngineRef,
  wrapException:
    let
      xp = ben.txPool
-      db = xp.com.db
      pos = xp.com.pos
-      headBlock = db.getCanonicalHead()
+      headBlock = ben.chain.latestHeader

    pos.prevRandao = ethHash attrs.prevRandao
    pos.timestamp = ethTime attrs.timestamp
@@ -1,5 +1,5 @@
# Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@@ -9,10 +9,11 @@
# according to those terms.

import
-  ./chain/[chain_desc, persist_blocks]
+  ./chain/[chain_desc, persist_blocks, forked_chain]

export
  chain_desc,
-  persist_blocks
+  persist_blocks,
+  forked_chain

# End
@@ -299,11 +299,6 @@ proc markCanonicalChain(
# Public functions
# ------------------------------------------------------------------------------

-proc exists*(db: CoreDbRef, hash: Hash256): bool =
-  db.newKvt().hasKey(hash.data).valueOr:
-    warn logTxt "exisis()", hash, action="hasKey()", error=($$error)
-    return false
-
proc getSavedStateBlockNumber*(
    db: CoreDbRef;
      ): BlockNumber =
@@ -953,13 +948,6 @@ proc finalizedHeader*(
    {.gcsafe, raises: [BlockNotFound].} =
  db.getBlockHeader(db.finalizedHeaderHash)

-proc haveBlockAndState*(db: CoreDbRef, headerHash: Hash256): bool =
-  var header: BlockHeader
-  if not db.getBlockHeader(headerHash, header):
-    return false
-  # see if stateRoot exists
-  db.exists(header.stateRoot)
-
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
@@ -48,11 +48,11 @@ proc basicServices(nimbus: NimbusNode,
  doAssert nimbus.txPool.smartHead(head)

  # chainRef: some name to avoid module-name/filed/function misunderstandings
-  nimbus.chainRef = newChain(com)
-  if conf.verifyFrom.isSome:
-    let verifyFrom = conf.verifyFrom.get()
-    nimbus.chainRef.extraValidation = 0 < verifyFrom
-    nimbus.chainRef.verifyFrom = verifyFrom
+  nimbus.chainRef = newForkedChain(com, head)
+  #if conf.verifyFrom.isSome:
+  #  let verifyFrom = conf.verifyFrom.get()
+  #  nimbus.chainRef.extraValidation = 0 < verifyFrom
+  #  nimbus.chainRef.verifyFrom = verifyFrom

  nimbus.beaconEngine = BeaconEngineRef.new(nimbus.txPool, nimbus.chainRef)
@@ -46,7 +46,7 @@ type
    ethNode*: EthereumNode
    state*: NimbusState
    ctx*: EthContext
-    chainRef*: ChainRef
+    chainRef*: ForkedChainRef
    txPool*: TxPoolRef
    networkLoop*: Future[void]
    peerManager*: PeerManagerRef
@@ -21,6 +21,7 @@ import
  ./rpc/rpc_server,
  ./rpc/experimental,
  ./rpc/oracle,
+  ./rpc/server_api,
  ./nimbus_desc,
  ./graphql/ethapi

@@ -33,7 +34,8 @@ export
  cors,
  rpc_server,
  experimental,
-  oracle
+  oracle,
+  server_api

{.push gcsafe, raises: [].}

@@ -0,0 +1,82 @@
+# Nimbus
+# Copyright (c) 2024 Status Research & Development GmbH
+# Licensed under either of
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
+# at your option.
+# This file may not be copied, modified, or distributed except according to
+# those terms.
+
+{.push raises: [].}
+
+import
+  stint,
+  web3/conversions,
+  json_rpc/rpcserver,
+  ../common,
+  ../db/ledger,
+  ../core/chain/forked_chain,
+  ../beacon/web3_eth_conv,
+  ./rpc_types
+
+{.push raises: [].}
+
+type
+  ServerAPIRef = ref object
+    com: CommonRef
+    chain: ForkedChainRef
+
+const
+  defaultTag = blockId("latest")
+
+func newServerAPI*(c: ForkedChainRef): ServerAPIRef =
+  new(result)
+  result.com = c.com
+  result.chain = c
+
+proc headerFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[common.BlockHeader, string] =
+  if blockTag.kind == bidAlias:
+    let tag = blockTag.alias.toLowerAscii
+    case tag
+    of "latest": return ok(api.chain.latestHeader)
+    else:
+      return err("Unsupported block tag " & tag)
+  else:
+    let blockNum = common.BlockNumber blockTag.number
+    return api.chain.headerByNumber(blockNum)
+
+proc ledgerFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[LedgerRef, string] =
+  let header = ?api.headerFromTag(blockTag)
+  if api.chain.stateReady(header):
+    ok(LedgerRef.init(api.com.db, header.stateRoot))
+  else:
+    # TODO: Replay state?
+    err("Block state not ready")
+
+proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) =
+
+  server.rpc("eth_getBalance") do(data: Web3Address, blockTag: BlockTag) -> UInt256:
+    ## Returns the balance of the account of given address.
+    let
+      ledger = api.ledgerFromTag(blockTag).valueOr:
+        raise newException(ValueError, error)
+      address = ethAddr data
+    result = ledger.getBalance(address)
+
+  server.rpc("eth_getStorageAt") do(data: Web3Address, slot: UInt256, blockTag: BlockTag) -> FixedBytes[32]:
+    ## Returns the value from a storage position at a given address.
+    let
+      ledger = api.ledgerFromTag(blockTag).valueOr:
+        raise newException(ValueError, error)
+      address = ethAddr data
+      value = ledger.getStorage(address, slot)
+    result = w3FixedBytes value
+
+  server.rpc("eth_getTransactionCount") do(data: Web3Address, blockTag: BlockTag) -> Web3Quantity:
+    ## Returns the number of transactions ak.s. nonce sent from an address.
+    let
+      ledger = api.ledgerFromTag(blockTag).valueOr:
+        raise newException(ValueError, error)
+      address = ethAddr data
+      nonce = ledger.getNonce(address)
+    result = w3Qty nonce
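For orientation only, a hypothetical client-side probe of one of the handlers above, reusing the RpcHttpClient the simulator already creates. The raw `call` interface of nim-json-rpc is assumed here and its concrete response type differs between library versions, so the result is merely echoed; this snippet is not part of the commit.

```nim
import std/json, chronos, json_rpc/rpcclient

proc probeBalance(client: RpcHttpClient) {.async.} =
  # Dispatches to the "eth_getBalance" handler registered by setupServerAPI;
  # the "latest" tag resolves through headerFromTag -> api.chain.latestHeader.
  let resp = await client.call("eth_getBalance",
    %* ["0x0000000000000000000000000000000000000000", "latest"])
  debugEcho resp.repr
```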
@@ -127,7 +127,7 @@ proc enableRpcMagic(ctx: BeaconSyncRef) =
proc init*(
    T: type BeaconSyncRef;
    ethNode: EthereumNode;
-    chain: ChainRef;
+    chain: ForkedChainRef;
    rng: ref HmacDrbgContext;
    maxPeers: int;
    id: int = 0): T =
@@ -280,8 +280,7 @@ proc fillCanonicalChain*(sk: SkeletonRef): Result[void, string] =
  let subchain = sk.last
  if sk.progress.canonicalHeadReset:
    # Grab previous head block in case of resettng canonical head
-    let oldHead = sk.canonicalHead().valueOr:
-      return err(error)
+    let oldHead = sk.canonicalHead()
    maybeOldHead = Opt.some oldHead

  if subchain.tail > canonicalHead + 1:
@@ -184,12 +184,8 @@ proc deleteHeaderAndBody*(sk: SkeletonRef, header: BlockHeader) =
  sk.del(skeletonBlockHashToNumberKey(header.blockHash))
  sk.del(skeletonBodyKey(header.sumHash))

-proc canonicalHead*(sk: SkeletonRef): Result[BlockHeader, string] =
-  ## Returns Opt.some or error, never returns Opt.none
-  try:
-    ok(sk.db.getCanonicalHead())
-  except CatchableError as ex:
-    err(ex.msg)
+proc canonicalHead*(sk: SkeletonRef): BlockHeader =
+  sk.chain.latestHeader

proc resetCanonicalHead*(sk: SkeletonRef, newHead, oldHead: uint64) =
  debug "RESET CANONICAL", newHead, oldHead
@@ -198,7 +194,8 @@ proc resetCanonicalHead*(sk: SkeletonRef, newHead, oldHead: uint64) =
proc insertBlocks*(sk: SkeletonRef,
                   blocks: openArray[EthBlock],
                   fromEngine: bool): Result[uint64, string] =
-  discard ? sk.chain.persistBlocks(blocks)
+  for blk in blocks:
+    ? sk.chain.importBlock(blk)
  ok(blocks.len.uint64)

proc insertBlock*(sk: SkeletonRef,
@@ -62,7 +62,7 @@ type
    started* : Time # Timestamp when the skeleton syncer was created
    logged* : Time # Timestamp when progress was last logged to user
    db* : CoreDbRef
-    chain* : ChainRef
+    chain* : ForkedChainRef
    conf* : SkeletonConfig
    fillLogIndex*: uint64

@@ -1,5 +1,5 @@
# Nimbus
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at
#    https://opensource.org/licenses/MIT).
@@ -29,7 +29,7 @@ logScope:
# Constructors
# ------------------------------------------------------------------------------

-proc new*(_: type SkeletonRef, chain: ChainRef): SkeletonRef =
+proc new*(_: type SkeletonRef, chain: ForkedChainRef): SkeletonRef =
  SkeletonRef(
    progress: Progress(),
    pulled : 0,
@@ -55,7 +55,7 @@ type

  EthWireRef* = ref object of EthWireBase
    db: CoreDbRef
-    chain: ChainRef
+    chain: ForkedChainRef
    txPool: TxPoolRef
    peerPool: PeerPool
    enableTxPool: EthWireRunState
@@ -342,7 +342,7 @@ proc setupPeerObserver(ctx: EthWireRef) =
# ------------------------------------------------------------------------------

proc new*(_: type EthWireRef,
-          chain: ChainRef,
+          chain: ForkedChainRef,
          txPool: TxPoolRef,
          peerPool: PeerPool): EthWireRef =
  let ctx = EthWireRef(
@@ -33,7 +33,7 @@ proc setEthHandlerNewBlocksAndHashes*(
proc addEthHandlerCapability*(
    node: EthereumNode;
    peerPool: PeerPool;
-    chain: ChainRef;
+    chain: ForkedChainRef;
    txPool = TxPoolRef(nil);
      ) =
  ## Install `eth` handlers. Passing `txPool` as `nil` installs the handler
@@ -46,7 +46,7 @@ type
    ## Shared state among all syncing peer workers (aka buddies.)
    buddiesMax*: int ## Max number of buddies
    ethWireCtx*: EthWireRef ## Eth protocol wire context (if available)
-    chain*: ChainRef ## Block chain database (no need for `Peer`)
+    chain*: ForkedChainRef ## Block chain database (no need for `Peer`)
    poolMode*: bool ## Activate `runPool()` workers if set `true`
    daemon*: bool ## Enable global background job
    exCtrlFile*: Opt[string] ## Extra instructions file (if any)
@@ -383,7 +383,7 @@ proc onPeerDisconnected[S,W](dsc: RunnerSyncRef[S,W], peer: Peer) =
proc initSync*[S,W](
    dsc: RunnerSyncRef[S,W];
    node: EthereumNode;
-    chain: ChainRef,
+    chain: ForkedChainRef,
    slots: int;
    exCtrlFile = Opt.none(string);
      ) =
@@ -10,7 +10,7 @@

import
  stew/byteutils,
-  ../../nimbus/core/chain,
+  ../../nimbus/core/chain/forked_chain,
  ../../nimbus/core/pow/difficulty,
  ../../nimbus/config,
  ../../nimbus/common,
@@ -26,7 +26,7 @@ type

  TestEnv* = object
    conf* : NimbusConf
-    chain*: ChainRef
+    chain*: ForkedChainRef

  CCModify = proc(cc: NetworkParams)

@@ -67,7 +67,7 @@ proc setupEnv*(extraValidation: bool = false, ccm: CCModify = nil): TestEnv =
      conf.networkId,
      conf.networkParams
    )
-    chain = newChain(com, extraValidation)
+    chain = newForkedChain(com, com.genesisHeader, extraValidation)

  com.initializeEmptyDb()
  TestEnv(
@@ -75,8 +75,7 @@ proc test6*() =
    check skel.blockHeight == 4

  test "canonical height should now be at head with correct chain":
-    let latestHash = env.chain.currentBlock().blockHash
-    check latestHash == block4PoS.blockHash
+    check env.chain.latestHash == block4PoS.blockHash

  test "should update to new height":
    skel.setHeadT(block5, true, false)
@@ -61,5 +61,4 @@ proc test8*() =
    skel.initSyncT(block3, true)
    skel.putBlocksT([block2], 1, {FillCanonical})
    check skel.blockHeight == 3
-    let latestHash = env.chain.currentBlock().blockHash
-    check latestHash == block3.blockHash
+    check env.chain.latestHash == block3.blockHash
@@ -148,6 +148,6 @@ when isMainModule:
      let node = json.parseFile(name)
      executeFile(node, testStatusIMPL)

-    executeFile("tests/fixtures/eth_tests/BlockchainTests/ValidBlocks/bcTotalDifficultyTest/sideChainWithMoreTransactions.json")
+    executeFile("tests/fixtures/eth_tests/BlockchainTests/ValidBlocks/bcWalletTest/walletReorganizeOwners.json")
  else:
    blockchainJsonMain()