Wiring ForkedChainRef to other components (#2423)
* Wiring ForkedChainRef to other components
  - Disable majority of hive simulators
  - Only enable pyspec_sim for the moment
  - The pyspec_sim is using a smaller RPC service wired to ForkedChainRef
  - The RPC service will gradually grow
* Addressing PR review
* Fix test_beacon/setup_env
* Enable consensus_sim (#2441)
* Enable consensus_sim
* Remove isFile check
* Enable Engine API jwt auth tests and exchange cap tests
* Enable engine api in build_sim.sh
* Wire ForkedChainRef to Engine API newPayload
* Wire Engine API getBodies to ForkedChainRef
* Wire Engine API api_forkchoice to ForkedChainRef
* Wire more RPC methods to ForkedChainRef
* Implement eth_syncing
* Implement eth_call and eth_getlogs
* TxPool: simplify smartHead
* Fix smartHead usage
* Fix txpool headDiff
* Remove hasBlockHeader and use headerExists
* Addressing review
This commit is contained in:
parent 486c492bd4
commit 4d9e288340
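The recurring pattern across the hunks below: construct a ForkedChainRef from a CommonRef plus a base header, then hand the same instance to the tx pool, the beacon engine, and the new RPC server API. A minimal sketch of that wiring, assuming `com` and `server` are already set up as in the hunks (import paths abbreviated):

  import
    ../nimbus/core/chain,            # newForkedChain
    ../nimbus/core/tx_pool,          # TxPoolRef, smartHead
    ../nimbus/beacon/beacon_engine,  # BeaconEngineRef
    ../nimbus/rpc                    # newServerAPI, setupServerAPI

  proc wireForkedChain(com: CommonRef, server: RpcHttpServer) =
    # Base the in-memory chain on the current canonical head.
    let head  = com.db.getCanonicalHead()
    let chain = newForkedChain(com, head)

    # The tx pool now needs the chain to compute head diffs.
    let txPool = TxPoolRef.new(com)
    doAssert txPool.smartHead(head, chain)

    # Engine API and the smaller eth RPC service share the same chain.
    let beaconEngine = BeaconEngineRef.new(txPool, chain)
    let serverApi    = newServerAPI(chain)
    setupServerAPI(serverApi, server)
    setupEngineAPI(beaconEngine, server)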
@@ -16,20 +16,16 @@ USE_SYSTEM_NIM=1
 ENV_SCRIPT="vendor/nimbus-build-system/scripts/env.sh"
 
-# nimbus_db_backend:none -> we only use memory db in simulators
+# we only use memory db in simulators
 NIM_FLAGS="c -d:release"
 
-${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/engine/engine_sim
-${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/consensus/consensus_sim
-${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/graphql/graphql_sim
-${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/rpc/rpc_sim
-${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/pyspec/pyspec_sim
-
-${SIM_DIR}/engine/engine_sim
-${SIM_DIR}/consensus/consensus_sim
-${SIM_DIR}/graphql/graphql_sim
-${SIM_DIR}/rpc/rpc_sim
-${SIM_DIR}/pyspec/pyspec_sim
-
 echo "## ${1}" > simulators.md
-cat engine.md consensus.md graphql.md rpc.md pyspec.md >> simulators.md
+
+# more suites: engine, graphql, rpc
+suites=(consensus pyspec engine)
+for suite in "${suites[@]}"
+do
+  ${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/${suite}/${suite}_sim
+  ${SIM_DIR}/${suite}/${suite}_sim
+  cat ${suite}.md >> simulators.md
+done
@@ -12,6 +12,7 @@ import
   stew/byteutils,
+  results,
   chronicles,
   ../../../nimbus/core/chain,
   ../../../nimbus/core/block_import,
   ../../../nimbus/common,
   ../../../nimbus/core/eip4844,
@@ -26,11 +27,13 @@ proc processChainData(cd: ChainData): TestStatus =
     cd.params
   )
 
+  let c = newForkedChain(com, com.genesisHeader)
+
   for bytes in cd.blocksRlp:
     # ignore return value here
     # because good blocks maybe interleaved with
     # bad blocks
-    discard importRlpBlock(bytes, com, "consensus_sim")
+    discard importRlpBlocks(bytes, c, finalize = true)
 
   let head = com.db.getCanonicalHead()
   let blockHash = "0x" & head.blockHash.data.toHex
@@ -43,8 +46,23 @@ proc processChainData(cd: ChainData): TestStatus =
       expected=cd.lastBlockHash
     TestStatus.Failed
 
+# except loopMul, all other tests are related to total difficulty
+# which is not supported in ForkedChain
+const unsupportedTests = [
+  "lotsOfBranchesOverrideAtTheMiddle.json",
+  "sideChainWithMoreTransactions.json",
+  "uncleBlockAtBlock3afterBlock4.json",
+  "CallContractFromNotBestBlock.json",
+  "ChainAtoChainB_difficultyB.json",
+  "ForkStressTest.json",
+  "blockChainFrontierWithLargerTDvsHomesteadBlockchain.json",
+  "blockChainFrontierWithLargerTDvsHomesteadBlockchain2.json",
+  "lotsOfLeafs.json",
+  "loopMul.json"
+]
+
 proc main() =
-  const basePath = "tests" / "fixtures" / "eth_tests" / "BlockchainTests"
+  const basePath = "tests/fixtures/eth_tests/BlockchainTests"
   var stat: SimStat
   let start = getTime()
@@ -57,15 +75,17 @@ proc main() =
     if not fileName.endsWith(".json"):
      continue
 
-    let n = json.parseFile(fileName)
-    for name, unit in n:
-      if "loopMul" in name:
-        inc stat.skipped
-        continue
+    let (_, name) = fileName.splitPath()
+    if name in unsupportedTests:
+      let n = json.parseFile(fileName)
+      stat.skipped += n.len
+      continue
 
+    let n = json.parseFile(fileName)
+    for caseName, unit in n:
       let cd = extractChainData(unit)
       let status = processChainData(cd)
-      stat.inc(name, status)
+      stat.inc(caseName, status)
 
   let elpd = getTime() - start
   print(stat, elpd, "consensus")
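For context, `finalize = true` in the new `importRlpBlocks` call maps to a trailing `forkChoice` on the latest imported hash (see the block_import.nim hunk further down), so the consensus simulator's import loop is equivalent to this sketch, assuming `com` and `cd` as above:

  let c = newForkedChain(com, com.genesisHeader)
  for bytes in cd.blocksRlp:
    # good blocks may be interleaved with bad ones, so each
    # chunk's import result is deliberately discarded
    discard importRlpBlocks(bytes, c, finalize = true)
  # finalize = true makes each call end with:
  #   c.forkChoice(c.latestHash, c.latestHash)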
@@ -48,7 +48,7 @@ type
     client : RpcHttpClient
     sync : BeaconSyncRef
     txPool : TxPoolRef
-    chain : ChainRef
+    chain : ForkedChainRef
 
 const
   baseFolder = "hive_integration/nodocker/engine"
@@ -66,7 +66,6 @@ proc makeCom*(conf: NimbusConf): CommonRef =
 
 proc envConfig*(): NimbusConf =
   makeConfig(@[
     "--engine-signer:658bdf435d810c91414ec09147daa6db62406379",
     "--custom-network:" & genesisFile,
-    "--listen-address: 127.0.0.1",
   ])
@@ -88,7 +87,8 @@ proc newEngineEnv*(conf: var NimbusConf, chainFile: string, enableAuth: bool): E
   let
     node = setupEthNode(conf, ctx)
     com = makeCom(conf)
-    chain = newChain(com)
+    head = com.db.getCanonicalHead()
+    chain = newForkedChain(com, head)
 
   let txPool = TxPoolRef.new(com)
@@ -99,8 +99,7 @@ proc newEngineEnv*(conf: var NimbusConf, chainFile: string, enableAuth: bool): E
 
   # txPool must be informed of active head
   # so it can know the latest account state
-  let head = com.db.getCanonicalHead()
-  doAssert txPool.smartHead(head)
+  doAssert txPool.smartHead(head, chain)
 
   var key: JwtSharedKey
   key.fromHex(jwtSecret).isOkOr:
@@ -120,14 +119,16 @@ proc newEngineEnv*(conf: var NimbusConf, chainFile: string, enableAuth: bool): E
       BeaconSyncRef(nil)
     beaconEngine = BeaconEngineRef.new(txPool, chain)
     oracle = Oracle.new(com)
+    serverApi = newServerAPI(chain)
 
   setupEthRpc(node, ctx, com, txPool, oracle, server)
+  setupServerAPI(serverApi, server)
   setupEngineAPI(beaconEngine, server)
-  setupDebugRpc(com, txPool, server)
+  # temporary disabled
+  #setupDebugRpc(com, txPool, server)
 
   # Do not start clique sealing engine if we are using a Proof of Work chain file
   if chainFile.len > 0:
-    if not importRlpBlock(chainFolder / chainFile, com):
+    importRlpBlocks(chainFolder / chainFile, chain, true).isOkOr:
+      echo "Failed to import RLP blocks: ", error
       quit(QuitFailure)
 
   server.start()
@@ -183,7 +184,6 @@ proc connect*(env: EngineEnv, node: ENode) =
   waitFor env.node.connectToNode(node)
 
 func ID*(env: EngineEnv): string =
-  # $env.node.listeningAddress
   $env.conf.httpPort
 
 proc peer*(env: EngineEnv): Peer =
@@ -218,4 +218,6 @@ func version*(env: EngineEnv, time: uint64): Version =
   env.version(time.EthTime)
 
 proc setBlock*(env: EngineEnv, blk: common.EthBlock): bool =
-  env.chain.setBlock(blk).isOk()
+  # env.chain.setBlock(blk).isOk()
+  debugEcho "TODO: fix setBlock"
+  false
@@ -17,18 +17,18 @@ import
   ../../../nimbus/core/eip4844
 
 import
-  ./engine_tests,
+  # ./engine_tests,
   ./auths_tests,
-  ./exchange_cap_tests,
-  ./withdrawal_tests,
-  ./cancun_tests
+  ./exchange_cap_tests#,
+  #./withdrawal_tests,
+  #./cancun_tests
 
 proc combineTests(): seq[TestDesc] =
-  result.add wdTestList
+  #result.add wdTestList
   result.add ecTestList
   result.add authTestList
-  result.add engineTestList
-  result.add cancunTestList
+  #result.add engineTestList
+  #result.add cancunTestList
 
 let
   testList = combineTests()
@@ -113,7 +113,7 @@ proc setBlock*(c: ChainRef; blk: EthBlock): Result[void, string] =
     c.db.persistReceipts(header.receiptsRoot, vmState.receipts)
 
     if blk.withdrawals.isSome:
-      c.db.persistWithdrawals(header.withdrawalsRoot, blk.withdrawals.get)
+      c.db.persistWithdrawals(header.withdrawalsRoot.get, blk.withdrawals.get)
   except CatchableError as exc:
     return err(exc.msg)
@@ -94,7 +94,8 @@ proc main() =
   # so it can know the latest account state
   # e.g. "sendRawTransaction Nonce too low" case
   let head = com.db.getCanonicalHead()
-  doAssert txPool.smartHead(head)
+  let chainRef = newForkedChain(com, head)
+  doAssert txPool.smartHead(head, chainRef)
 
   for fileName in walkDirRec(
       caseFolder, yieldFilter = {pcFile,pcLinkToFile}):
@@ -16,7 +16,6 @@ import
   ../sim_utils,
   ../../../tools/common/helpers as chp,
   ../../../tools/evmstate/helpers as ehp,
-  ../../../tests/test_helpers,
   ../../../nimbus/beacon/web3_eth_conv,
   ../../../nimbus/beacon/payload_conv,
   ../../../nimbus/core/eip4844,
@@ -116,8 +115,7 @@ proc validatePostState(node: JsonNode, t: TestEnv): bool =
 
 proc runTest(node: JsonNode, network: string): TestStatus =
   let conf = getChainConfig(network)
-  var t = TestEnv(conf: makeTestConfig())
-  t.setupELClient(conf, node)
+  var env = setupELClient(conf, node)
 
   let blks = node["blocks"]
   var
@@ -143,7 +141,7 @@ proc runTest(node: JsonNode, network: string): TestStatus =
 
     latestVersion = payload.payload.version
 
-    let res = t.rpcClient.newPayload(payload.payload, payload.beaconRoot)
+    let res = env.rpcClient.newPayload(payload.payload, payload.beaconRoot)
     if res.isErr:
       result = TestStatus.Failed
      echo "unable to send block ",
@@ -164,22 +162,22 @@
       echo pStatus.validationError.get
       break
 
-  block:
+  block blockOne:
    # only update head of beacon chain if valid response occurred
    if latestValidHash != common.Hash256():
      # update with latest valid response
      let fcState = ForkchoiceStateV1(headBlockHash: BlockHash latestValidHash.data)
-      let res = t.rpcClient.forkchoiceUpdated(latestVersion, fcState)
+      let res = env.rpcClient.forkchoiceUpdated(latestVersion, fcState)
      if res.isErr:
        result = TestStatus.Failed
        echo "unable to update head of beacon chain: ", res.error
-        break
+        break blockOne
 
-    if not validatePostState(node, t):
+    if not validatePostState(node, env):
      result = TestStatus.Failed
-      break
+      break blockOne
 
-  t.stopELClient()
+  env.stopELClient()
 
 const
   skipName = [
@@ -10,8 +10,6 @@
 
 import
   std/[json],
-  eth/p2p as eth_p2p,
-  eth/trie/trie_defs,
   stew/[byteutils],
   json_rpc/[rpcserver, rpcclient],
   ../../../nimbus/[
@@ -22,62 +20,57 @@ import
     core/chain,
     core/tx_pool,
     rpc,
    sync/protocol,
    beacon/beacon_engine,
    common
   ],
   ../../../tests/test_helpers,
   ../../../tools/evmstate/helpers
 
 type
   TestEnv* = ref object
     conf*: NimbusConf
-    ctx: EthContext
-    ethNode: EthereumNode
-    com: CommonRef
-    chainRef: ChainRef
-    rpcServer: RpcHttpServer
+    chain : ForkedChainRef
+    rpcServer : RpcHttpServer
     rpcClient*: RpcHttpClient
 
+proc genesisHeader(node: JsonNode): BlockHeader =
+  let genesisRLP = hexToSeqByte(node["genesisRLP"].getStr)
+  rlp.decode(genesisRLP, EthBlock).header
+
-proc setupELClient*(t: TestEnv, conf: ChainConfig, node: JsonNode) =
-  let memDB = newCoreDbRef DefaultDbMemory
-  t.ctx = newEthContext()
-  t.ethNode = setupEthNode(t.conf, t.ctx, eth)
-  t.com = CommonRef.new(
-    memDB,
-    conf
-  )
-  t.chainRef = newChain(t.com, extraValidation = true)
+proc setupELClient*(conf: ChainConfig, node: JsonNode): TestEnv =
   let
-    stateDB = LedgerRef.init(memDB, emptyRlpHash)
+    memDB = newCoreDbRef DefaultDbMemory
+    genesisHeader = node.genesisHeader
+    com = CommonRef.new(memDB, conf)
+    stateDB = LedgerRef.init(memDB, EMPTY_ROOT_HASH)
+    chain = newForkedChain(com, genesisHeader)
 
   setupStateDB(node["pre"], stateDB)
   stateDB.persist()
 
   doAssert stateDB.rootHash == genesisHeader.stateRoot
 
-  doAssert t.com.db.persistHeader(genesisHeader,
-    t.com.consensus == ConsensusType.POS)
-  doAssert(t.com.db.getCanonicalHead().blockHash == genesisHeader.blockHash)
+  doAssert com.db.persistHeader(genesisHeader,
+    com.consensus == ConsensusType.POS)
+  doAssert(com.db.getCanonicalHead().blockHash ==
+    genesisHeader.blockHash)
 
-  let txPool = TxPoolRef.new(t.com)
-  t.rpcServer = newRpcHttpServer(["127.0.0.1:8545"])
+  let
+    txPool = TxPoolRef.new(com)
+    beaconEngine = BeaconEngineRef.new(txPool, chain)
+    serverApi = newServerAPI(chain)
+    rpcServer = newRpcHttpServer(["127.0.0.1:0"])
+    rpcClient = newRpcHttpClient()
 
-  let beaconEngine = BeaconEngineRef.new(txPool, t.chainRef)
-  let oracle = Oracle.new(t.com)
-  setupEthRpc(t.ethNode, t.ctx, t.com, txPool, oracle, t.rpcServer)
-  setupEngineAPI(beaconEngine, t.rpcServer)
+  setupServerAPI(serverApi, rpcServer)
+  setupEngineAPI(beaconEngine, rpcServer)
 
-  t.rpcServer.start()
+  rpcServer.start()
+  waitFor rpcClient.connect("127.0.0.1", rpcServer.localAddress[0].port, false)
 
-  t.rpcClient = newRpcHttpClient()
-  waitFor t.rpcClient.connect("127.0.0.1", 8545.Port, false)
+  TestEnv(
+    chain: chain,
+    rpcServer: rpcServer,
+    rpcClient: rpcClient,
+  )
 
-proc stopELClient*(t: TestEnv) =
-  waitFor t.rpcClient.close()
-  waitFor t.rpcServer.closeWait()
+proc stopELClient*(env: TestEnv) =
+  waitFor env.rpcClient.close()
+  waitFor env.rpcServer.closeWait()
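Worth noting: the rewritten setupELClient binds the RPC server to port 0 and asks the OS for the actual port afterwards, instead of hard-coding 8545, which avoids port collisions when simulators run in parallel. The pattern in isolation, a sketch using the same json-rpc calls as the hunk above:

  let rpcServer = newRpcHttpServer(["127.0.0.1:0"])   # port 0 = let the OS pick
  rpcServer.start()

  let rpcClient = newRpcHttpClient()
  # localAddress reveals the port that was actually bound
  waitFor rpcClient.connect("127.0.0.1", rpcServer.localAddress[0].port, false)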
@@ -82,13 +82,13 @@ proc setupEnv*(): TestEnv =
 
   manageAccounts(ethCtx, conf)
 
-  let chainRef = newChain(com)
+  let head = com.db.getCanonicalHead()
+  let chainRef = newForkedChain(com, head)
   let txPool = TxPoolRef.new(com)
 
   # txPool must be informed of active head
   # so it can know the latest account state
-  let head = com.db.getCanonicalHead()
-  doAssert txPool.smartHead(head)
+  doAssert txPool.smartHead(head, chainRef)
 
   let rpcServer = setupRpcServer(ethCtx, com, ethNode, txPool, conf)
   let rpcClient = newRpcHttpClient()
@@ -87,8 +87,7 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
   # Check whether we have the block yet in our database or not. If not, we'll
   # need to either trigger a sync, or to reject this forkchoice update for a
   # reason.
-  var header: common.BlockHeader
-  if not db.getBlockHeader(blockHash, header):
+  let header = ben.chain.headerByHash(blockHash).valueOr:
     # If this block was previously invalidated, keep rejecting it here too
     let res = ben.checkInvalidAncestor(blockHash, blockHash)
     if res.isSome:
@@ -98,6 +97,7 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
     # we cannot resolve the header, so not much to do. This could be extended in
     # the future to resolve from the `eth` network, but it's an unexpected case
     # that should be fixed, not papered over.
+    var header: common.BlockHeader
     if not ben.get(blockHash, header):
       warn "Forkchoice requested unknown head",
         hash = blockHash.short
@@ -162,9 +162,6 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
       blockNumber=header.number
     return validFCU(Opt.none(PayloadID), blockHash)
 
-  chain.setCanonical(header).isOkOr:
-    return invalidFCU(error, com, header)
-
   # If the beacon client also advertised a finalized block, mark the local
   # chain final and completely in PoS mode.
   let finalizedBlockHash = ethHash update.finalizedBlockHash
@@ -172,46 +169,23 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
     if not ben.posFinalized:
       ben.finalizePoS()
 
-    # TODO: If the finalized block is not in our canonical tree, somethings wrong
-    var finalBlock: common.BlockHeader
-    if not db.getBlockHeader(finalizedBlockHash, finalBlock):
-      warn "Final block not available in database",
-        hash=finalizedBlockHash.short
-      raise invalidForkChoiceState("finalized block header not available")
-    var finalHash: common.Hash256
-    if not db.getBlockHash(finalBlock.number, finalHash):
+    if not ben.chain.isCanonical(finalizedBlockHash):
       warn "Final block not in canonical chain",
-        number=finalBlock.number,
         hash=finalizedBlockHash.short
       raise invalidForkChoiceState("finalized block hash not available")
-    if finalHash != finalizedBlockHash:
-      warn "Final block not in canonical chain",
-        number=finalBlock.number,
-        expect=finalizedBlockHash.short,
-        get=finalHash.short
-      raise invalidForkChoiceState("finalized block not canonical")
     db.finalizedHeaderHash(finalizedBlockHash)
 
   let safeBlockHash = ethHash update.safeBlockHash
   if safeBlockHash != common.Hash256():
-    var safeBlock: common.BlockHeader
-    if not db.getBlockHeader(safeBlockHash, safeBlock):
-      warn "Safe block not available in database",
-        hash = safeBlockHash.short
-      raise invalidForkChoiceState("safe head not available")
-    var safeHash: common.Hash256
-    if not db.getBlockHash(safeBlock.number, safeHash):
-      warn "Safe block hash not available in database",
-        hash = safeHash.short
-      raise invalidForkChoiceState("safe block hash not available")
-    if safeHash != safeBlockHash:
+    if not ben.chain.isCanonical(safeBlockHash):
       warn "Safe block not in canonical chain",
-        blockNumber=safeBlock.number,
-        expect=safeBlockHash.short,
-        get=safeHash.short
+        hash=safeBlockHash.short
      raise invalidForkChoiceState("safe head not canonical")
    db.safeHeaderHash(safeBlockHash)
 
+  chain.forkChoice(finalizedBlockHash, blockHash).isOkOr:
+    return invalidFCU(error, com, header)
+
   # If payload generation was requested, create a new block to be potentially
   # sealed by the beacon client. The payload will be requested later, and we
   # might replace it arbitrarilly many times in between.
@@ -48,19 +48,33 @@ proc getPayloadBodyByHeader(db: CoreDbRef,
     ))
   )
 
+func toPayloadBody(blk: EthBlock): ExecutionPayloadBodyV1 =
+  var wds: seq[WithdrawalV1]
+  if blk.withdrawals.isSome:
+    for w in blk.withdrawals.get:
+      wds.add w3Withdrawal(w)
+
+  ExecutionPayloadBodyV1(
+    transactions: w3Txs(blk.transactions),
+    # pre Shanghai block return null withdrawals
+    # post Shanghai block return at least empty slice
+    withdrawals: if blk.withdrawals.isSome:
+      Opt.some(wds)
+    else:
+      Opt.none(seq[WithdrawalV1])
+  )
+
 proc getPayloadBodiesByHash*(ben: BeaconEngineRef,
                              hashes: seq[Web3Hash]):
                                seq[Opt[ExecutionPayloadBodyV1]] =
   if hashes.len > maxBodyRequest:
     raise tooLargeRequest("request exceeds max allowed " & $maxBodyRequest)
 
-  let db = ben.com.db
-  var header: common.BlockHeader
   for h in hashes:
-    if not db.getBlockHeader(ethHash h, header):
+    let blk = ben.chain.blockByHash(ethHash h).valueOr:
       result.add Opt.none(ExecutionPayloadBodyV1)
       continue
-    db.getPayloadBodyByHeader(header, result)
+    result.add Opt.some(toPayloadBody(blk))
 
 proc getPayloadBodiesByRange*(ben: BeaconEngineRef,
                               start: uint64, count: uint64):
@@ -75,19 +89,27 @@ proc getPayloadBodiesByRange*(ben: BeaconEngineRef,
     raise tooLargeRequest("request exceeds max allowed " & $maxBodyRequest)
 
   let
-    com = ben.com
-    db = com.db
-    current = com.syncCurrent
+    db = ben.com.db
 
   var
-    header: common.BlockHeader
     last = start+count-1
+    header: common.BlockHeader
 
-  if last > current:
-    last = current
+  if start > ben.chain.latestNumber:
+    # requested range beyond the latest known block
+    return
 
-  for bn in start..last:
+  if last > ben.chain.latestNumber:
+    last = ben.chain.latestNumber
+
+  # get bodies from database
+  for bn in start..ben.chain.baseNumber:
     if not db.getBlockHeader(bn, header):
       result.add Opt.none(ExecutionPayloadBodyV1)
       continue
     db.getPayloadBodyByHeader(header, result)
+
+  if last > ben.chain.baseNumber:
+    let blocks = ben.chain.blockFromBaseTo(last)
+    for i in countdown(blocks.len-1, 0):
+      result.add Opt.some(toPayloadBody(blocks[i]))
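The rewritten range handler splits a request at chain.baseNumber: bodies at or below the base are persisted and come from the database, while anything newer lives only in ForkedChainRef's in-memory block map and is returned via blockFromBaseTo (which walks cursor toward base, hence the countdown to restore ascending order). A condensed sketch of that split, assuming the accessors from the forked_chain.nim hunks below; the min() guard is an addition here, not in the original hunk:

  proc bodiesByRange(ben: BeaconEngineRef, start, last: uint64):
      seq[Opt[ExecutionPayloadBodyV1]] =
    let db = ben.com.db
    var header: common.BlockHeader
    # 1) persisted part: base and below come from the database
    for bn in start..min(last, ben.chain.baseNumber):
      if db.getBlockHeader(bn, header):
        db.getPayloadBodyByHeader(header, result)
      else:
        result.add Opt.none(ExecutionPayloadBodyV1)
    # 2) unfinalized part: walk the in-memory chain, reversing the
    #    parent-hash order back to ascending block numbers
    if last > ben.chain.baseNumber:
      let blocks = ben.chain.blockFromBaseTo(last)
      for i in countdown(blocks.len-1, 0):
        result.add Opt.some(toPayloadBody(blocks[i]))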
@@ -134,7 +134,7 @@ proc newPayload*(ben: BeaconEngineRef,
 
   # If we already have the block locally, ignore the entire execution and just
   # return a fake success.
-  if db.getBlockHeader(blockHash, header):
+  if ben.chain.haveBlockLocally(blockHash):
     warn "Ignoring already known beacon payload",
       number = header.number, hash = blockHash.short
     return validStatus(blockHash)
@@ -150,8 +150,7 @@ proc newPayload*(ben: BeaconEngineRef,
   # our live chain. As such, payload execution will not permit reorgs and thus
   # will not trigger a sync cycle. That is fine though, if we get a fork choice
   # update after legit payload executions.
-  var parent: common.BlockHeader
-  if not db.getBlockHeader(header.parentHash, parent):
+  let parent = ben.chain.headerByHash(header.parentHash).valueOr:
     return ben.delayPayloadImport(header)
 
   # We have an existing parent, do some sanity checks to avoid the beacon client
@@ -185,7 +184,7 @@ proc newPayload*(ben: BeaconEngineRef,
   if api.eth.SyncMode() != downloader.FullSync:
     return api.delayPayloadImport(header)
 
-  if not db.haveBlockAndState(header.parentHash):
+  if not ben.chain.haveBlockAndState(header.parentHash):
     ben.put(blockHash, header)
     warn "State not available, ignoring new payload",
       hash = blockHash,
@@ -195,7 +194,7 @@ proc newPayload*(ben: BeaconEngineRef,
 
   trace "Inserting block without sethead",
     hash = blockHash, number = header.number
-  let vres = ben.chain.insertBlockWithoutSetHead(blk)
+  let vres = ben.chain.importBlock(blk)
   if vres.isErr:
     ben.setInvalidAncestor(header, blockHash)
     let blockHash = latestValidHash(db, parent, ttd)
@@ -29,7 +29,7 @@ type
     txPool: TxPoolRef
     merge : MergeTracker
     queue : PayloadQueue
-    chain : ChainRef
+    chain : ForkedChainRef
 
   # The forkchoice update and new payload method require us to return the
   # latest valid hash in an invalid chain. To support that return, we need
@@ -98,7 +98,7 @@ proc setInvalidAncestor(ben: BeaconEngineRef,
 
 proc new*(_: type BeaconEngineRef,
           txPool: TxPoolRef,
-          chain: ChainRef): BeaconEngineRef =
+          chain: ForkedChainRef): BeaconEngineRef =
   let ben = BeaconEngineRef(
     txPool: txPool,
     merge : MergeTracker.init(txPool.com.db),
@@ -154,7 +154,7 @@ proc put*(ben: BeaconEngineRef, id: PayloadID,
 func com*(ben: BeaconEngineRef): CommonRef =
   ben.txPool.com
 
-func chain*(ben: BeaconEngineRef): ChainRef =
+func chain*(ben: BeaconEngineRef): ForkedChainRef =
   ben.chain
 
 func ttdReached*(ben: BeaconEngineRef): bool =
@@ -211,9 +211,8 @@ proc generatePayload*(ben: BeaconEngineRef,
   wrapException:
     let
       xp = ben.txPool
-      db = xp.com.db
       pos = xp.com.pos
-      headBlock = db.getCanonicalHead()
+      headBlock = ben.chain.latestHeader
 
     pos.prevRandao = ethHash attrs.prevRandao
     pos.timestamp = ethTime attrs.timestamp
@@ -226,7 +225,7 @@ proc generatePayload*(ben: BeaconEngineRef,
 
     if headBlock.blockHash != xp.head.blockHash:
       # reorg
-      discard xp.smartHead(headBlock)
+      discard xp.smartHead(headBlock, ben.chain)
 
     if pos.timestamp <= headBlock.timestamp:
       return err "timestamp must be strictly later than parent"
@@ -34,6 +34,11 @@ type
     current: BlockNumber
     highest: BlockNumber
 
+  SyncState* = enum
+    Waiting
+    Syncing
+    Synced
+
   SyncReqNewHeadCB* = proc(header: BlockHeader) {.gcsafe, raises: [].}
     ## Update head for syncing
@@ -62,6 +67,8 @@ type
     # synchronizer need this
     syncProgress: SyncProgress
 
+    syncState: SyncState
+
     # one of POW/POS, updated after calling `hardForkTransition`
     consensusType: ConsensusType
@@ -178,6 +185,7 @@ proc init(com : CommonRef,
   com.forkTransitionTable = config.toForkTransitionTable()
   com.networkId = networkId
   com.syncProgress= SyncProgress()
+  com.syncState = Waiting
   com.pruneHistory= pruneHistory
   com.pos = CasperRef.new
@@ -472,6 +480,9 @@ func syncCurrent*(com: CommonRef): BlockNumber =
 func syncHighest*(com: CommonRef): BlockNumber =
   com.syncProgress.highest
 
+func syncState*(com: CommonRef): SyncState =
+  com.syncState
+
 # ------------------------------------------------------------------------------
 # Setters
 # ------------------------------------------------------------------------------
@@ -485,6 +496,9 @@ func `syncCurrent=`*(com: CommonRef, number: BlockNumber) =
 func `syncHighest=`*(com: CommonRef, number: BlockNumber) =
   com.syncProgress.highest = number
 
+func `syncState=`*(com: CommonRef, state: SyncState) =
+  com.syncState = state
+
 func `startOfHistory=`*(com: CommonRef, val: Hash256) =
   ## Setter
   com.startOfHistory = val
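These SyncState accessors exist so the RPC layer can answer eth_syncing without reaching into sync internals: the syncer flips the state, the server only reads it. A small sketch of both sides, assuming a `com: CommonRef`:

  # syncer side: flip the state while blocks are being downloaded
  com.syncState = Syncing

  # RPC side (this is what eth_syncing in server_api.nim below does):
  if com.syncState != Waiting:
    echo "syncing: ", com.syncCurrent, " / ", com.syncHighest
  else:
    echo "not syncing"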
@@ -14,47 +14,54 @@ import
   eth/rlp, stew/io2,
   ./chain,
   ../common/common,
-  ../utils/utils
+  ../utils/utils,
+  ../config
 
-proc importRlpBlock*(blocksRlp: openArray[byte]; com: CommonRef; importFile: string = ""): bool =
+proc importRlpBlocks*(blocksRlp: openArray[byte],
+                      chain: ForkedChainRef,
+                      finalize: bool):
+                        Result[void, string] =
   var
     # the encoded rlp can contains one or more blocks
     rlp = rlpFromBytes(blocksRlp)
-    chain = newChain(com, extraValidation = true)
-    errorCount = 0
-    blk: array[1, EthBlock]
+    blk: EthBlock
 
   # even though the new imported blocks have block number
   # smaller than head, we keep importing it.
   # it maybe a side chain.
   # TODO the above is no longer true with a single-state database - to deal with
   #      that scenario the code needs to be rewritten to not persist the blocks
   #      to the state database until all have been processed
   while rlp.hasData:
-    blk[0] = try:
+    blk = try:
       rlp.read(EthBlock)
     except RlpError as e:
       # terminate if there was a decoding error
-      error "rlp error",
-        fileName = importFile,
-        msg = e.msg,
-        exception = e.name
-      return false
+      return err($e.name & ": " & e.msg)
 
-    chain.persistBlocks(blk).isOkOr():
-      # register one more error and continue
-      error "import error",
-        fileName = importFile,
-        error
-      errorCount.inc
+    ? chain.importBlock(blk)
 
-  return errorCount == 0
+  if finalize:
+    ? chain.forkChoice(chain.latestHash, chain.latestHash)
+  ok()
 
-proc importRlpBlock*(importFile: string; com: CommonRef): bool =
-  let res = io2.readAllBytes(importFile)
-  if res.isErr:
-    error "failed to import",
-      fileName = importFile
-    return false
-
-  importRlpBlock(res.get, com, importFile)
+proc importRlpBlocks*(importFile: string,
+                      chain: ForkedChainRef,
+                      finalize: bool): Result[void, string] =
+  let bytes = io2.readAllBytes(importFile).valueOr:
+    return err($error)
+  importRlpBlocks(bytes, chain, finalize)
+
+proc importRlpBlocks*(conf: NimbusConf, com: CommonRef) =
+  var head: BlockHeader
+  if not com.db.getCanonicalHead(head):
+    error "cannot get canonical head from db"
+    quit(QuitFailure)
+
+  let chain = newForkedChain(com, head, baseDistance = 0)
+
+  # success or not, we quit after importing blocks
+  for i, blocksFile in conf.blocksFile:
+    importRlpBlocks(string blocksFile, chain, i == conf.blocksFile.len-1).isOkOr:
+      warn "Error when importing blocks", msg=error
+      quit(QuitFailure)
+
+  quit(QuitSuccess)
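Callers now get a Result instead of a bool plus logging, and the file variant composes directly with the byte variant. A usage sketch, assuming an initialized `com: CommonRef` and an RLP dump on disk (the file name is illustrative):

  let head = com.db.getCanonicalHead()
  # baseDistance = 0: blocks become finalizable as soon as they are imported
  let chain = newForkedChain(com, head, baseDistance = 0)

  importRlpBlocks("blocks.rlp", chain, finalize = true).isOkOr:
    echo "import failed: ", error
    quit(QuitFailure)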
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -9,10 +9,11 @@
 # according to those terms.
 
 import
-  ./chain/[chain_desc, persist_blocks]
+  ./chain/[chain_desc, persist_blocks, forked_chain]
 
 export
   chain_desc,
-  persist_blocks
+  persist_blocks,
+  forked_chain
 
 # End
@@ -517,13 +517,20 @@ proc forkChoice*(c: ForkedChainRef,
 
   ok()
 
-func haveBlockAndState*(c: ForkedChainRef, hash: Hash256): bool =
-  if c.blocks.hasKey(hash):
+func haveBlockAndState*(c: ForkedChainRef, blockHash: Hash256): bool =
+  if c.blocks.hasKey(blockHash):
     return true
-  if c.baseHash == hash:
+  if c.baseHash == blockHash:
     return true
   false
 
+proc haveBlockLocally*(c: ForkedChainRef, blockHash: Hash256): bool =
+  if c.blocks.hasKey(blockHash):
+    return true
+  if c.baseHash == blockHash:
+    return true
+  c.db.headerExists(blockHash)
+
 func stateReady*(c: ForkedChainRef, header: BlockHeader): bool =
   let blockHash = header.blockHash
   blockHash == c.cursorHash
@@ -537,9 +544,19 @@ func db*(c: ForkedChainRef): CoreDbRef =
 func latestHeader*(c: ForkedChainRef): BlockHeader =
   c.cursorHeader
 
+func latestNumber*(c: ForkedChainRef): BlockNumber =
+  c.cursorHeader.number
+
 func latestHash*(c: ForkedChainRef): Hash256 =
   c.cursorHash
 
+func baseNumber*(c: ForkedChainRef): BlockNumber =
+  c.baseHeader.number
+
+func latestBlock*(c: ForkedChainRef): EthBlock =
+  c.blocks.withValue(c.cursorHash, val) do:
+    return val.blk
+
 proc headerByNumber*(c: ForkedChainRef, number: BlockNumber): Result[BlockHeader, string] =
   if number > c.cursorHeader.number:
     return err("Requested block number not exists: " & $number)
@@ -555,7 +572,7 @@ proc headerByNumber*(c: ForkedChainRef, number: BlockNumber): Result[BlockHeader
     if c.db.getBlockHeader(number, header):
       return ok(header)
     else:
-      return err("Failed to get block with number: " & $number)
+      return err("Failed to get header with number: " & $number)
 
   shouldNotKeyError:
     var prevHash = c.cursorHeader.parentHash
@@ -566,3 +583,52 @@ proc headerByNumber*(c: ForkedChainRef, number: BlockNumber): Result[BlockHeader
       prevHash = header.parentHash
 
   doAssert(false, "headerByNumber: Unreachable code")
+
+proc headerByHash*(c: ForkedChainRef, blockHash: Hash256): Result[BlockHeader, string] =
+  c.blocks.withValue(blockHash, val) do:
+    return ok(val.blk.header)
+  do:
+    if c.baseHash == blockHash:
+      return ok(c.baseHeader)
+    var header: BlockHeader
+    if c.db.getBlockHeader(blockHash, header):
+      return ok(header)
+    return err("Failed to get header with hash: " & $blockHash)
+
+proc blockByHash*(c: ForkedChainRef, blockHash: Hash256): Opt[EthBlock] =
+  # used by getPayloadBodiesByHash
+  # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/shanghai.md#specification-3
+  # 4. Client software MAY NOT respond to requests for finalized blocks by hash.
+  c.blocks.withValue(blockHash, val) do:
+    return Opt.some(val.blk)
+  do:
+    return Opt.none(EthBlock)
+
+func blockByNumber*(c: ForkedChainRef, number: BlockNumber): Result[EthBlock, string] =
+  shouldNotKeyError:
+    var prevHash = c.cursorHash
+    while prevHash != c.baseHash:
+      c.blocks.withValue(prevHash, item):
+        if item.blk.header.number == number:
+          return ok(item.blk)
+        prevHash = item.blk.header.parentHash
+  return err("Block not found, number = " & $number)
+
+func blockFromBaseTo*(c: ForkedChainRef, number: BlockNumber): seq[EthBlock] =
+  # return block in reverse order
+  shouldNotKeyError:
+    var prevHash = c.cursorHash
+    while prevHash != c.baseHash:
+      c.blocks.withValue(prevHash, item):
+        if item.blk.header.number <= number:
+          result.add item.blk
+        prevHash = item.blk.header.parentHash
+
+func isCanonical*(c: ForkedChainRef, blockHash: Hash256): bool =
+  shouldNotKeyError:
+    var prevHash = c.cursorHash
+    while prevHash != c.baseHash:
+      c.blocks.withValue(prevHash, item):
+        if blockHash == prevHash:
+          return true
+        prevHash = item.blk.header.parentHash
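Together these give ForkedChainRef a read API shaped for the engine/eth endpoints: latest* reads the cursor, headerByHash/blockByHash check the in-memory map first (falling back to the database only where the spec allows), and isCanonical walks cursor toward base. A quick sketch, assuming a populated `c: ForkedChainRef` and an arbitrary hash `someHash`:

  proc inspect(c: ForkedChainRef, someHash: Hash256) =
    echo "head: ", c.latestNumber, ", base: ", c.baseNumber

    let hdr = c.headerByHash(someHash).valueOr:
      echo "unknown header: ", error
      return
    echo "found header #", hdr.number

    # blockByHash deliberately serves only the in-memory (non-finalized) part
    let blk = c.blockByHash(someHash)
    if blk.isSome:
      echo "txs: ", blk.get.transactions.len

    if c.isCanonical(someHash):
      echo "on the canonical (cursor) chain"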
@@ -330,6 +330,7 @@ import
   stew/keyed_queue,
   results,
   ../common/common,
+  ./chain/forked_chain,
   ./casper
 
 export
@@ -424,7 +425,7 @@ proc add*(xp: TxPoolRef; tx: PooledTransaction; info = "")
   ## Variant of `add()` for a single transaction.
   xp.add(@[tx], info)
 
-proc smartHead*(xp: TxPoolRef; pos: BlockHeader): bool
+proc smartHead*(xp: TxPoolRef; pos: BlockHeader, chain: ForkedChainRef): bool
     {.gcsafe,raises: [CatchableError].} =
   ## This function moves the internal head cache (i.e. tx insertion point,
   ## vmState) and ponts it to a now block on the chain.
@@ -436,7 +437,7 @@ proc smartHead*(xp: TxPoolRef; pos: BlockHeader): bool
   ## the internal head cache, the previously calculated actions will be
   ## applied.
   ##
-  let rcDiff = xp.headDiff(pos)
+  let rcDiff = xp.headDiff(pos, chain)
   if rcDiff.isOk:
     let changes = rcDiff.value
@@ -444,13 +445,6 @@ proc smartHead*(xp: TxPoolRef; pos: BlockHeader): bool
   # `addTxs()` otherwise.
   xp.setHead(pos)
 
-  # Re-inject transactions, do that via job queue
-  if 0 < changes.addTxs.len:
-    debug "queuing delta txs",
-      mode = "inject",
-      num = changes.addTxs.len
-    xp.pDoubleCheckAdd xp.addTxs(toSeq(changes.addTxs.nextValues)).topItems
-
   # Delete already *mined* transactions
   if 0 < changes.remTxs.len:
     debug "queuing delta txs",
@@ -19,9 +19,8 @@ import
   ../tx_desc,
   ../tx_info,
   ../tx_item,
-  chronicles,
-  eth/keys,
-  stew/keyed_queue
+  ../../chain/forked_chain,
+  eth/keys
 
 {.push raises: [].}
@@ -30,225 +29,30 @@ type
     ## Diff data, txs changes that apply after changing the head\
     ## insertion point of the block chain
 
-    addTxs*: KeyedQueue[Hash256, PooledTransaction] ##\
-      ## txs to add; using a queue makes it more intuive to delete
-      ## items while travesing the queue in a loop.
-
     remTxs*: Table[Hash256,bool] ##\
       ## txs to remove
 
-logScope:
-  topics = "tx-pool head adjust"
-
-# ------------------------------------------------------------------------------
-# Private functions
-# ------------------------------------------------------------------------------
-
-# use it as a stack/lifo as the ordering is reversed
-proc insert(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256)
-    {.raises: [BlockNotFound].} =
-  let db = xp.vmState.com.db
-  for tx in db.getBlockBody(blockHash).transactions:
-    if tx.versionedHashes.len > 0:
-      # EIP-4844 blobs are not persisted and cannot be re-broadcasted.
-      # Note that it is also not possible to crete a cache in all cases,
-      # as we may have never seen the actual blob sidecar while syncing.
-      # Only the consensus layer persists the blob sidecar.
-      continue
-    kq.addTxs[tx.itemID] = PooledTransaction(tx: tx)
-
-proc remove(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256)
-    {.gcsafe,raises: [BlockNotFound].} =
-  let db = xp.vmState.com.db
-  for tx in db.getBlockBody(blockHash).transactions:
-    kq.remTxs[tx.itemID] = true
-
-proc new(T: type TxHeadDiffRef): T =
-  new result
-
 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------
 
 # core/tx_pool.go(218): func (pool *TxPool) reset(oldHead, newHead ...
 proc headDiff*(xp: TxPoolRef;
-               newHead: BlockHeader): Result[TxHeadDiffRef,TxInfo]
-    {.gcsafe,raises: [CatchableError].} =
-  ## This function caclulates the txs differences between the cached block
-  ## chain head to a new head implied by the argument `newHeader`. Differences
-  ## are returned as two tables for adding and removing txs. The tables table
-  ## for adding transactions (is a queue and) preserves the order of the txs
-  ## from the block chain.
-  ##
-  ## Now, considering add/delete tx actions needed when replacing the cached
-  ## *current head* position by the *new head* position (as derived from the
-  ## function argument `newHeader`), the most complex case of a block chain
-  ## might look as follows:
-  ## ::
-  ##  .                      o---o-- .. ---o---o
-  ##  .                     /                  ^
-  ##  .  block chain .. ---o---o---o .. --o    |
-  ##  .                    ^              ^    |
-  ##  .                    |              |    |
-  ##  .            common ancestor        |    |
-  ##  .                                   |    |
-  ##  .                               new head |
-  ##  .                                        |
-  ##  .                                 current head
-  ##
-  ## Legend
-  ## * the bullet *o* stands for a block
-  ## * a link *---* between two blocks indicates that the block number to
-  ##   the right increases by *1*
-  ## * the *common ancestor* is chosen with the largest possible block number
-  ##   not exceeding the block numbers of both, the *current head* and the
-  ##   *new head*
-  ## * the branches to the right of the *common ancestor* may collapse to a
-  ##   a single branch (in which case at least one of *old head* or
-  ##   *new head* collapses with the *common ancestor*)
-  ## * there is no assumption on the block numbers of *new head* and
-  ##   *current head* as of which one is greater, they might also be equal
-  ##
-  ## Consider the two sets *ADD* and *DEL* where
-  ##
-  ## *ADD*
-  ##   is the set of txs on the branch between the *common ancestor* and
-  ##   the *current head*, and
-  ## *DEL*
-  ##   is the set of txs on the branch between the *common ancestor* and
-  ##   the *new head*
-  ##
-  ## Then, the set of txs to be added to the pool is *ADD - DEL* and the set
-  ## of txs to be removed is *DEL - ADD*.
-  ##
+               newHead: BlockHeader, chain: ForkedChainRef): Result[TxHeadDiffRef,TxInfo] =
   let
+    txDiffs = TxHeadDiffRef()
+
+  if newHead.number <= chain.baseNumber:
+    # return empty diff
+    return ok(txDiffs)
+
+  let
     curHead = xp.head
     curHash = curHead.blockHash
     newHash = newHead.blockHash
-    db = xp.vmState.com.db
+    blk = chain.blockByHash(newHash).valueOr:
+      return err(txInfoErrForwardHeadMissing)
 
-  var ignHeader: BlockHeader
-  if not db.getBlockHeader(newHash, ignHeader):
-    # sanity check
-    warn "Tx-pool head forward for non-existing header",
-      newHead = newHash,
-      newNumber = newHead.number
-    return err(txInfoErrForwardHeadMissing)
-
-  if not db.getBlockHeader(curHash, ignHeader):
-    # This can happen if a `setHead()` is performed, where we have discarded
-    # the old head from the chain.
-    if curHead.number <= newHead.number:
-      warn "Tx-pool head forward from detached current header",
-        curHead = curHash,
-        curNumber = curHead.number
-      return err(txInfoErrAncestorMissing)
-    debug "Tx-pool reset with detached current head",
-      curHeader = curHash,
-      curNumber = curHead.number,
-      newHeader = newHash,
-      newNumber = newHead.number
-    return err(txInfoErrChainHeadMissing)
-
-  # Equalise block numbers between branches (typically, these btanches
-  # collapse and there is a single strain only)
-  var
-    txDiffs = TxHeadDiffRef.new
-
-    curBranchHead = curHead
-    curBranchHash = curHash
-    newBranchHead = newHead
-    newBranchHash = newHash
-
-  if newHead.number < curHead.number:
-    #
-    # new head block number smaller than the current head one
-    #
-    #           ,o---o-- ..--o
-    #          /             ^
-    #  ----o---o---o         |
-    #      ^                 |
-    #      |                 |
-    #     new << current (step back this one)
-    #
-    # + preserve transactions on the upper branch blocks,
-    #
-    # + txs of blocks with numbers between #new..#current need to be
-    #   re-inserted into the pool
-    #
-    while newHead.number < curBranchHead.number:
-      xp.insert(txDiffs, curBranchHash)
-      let
-        tmpHead = curBranchHead # cache value for error logging
-        tmpHash = curBranchHash
-      curBranchHash = curBranchHead.parentHash # decrement block number
-      if not db.getBlockHeader(curBranchHash, curBranchHead):
-        error "Unrooted old chain seen by tx-pool",
-          curBranchHead = tmpHash,
-          curBranchNumber = tmpHead.number
-        return err(txInfoErrUnrootedCurChain)
-  else:
-    #
-    # current head block number smaller (or equal) than the new head one
-    #
-    #           ,o---o-- ..--o
-    #          /             ^
-    #  ----o---o---o         |
-    #      ^                 |
-    #      |                 |
-    #     current << new (step back this one)
-    #
-    # + preserve some transactions on the upper branch blocks,
-    #
-    # + txs of blocks with numbers between #current..#new need to be
-    #   deleted from the pool (as they are on the block chain, now)
-    #
-    while curHead.number < newBranchHead.number:
-      xp.remove(txDiffs, newBranchHash)
-      let
-        tmpHead = newBranchHead # cache value for error logging
-        tmpHash = newBranchHash
-      newBranchHash = newBranchHead.parentHash # decrement block number
-      if not db.getBlockHeader(newBranchHash, newBranchHead):
-        error "Unrooted new chain seen by tx-pool",
-          newBranchHead = tmpHash,
-          newBranchNumber = tmpHead.number
-        return err(txInfoErrUnrootedNewChain)
-
-  # simultaneously step back until junction-head (aka common ancestor) while
-  # preserving txs between block numbers #ancestor..#current unless
-  # between #ancestor..#new
-  while curBranchHash != newBranchHash:
-    block:
-      xp.insert(txDiffs, curBranchHash)
-      let
-        tmpHead = curBranchHead # cache value for error logging
-        tmpHash = curBranchHash
-      curBranchHash = curBranchHead.parentHash
-      if not db.getBlockHeader(curBranchHash, curBranchHead):
-        error "Unrooted old chain seen by tx-pool",
-          curBranchHead = tmpHash,
-          curBranchNumber = tmpHead.number
-        return err(txInfoErrUnrootedCurChain)
-    block:
-      xp.remove(txDiffs, newBranchHash)
-      let
-        tmpHead = newBranchHead # cache value for error logging
-        tmpHash = newBranchHash
-      newBranchHash = newBranchHead.parentHash
-      if not db.getBlockHeader(newBranchHash, newBranchHead):
-        error "Unrooted new chain seen by tx-pool",
-          newBranchHead = tmpHash,
-          newBranchNumber = tmpHead.number
-        return err(txInfoErrUnrootedNewChain)
-
-  # figure out difference sets
-  for itemID in txDiffs.addTxs.nextKeys:
-    if txDiffs.remTxs.hasKey(itemID):
-      txDiffs.addTxs.del(itemID) # ok to delete the current one on a KeyedQueue
-      txDiffs.remTxs.del(itemID)
+  for tx in blk.transactions:
+    txDiffs.remTxs[tx.itemID] = true
 
   ok(txDiffs)
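Summarizing the rewrite for readers diffing the two versions: the ancestor-walking algorithm (and with it tx re-injection on reorgs via addTxs) is gone. The pool now only drops the transactions mined in the block that became the new head, and treats anything at or below ForkedChainRef's base as a no-op. In sketch form, assuming the types above:

  # old: walk both branches back to the common ancestor, computing
  #      ADD (re-inject) and DEL (drop) sets from database block bodies
  # new: newHead.number <= chain.baseNumber  -> empty diff
  #      otherwise fetch newHead's block from the in-memory map and
  #      mark its transactions for removal; nothing is re-injected
  let diff = xp.headDiff(newHead, chain).expect("head known to chain")
  echo "txs leaving the pool: ", diff.remTxs.len  # remTxs is the only field left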
@@ -300,12 +300,6 @@ proc markCanonicalChain(
 # Public functions
 # ------------------------------------------------------------------------------
 
-proc exists*(db: CoreDbRef, hash: Hash256): bool =
-  db.ctx.getKvt().hasKeyRc(hash.data).valueOr:
-    warn "exisis", hash, error=($$error)
-    return false
-  # => true/false
-
 proc getSavedStateBlockNumber*(
     db: CoreDbRef;
       ): BlockNumber =
@@ -941,13 +935,6 @@ proc finalizedHeader*(
     {.gcsafe, raises: [BlockNotFound].} =
   db.getBlockHeader(db.finalizedHeaderHash)
 
-proc haveBlockAndState*(db: CoreDbRef, headerHash: Hash256): bool =
-  var header: BlockHeader
-  if not db.getBlockHeader(headerHash, header):
-    return false
-  # see if stateRoot exists
-  db.exists(header.stateRoot)
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
|
@ -45,14 +45,8 @@ proc basicServices(nimbus: NimbusNode,
|
|||
# so it can know the latest account state
|
||||
# e.g. sender nonce, etc
|
||||
let head = com.db.getCanonicalHead()
|
||||
doAssert nimbus.txPool.smartHead(head)
|
||||
|
||||
# chainRef: some name to avoid module-name/filed/function misunderstandings
|
||||
nimbus.chainRef = newChain(com)
|
||||
if conf.verifyFrom.isSome:
|
||||
let verifyFrom = conf.verifyFrom.get()
|
||||
nimbus.chainRef.extraValidation = 0 < verifyFrom
|
||||
nimbus.chainRef.verifyFrom = verifyFrom
|
||||
nimbus.chainRef = newForkedChain(com, head)
|
||||
doAssert nimbus.txPool.smartHead(head, nimbus.chainRef)
|
||||
|
||||
nimbus.beaconEngine = BeaconEngineRef.new(nimbus.txPool, nimbus.chainRef)
|
||||
|
||||
|
|
|
@@ -46,7 +46,7 @@ type
     ethNode*: EthereumNode
     state*: NimbusState
     ctx*: EthContext
-    chainRef*: ChainRef
+    chainRef*: ForkedChainRef
     txPool*: TxPoolRef
     networkLoop*: Future[void]
     peerManager*: PeerManagerRef
@@ -336,10 +336,4 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
   if blocks.len > 0:
     process()
 
-  for blocksFile in conf.blocksFile:
-    if isFile(string blocksFile):
-      # success or not, we quit after importing blocks
-      if not importRlpBlock(string blocksFile, com):
-        quit(QuitFailure)
-      else:
-        quit(QuitSuccess)
+  importRlpBlocks(conf, com)
@@ -21,6 +21,7 @@ import
   ./rpc/rpc_server,
   ./rpc/experimental,
   ./rpc/oracle,
+  ./rpc/server_api,
   ./nimbus_desc,
   ./graphql/ethapi
@@ -33,7 +34,8 @@ export
   cors,
   rpc_server,
   experimental,
-  oracle
+  oracle,
+  server_api
 
 {.push gcsafe, raises: [].}
@ -0,0 +1,244 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
stint,
|
||||
web3/conversions,
|
||||
json_rpc/rpcserver,
|
||||
../common,
|
||||
../db/ledger,
|
||||
../core/chain/forked_chain,
|
||||
../beacon/web3_eth_conv,
|
||||
../transaction/call_evm,
|
||||
../evm/evm_errors,
|
||||
./rpc_types,
|
||||
./filters,
|
||||
./server_api_helpers
|
||||
|
||||
type
|
||||
ServerAPIRef = ref object
|
||||
com: CommonRef
|
||||
chain: ForkedChainRef
|
||||
|
||||
const
|
||||
defaultTag = blockId("latest")
|
||||
|
||||
func newServerAPI*(c: ForkedChainRef): ServerAPIRef =
|
||||
ServerAPIRef(
|
||||
com: c.com,
|
||||
chain: c,
|
||||
)
|
||||
|
||||
proc headerFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[common.BlockHeader, string] =
|
||||
if blockTag.kind == bidAlias:
|
||||
let tag = blockTag.alias.toLowerAscii
|
||||
case tag
|
||||
of "latest": return ok(api.chain.latestHeader)
|
||||
else:
|
||||
return err("Unsupported block tag " & tag)
|
||||
else:
|
||||
let blockNum = common.BlockNumber blockTag.number
|
||||
return api.chain.headerByNumber(blockNum)
|
||||
|
||||
proc headerFromTag(api: ServerAPIRef, blockTag: Opt[BlockTag]): Result[common.BlockHeader, string] =
|
||||
let blockId = blockTag.get(defaultTag)
|
||||
api.headerFromTag(blockId)
|
||||
|
||||
proc ledgerFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[LedgerRef, string] =
|
||||
let header = ?api.headerFromTag(blockTag)
|
||||
if api.chain.stateReady(header):
|
||||
ok(LedgerRef.init(api.com.db, header.stateRoot))
|
||||
else:
|
||||
# TODO: Replay state?
|
||||
err("Block state not ready")
|
||||
|
||||
proc blockFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[EthBlock, string] =
|
||||
if blockTag.kind == bidAlias:
|
||||
let tag = blockTag.alias.toLowerAscii
|
||||
case tag
|
||||
of "latest": return ok(api.chain.latestBlock)
|
||||
else:
|
||||
return err("Unsupported block tag " & tag)
|
||||
else:
|
||||
let blockNum = common.BlockNumber blockTag.number
|
||||
return api.chain.blockByNumber(blockNum)
|
||||
|
||||
proc blockFromTag(api: ServerAPIRef, blockTag: Opt[BlockTag]): Result[EthBlock, string] =
|
||||
let blockId = blockTag.get(defaultTag)
|
||||
api.blockFromTag(blockId)
|
||||
|
||||
proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) =
|
||||
|
||||
server.rpc("eth_getBalance") do(data: Web3Address, blockTag: BlockTag) -> UInt256:
|
||||
## Returns the balance of the account of given address.
|
||||
let
|
||||
ledger = api.ledgerFromTag(blockTag).valueOr:
|
||||
raise newException(ValueError, error)
|
||||
address = ethAddr data
|
||||
result = ledger.getBalance(address)
|
||||
|
||||
server.rpc("eth_getStorageAt") do(data: Web3Address, slot: UInt256, blockTag: BlockTag) -> FixedBytes[32]:
|
||||
## Returns the value from a storage position at a given address.
|
||||
let
|
||||
ledger = api.ledgerFromTag(blockTag).valueOr:
|
||||
raise newException(ValueError, error)
|
||||
address = ethAddr data
|
||||
value = ledger.getStorage(address, slot)
|
||||
result = w3FixedBytes value
|
||||
|
||||
server.rpc("eth_getTransactionCount") do(data: Web3Address, blockTag: BlockTag) -> Web3Quantity:
|
||||
## Returns the number of transactions ak.s. nonce sent from an address.
|
||||
let
|
||||
ledger = api.ledgerFromTag(blockTag).valueOr:
|
||||
raise newException(ValueError, error)
|
||||
address = ethAddr data
|
||||
nonce = ledger.getNonce(address)
|
||||
result = w3Qty nonce
|
||||
|
||||
server.rpc("eth_blockNumber") do() -> Web3Quantity:
|
||||
## Returns integer of the current block number the client is on.
|
||||
result = w3Qty(api.chain.latestNumber)
|
||||
|
||||
server.rpc("eth_chainId") do() -> Web3Quantity:
|
||||
return w3Qty(distinctBase(api.com.chainId))
|
||||
|
||||
server.rpc("eth_getCode") do(data: Web3Address, blockTag: BlockTag) -> seq[byte]:
|
||||
## Returns code at a given address.
|
||||
##
|
||||
## data: address
|
||||
## blockTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter.
|
||||
## Returns the code from the given address.
|
||||
let
|
||||
ledger = api.ledgerFromTag(blockTag).valueOr:
|
||||
raise newException(ValueError, error)
|
||||
address = ethAddr data
|
||||
result = ledger.getCode(address).bytes()
|
||||
|
||||
server.rpc("eth_getBlockByHash") do(data: Web3Hash, fullTransactions: bool) -> BlockObject:
|
||||
## Returns information about a block by hash.
|
||||
##
|
||||
## data: Hash of a block.
|
||||
## fullTransactions: If true it returns the full transaction objects, if false only the hashes of the transactions.
|
||||
## Returns BlockObject or nil when no block was found.
|
||||
let blockHash = data.ethHash
|
||||
|
||||
let blk = api.chain.blockByHash(blockHash).valueOr:
|
||||
return nil
|
||||
|
||||
return populateBlockObject(blockHash, blk, fullTransactions)
|
||||
|
||||
server.rpc("eth_getBlockByNumber") do(blockTag: BlockTag, fullTransactions: bool) -> BlockObject:
|
||||
## Returns information about a block by block number.
|
||||
##
|
||||
## blockTag: integer of a block number, or the string "earliest", "latest" or "pending", as in the default block parameter.
|
||||
## fullTransactions: If true it returns the full transaction objects, if false only the hashes of the transactions.
|
||||
## Returns BlockObject or nil when no block was found.
|
||||
let blk = api.blockFromTag(blockTag).valueOr:
|
||||
return nil
|
||||
|
||||
let blockHash = blk.header.blockHash
|
||||
return populateBlockObject(blockHash, blk, fullTransactions)
|
||||
|
||||
server.rpc("eth_syncing") do() -> SyncingStatus:
|
||||
## Returns SyncObject or false when not syncing.
|
||||
if api.com.syncState != Waiting:
|
||||
let sync = SyncObject(
|
||||
startingBlock: w3Qty api.com.syncStart,
|
||||
currentBlock : w3Qty api.com.syncCurrent,
|
||||
highestBlock : w3Qty api.com.syncHighest
|
||||
)
|
||||
return SyncingStatus(syncing: true, syncObject: sync)
|
||||
else:
|
||||
return SyncingStatus(syncing: false)
|
||||
|
||||
proc getLogsForBlock(
|
||||
chain: ForkedChainRef,
|
||||
blk: EthBlock,
|
||||
opts: FilterOptions): seq[FilterLog]
|
||||
{.gcsafe, raises: [RlpError].} =
|
||||
if headerBloomFilter(blk.header, opts.address, opts.topics):
|
||||
let receipts = chain.db.getReceipts(blk.header.receiptsRoot)
|
||||
# Note: this will hit assertion error if number of block transactions
|
||||
# do not match block receipts.
|
||||
# Although this is fine as number of receipts should always match number
|
||||
# of transactions
|
||||
let logs = deriveLogs(blk.header, blk.transactions, receipts)
|
||||
let filteredLogs = filterLogs(logs, opts.address, opts.topics)
|
||||
return filteredLogs
|
||||
else:
|
||||
return @[]
|
||||
|
||||
proc getLogsForRange(
|
||||
chain: ForkedChainRef,
|
||||
start: common.BlockNumber,
|
||||
finish: common.BlockNumber,
|
||||
opts: FilterOptions): seq[FilterLog]
|
||||
{.gcsafe, raises: [RlpError].} =
|
||||
var
|
||||
logs = newSeq[FilterLog]()
|
||||
blockNum = start
|
||||
|
||||
while blockNum <= finish:
|
||||
let
|
||||
blk = chain.blockByNumber(blockNum).valueOr:
|
||||
return logs
|
||||
filtered = chain.getLogsForBlock(blk, opts)
|
||||
logs.add(filtered)
|
||||
blockNum = blockNum + 1
|
||||
return logs
|
||||
|
||||
server.rpc("eth_getLogs") do(filterOptions: FilterOptions) -> seq[FilterLog]:
|
||||
## filterOptions: settings for this filter.
|
||||
## Returns a list of all logs matching a given filter object.
|
||||
## TODO: Current implementation is pretty naive and not efficient
|
||||
## as it requires to fetch all transactions and all receipts from database.
|
||||
## Other clients (Geth):
|
||||
## - Store logs related data in receipts.
|
||||
## - Have separate indexes for Logs in given block
|
||||
## Both of those changes require improvements to the way how we keep our data
|
||||
## in Nimbus.
|
||||
if filterOptions.blockHash.isSome():
|
||||
let
|
||||
hash = ethHash filterOptions.blockHash.expect("blockHash")
|
||||
blk = api.chain.blockByHash(hash).valueOr:
|
||||
raise newException(ValueError, "Block not found")
|
||||
return getLogsForBlock(api.chain, blk, filterOptions)
|
||||
else:
|
||||
# TODO: do something smarter with tags. It would be the best if
|
||||
# tag would be an enum (Earliest, Latest, Pending, Number), and all operations
|
||||
# would operate on this enum instead of raw strings. This change would need
|
||||
# to be done on every endpoint to be consistent.
|
||||
let
|
||||
blockFrom = api.headerFromTag(filterOptions.fromBlock).valueOr:
|
||||
raise newException(ValueError, "Block not found")
|
||||
blockTo = api.headerFromTag(filterOptions.toBlock).valueOr:
|
||||
raise newException(ValueError, "Block not found")
|
||||
|
||||
# Note: if fromHeader.number > toHeader.number, no logs will be
|
||||
# returned. This is consistent with, what other ethereum clients return
|
||||
return api.chain.getLogsForRange(
|
||||
blockFrom.number,
|
||||
blockTo.number,
|
||||
filterOptions
|
||||
)
|
||||
|
||||
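Continuing the hypothetical client session sketched above, a range query for logs might look like this (the address and block range are placeholders, not part of this change):

  # Hedged sketch: scans blocks 0..latest through getLogsForRange,
  # bloom-filtering each header before receipts are fetched.
  let logParams = %[%*{
    "fromBlock": "0x0",
    "toBlock": "latest",
    "address": "0x0000000000000000000000000000000000000000"
  }]
  let logs = await client.call("eth_getLogs", logParams)
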
server.rpc("eth_call") do(args: TransactionArgs, blockTag: BlockTag) -> seq[byte]:
|
||||
## Executes a new message call immediately without creating a transaction on the block chain.
|
||||
##
|
||||
## call: the transaction call object.
|
||||
## quantityTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter.
|
||||
## Returns the return value of executed contract.
|
||||
let
|
||||
header = api.headerFromTag(blockTag).valueOr:
|
||||
raise newException(ValueError, "Block not found")
|
||||
res = rpcCallEvm(args, header, api.com).valueOr:
|
||||
raise newException(ValueError, "rpcCallEvm error: " & $error.code)
|
||||
result = res.output
|
|
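A matching read-only call from the same hypothetical session; the target address and calldata are placeholders:

  # Hedged sketch: eth_call executes against the state at the resolved
  # header without creating a transaction.
  let callArgs = %*{
    "to": "0x0000000000000000000000000000000000000000",
    "data": "0x"
  }
  let output = await client.call("eth_call", %[callArgs, %"latest"])
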
@ -0,0 +1,117 @@
# Nimbus
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import
  eth/common/eth_types,
  eth/common/eth_types_rlp,
  web3/eth_api_types,
  ../beacon/web3_eth_conv,
  ../constants,
  ../transaction

proc toWd(wd: eth_types.Withdrawal): WithdrawalObject =
  WithdrawalObject(
    index: w3Qty wd.index,
    validatorIndex: w3Qty wd.validatorIndex,
    address: w3Addr wd.address,
    amount: w3Qty wd.amount,
  )

proc toWdList(list: openArray[eth_types.Withdrawal]): seq[WithdrawalObject] =
  result = newSeqOfCap[WithdrawalObject](list.len)
  for x in list:
    result.add toWd(x)

func toWdList(x: Opt[seq[eth_types.Withdrawal]]):
    Opt[seq[WithdrawalObject]] =
  if x.isNone: Opt.none(seq[WithdrawalObject])
  else: Opt.some(toWdList x.get)

proc populateTransactionObject*(tx: Transaction,
    optionalHash: Opt[eth_types.Hash256] = Opt.none(eth_types.Hash256),
    optionalNumber: Opt[eth_types.BlockNumber] = Opt.none(eth_types.BlockNumber),
    txIndex: Opt[uint64] = Opt.none(uint64)): TransactionObject =
  result = TransactionObject()
  result.`type` = Opt.some Quantity(tx.txType)
  result.blockHash = w3Hash optionalHash
  result.blockNumber = w3BlockNumber optionalNumber

  var sender: EthAddress
  if tx.getSender(sender):
    result.`from` = w3Addr sender
  result.gas = w3Qty(tx.gasLimit)
  result.gasPrice = w3Qty(tx.gasPrice)
  result.hash = w3Hash tx.rlpHash
  result.input = tx.payload
  result.nonce = w3Qty(tx.nonce)
  result.to = Opt.some(w3Addr tx.destination)
  if txIndex.isSome:
    result.transactionIndex = Opt.some(Quantity(txIndex.get))
  result.value = tx.value
  result.v = w3Qty(tx.V)
  result.r = tx.R
  result.s = tx.S
  result.maxFeePerGas = Opt.some w3Qty(tx.maxFeePerGas)
  result.maxPriorityFeePerGas = Opt.some w3Qty(tx.maxPriorityFeePerGas)

  if tx.txType >= TxEip2930:
    result.chainId = Opt.some(Web3Quantity(tx.chainId))
    result.accessList = Opt.some(w3AccessList(tx.accessList))

  if tx.txType >= TxEIP4844:
    result.maxFeePerBlobGas = Opt.some(tx.maxFeePerBlobGas)
    result.blobVersionedHashes = Opt.some(w3Hashes tx.versionedHashes)

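Since the block hash, block number, and index parameters all default to Opt.none, the same helper serves both the in-block view and a standalone (e.g. pending) view of a transaction. A hypothetical usage sketch, assuming `blk` and `blockHash` are already in scope:

  # In-block view: hash, number and index are attached.
  let inBlock = populateTransactionObject(blk.transactions[0],
    Opt.some(blockHash),
    Opt.some(blk.header.number),
    Opt.some(0'u64))
  # Standalone view: all three fields stay unset.
  let standalone = populateTransactionObject(blk.transactions[0])
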
proc populateBlockObject*(blockHash: eth_types.Hash256,
    blk: EthBlock,
    fullTx: bool): BlockObject =
  template header: auto = blk.header

  result = BlockObject()
  result.number = w3BlockNumber(header.number)
  result.hash = w3Hash blockHash
  result.parentHash = w3Hash header.parentHash
  result.nonce = Opt.some(FixedBytes[8] header.nonce)
  result.sha3Uncles = w3Hash header.ommersHash
  result.logsBloom = FixedBytes[256] header.logsBloom
  result.transactionsRoot = w3Hash header.txRoot
  result.stateRoot = w3Hash header.stateRoot
  result.receiptsRoot = w3Hash header.receiptsRoot
  result.miner = w3Addr header.coinbase
  result.difficulty = header.difficulty
  result.extraData = HistoricExtraData header.extraData
  result.mixHash = w3Hash header.mixHash

  # discard sizeof(seq[byte]) of extraData and use the actual length
  let size = sizeof(eth_types.BlockHeader) - sizeof(eth_types.Blob) + header.extraData.len
  result.size = Quantity(size)

  result.gasLimit = w3Qty(header.gasLimit)
  result.gasUsed = w3Qty(header.gasUsed)
  result.timestamp = w3Qty(header.timestamp)
  result.baseFeePerGas = header.baseFeePerGas

  if fullTx:
    for i, tx in blk.transactions:
      let txObj = populateTransactionObject(tx,
        Opt.some(blockHash),
        Opt.some(header.number), Opt.some(i.uint64))
      result.transactions.add txOrHash(txObj)
  else:
    for i, tx in blk.transactions:
      let txHash = rlpHash(tx)
      result.transactions.add txOrHash(w3Hash(txHash))

  result.withdrawalsRoot = w3Hash header.withdrawalsRoot
  result.withdrawals = toWdList blk.withdrawals
  result.parentBeaconBlockRoot = w3Hash header.parentBeaconBlockRoot
  result.blobGasUsed = w3Qty(header.blobGasUsed)
  result.excessBlobGas = w3Qty(header.excessBlobGas)

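The fullTx flag is what eth_getBlockByHash and eth_getBlockByNumber pass through as fullTransactions above. A hypothetical usage sketch, assuming an EthBlock `blk` is in scope:

  # With fullTx = false the transactions field carries bare hashes;
  # with fullTx = true it carries full TransactionObject entries.
  let hash = blk.header.blockHash
  let withHashes  = populateBlockObject(hash, blk, fullTx = false)
  let withObjects = populateBlockObject(hash, blk, fullTx = true)
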
@ -127,7 +127,7 @@ proc enableRpcMagic(ctx: BeaconSyncRef) =
proc init*(
    T: type BeaconSyncRef;
    ethNode: EthereumNode;
    chain: ChainRef;
    chain: ForkedChainRef;
    rng: ref HmacDrbgContext;
    maxPeers: int;
    id: int = 0): T =

@ -280,8 +280,7 @@ proc fillCanonicalChain*(sk: SkeletonRef): Result[void, string] =
  let subchain = sk.last
  if sk.progress.canonicalHeadReset:
    # Grab the previous head block in case of resetting the canonical head
    let oldHead = sk.canonicalHead().valueOr:
      return err(error)
    let oldHead = sk.canonicalHead()
    maybeOldHead = Opt.some oldHead

  if subchain.tail > canonicalHead + 1:

@ -184,12 +184,8 @@ proc deleteHeaderAndBody*(sk: SkeletonRef, header: BlockHeader) =
  sk.del(skeletonBlockHashToNumberKey(header.blockHash))
  sk.del(skeletonBodyKey(header.sumHash))

proc canonicalHead*(sk: SkeletonRef): Result[BlockHeader, string] =
  ## Returns Opt.some or error, never returns Opt.none
  try:
    ok(sk.db.getCanonicalHead())
  except CatchableError as ex:
    err(ex.msg)
proc canonicalHead*(sk: SkeletonRef): BlockHeader =
  sk.chain.latestHeader

proc resetCanonicalHead*(sk: SkeletonRef, newHead, oldHead: uint64) =
  debug "RESET CANONICAL", newHead, oldHead

@ -198,7 +194,8 @@ proc resetCanonicalHead*(sk: SkeletonRef, newHead, oldHead: uint64) =
proc insertBlocks*(sk: SkeletonRef,
    blocks: openArray[EthBlock],
    fromEngine: bool): Result[uint64, string] =
  discard ? sk.chain.persistBlocks(blocks)
  for blk in blocks:
    ? sk.chain.importBlock(blk)
  ok(blocks.len.uint64)

proc insertBlock*(sk: SkeletonRef,

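The switch from persistBlocks to per-block importBlock reflects the ForkedChainRef model: imported blocks land in an in-memory fork tree and only become canonical once a fork-choice update selects them. A hedged sketch of that flow, assuming `com` and a `blocks` sequence are in scope:

  # Import each block, then promote the last one to canonical head.
  let chain = newForkedChain(com, com.genesisHeader)
  for blk in blocks:
    chain.importBlock(blk).isOkOr:
      raiseAssert "importBlock failed: " & error
  let head = blocks[^1].header.blockHash
  # forkChoice(headHash, finalizedHash), as used in the tests below.
  discard chain.forkChoice(head, head)
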
@ -62,7 +62,7 @@ type
    started*     : Time            # Timestamp when the skeleton syncer was created
    logged*      : Time            # Timestamp when progress was last logged to user
    db*          : CoreDbRef
    chain*       : ChainRef
    chain*       : ForkedChainRef
    conf*        : SkeletonConfig
    fillLogIndex*: uint64

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at
#    https://opensource.org/licenses/MIT).

@ -29,7 +29,7 @@ logScope:
# Constructors
# ------------------------------------------------------------------------------

proc new*(_: type SkeletonRef, chain: ChainRef): SkeletonRef =
proc new*(_: type SkeletonRef, chain: ForkedChainRef): SkeletonRef =
  SkeletonRef(
    progress: Progress(),
    pulled  : 0,

@ -28,7 +28,7 @@ type

  EthWireRef* = ref object of EthWireBase
    db: CoreDbRef
    chain: ChainRef
    chain: ForkedChainRef
    txPool: TxPoolRef
    peerPool: PeerPool
    knownByPeer: Table[Peer, HashToTime]

@ -273,7 +273,7 @@ proc setupPeerObserver(ctx: EthWireRef) =
# ------------------------------------------------------------------------------

proc new*(_: type EthWireRef,
    chain: ChainRef,
    chain: ForkedChainRef,
    txPool: TxPoolRef,
    peerPool: PeerPool): EthWireRef =
  let ctx = EthWireRef(

@ -22,7 +22,7 @@ import
proc addEthHandlerCapability*(
    node: EthereumNode;
    peerPool: PeerPool;
    chain: ChainRef;
    chain: ForkedChainRef;
    txPool = TxPoolRef(nil);
      ) =
  ## Install `eth` handlers. Passing `txPool` as `nil` installs the handler

@ -46,7 +46,7 @@ type
    ## Shared state among all syncing peer workers (aka buddies.)
    buddiesMax*: int        ## Max number of buddies
    ethWireCtx*: EthWireRef ## Eth protocol wire context (if available)
    chain*: ChainRef        ## Block chain database (no need for `Peer`)
    chain*: ForkedChainRef  ## Block chain database (no need for `Peer`)
    poolMode*: bool         ## Activate `runPool()` workers if set `true`
    daemon*: bool           ## Enable global background job
    pool*: S                ## Shared context for all worker peers

@ -383,7 +383,7 @@ proc onPeerDisconnected[S,W](dsc: RunnerSyncRef[S,W], peer: Peer) =
proc initSync*[S,W](
    dsc: RunnerSyncRef[S,W];
    node: EthereumNode;
    chain: ChainRef;
    chain: ForkedChainRef;
    slots: int;
      ) =
  ## Constructor

@ -10,7 +10,7 @@

import
  stew/byteutils,
  ../../nimbus/core/chain,
  ../../nimbus/core/chain/forked_chain,
  ../../nimbus/core/pow/difficulty,
  ../../nimbus/config,
  ../../nimbus/common,

@ -26,7 +26,7 @@ type

  TestEnv* = object
    conf* : NimbusConf
    chain*: ChainRef
    chain*: ForkedChainRef

  CCModify = proc(cc: NetworkParams)

@ -67,7 +67,7 @@ proc setupEnv*(extraValidation: bool = false, ccm: CCModify = nil): TestEnv =
      conf.networkId,
      conf.networkParams
    )
    chain = newChain(com, extraValidation)
    chain = newForkedChain(com, com.genesisHeader, extraValidation = extraValidation)

  TestEnv(
    conf : conf,

@ -75,8 +75,7 @@ proc test6*() =
    check skel.blockHeight == 4

  test "canonical height should now be at head with correct chain":
    let latestHash = env.chain.currentBlock().blockHash
    check latestHash == block4PoS.blockHash
    check env.chain.latestHash == block4PoS.blockHash

  test "should update to new height":
    skel.setHeadT(block5, true, false)

@ -61,5 +61,4 @@ proc test8*() =
  skel.initSyncT(block3, true)
  skel.putBlocksT([block2], 1, {FillCanonical})
  check skel.blockHeight == 3
  let latestHash = env.chain.currentBlock().blockHash
  check latestHash == block3.blockHash
  check env.chain.latestHash == block3.blockHash

@ -148,6 +148,6 @@ when isMainModule:
      let node = json.parseFile(name)
      executeFile(node, testStatusIMPL)

    executeFile("tests/fixtures/eth_tests/BlockchainTests/ValidBlocks/bcTotalDifficultyTest/sideChainWithMoreTransactions.json")
    executeFile("tests/fixtures/eth_tests/BlockchainTests/ValidBlocks/bcWalletTest/walletReorganizeOwners.json")
  else:
    blockchainJsonMain()

@ -42,7 +42,7 @@ type
    nonce   : uint64
    chainId : ChainId
    xp      : TxPoolRef
    chain   : ChainRef
    chain   : ForkedChainRef

# ------------------------------------------------------------------------------
# Helpers

@ -110,7 +110,7 @@ proc initEnv(): TestEnv =
    nonce  : 0'u64,
    chainId: conf.networkParams.config.chainId,
    xp     : TxPoolRef.new(com),
    chain  : newChain(com),
    chain  : newForkedChain(com, com.genesisHeader),
  )

func makeTx(

@ -140,8 +140,8 @@ func initAddr(z: int): EthAddress =
  const L = sizeof(result)
  result[L-sizeof(uint32)..^1] = toBytesBE(z.uint32)

proc importBlocks(env: TestEnv; blk: EthBlock) =
  env.chain.persistBlocks([blk]).isOkOr:
proc importBlock(env: TestEnv; blk: EthBlock) =
  env.chain.importBlock(blk).isOkOr:
    raiseAssert "importBlock() failed at block #" &
      $blk.header.number & " msg: " & error

@ -333,9 +333,9 @@ proc runLedgerTransactionTests(noisy = true) =
      uncles: blk.uncles,
      withdrawals: Opt.some(newSeq[Withdrawal]())
    )
    env.importBlocks(EthBlock.init(blk.header, body))
    env.importBlock(EthBlock.init(blk.header, body))

    check env.xp.smartHead(blk.header)
    check env.xp.smartHead(blk.header, env.chain)
    for tx in body.transactions:
      env.txs.add tx

@ -22,7 +22,7 @@ import
  #./persistBlockTestGen,   # -- ditto
  ../hive_integration/nodocker/rpc/rpc_sim,
  ../hive_integration/nodocker/consensus/consensus_sim,
  ../hive_integration/nodocker/graphql/graphql_sim,
  #../hive_integration/nodocker/graphql/graphql_sim, # -- does not compile
  #../hive_integration/nodocker/engine/engine_sim,   # -- does not compile
  ../hive_integration/nodocker/pyspec/pyspec_sim,
  ../tools/t8n/t8n,

@ -36,7 +36,7 @@ type
    vaultKey: PrivateKey
    conf    : NimbusConf
    com     : CommonRef
    chain   : ChainRef
    chain   : ForkedChainRef
    xp      : TxPoolRef

const

@ -110,7 +110,7 @@ proc initEnv(envFork: HardFork): TestEnv =
    conf.networkId,
    conf.networkParams
  )
  chain = newChain(com)
  chain = newForkedChain(com, com.genesisHeader)

  result = TestEnv(
    conf: conf,

@ -167,7 +167,7 @@ proc runTxPoolPosTest() =
    check blk.txs.len == 1

  test "PoS persistBlocks":
    let rr = chain.persistBlocks([EthBlock.init(blk.header, body)])
    let rr = chain.importBlock(EthBlock.init(blk.header, body))
    check rr.isOk()

  test "validate TxPool prevRandao setter":

@ -232,7 +232,7 @@ proc runTxPoolBlobhashTest() =
    check blockValue == bundle.blockValue

  test "Blobhash persistBlocks":
    let rr = chain.persistBlocks([EthBlock.init(blk.header, body)])
    let rr = chain.importBlock(EthBlock.init(blk.header, body))
    check rr.isOk()

  test "validate TxPool prevRandao setter":

@ -253,7 +253,7 @@ proc runTxPoolBlobhashTest() =
    tx4 = env.signTxWithNonce(tx3, AccountNonce(env.nonce-2))
    xp = env.xp

  check xp.smartHead(blk.header)
  check xp.smartHead(blk.header, chain)
  xp.add(PooledTransaction(tx: tx4))

  check inPoolAndOk(xp, rlpHash(tx4)) == false

@ -308,12 +308,12 @@ proc runTxHeadDelta(noisy = true) =
      uncles: blk.uncles)

    # Commit to block chain
    check chain.persistBlocks([EthBlock.init(blk.header, body)]).isOk
    check chain.importBlock(EthBlock.init(blk.header, body)).isOk

    # Synchronise TxPool against the new chain head, registering tx
    # differences. In this particular case, these differences will simply
    # flush the packer bucket.
    check xp.smartHead(blk.header)
    check xp.smartHead(blk.header, chain)

    # Move TxPool chain head to new chain head and apply delta jobs
    check xp.nItems.staged == 0

@ -355,9 +355,9 @@ proc runGetBlockBodyTest() =
      return

    let blk = r.get.blk
    check env.chain.persistBlocks([blk]).isOk
    check env.chain.importBlock(blk).isOk
    parentHeader = blk.header
    check env.xp.smartHead(parentHeader)
    check env.xp.smartHead(parentHeader, env.chain)
    check blk.transactions.len == 2

  test "TxPool create second block":

@ -380,25 +380,12 @@ proc runGetBlockBodyTest() =
      return

    let blk = r.get.blk
    check env.chain.persistBlocks([blk]).isOk
    check env.chain.importBlock(blk).isOk
    currentHeader = blk.header
    check env.xp.smartHead(currentHeader)
    check env.xp.smartHead(currentHeader, env.chain)
    check blk.transactions.len == 3

  test "Get current block body":
    var body: BlockBody
    check env.com.db.getBlockBody(currentHeader, body)
    check body.transactions.len == 3
    check env.com.db.getReceipts(currentHeader.receiptsRoot).len == 3
    check env.com.db.getTransactionCount(currentHeader.txRoot) == 3

  test "Get parent block body":
    # Make sure the parent baggage doesn't get swept away by Aristo
    var body: BlockBody
    check env.com.db.getBlockBody(parentHeader, body)
    check body.transactions.len == 2
    check env.com.db.getReceipts(parentHeader.receiptsRoot).len == 2
    check env.com.db.getTransactionCount(parentHeader.txRoot) == 2
  let currHash = currentHeader.blockHash
  check env.chain.forkChoice(currHash, currHash).isOk

proc txPool2Main*() =
  const