Disable NewBlockHashes and NewBlock handling in the eth wire handler after the PoS transition
fix #1133
parent c7b3c374f0
commit 43f4b99a1b
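For orientation before the diff: below is a minimal, self-contained sketch of the behaviour this commit adds to the eth wire handlers. The names (`Tracker`, `handleNewBlock`) are illustrative stand-ins, not the nimbus types; the real code uses `EthWireRef`, `MergerRef.posFinalized` and a chronos-based `banPeer`, as shown in the hunks that follow.

```nim
# Sketch only: once the consensus layer has finalized a PoS block,
# NewBlock / NewBlockHashes gossip is rejected and the sender is penalized,
# as required by EIP-3675.
type
  Tracker = ref object
    posFinalized: bool   # set once the first finalized PoS block is seen

proc handleNewBlock(t: Tracker, peer: string): bool =
  ## Returns true while the announcement is still accepted (pre-merge).
  if t.posFinalized:
    echo "dropping ", peer, ": NewBlock gossip after the merge"
    return false         # real handler: asyncSpawn banPeer(...); return
  true

when isMainModule:
  let t = Tracker()
  doAssert t.handleNewBlock("peerA")       # pre-merge: accepted
  t.posFinalized = true                    # consensus layer finalized PoS
  doAssert not t.handleNewBlock("peerA")   # post-merge: dropped
```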
@@ -22,7 +22,8 @@ import
     rpc/debug,
     rpc/jwt_auth,
     sync/protocol,
-    utils/tx_pool
+    utils/tx_pool,
+    merge/merger
   ],
   ../../../tests/test_helpers,
   "."/[clmock, engine_client]
@@ -112,8 +113,9 @@ proc setupELClient*(t: TestEnv, chainFile: string, enableAuth: bool) =
     txPool, EngineStopped
   )

+  let merger = MergerRef.new(t.chainDB)
   setupEthRpc(t.ethNode, t.ctx, t.chainDB, txPool, t.rpcServer)
-  setupEngineAPI(t.sealingEngine, t.rpcServer)
+  setupEngineAPI(t.sealingEngine, t.rpcServer, merger)
   setupDebugRpc(t.chainDB, t.rpcServer)

   # Do not start clique sealing engine if we are using a Proof of Work chain file
@@ -13,60 +13,56 @@ type

   # Merger is an internal help structure used to track the eth1/2 transition status.
   # It's a common structure can be used in both full node and light client.
-  Merger* = object
+  MergerRef* = ref object
     db : TrieDatabaseRef
     status: TransitionStatus

-proc write(db: TrieDatabaseRef, status: TransitionStatus) =
+proc writeStatus(db: TrieDatabaseRef, status: TransitionStatus) =
   db.put(transitionStatusKey().toOpenArray(), rlp.encode(status))

-proc read(db: TrieDatabaseRef, status: var TransitionStatus) =
+proc readStatus(db: TrieDatabaseRef): TransitionStatus =
   var bytes = db.get(transitionStatusKey().toOpenArray())
   if bytes.len > 0:
     try:
-      status = rlp.decode(bytes, typeof status)
+      result = rlp.decode(bytes, typeof result)
     except:
-      error "Failed to decode transition status"
+      error "Failed to decode POS transition status"

-proc init*(m: var Merger, db: TrieDatabaseRef) =
-  m.db = db
-  db.read(m.status)
+proc new*(_: type MergerRef, db: TrieDatabaseRef): MergerRef =
+  MergerRef(
+    db: db,
+    status: db.readStatus()
+  )

-proc init*(m: var Merger, db: BaseChainDB) =
-  init(m, db.db)
-
-proc initMerger*(db: BaseChainDB): Merger =
-  result.init(db)
-
-proc initMerger*(db: TrieDatabaseRef): Merger =
-  result.init(db)
+proc new*(_: type MergerRef, db: BaseChainDB): MergerRef =
+  MergerRef.new(db.db)

 # ReachTTD is called whenever the first NewHead message received
 # from the consensus-layer.
-proc reachTTD*(m: var Merger) =
+proc reachTTD*(m: MergerRef) =
   if m.status.leftPoW:
     return

   m.status = TransitionStatus(leftPoW: true)
-  m.db.write(m.status)
+  m.db.writeStatus(m.status)

   info "Left PoW stage"

 # FinalizePoS is called whenever the first FinalisedBlock message received
 # from the consensus-layer.
-proc finalizePoS*(m: var Merger) =
+proc finalizePoS*(m: MergerRef) =
   if m.status.enteredPoS:
     return

   m.status = TransitionStatus(leftPoW: true, enteredPoS: true)
-  m.db.write(m.status)
+  m.db.writeStatus(m.status)

   info "Entered PoS stage"

 # TTDReached reports whether the chain has left the PoW stage.
-proc ttdReached*(m: Merger): bool =
+proc ttdReached*(m: MergerRef): bool =
   m.status.leftPoW

 # PoSFinalized reports whether the chain has entered the PoS stage.
-proc posFinalized*(m: Merger): bool =
+proc posFinalized*(m: MergerRef): bool =
   m.status.enteredPoS
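The module above keeps a one-way `TransitionStatus` (first `leftPoW`, then `enteredPoS`) and persists it so the answer survives restarts. A rough, self-contained sketch of that flow, with an in-memory table standing in for `TrieDatabaseRef` and plain strings standing in for the RLP encoding (both are simplifications, not the nimbus APIs):

```nim
import std/[tables, strutils]

type
  TransitionStatus = object
    leftPoW, enteredPoS: bool

  FakeDb = ref object                 # stand-in for TrieDatabaseRef
    kv: Table[string, string]

  Tracker = ref object                # stand-in for MergerRef
    db: FakeDb
    status: TransitionStatus

proc writeStatus(db: FakeDb, s: TransitionStatus) =
  # the real code RLP-encodes the status under transitionStatusKey()
  db.kv["transition"] = $s.leftPoW & " " & $s.enteredPoS

proc readStatus(db: FakeDb): TransitionStatus =
  if "transition" in db.kv:
    let parts = db.kv["transition"].splitWhitespace()
    result = TransitionStatus(
      leftPoW: parseBool(parts[0]), enteredPoS: parseBool(parts[1]))

proc newTracker(db: FakeDb): Tracker =
  Tracker(db: db, status: db.readStatus())   # status survives restarts

proc reachTTD(t: Tracker) =
  if t.status.leftPoW: return                # one-way: never flips back
  t.status = TransitionStatus(leftPoW: true)
  t.db.writeStatus(t.status)

proc finalizePoS(t: Tracker) =
  if t.status.enteredPoS: return
  t.status = TransitionStatus(leftPoW: true, enteredPoS: true)
  t.db.writeStatus(t.status)

when isMainModule:
  let db = FakeDb()
  let t = newTracker(db)
  t.reachTTD()
  t.finalizePoS()
  doAssert newTracker(db).status.enteredPoS  # reloaded from the "db"
```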
@@ -37,8 +37,8 @@ type
     hash: Hash256
     header: EthBlockHeader

-  EngineAPI* = ref object
-    merger*: Merger
+  EngineApiRef* = ref object
+    merger: MergerRef
     payloadQueue: SimpleQueue[MaxTrackedPayloads, PayloadItem]
     headerQueue: SimpleQueue[MaxTrackedHeaders, HeaderItem]

@@ -54,27 +54,33 @@ iterator items[M, T](x: SimpleQueue[M, T]): T =
     if z.used:
       yield z.data

-proc new*(_: type EngineAPI, db: BaseChainDB): EngineAPI =
-  new result
-  if not db.isNil:
-    result.merger.init(db)
+template new*(_: type EngineApiRef): EngineApiRef =
+  {.error: "EngineApiRef should be created with merger param " & $instantiationInfo().}

-proc put*(api: EngineAPI, hash: Hash256, header: EthBlockHeader) =
+proc new*(_: type EngineApiRef, merger: MergerRef): EngineApiRef =
+  EngineApiRef(
+    merger: merger
+  )
+
+proc put*(api: EngineApiRef, hash: Hash256, header: EthBlockHeader) =
   api.headerQueue.put(HeaderItem(hash: hash, header: header))

-proc get*(api: EngineAPI, hash: Hash256, header: var EthBlockHeader): bool =
+proc get*(api: EngineApiRef, hash: Hash256, header: var EthBlockHeader): bool =
   for x in api.headerQueue:
     if x.hash == hash:
       header = x.header
       return true
   false

-proc put*(api: EngineAPI, id: PayloadID, payload: ExecutionPayloadV1) =
+proc put*(api: EngineApiRef, id: PayloadID, payload: ExecutionPayloadV1) =
   api.payloadQueue.put(PayloadItem(id: id, payload: payload))

-proc get*(api: EngineAPI, id: PayloadID, payload: var ExecutionPayloadV1): bool =
+proc get*(api: EngineApiRef, id: PayloadID, payload: var ExecutionPayloadV1): bool =
   for x in api.payloadQueue:
     if x.id == id:
       payload = x.payload
       return true
   false
+
+proc merger*(api: EngineApiRef): MergerRef =
+  api.merger
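One detail worth noting in the hunk above: the old parameterless `EngineAPI.new` is replaced by a `template` whose body is an `{.error.}` pragma, so any remaining call site that forgets the merger argument fails at compile time rather than at runtime. A small stand-alone sketch of that pattern (illustrative `Api`/`Dep`/`create` names, not the nimbus ones):

```nim
type
  Dep = ref object
  Api = ref object
    dep: Dep

template create(_: type Api): Api =
  # instantiating the no-argument constructor is a compile-time error
  {.error: "Api must be created with a Dep".}

proc create(_: type Api, dep: Dep): Api =
  Api(dep: dep)

let ok = Api.create(Dep())    # compiles: dependency supplied
doAssert not ok.dep.isNil
# let bad = Api.create()      # uncommenting this line fails to compile
```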
@@ -28,7 +28,8 @@ import
   ./p2p/[chain, clique/clique_desc, clique/clique_sealer],
   ./rpc/[common, debug, engine_api, jwt_auth, p2p, cors],
   ./sync/[fast, full, protocol, snap, protocol/les_protocol, handlers],
-  ./utils/tx_pool
+  ./utils/tx_pool,
+  ./merge/merger

 when defined(evmc_enabled):
   import transaction/evmc_dynamic_loader
@@ -57,6 +58,7 @@ type
     networkLoop: Future[void]
     dbBackend: ChainDB
     peerManager: PeerManagerRef
+    merger: MergerRef

 proc importBlocks(conf: NimbusConf, chainDB: BaseChainDB) =
   if string(conf.blocksFile).len > 0:
@@ -81,6 +83,11 @@ proc basicServices(nimbus: NimbusNode,
   nimbus.chainRef.extraValidation = 0 < verifyFrom
   nimbus.chainRef.verifyFrom = verifyFrom

+  # this is temporary workaround to track POS transition
+  # until we have proper chain config and hard fork module
+  # see issue #640
+  nimbus.merger = MergerRef.new(chainDB)
+
 proc manageAccounts(nimbus: NimbusNode, conf: NimbusConf) =
   if string(conf.keyStore).len > 0:
     let res = nimbus.ctx.am.loadKeystores(string conf.keyStore)
@@ -141,7 +148,8 @@ proc setupP2P(nimbus: NimbusNode, conf: NimbusConf,
   let ethWireHandler = EthWireRef.new(
     nimbus.chainRef,
     nimbus.txPool,
-    nimbus.ethNode.peerPool
+    nimbus.ethNode.peerPool,
+    nimbus.merger
   )
   nimbus.ethNode.addCapability(protocol.eth, ethWireHandler)
   case conf.syncMode:
@@ -336,11 +344,11 @@ proc localServices(nimbus: NimbusNode, conf: NimbusConf,
       [initTAddress(conf.engineApiAddress, conf.engineApiPort)],
       authHooks = @[httpJwtAuthHook, httpCorsHook]
     )
-    setupEngineAPI(nimbus.sealingEngine, nimbus.engineApiServer)
+    setupEngineAPI(nimbus.sealingEngine, nimbus.engineApiServer, nimbus.merger)
     setupEthRpc(nimbus.ethNode, nimbus.ctx, chainDB, nimbus.txPool, nimbus.engineApiServer)
     nimbus.engineApiServer.start()
   else:
-    setupEngineAPI(nimbus.sealingEngine, nimbus.rpcServer)
+    setupEngineAPI(nimbus.sealingEngine, nimbus.rpcServer, nimbus.merger)

   info "Starting engine API server", port = conf.engineApiPort

@@ -350,11 +358,11 @@ proc localServices(nimbus: NimbusNode, conf: NimbusConf,
       initTAddress(conf.engineApiWsAddress, conf.engineApiWsPort),
       authHooks = @[wsJwtAuthHook, wsCorsHook]
     )
-    setupEngineAPI(nimbus.sealingEngine, nimbus.engineApiWsServer)
+    setupEngineAPI(nimbus.sealingEngine, nimbus.engineApiWsServer, nimbus.merger)
     setupEthRpc(nimbus.ethNode, nimbus.ctx, chainDB, nimbus.txPool, nimbus.engineApiWsServer)
     nimbus.engineApiWsServer.start()
   else:
-    setupEngineAPI(nimbus.sealingEngine, nimbus.wsRpcServer)
+    setupEngineAPI(nimbus.sealingEngine, nimbus.wsRpcServer, nimbus.merger)

   info "Starting WebSocket engine API server", port = conf.engineApiWsPort

@@ -41,12 +41,13 @@ proc invalidFCU(db: BaseChainDB, header: EthBlockHeader): ForkchoiceUpdatedRespo
   let blockHash = latestValidHash(db, parent, db.ttd())
   invalidFCU(blockHash)

-proc setupEngineAPI*(
+proc setupEngineApi*(
   sealingEngine: SealingEngineRef,
-  server: RpcServer) =
+  server: RpcServer,
+  merger: MergerRef) =

   # TODO: put this singleton somewhere else
-  let api = EngineAPI.new(sealingEngine.chain.db)
+  let api = EngineApiRef.new(merger)

   # https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1
   # cannot use `params` as param name. see https:#github.com/status-im/nim-json-rpc/issues/128
@@ -9,7 +9,8 @@ import
   ../db/db_chain,
   ../p2p/chain,
   ../utils/tx_pool,
-  ../utils/tx_pool/tx_item
+  ../utils/tx_pool/tx_item,
+  ../merge/merger

 type
   HashToTime = TableRef[Hash256, Time]
@@ -23,14 +24,57 @@ type
     knownByPeer: Table[Peer, HashToTime]
     pending: HashSet[Hash256]
     lastCleanup: Time
+    merger: MergerRef

+  ReconnectRef = ref object
+    pool: PeerPool
+    node: Node
+
 const
   NUM_PEERS_REBROADCAST_QUOTIENT = 4
   POOLED_STORAGE_TIME_LIMIT = initDuration(minutes = 20)
+  PEER_LONG_BANTIME = chronos.minutes(150)

 proc hash(peer: Peer): hashes.Hash =
   hash(peer.remote)

+proc banExpiredReconnect(arg: pointer) {.gcsafe, raises: [Defect].} =
+  # Reconnect to peer after ban period if pool is empty
+  try:
+
+    let reconnect = cast[ReconnectRef](arg)
+    if reconnect.pool.len > 0:
+      return
+
+    asyncSpawn reconnect.pool.connectToNode(reconnect.node)
+
+  except TransportError:
+    debug "Transport got closed during banExpiredReconnect"
+  except CatchableError as e:
+    debug "Exception in banExpiredReconnect", exc = e.name, err = e.msg
+
+proc banPeer(pool: PeerPool, peer: Peer, banTime: chronos.Duration) {.async.} =
+  try:
+
+    await peer.disconnect(SubprotocolReason)
+
+    let expired = Moment.fromNow(banTime)
+    let reconnect = ReconnectRef(
+      pool: pool,
+      node: peer.remote
+    )
+
+    discard setTimer(
+      expired,
+      banExpiredReconnect,
+      cast[pointer](reconnect)
+    )
+
+  except TransportError:
+    debug "Transport got closed during banPeer"
+  except CatchableError as e:
+    debug "Exception in banPeer", exc = e.name, err = e.msg
+
 proc cleanupKnownByPeer(ctx: EthWireRef) =
   let now = getTime()
   var tmp = initHashSet[Hash256]()
@@ -151,13 +195,15 @@ proc setupPeerObserver(ctx: EthWireRef) =
 proc new*(_: type EthWireRef,
           chain: Chain,
           txPool: TxPoolRef,
-          peerPool: PeerPool): EthWireRef =
+          peerPool: PeerPool,
+          merger: MergerRef): EthWireRef =
   let ctx = EthWireRef(
     db: chain.db,
     chain: chain,
     txPool: txPool,
     peerPool: peerPool,
-    lastCleanup: getTime()
+    merger: merger,
+    lastCleanup: getTime(),
   )

   ctx.setupPeerObserver()
@@ -345,7 +391,7 @@ proc fetchTransactions(ctx: EthWireRef, reqHashes: seq[Hash256], peer: Peer): Fu
 method handleAnnouncedTxsHashes*(ctx: EthWireRef, peer: Peer, txHashes: openArray[Hash256]) {.gcsafe.} =
   if ctx.disableTxPool:
     when trMissingOrDisabledGossipOk:
-      notImplemented("handleAnnouncedTxsHashes")
+      notEnabled("handleAnnouncedTxsHashes")
     return

   if txHashes.len == 0:
@@ -373,10 +419,19 @@ method handleAnnouncedTxsHashes*(ctx: EthWireRef, peer: Peer, txHashes: openArra
   asyncSpawn ctx.fetchTransactions(reqHashes, peer)

 method handleNewBlock*(ctx: EthWireRef, peer: Peer, blk: EthBlock, totalDifficulty: DifficultyInt) {.gcsafe.} =
-  notImplemented("handleNewBlock")
+  if ctx.merger.posFinalized:
+    debug "Dropping peer for sending NewBlock after merge (EIP-3675)",
+      peer, blockNumber=blk.header.blockNumber,
+      blockHash=blk.header.blockHash, totalDifficulty
+    asyncSpawn banPeer(ctx.peerPool, peer, PEER_LONG_BANTIME)
+    return

 method handleNewBlockHashes*(ctx: EthWireRef, peer: Peer, hashes: openArray[NewBlockHashesAnnounce]) {.gcsafe.} =
-  notImplemented("handleNewBlockHashes")
+  if ctx.merger.posFinalized:
+    debug "Dropping peer for sending NewBlockHashes after merge (EIP-3675)",
+      peer, numHashes=hashes.len
+    asyncSpawn banPeer(ctx.peerPool, peer, PEER_LONG_BANTIME)
+    return

 when defined(legacy_eth66_enabled):
   method getStorageNodes*(ctx: EthWireRef, hashes: openArray[Hash256]): seq[Blob] {.gcsafe.} =
@@ -18,7 +18,7 @@ import
   ../nimbus/[config, context, genesis, sealer],
   ../nimbus/utils/[tx_pool],
   ../nimbus/p2p/chain,
-  ../nimbus/merge/mergetypes,
+  ../nimbus/merge/[mergetypes, merger],
   ./test_helpers

 const
@@ -98,9 +98,10 @@ proc runTest(steps: Steps) =
       chainRef, ctx, conf.engineSigner,
       txPool, EnginePostMerge
     )
+    merger = MergerRef.new(chainDB)

   setupEthRpc(ethNode, ctx, chainDB, txPool, rpcServer)
-  setupEngineAPI(sealingEngine, rpcServer)
+  setupEngineAPI(sealingEngine, rpcServer, merger)

   sealingEngine.start()
   rpcServer.start()
@@ -138,7 +139,7 @@ proc `==`(a, b: Quantity): bool =
   uint64(a) == uint64(b)

 proc testEngineApiSupport() =
-  var api = EngineAPI.new()
+  var api = EngineAPIRef.new(nil)
   let
     id1 = toId(1)
     id2 = toId(2)
@@ -23,4 +23,8 @@ import
   ../hive_integration/nodocker/consensus/extract_consensus_data,
   ../hive_integration/nodocker/consensus/consensus_sim,
   ../hive_integration/nodocker/graphql/graphql_sim,
-  ../hive_integration/nodocker/engine/engine_sim
+  ../hive_integration/nodocker/engine/engine_sim,
+  ../tools/t8n/t8n,
+  ../tools/t8n/t8n_test,
+  ../tools/evmstate/evmstate,
+  ../tools/evmstate/evmstate_test