fix some deprecation warnings (#6702)

tersec 2024-11-04 11:19:43 +00:00 committed by GitHub
parent 544b07ba0d
commit b7ea6a627e
GPG Key ID: B5690EEEBB952194
8 changed files with 23 additions and 25 deletions

View File

@@ -70,7 +70,7 @@ type
     config*: BeaconNodeConf
     attachedValidators*: ref ValidatorPool
     optimisticProcessor*: OptimisticProcessor
-    optimisticFcuFut*: Future[(PayloadExecutionStatus, Opt[BlockHash])]
+    optimisticFcuFut*: Future[(PayloadExecutionStatus, Opt[Hash32])]
       .Raising([CancelledError])
     lightClient*: LightClient
     dag*: ChainDAGRef
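
The hunk above shows the pattern repeated throughout this commit: the deprecated `BlockHash` spelling is replaced by `Hash32`. A minimal sketch of why the rename silences the warnings, assuming (as the diff suggests) that upstream now keeps `BlockHash` only as a deprecated alias of `Hash32`; the declarations below are local stand-ins, not the real nim-eth/nim-web3 ones:

  # Stand-in declarations; the real Hash32/BlockHash live in nim-eth/nim-web3.
  type
    Hash32 = distinct array[32, byte]
    BlockHash {.deprecated: "use Hash32".} = Hash32

  proc describe(h: Hash32): string =
    "a 32-byte execution-layer hash"

  let h = Hash32(default(array[32, byte]))
  echo describe(h)                 # compiles without warnings
  # Referencing the alias instead, e.g. `let old: BlockHash = h`, emits
  # "Warning: use Hash32 [Deprecated]" at compile time.

Switching call sites to the canonical name, as this commit does, changes nothing at runtime; the byte layout is identical.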

View File

@@ -45,7 +45,6 @@ type
   SignatureBytes = DynamicBytes[96, 96]
   Int64LeBytes = DynamicBytes[8, 8]
   WithoutTimeout* = distinct int
-  Address = web3.Address

   DeadlineObject* = object
     # TODO (cheatfate): This object declaration could be removed when
@@ -130,7 +129,7 @@ type
     depositContractAddress*: Eth1Address
     depositContractBlockNumber: uint64
-    depositContractBlockHash: BlockHash
+    depositContractBlockHash: Hash32
     blocksPerLogsRequest: uint64
       ## This value is used to dynamically adjust the number of
@@ -199,7 +198,7 @@ type
   FullBlockId* = object
     number: Eth1BlockNumber
-    hash: BlockHash
+    hash: Hash32

   DataProviderFailure* = object of CatchableError
   CorruptDataProvider* = object of DataProviderFailure
@@ -458,7 +457,7 @@ proc connectedRpcClient(connection: ELConnection): Future[RpcClient] {.
 proc getBlockByHash(
     rpcClient: RpcClient,
-    hash: BlockHash
+    hash: Hash32
 ): Future[BlockObject] {.async: (raises: [CatchableError]).} =
   await rpcClient.eth_getBlockByHash(hash, false)
@@ -730,7 +729,7 @@ proc getPayload*(
 proc waitELToSyncDeposits(
     connection: ELConnection,
-    minimalRequiredBlock: BlockHash
+    minimalRequiredBlock: Hash32
 ) {.async: (raises: [CancelledError]).} =
   var rpcClient: RpcClient = nil
@@ -776,7 +775,7 @@ proc waitELToSyncDeposits(
 func networkHasDepositContract(m: ELManager): bool =
   not m.cfg.DEPOSIT_CONTRACT_ADDRESS.isDefaultValue

-func mostRecentKnownBlock(m: ELManager): BlockHash =
+func mostRecentKnownBlock(m: ELManager): Hash32 =
   if m.eth1Chain.finalizedDepositsMerkleizer.getChunkCount() > 0:
     m.eth1Chain.finalizedBlockHash.asBlockHash
   else:
@@ -1111,7 +1110,7 @@ proc forkchoiceUpdated*(
       Opt[PayloadAttributesV3],
     deadlineObj: DeadlineObject,
     maxRetriesCount: int
-): Future[(PayloadExecutionStatus, Opt[BlockHash])] {.
+): Future[(PayloadExecutionStatus, Opt[Hash32])] {.
     async: (raises: [CancelledError]).} =
   doAssert not headBlockHash.isZero
@@ -1129,7 +1128,7 @@ proc forkchoiceUpdated*(
   # payload (`Hash32()` if none yet finalized)"
   if m.elConnections.len == 0:
-    return (PayloadExecutionStatus.syncing, Opt.none BlockHash)
+    return (PayloadExecutionStatus.syncing, Opt.none Hash32)

   when payloadAttributes is Opt[PayloadAttributesV3]:
     template payloadAttributesV3(): auto =
@@ -1238,7 +1237,7 @@ proc forkchoiceUpdated*(
         pendingRequests.filterIt(not(it.finished())).
           mapIt(it.cancelAndWait())
       await noCancel allFutures(pending)
-      return (PayloadExecutionStatus.invalid, Opt.none BlockHash)
+      return (PayloadExecutionStatus.invalid, Opt.none Hash32)
     elif responseProcessor.selectedResponse.isSome:
       # We spawn task which will wait for all other responses which are
       # still pending, after 30.seconds all pending requests will be
@@ -1253,14 +1252,14 @@ proc forkchoiceUpdated*(
         pendingRequests.filterIt(not(it.finished())).
           mapIt(it.cancelAndWait())
       await noCancel allFutures(pending)
-      return (PayloadExecutionStatus.syncing, Opt.none BlockHash)
+      return (PayloadExecutionStatus.syncing, Opt.none Hash32)

     if len(pendingRequests) == 0:
       # All requests failed, we will continue our attempts until deadline
       # is not finished.
       inc(retriesCount)
       if retriesCount == maxRetriesCount:
-        return (PayloadExecutionStatus.syncing, Opt.none BlockHash)
+        return (PayloadExecutionStatus.syncing, Opt.none Hash32)

       # To avoid continous spam of requests when EL node is offline we
       # going to sleep until next attempt.
@@ -1273,7 +1272,7 @@ proc forkchoiceUpdated*(
     payloadAttributes: Opt[PayloadAttributesV1] |
                        Opt[PayloadAttributesV2] |
                        Opt[PayloadAttributesV3]
-): Future[(PayloadExecutionStatus, Opt[BlockHash])] {.
+): Future[(PayloadExecutionStatus, Opt[Hash32])] {.
     async: (raises: [CancelledError], raw: true).} =
   forkchoiceUpdated(
     m, headBlockHash, safeBlockHash, finalizedBlockHash,
@@ -1389,7 +1388,7 @@ func depositEventsToBlocks(
       let
         logEvent = JrpcConv.decode(logEventData.string, LogObject)
         blockNumber = Eth1BlockNumber readJsonField(logEvent, blockNumber, Quantity)
-        blockHash = readJsonField(logEvent, blockHash, BlockHash)
+        blockHash = readJsonField(logEvent, blockHash, Hash32)

       if lastEth1Block == nil or lastEth1Block.number != blockNumber:
         lastEth1Block = Eth1Block(
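
For callers of `forkchoiceUpdated`, only the element type of the returned tuple changes. A hedged sketch of consuming the new `(PayloadExecutionStatus, Opt[Hash32])` shape; `Opt` is the nim-results option type the diff itself uses, while the enum and the hash type below are simplified stand-ins for the engine-API and nim-eth definitions:

  import results                    # Opt.some / Opt.none / isSome

  type
    PayloadExecutionStatus = enum   # stand-in; the real enum has more states
      syncing, valid, invalid
    Hash32 = array[32, byte]        # stand-in for the nim-eth Hash32

  proc handleFcuResult(res: (PayloadExecutionStatus, Opt[Hash32])) =
    let (status, latestValidHash) = res
    if status == PayloadExecutionStatus.valid and latestValidHash.isSome:
      echo "head accepted; latest valid hash known"
    else:
      echo "not (yet) valid: ", status

  handleFcuResult((PayloadExecutionStatus.syncing, Opt.none Hash32))

The `Opt.none BlockHash` returns in the hunks above simply become `Opt.none Hash32`; the retry and syncing/invalid control flow is untouched.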

View File

@@ -19,11 +19,11 @@ type
     executionPayload*: ExecutionPayloadV1
     blockValue*: UInt256

-func asEth2Digest*(x: BlockHash|Bytes32): Eth2Digest =
+func asEth2Digest*(x: Hash32|Bytes32): Eth2Digest =
   Eth2Digest(data: array[32, byte](x))

-template asBlockHash*(x: Eth2Digest): BlockHash =
-  BlockHash(x.data)
+template asBlockHash*(x: Eth2Digest): Hash32 =
+  Hash32(x.data)

 func asConsensusWithdrawal*(w: WithdrawalV1): capella.Withdrawal =
   capella.Withdrawal(
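
Both sides of these converters wrap the same 32 raw bytes, so the change is purely a rename and the conversion stays a zero-cost reinterpretation. A round-trip sketch under that assumption, with `Hash32Like` and `Eth2DigestLike` as local stand-ins for the real `Hash32` and `Eth2Digest`:

  type
    Hash32Like = distinct array[32, byte]   # stand-in for nim-eth Hash32
    Eth2DigestLike = object                 # stand-in for Eth2Digest
      data: array[32, byte]

  func toDigest(x: Hash32Like): Eth2DigestLike =
    Eth2DigestLike(data: array[32, byte](x))

  func toHash32(x: Eth2DigestLike): Hash32Like =
    Hash32Like(x.data)

  var raw: array[32, byte]
  raw[0] = 0xab'u8
  let roundTripped = toHash32(toDigest(Hash32Like(raw)))
  doAssert array[32, byte](roundTripped)[0] == 0xab'u8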

View File

@@ -32,7 +32,7 @@ declarePublicGauge eth1_finalized_deposits,
 declareGauge eth1_chain_len,
   "The length of the in-memory chain of Eth1 blocks"

-template toGaugeValue*(x: Quantity | BlockNumber): int64 =
+template toGaugeValue*(x: Quantity): int64 =
   toGaugeValue(distinctBase x)

 type
@@ -69,7 +69,7 @@ type
       ## A non-forkable chain of blocks ending at the block with
       ## ETH1_FOLLOW_DISTANCE offset from the head.
-    blocksByHash: Table[BlockHash, Eth1Block]
+    blocksByHash: Table[Hash32, Eth1Block]
     headMerkleizer: DepositsMerkleizer
       ## Merkleizer state after applying all `blocks`
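
`toGaugeValue` keeps forwarding through `distinctBase`; it merely drops `BlockNumber` from the accepted types. A self-contained sketch of the same pattern, with `QuantityLike` standing in for the web3 `Quantity` distinct type:

  import std/typetraits                 # distinctBase

  type QuantityLike = distinct uint64   # stand-in for web3's Quantity

  template toGaugeValueSketch(x: QuantityLike): int64 =
    # unwrap the distinct wrapper, then narrow to the int64 the metrics API expects
    int64(distinctBase(x))

  doAssert toGaugeValueSketch(QuantityLike(42'u64)) == 42'i64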

View File

@@ -24,7 +24,7 @@ import
     pubsub, gossipsub, rpc/message, rpc/messages, peertable, pubsubpeer],
   libp2p/stream/connection,
   libp2p/services/wildcardresolverservice,
-  eth/[keys, async_utils],
+  eth/[common/keys, async_utils],
   eth/net/nat, eth/p2p/discoveryv5/[enr, node, random2],
   ".."/[version, conf, beacon_clock, conf_light_client],
   ../spec/[eth2_ssz_serialization, network, helpers, forks],

View File

@@ -457,7 +457,7 @@ elif const_preset == "minimal":
     TERMINAL_TOTAL_DIFFICULTY:
       u256"115792089237316195423570985008687907853269984665640564039457584007913129638912",
     # By default, don't use these params
-    TERMINAL_BLOCK_HASH: BlockHash.fromHex(
+    TERMINAL_BLOCK_HASH: Hash32.fromHex(
       "0x0000000000000000000000000000000000000000000000000000000000000000"),

View File

@@ -10,7 +10,7 @@
 import
   std/[json, options, times],
   chronos, bearssl/rand, chronicles, confutils, stint, json_serialization,
-  web3, eth/keys, eth/p2p/discoveryv5/random2,
+  web3, eth/common/keys, eth/p2p/discoveryv5/random2,
   stew/[io2, byteutils], json_rpc/jsonmarshal,
   ../beacon_chain/conf,
   ../beacon_chain/el/el_manager,
@@ -21,7 +21,6 @@ import
   ../beacon_chain/validators/keystore_management

 from std/os import changeFileExt, fileExists
-from std/times import toUnix
 from ../beacon_chain/el/engine_api_conversions import asEth2Digest
 from ../beacon_chain/spec/beaconstate import initialize_beacon_state_from_eth1
 from ../tests/mocking/mock_genesis import mockEth1BlockHash
@@ -239,7 +238,7 @@ contract(DepositContract):
 template `as`(address: Eth1Address, T: type bellatrix.ExecutionAddress): T =
   T(data: distinctBase(address))

-template `as`(address: BlockHash, T: type Eth2Digest): T =
+template `as`(address: Hash32, T: type Eth2Digest): T =
   asEth2Digest(address)

 func getOrDefault[T](x: Opt[T]): T =

View File

@@ -10,7 +10,7 @@
 import
   testutils/unittests,
-  chronos, eth/keys, eth/p2p/discoveryv5/enr,
+  chronos, eth/p2p/discoveryv5/enr,
   ../beacon_chain/spec/[forks, network],
   ../beacon_chain/networking/[eth2_network, eth2_discovery],
   ./testutil