2018-11-26 13:33:06 +00:00
|
|
|
import
|
2020-12-09 22:44:59 +00:00
|
|
|
std/[deques, hashes, options, strformat, strutils, sequtils, tables,
|
|
|
|
typetraits, uri],
|
|
|
|
# Nimble packages:
|
|
|
|
chronos, json, metrics, chronicles/timings,
|
|
|
|
web3, web3/ethtypes as web3Types, eth/common/eth_types, eth/async_utils,
|
|
|
|
# Local modules:
|
2020-11-24 21:21:47 +00:00
|
|
|
spec/[datatypes, digest, crypto, helpers],
|
2020-10-12 01:07:20 +00:00
|
|
|
ssz, beacon_chain_db, network_metadata, merkle_minimal, beacon_node_status
|
2018-11-26 13:33:06 +00:00
|
|
|
|
2020-06-19 17:42:28 +00:00
|
|
|
export
|
2020-07-02 15:14:11 +00:00
|
|
|
web3Types
|
2020-06-19 17:42:28 +00:00
|
|
|
|
2020-11-19 17:19:03 +00:00
|
|
|
# All log statements in this module are tagged with the "eth1" topic.
logScope:
  topics = "eth1"
|
|
|
|
|
2020-03-24 11:13:07 +00:00
|
|
|
# Web3 DSL mirror of the Eth2 deposit contract ABI.
# `deposit` submits a new validator deposit; the two getters read the
# contract's incremental Merkle tree state; `DepositEvent` describes the
# log record emitted for every successful deposit.
contract(DepositContract):
  proc deposit(pubkey: Bytes48,
               withdrawalCredentials: Bytes32,
               signature: Bytes96,
               deposit_data_root: FixedBytes[32])

  proc get_deposit_root(): FixedBytes[32]
  proc get_deposit_count(): Bytes8

  proc DepositEvent(pubkey: Bytes48,
                    withdrawalCredentials: Bytes32,
                    amount: Bytes8,
                    signature: Bytes96,
                    index: Bytes8) {.event.}
|
|
|
|
# TODO
|
|
|
|
# The raises list of this module are still not usable due to general
|
|
|
|
# Exceptions being reported from Chronos's asyncfutures2.
|
|
|
|
|
2020-11-24 21:21:47 +00:00
|
|
|
const
  # Default timeout used by `awaitWithRetries` for web3 requests.
  web3Timeouts = 60.seconds

  # Optional extra validation of the deposit root/count reported by the
  # provider; enabled with -d:has_deposit_root_checks.
  hasDepositRootChecks = defined(has_deposit_root_checks)

  # Optional genesis-detection support; enabled with -d:has_genesis_detection.
  hasGenesisDetection* = defined(has_genesis_detection)
|
|
|
|
|
2018-11-26 13:33:06 +00:00
|
|
|
type
  Eth1BlockNumber* = uint64
  Eth1BlockTimestamp* = uint64
  Eth1BlockHeader = web3Types.BlockHeader

  Database* = object

  Eth1Block* = ref object
    number*: Eth1BlockNumber
    timestamp*: Eth1BlockTimestamp      # NOTE(review): may be zero until fetched lazily via `fetchTimestamp` — confirm
    deposits*: seq[DepositData]         # deposits made within this particular block
    voteData*: Eth1Data                 # cumulative deposit count/root as of this block
    voteDataVerified*: bool             # set when the root matched finalized Eth2 data

    when hasGenesisDetection:
      activeValidatorsCount*: uint64

  Eth1Chain* = object
    # In-memory chain of Eth1 blocks, ordered by number, with a hash index.
    blocks: Deque[Eth1Block]
    blocksByHash: Table[BlockHash, Eth1Block]

  Eth1Monitor* = ref object
    db: BeaconChainDB
    preset: RuntimePreset

    dataProvider: Web3DataProviderRef

    latestEth1BlockNumber: Eth1BlockNumber
    eth1Progress: AsyncEvent

    eth1Chain: Eth1Chain
    knownStart: DepositContractSnapshot

    # Merkleizer advanced to the deposits finalized by Eth2 consensus.
    eth2FinalizedDepositsMerkleizer: DepositsMerkleizer

    runFut: Future[void]

    when hasGenesisDetection:
      genesisState: NilableBeaconStateRef
      genesisStateFut: Future[void]

  Web3DataProvider* = object
    url: string
    web3: Web3
    ns: Sender[DepositContract]
    blockHeadersSubscription: Subscription

  Web3DataProviderRef* = ref Web3DataProvider

  # Error hierarchy for provider failures; all are CatchableError.
  DataProviderFailure = object of CatchableError
  CorruptDataProvider = object of DataProviderFailure
  DataProviderTimeout = object of DataProviderFailure

  DisconnectHandler* = proc () {.gcsafe, raises: [Defect].}

  DepositEventHandler* = proc (
    pubkey: Bytes48,
    withdrawalCredentials: Bytes32,
    amount: Bytes8,
    signature: Bytes96, merkleTreeIndex: Bytes8, j: JsonNode) {.raises: [Defect], gcsafe.}

  BlockProposalEth1Data* = object
    ## Result of `getBlockProposalData`: the eth1 vote to cast and the
    ## deposits to include in the proposed block.
    vote*: Eth1Data
    deposits*: seq[Deposit]
    hasMissingDeposits*: bool
|
2020-11-19 17:19:03 +00:00
|
|
|
|
2020-12-09 22:44:59 +00:00
|
|
|
# Prometheus metrics exposed by this module.
declareCounter failed_web3_requests,
  "Failed web3 requests"

declareGauge eth1_latest_head,
  "The highest Eth1 block number observed on the network"

declareGauge eth1_synced_head,
  "Block number of the highest synchronized block according to follow distance"

declareGauge eth1_finalized_head,
  "Block number of the highest Eth1 block finalized by Eth2 consensus"

declareGauge eth1_finalized_deposits,
  "Number of deposits that were finalized by the Eth2 consensus"

declareGauge eth1_chain_len,
  "The length of the in-memory chain of Eth1 blocks"
|
|
|
|
|
2020-11-24 21:21:47 +00:00
|
|
|
# Accessor for the configured deposit contract address.
template depositContractAddress*(m: Eth1Monitor): Eth1Address =
  m.dataProvider.ns.contractAddress

# Accessor for the (possibly rewritten) web3 provider URL.
template web3Url*(m: Eth1Monitor): string =
  m.dataProvider.url

# Read-only view over the in-memory Eth1 block chain.
template blocks*(m: Eth1Monitor): Deque[Eth1Block] =
  m.eth1Chain.blocks
|
|
|
|
|
2020-11-12 13:49:13 +00:00
|
|
|
proc fixupWeb3Urls*(web3Url: var string) =
  ## Converts HTTP and HTTPS Infura URLs to their WebSocket equivalents
  ## because we are missing a functional HTTPS client.
  ## URLs without a protocol, or with an unsupported http(s) protocol,
  ## are rewritten in place to a ws:// equivalent.
  # Lowercased copy used only for case-insensitive matching; slices of the
  # rewritten URL are always taken from the original string so the case of
  # the Infura project key and path components is preserved (the original
  # code sliced `normalizedUrl`, lowercasing them).
  let normalizedUrl = toLowerAscii(web3Url)
  var pos = 0

  # Matches `x` at `pos` in the normalized URL, advancing `pos` on success.
  template skip(x: string): bool {.dirty.} =
    if normalizedUrl.len - pos >= x.len and
       normalizedUrl.toOpenArray(pos, pos + x.len - 1) == x:
      pos += x.len
      true
    else:
      false

  if not (skip("https://") or skip("http://")):
    if not (skip("ws://") or skip("wss://")):
      web3Url = "ws://" & web3Url
      warn "The Web3 URL does not specify a protocol. Assuming a WebSocket server", web3Url
    return

  block infuraRewrite:
    var pos = pos
    let network = if skip("mainnet"): mainnet
                  elif skip("goerli"): goerli
                  else: break

    if not skip(".infura.io/v3/"):
      break

    # Slice the original URL (not the lowercased copy) to keep the
    # project key's exact case.
    template infuraKey: string = web3Url.substr(pos)

    web3Url = "wss://" & $network & ".infura.io/ws/v3/" & infuraKey
    return

  block gethRewrite:
    # Same rule: preserve the original case of host/path after the protocol.
    web3Url = "ws://" & web3Url.substr(pos)
    warn "Only WebSocket web3 providers are supported. Rewriting URL", web3Url
|
2020-11-05 23:11:06 +00:00
|
|
|
|
2020-12-09 22:44:59 +00:00
|
|
|
func toGaugeValue(x: uint64): int64 =
  ## Converts an unsigned counter into an `int64` suitable for a metrics
  ## gauge, clamping values above `int64.high` instead of overflowing.
  if x <= uint64(int64.high):
    int64(x)
  else:
    int64.high
|
|
|
|
|
|
|
|
# Overload for web3's distinct `Quantity` type: unwrap and clamp.
template toGaugeValue(x: Quantity): int64 =
  toGaugeValue(distinctBase x)
|
|
|
|
|
2020-07-07 23:02:14 +00:00
|
|
|
# TODO: Add preset validation
|
|
|
|
# MIN_GENESIS_ACTIVE_VALIDATOR_COUNT should be larger than SLOTS_PER_EPOCH
|
2020-08-06 09:08:54 +00:00
|
|
|
# doAssert SECONDS_PER_ETH1_BLOCK * preset.ETH1_FOLLOW_DISTANCE < GENESIS_DELAY,
|
2020-07-07 23:02:14 +00:00
|
|
|
# "Invalid configuration: GENESIS_DELAY is set too low"
|
2020-03-24 11:13:07 +00:00
|
|
|
|
2020-11-09 14:18:55 +00:00
|
|
|
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/validator.md#get_eth1_data
|
2020-03-24 11:13:07 +00:00
|
|
|
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/validator.md#get_eth1_data
# Unix timestamp (seconds) at the start of `slot` for the given state.
func compute_time_at_slot(state: BeaconState, slot: Slot): uint64 =
  state.genesis_time + slot * SECONDS_PER_SLOT
|
2020-03-24 11:13:07 +00:00
|
|
|
|
2020-11-09 14:18:55 +00:00
|
|
|
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/validator.md#get_eth1_data
|
2020-03-24 11:13:07 +00:00
|
|
|
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/validator.md#get_eth1_data
func voting_period_start_time*(state: BeaconState): uint64 =
  ## Timestamp of the first slot of the current eth1 voting period.
  let eth1_voting_period_start_slot =
    state.slot - state.slot mod SLOTS_PER_ETH1_VOTING_PERIOD.uint64
  compute_time_at_slot(state, eth1_voting_period_start_slot)
|
2020-03-24 11:13:07 +00:00
|
|
|
|
2020-11-09 14:18:55 +00:00
|
|
|
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/validator.md#get_eth1_data
|
2020-11-20 14:05:37 +00:00
|
|
|
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/validator.md#get_eth1_data
# A block is a voting candidate when it is between ETH1_FOLLOW_DISTANCE and
# 2 * ETH1_FOLLOW_DISTANCE (in expected eth1 block time) behind the start of
# the voting period.
func is_candidate_block(preset: RuntimePreset,
                        blk: Eth1Block,
                        period_start: uint64): bool =
  (blk.timestamp + SECONDS_PER_ETH1_BLOCK * preset.ETH1_FOLLOW_DISTANCE <= period_start) and
  (blk.timestamp + SECONDS_PER_ETH1_BLOCK * preset.ETH1_FOLLOW_DISTANCE * 2 >= period_start)
|
2020-03-24 11:13:07 +00:00
|
|
|
|
2020-06-19 17:42:28 +00:00
|
|
|
# Converts a web3 `BlockHash` into an `Eth2Digest` (both are 32 bytes).
func asEth2Digest*(x: BlockHash): Eth2Digest =
  Eth2Digest(data: array[32, byte](x))

# Inverse conversion: `Eth2Digest` -> web3 `BlockHash`.
template asBlockHash(x: Eth2Digest): BlockHash =
  BlockHash(x.data)
|
|
|
|
|
2020-09-06 08:39:25 +00:00
|
|
|
# Compact one-line representation of a block used in log messages.
func shortLog*(b: Eth1Block): string =
  &"{b.number}:{shortLog b.voteData.block_hash}(deposits = {b.voteData.deposit_count})"

# Looks up the Eth1 block matching `eth1Data` by hash; nil when unknown.
template findBlock*(eth1Chain: Eth1Chain, eth1Data: Eth1Data): Eth1Block =
  getOrDefault(eth1Chain.blocksByHash, asBlockHash(eth1Data.block_hash), nil)
|
|
|
|
|
2020-11-19 17:19:03 +00:00
|
|
|
func makeSuccessorWithoutDeposits(existingBlock: Eth1Block,
                                  successor: BlockObject): Eth1Block =
  ## Creates an `Eth1Block` for `successor` that carries over the cumulative
  ## deposit count and root of `existingBlock` (i.e. assuming the successor
  ## introduced no new deposits).
  # Fixed the `ETh1Block` capitalization typo in the return type — it
  # compiled only thanks to Nim's style-insensitivity, but was inconsistent
  # with the declared `Eth1Block` name.
  result = Eth1Block(
    number: Eth1BlockNumber successor.number,
    timestamp: Eth1BlockTimestamp successor.timestamp,
    voteData: Eth1Data(
      block_hash: successor.hash.asEth2Digest,
      deposit_count: existingBlock.voteData.deposit_count,
      deposit_root: existingBlock.voteData.deposit_root))

  when hasGenesisDetection:
    result.activeValidatorsCount = existingBlock.activeValidatorsCount
|
2020-11-19 17:19:03 +00:00
|
|
|
|
2020-11-20 14:05:37 +00:00
|
|
|
# Returns the newest known block that is a valid voting candidate for the
# period starting at `periodStart`, or nil when none qualifies.
func latestCandidateBlock(m: Eth1Monitor, periodStart: uint64): Eth1Block =
  for i in countdown(m.eth1Chain.blocks.len - 1, 0):
    let blk = m.eth1Chain.blocks[i]
    if is_candidate_block(m.preset, blk, periodStart):
      return blk
|
|
|
|
|
2020-12-09 22:44:59 +00:00
|
|
|
# Removes the oldest block, keeping the hash index and length gauge in sync.
proc popFirst(eth1Chain: var Eth1Chain) =
  let removed = eth1Chain.blocks.popFirst
  eth1Chain.blocksByHash.del removed.voteData.block_hash.asBlockHash
  eth1_chain_len.set eth1Chain.blocks.len.int64

# Appends a block, keeping the hash index and length gauge in sync.
proc addBlock(eth1Chain: var Eth1Chain, newBlock: Eth1Block) =
  eth1Chain.blocks.addLast newBlock
  eth1Chain.blocksByHash[newBlock.voteData.block_hash.asBlockHash] = newBlock
  eth1_chain_len.set eth1Chain.blocks.len.int64
|
2020-03-24 11:13:07 +00:00
|
|
|
|
2020-11-20 14:42:04 +00:00
|
|
|
# Hashes the raw memory of the value.
# NOTE(review): `hashData` over the object's bytes assumes `Eth1Data` is a
# flat value type with no padding-sensitive or reference fields — confirm.
func hash*(x: Eth1Data): Hash =
  hashData(unsafeAddr x, sizeof(x))

# Blocks hash by their vote data (block hash + deposit count/root).
template hash*(x: Eth1Block): Hash =
  hash(x.voteData)
|
2020-03-24 11:13:07 +00:00
|
|
|
|
2020-12-01 21:20:28 +00:00
|
|
|
# Awaits `lazyFutExpr` (re-evaluated on each attempt) with a per-attempt
# `timeout`, retrying up to `retries` times with exponential backoff.
# Raises `DataProviderFailure` after exhausting all attempts; `Defect`s are
# re-raised immediately. Must be used inside an async proc (uses `yield`).
template awaitWithRetries[T](lazyFutExpr: Future[T],
                             retries = 3,
                             timeout = web3Timeouts): untyped =
  const
    reqType = astToStr(lazyFutExpr)  # request description for logs/errors

  var
    retryDelayMs = 16000             # initial backoff; doubles each retry
    f: Future[T]
    attempts = 0

  while true:
    f = lazyFutExpr
    # Race the request against a timeout; `yield` keeps `f` cancelable.
    yield f or sleepAsync(timeout)
    if not f.finished:
      # Timed out: cancel the in-flight request before retrying.
      await cancelAndWait(f)
    elif f.failed:
      if f.error[] of Defect:
        raise f.error                # programming errors propagate as-is
      else:
        debug "Web3 request failed", req = reqType, err = f.error.msg
        inc failed_web3_requests
    else:
      break                          # success

    inc attempts
    if attempts >= retries:
      var errorMsg = reqType & " failed " & $retries & " times"
      if f.failed: errorMsg &= ". Last error: " & f.error.msg
      raise newException(DataProviderFailure, errorMsg)

    await sleepAsync(chronos.milliseconds(retryDelayMs))
    retryDelayMs *= 2

  read(f)
|
|
|
|
|
2020-11-03 01:21:07 +00:00
|
|
|
proc close*(p: Web3DataProviderRef): Future[void] {.async.} =
  ## Unsubscribes from block headers (if subscribed) and closes the
  ## underlying web3 connection.
  if p.blockHeadersSubscription != nil:
    awaitWithRetries(p.blockHeadersSubscription.unsubscribe())

  await p.web3.close()

proc getBlockByHash*(p: Web3DataProviderRef, hash: BlockHash):
                     Future[BlockObject] =
  ## Fetches a block by hash (without full transaction bodies).
  return p.web3.provider.eth_getBlockByHash(hash, false)

proc getBlockByNumber*(p: Web3DataProviderRef,
                       number: Eth1BlockNumber): Future[BlockObject] =
  ## Fetches a block by number (hex-encoded, without full transaction bodies).
  return p.web3.provider.eth_getBlockByNumber(&"0x{number:X}", false)
|
|
|
|
|
2020-11-03 01:21:07 +00:00
|
|
|
# Extracts and deserializes field `fieldName` of `j` as `ValueType`.
template readJsonField(j: JsonNode, fieldName: string, ValueType: type): untyped =
  var res: ValueType
  fromJson(j[fieldName], fieldName, res)
  res
|
|
|
|
|
2020-11-03 01:21:07 +00:00
|
|
|
proc depositEventsToBlocks(depositsList: JsonNode): seq[Eth1Block] =
  ## Groups a JSON array of `DepositEvent` log entries (as returned by the
  ## provider) into `Eth1Block`s, one per distinct block number, each
  ## carrying its decoded `DepositData` entries.
  ## Raises when the provider response is not a JSON array.
  if depositsList.kind != JArray:
    raise newException(CatchableError,
      "Web3 provider didn't return a list of deposit events")

  var lastEth1Block: Eth1Block

  for logEvent in depositsList:
    let
      blockNumber = Eth1BlockNumber readJsonField(logEvent, "blockNumber", Quantity)
      blockHash = readJsonField(logEvent, "blockHash", BlockHash)
      logData = strip0xPrefix(logEvent["data"].getStr)

    # Events are assumed to arrive ordered by block; start a new block
    # whenever the block number changes.
    if lastEth1Block == nil or lastEth1Block.number != blockNumber:
      lastEth1Block = Eth1Block(
        number: blockNumber,
        voteData: Eth1Data(block_hash: blockHash.asEth2Digest))

      result.add lastEth1Block

    # ABI-decode the event payload field by field, in declaration order.
    var
      pubkey: Bytes48
      withdrawalCredentials: Bytes32
      amount: Bytes8
      signature: Bytes96
      index: Bytes8

    var offset = 0
    offset += decode(logData, offset, pubkey)
    offset += decode(logData, offset, withdrawalCredentials)
    offset += decode(logData, offset, amount)
    offset += decode(logData, offset, signature)
    offset += decode(logData, offset, index)

    lastEth1Block.deposits.add DepositData(
      pubkey: ValidatorPubKey.init(array[48, byte](pubkey)),
      withdrawal_credentials: Eth2Digest(data: array[32, byte](withdrawalCredentials)),
      amount: bytes_to_uint64(array[8, byte](amount)),
      signature: ValidatorSig.init(array[96, byte](signature)))
|
2020-06-27 12:01:19 +00:00
|
|
|
|
2020-11-03 01:21:07 +00:00
|
|
|
proc fetchTimestamp(p: Web3DataProviderRef, blk: Eth1Block) {.async.} =
  ## Fills in `blk.timestamp` by fetching the full block header from the
  ## provider (the deposit log entries do not carry a timestamp).
  let web3block = awaitWithRetries(
    p.getBlockByHash(blk.voteData.block_hash.asBlockHash))
  blk.timestamp = Eth1BlockTimestamp web3block.timestamp
|
|
|
|
|
2020-12-01 11:14:32 +00:00
|
|
|
type
  # Outcome of cross-checking a block's deposit root/count against the
  # values reported by the deposit contract itself.
  DepositContractDataStatus = enum
    Fetched                 # value obtained; nothing to compare against
    VerifiedCorrect         # our value matched the contract's value
    DepositRootIncorrect    # deposit root mismatch
    DepositRootUnavailable  # contract call for the root failed
    DepositCountIncorrect   # deposit count mismatch
    DepositCountUnavailable # contract call for the count failed
|
2020-11-03 01:21:07 +00:00
|
|
|
|
2020-12-01 11:14:32 +00:00
|
|
|
when hasDepositRootChecks:
  const
    contractCallTimeout = seconds(60)

  # Awaits `fut`, raising `DataProviderTimeout` if it does not complete
  # within `timeout`.
  template awaitOrRaiseOnTimeout[T](fut: Future[T],
                                    timeout: Duration): T =
    awaitWithTimeout(fut, timeout):
      raise newException(DataProviderTimeout, "Timeout")

  proc fetchDepositContractData(p: Web3DataProviderRef, blk: Eth1Block):
                                Future[DepositContractDataStatus] {.async.} =
    ## Queries the deposit contract at `blk`'s height for its deposit root
    ## and count, filling in missing values on `blk` and reporting whether
    ## already-known values match. Both contract calls are issued
    ## concurrently before either is awaited.
    let
      depositRoot = p.ns.get_deposit_root.call(blockNumber = blk.number)
      rawCount = p.ns.get_deposit_count.call(blockNumber = blk.number)

    try:
      let fetchedRoot = asEth2Digest(
        awaitOrRaiseOnTimeout(depositRoot, contractCallTimeout))
      if blk.voteData.deposit_root == default(Eth2Digest):
        # No local value yet: adopt the fetched one.
        blk.voteData.deposit_root = fetchedRoot
        result = Fetched
      elif blk.voteData.deposit_root == fetchedRoot:
        result = VerifiedCorrect
      else:
        result = DepositRootIncorrect
    except CatchableError as err:
      debug "Failed to fetch deposits root",
        blockNumber = blk.number,
        err = err.msg
      result = DepositRootUnavailable

    try:
      let fetchedCount = bytes_to_uint64(array[8, byte](
        awaitOrRaiseOnTimeout(rawCount, contractCallTimeout)))
      if blk.voteData.deposit_count == 0:
        blk.voteData.deposit_count = fetchedCount
      elif blk.voteData.deposit_count != fetchedCount:
        # A count mismatch overrides any root status already recorded.
        result = DepositCountIncorrect
    except CatchableError as err:
      debug "Failed to fetch deposits count",
        blockNumber = blk.number,
        err = err.msg
      result = DepositCountUnavailable
|
2020-06-27 12:01:19 +00:00
|
|
|
|
2020-10-14 14:04:08 +00:00
|
|
|
proc onBlockHeaders*(p: Web3DataProviderRef,
                     blockHeaderHandler: BlockHeaderHandler,
                     errorHandler: SubscriptionErrorHandler) {.async.} =
  ## Subscribes to new Eth1 block headers, replacing any existing
  ## subscription on this provider.
  if p.blockHeadersSubscription != nil:
    awaitWithRetries(p.blockHeadersSubscription.unsubscribe())

  info "Waiting for new Eth1 block headers"

  p.blockHeadersSubscription = awaitWithRetries(
    p.web3.subscribeForBlockHeaders(blockHeaderHandler, errorHandler))
|
2020-06-27 12:01:19 +00:00
|
|
|
|
2020-11-24 23:51:17 +00:00
|
|
|
{.push raises: [Defect].}
|
|
|
|
|
2020-11-04 09:06:58 +00:00
|
|
|
# SSZ-style deposits root: the Merkle root mixed with the leaf count.
func getDepositsRoot(m: DepositsMerkleizer): Eth2Digest =
  mixInLength(m.getFinalHash, int m.totalChunks)

# Decodes the contract's 32-byte big-endian deposit count into a uint64,
# asserting that the high 24 bytes are zero (i.e. the count fits).
func depositCountU64(s: DepositContractState): uint64 =
  for i in 0 .. 23:
    doAssert s.deposit_count[i] == 0

  uint64.fromBytesBE s.deposit_count[24..31]
|
|
|
|
|
|
|
|
# Serializes the merkleizer's internal state into the on-chain
# `DepositContractState` representation (branch + big-endian count).
func toDepositContractState(merkleizer: DepositsMerkleizer): DepositContractState =
  # TODO There is an off by one discrepancy in the size of the arrays here that
  #      need to be investigated. It shouldn't matter as long as the tree is
  #      not populated to its maximum size.
  result.branch[0..31] = merkleizer.getCombinedChunks[0..31]
  result.deposit_count[24..31] = merkleizer.getChunkCount().toBytesBE

# Reconstructs a merkleizer from a persisted contract snapshot.
func createMerkleizer(s: DepositContractSnapshot): DepositsMerkleizer =
  DepositsMerkleizer.init(
    s.depositContractState.branch,
    s.depositContractState.depositCountU64)

# Builds the `Eth1Data` (hash, count, root) corresponding to a merkleizer's
# current state at the given block hash.
func eth1DataFromMerkleizer(eth1Block: Eth2Digest,
                            merkleizer: DepositsMerkleizer): Eth1Data =
  Eth1Data(
    block_hash: eth1Block,
    deposit_count: merkleizer.getChunkCount,
    deposit_root: merkleizer.getDepositsRoot)
|
|
|
|
|
2020-11-24 23:51:17 +00:00
|
|
|
proc pruneOldBlocks(m: Eth1Monitor, depositIndex: uint64) =
  ## Drops in-memory blocks whose deposits are fully covered by the
  ## finalized `depositIndex`, folding their deposits into the finalized
  ## merkleizer and persisting a new snapshot when progress was made.
  let initialChunks = m.eth2FinalizedDepositsMerkleizer.getChunkCount
  var lastBlock: Eth1Block

  while m.eth1Chain.blocks.len > 0:
    let blk = m.eth1Chain.blocks.peekFirst
    if blk.voteData.deposit_count >= depositIndex:
      break
    else:
      # All of this block's deposits are finalized; absorb and drop it.
      for deposit in blk.deposits:
        m.eth2FinalizedDepositsMerkleizer.addChunk hash_tree_root(deposit).data
      m.eth1Chain.popFirst()
      lastBlock = blk

  if m.eth2FinalizedDepositsMerkleizer.getChunkCount > initialChunks:
    # `lastBlock` is non-nil here: the chunk count only grows when at least
    # one block was pruned above.
    m.db.putEth2FinalizedTo DepositContractSnapshot(
      eth1Block: lastBlock.voteData.block_hash,
      depositContractState: m.eth2FinalizedDepositsMerkleizer.toDepositContractState)

    eth1_finalized_head.set lastBlock.number.toGaugeValue
    eth1_finalized_deposits.set lastBlock.voteData.deposit_count.toGaugeValue

    debug "Eth1 blocks pruned",
           newTailBlock = lastBlock.voteData.block_hash,
           depositsCount = lastBlock.voteData.deposit_count
|
|
|
|
|
2020-11-24 21:21:47 +00:00
|
|
|
proc advanceMerkleizer(eth1Chain: Eth1Chain,
                       merkleizer: var DepositsMerkleizer,
                       depositIndex: uint64): bool =
  ## Feeds deposits from the in-memory chain into `merkleizer` until it
  ## holds exactly `depositIndex` chunks. Returns false when the chain does
  ## not contain enough deposits to reach that index.
  if eth1Chain.blocks.len == 0:
    return depositIndex == merkleizer.getChunkCount

  if eth1Chain.blocks.peekLast.voteData.deposit_count < depositIndex:
    return false

  let
    firstBlock = eth1Chain.blocks[0]
    # Cumulative deposit count just before the oldest in-memory block.
    depositsInLastPrunedBlock = firstBlock.voteData.deposit_count -
                                firstBlock.deposits.lenu64

  # advanceMerkleizer should always be called shortly after prunning the chain
  doAssert depositsInLastPrunedBlock == merkleizer.getChunkCount

  for blk in eth1Chain.blocks:
    for deposit in blk.deposits:
      if merkleizer.getChunkCount < depositIndex:
        merkleizer.addChunk hash_tree_root(deposit).data
      else:
        return true

  return merkleizer.getChunkCount == depositIndex
|
|
|
|
|
2020-12-02 18:15:36 +00:00
|
|
|
proc getDepositsRange(eth1Chain: Eth1Chain, first, last: uint64): seq[DepositData] =
  ## Collects the deposits with global indices in `[first, last)` from the
  ## in-memory chain.
  # TODO It's possible to make this faster by performing binary search that
  #      will locate the blocks holding the `first` and `last` indices.
  # TODO There is an assumption here that the requested range will be present
  #      in the Eth1Chain. This should hold true at the single call site right
  #      now, but we need to guard the pre-conditions better.
  for blk in eth1Chain.blocks:
    # Skip blocks entirely before the requested range.
    if blk.voteData.deposit_count <= first:
      continue

    let firstDepositIdxInBlk = blk.voteData.deposit_count - blk.deposits.lenu64
    if firstDepositIdxInBlk >= last:
      return

    for i in 0 ..< blk.deposits.lenu64:
      let globalIdx = firstDepositIdxInBlk + i
      if globalIdx >= first and globalIdx < last:
        result.add blk.deposits[i]
|
2020-11-24 21:21:47 +00:00
|
|
|
|
2020-11-30 23:59:35 +00:00
|
|
|
proc lowerBound(chain: Eth1Chain, depositCount: uint64): Eth1Block =
  ## Returns the newest block whose cumulative deposit count does not
  ## exceed `depositCount`, or nil when no such block exists.
  # TODO: This can be replaced with a proper binary search in the
  #       future, but the `algorithm` module currently requires an
  #       `openArray`, which the `deques` module can't provide yet.
  var best: Eth1Block = nil
  for candidate in chain.blocks:
    if candidate.voteData.deposit_count > depositCount:
      break
    best = candidate
  best
|
|
|
|
|
|
|
|
proc trackFinalizedState*(m: Eth1Monitor,
                          finalizedEth1Data: Eth1Data,
                          finalizedStateDepositIndex: uint64): bool =
  ## Records the Eth2-finalized eth1 checkpoint against the local chain and
  ## prunes fully-finalized blocks on success.
  # Returns true if the Eth1Monitor is synced to the finalization point
  if m.eth1Chain.blocks.len == 0:
    debug "Eth1 chain not initialized"
    return false

  let latest = m.eth1Chain.blocks.peekLast
  if latest.voteData.deposit_count < finalizedEth1Data.deposit_count:
    # We haven't yet downloaded the blocks covering the finalized deposits.
    debug "Eth1 chain not synced",
          ourDepositsCount = latest.voteData.deposit_count,
          targetDepositsCount = finalizedEth1Data.deposit_count
    return false

  let matchingBlock = m.eth1Chain.lowerBound(finalizedEth1Data.deposit_count)
  result = if matchingBlock != nil:
    if matchingBlock.voteData.deposit_root == finalizedEth1Data.deposit_root:
      matchingBlock.voteDataVerified = true
      true
    else:
      # Our deposit history disagrees with finalized consensus data.
      error "Corrupted deposits history detected",
            depositsCount = finalizedEth1Data.deposit_count,
            targetDepositsRoot = finalizedEth1Data.deposit_root,
            ourDepositsRoot = matchingBlock.voteData.deposit_root
      false
  else:
    error "The Eth1 chain is in inconsistent state",
          checkpointHash = finalizedEth1Data.block_hash,
          checkpointDeposits = finalizedEth1Data.deposit_count,
          localChainStart = shortLog(m.eth1Chain.blocks.peekFirst),
          localChainEnd = shortLog(m.eth1Chain.blocks.peekLast)
    false

  if result:
    m.pruneOldBlocks(finalizedStateDepositIndex)
|
|
|
|
|
2020-11-30 23:59:35 +00:00
|
|
|
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/validator.md#get_eth1_data
|
|
|
|
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/validator.md#get_eth1_data
proc getBlockProposalData*(m: Eth1Monitor,
                           state: BeaconState,
                           finalizedEth1Data: Eth1Data,
                           finalizedStateDepositIndex: uint64): BlockProposalEth1Data =
  ## Determines the eth1 vote to cast and the deposits (with Merkle proofs)
  ## to include when proposing a block for `state`.
  let
    periodStart = voting_period_start_time(state)
    hasLatestDeposits = m.trackFinalizedState(finalizedEth1Data,
                                              finalizedStateDepositIndex)

  # Tally the existing in-state votes that we can still consider valid.
  var otherVotesCountTable = initCountTable[Eth1Data]()
  for vote in state.eth1_data_votes:
    let
      eth1Block = m.eth1Chain.findBlock(vote)
      isSuccessor = vote.deposit_count >= state.eth1_data.deposit_count
      # TODO(zah)
      # There is a slight deviation from the spec here to deal with the following
      # problem: the in-memory database of eth1 blocks for a restarted node will
      # be empty which will lead a "no change" vote. To fix this, we'll need to
      # add rolling persistance for all potentially voted on blocks.
      isCandidate = (eth1Block == nil or is_candidate_block(m.preset, eth1Block, periodStart))

    if isSuccessor and isCandidate:
      otherVotesCountTable.inc vote
    else:
      debug "Ignoring eth1 vote", root = vote.block_hash, isSuccessor, isCandidate

  var pendingDepositsCount = state.eth1_data.deposit_count - state.eth1_deposit_index
  if otherVotesCountTable.len > 0:
    # Join the most popular existing vote.
    let (winningVote, votes) = otherVotesCountTable.largest
    debug "Voting on eth1 head with majority", votes
    result.vote = winningVote
    if uint64((votes + 1) * 2) > SLOTS_PER_ETH1_VOTING_PERIOD:
      # The vote will reach a majority, so its deposits will become pending.
      pendingDepositsCount = winningVote.deposit_count - state.eth1_deposit_index
  else:
    let latestBlock = m.latestCandidateBlock(periodStart)
    if latestBlock == nil:
      debug "No acceptable eth1 votes and no recent candidates. Voting no change"
      result.vote = state.eth1_data
    else:
      debug "No acceptable eth1 votes. Voting for latest candidate"
      result.vote = latestBlock.voteData

  if pendingDepositsCount > 0:
    if hasLatestDeposits:
      let
        totalDepositsInNewBlock = min(MAX_DEPOSITS, pendingDepositsCount)
        deposits = m.eth1Chain.getDepositsRange(
          state.eth1_deposit_index,
          state.eth1_deposit_index + pendingDepositsCount)
        depositRoots = mapIt(deposits, hash_tree_root(it))

      # Work on a copy so the finalized merkleizer is not disturbed.
      var scratchMerkleizer = copy m.eth2FinalizedDepositsMerkleizer
      if m.eth1Chain.advanceMerkleizer(scratchMerkleizer, state.eth1_deposit_index):
        let proofs = scratchMerkleizer.addChunksAndGenMerkleProofs(depositRoots)
        for i in 0 ..< totalDepositsInNewBlock:
          # Deposit proofs are 32 branch nodes plus the mixed-in length leaf.
          var proof: array[33, Eth2Digest]
          proof[0..31] = proofs.getProof(i.int)
          proof[32] = default(Eth2Digest)
          proof[32].data[0..7] = toBytesLE uint64(result.vote.deposit_count)
          result.deposits.add Deposit(data: deposits[i], proof: proof)
      else:
        error "The Eth1 chain is in inconsistent state" # This should not really happen
        result.hasMissingDeposits = true
    else:
      result.hasMissingDeposits = true
|
2020-03-24 11:13:07 +00:00
|
|
|
|
2020-11-24 23:51:17 +00:00
|
|
|
{.pop.}
|
|
|
|
|
2020-11-17 19:50:07 +00:00
|
|
|
proc new(T: type Web3DataProvider,
         depositContractAddress: Eth1Address,
         web3Url: string): Future[Result[Web3DataProviderRef, string]] {.async.} =
  ## Connects to the web3 endpoint (5 second budget) and binds a sender for
  ## the deposit contract. Returns an error Result on connection failure.
  let web3Fut = newWeb3(web3Url)
  # Race connection establishment against a fixed timeout.
  yield web3Fut or sleepAsync(chronos.seconds(5))
  if (not web3Fut.finished) or web3Fut.failed:
    await cancelAndWait(web3Fut)
    return err "Failed to setup web3 connection"

  let
    web3 = web3Fut.read
    ns = web3.contractSender(DepositContract, depositContractAddress)

  return ok Web3DataProviderRef(url: web3Url, web3: web3, ns: ns)
|
|
|
|
|
2020-11-03 01:21:07 +00:00
|
|
|
proc init*(T: type Eth1Monitor,
           db: BeaconChainDB,
           preset: RuntimePreset,
           web3Url: string,
           depositContractAddress: Eth1Address,
           depositContractSnapshot: DepositContractSnapshot,
           eth1Network: Option[Eth1Network]): Future[Result[T, string]] {.async.} =
  ## Creates an `Eth1Monitor` connected to the given web3 endpoint,
  ## verifying that the provider is attached to the expected network when
  ## `eth1Network` is supplied. Returns an error Result on failure.
  var web3Url = web3Url
  fixupWeb3Urls web3Url

  try:
    let dataProviderRes = await Web3DataProvider.new(depositContractAddress, web3Url)
    if dataProviderRes.isErr:
      return err(dataProviderRes.error)

    let
      dataProvider = dataProviderRes.get
      web3 = dataProvider.web3

    if eth1Network.isSome:
      let
        providerNetwork = awaitWithRetries web3.provider.net_version()
        expectedNetwork = case eth1Network.get
          of mainnet: "1"
          of rinkeby: "4"
          of goerli: "5"
      if expectedNetwork != providerNetwork:
        return err("The specified web3 provider is not attached to the " &
                   $eth1Network.get & " network")

    return ok T(
      db: db,
      preset: preset,
      knownStart: depositContractSnapshot,
      dataProvider: dataProvider,
      eth1Progress: newAsyncEvent())
  except CatchableError as exc:
    # Include the underlying failure reason instead of swallowing it
    # (the exception binding is also renamed so it no longer shadows the
    # Result `err` constructor).
    return err("Failed to initialize the Eth1 monitor: " & exc.msg)
|
2019-09-09 15:59:02 +00:00
|
|
|
|
2020-09-28 15:19:57 +00:00
|
|
|
proc safeCancel(fut: var Future[void]) =
  ## Cancels `fut` when it is still pending and drops the reference so a
  ## fresh future can be installed in its place. A nil or already-finished
  ## future is left untouched.
  if fut.isNil:
    return
  if not fut.finished:
    fut.cancel()
    fut = nil
|
2020-09-28 15:19:57 +00:00
|
|
|
|
2020-11-24 21:21:47 +00:00
|
|
|
proc clear(chain: var Eth1Chain) =
  ## Resets the chain to its initial empty state: drops every tracked block
  ## from the deque and every entry from the hash index.
  clear chain.blocks
  clear chain.blocksByHash
|
|
|
|
|
2020-11-03 01:21:07 +00:00
|
|
|
proc stop*(m: Eth1Monitor) =
  ## Stops the monitor's run loop and resets all sync state so that a
  ## subsequent `start` begins from a clean slate.
  # Order matters: cancel the running future first so it cannot observe the
  # partially-cleared state below.
  safeCancel m.runFut

  m.eth1Chain.clear()
  m.latestEth1BlockNumber = 0
|
|
|
|
|
2020-11-19 17:19:03 +00:00
|
|
|
const
|
2020-11-24 21:21:47 +00:00
|
|
|
votedBlocksSafetyMargin = 50
|
2020-11-19 17:19:03 +00:00
|
|
|
|
|
|
|
proc earliestBlockOfInterest(m: Eth1Monitor): Eth1BlockNumber =
  ## Lowest Eth1 block number that can still matter for Eth1 voting:
  ## twice the follow distance behind the latest seen head, plus a safety
  ## margin covering blocks other validators may already have voted on.
  ##
  ## Saturates at 0 instead of wrapping: the original unchecked `uint64`
  ## subtraction underflowed whenever the chain was shorter than the
  ## look-back window (callers only guard a single ETH1_FOLLOW_DISTANCE).
  let lookBehind =
    2 * m.preset.ETH1_FOLLOW_DISTANCE + votedBlocksSafetyMargin.uint64
  if m.latestEth1BlockNumber > lookBehind:
    m.latestEth1BlockNumber - lookBehind
  else:
    0
|
|
|
|
|
|
|
|
proc syncBlockRange(m: Eth1Monitor,
                    merkleizer: ref DepositsMerkleizer,
                    fromBlock, toBlock,
                    fullSyncFromBlock: Eth1BlockNumber) {.async.} =
  ## Downloads all deposit log events in `[fromBlock, toBlock]`, folds the
  ## deposits into `merkleizer`, and appends the resulting blocks to
  ## `m.eth1Chain`. Blocks numbered above `fullSyncFromBlock` are synced
  ## fully (including deposit-less blocks); below it, only blocks carrying
  ## deposits are materialized.
  doAssert m.eth1Chain.blocks.len > 0

  var currentBlock = fromBlock
  while currentBlock <= toBlock:
    var
      depositLogs: JsonNode = nil
      blocksPerRequest = 5000'u64 # This is roughly a day of Eth1 blocks
      maxBlockNumberRequested: Eth1BlockNumber
      backoff = 100

    # Inner loop: keep shrinking the request window (and growing the
    # backoff) until one getLogs request succeeds, then advance.
    while true:
      maxBlockNumberRequested = min(toBlock, currentBlock + blocksPerRequest - 1)

      # Halves the window and retries; gives up (re-raising) once the
      # window cannot shrink further. `continue` targets the inner loop.
      template retryOrRaise(err: ref CatchableError) =
        blocksPerRequest = blocksPerRequest div 2
        if blocksPerRequest == 0:
          raise err
        continue

      debug "Obtaining deposit log events",
            fromBlock = currentBlock,
            toBlock = maxBlockNumberRequested,
            backoff

      debug.logTime "Deposit logs obtained":
        # Reduce all request rate until we have a more general solution
        # for dealing with Infura's rate limits
        await sleepAsync(milliseconds(backoff))

        let jsonLogsFut = m.dataProvider.ns.getJsonLogs(
          DepositEvent,
          fromBlock = some blockId(currentBlock),
          toBlock = some blockId(maxBlockNumberRequested))

        depositLogs = try:
          # Downloading large amounts of deposits can be quite slow
          awaitWithTimeout(jsonLogsFut, seconds(600)):
            retryOrRaise newException(DataProviderTimeout,
              "Request time out while obtaining json logs")
        except CatchableError as err:
          debug "Request for deposit logs failed", err = err.msg
          inc failed_web3_requests
          # Multiplicative backoff (x1.5) between failed attempts.
          backoff = (backoff * 3) div 2
          retryOrRaise err

      currentBlock = maxBlockNumberRequested + 1
      break

    let blocksWithDeposits = depositEventsToBlocks(depositLogs)

    for i in 0 ..< blocksWithDeposits.len:
      let blk = blocksWithDeposits[i]

      # Fold this block's deposits into the running incremental merkleizer;
      # the chunk count/root afterwards ARE the contract's state at `blk`.
      for deposit in blk.deposits:
        merkleizer[].addChunk hash_tree_root(deposit).data

      blk.voteData.deposit_count = merkleizer[].getChunkCount
      blk.voteData.deposit_root = merkleizer[].getDepositsRoot

      if blk.number > fullSyncFromBlock:
        # Backfill the deposit-less blocks between the last appended block
        # and this one, so the chain is gapless in the voting window.
        let lastBlock = m.eth1Chain.blocks.peekLast
        for n in max(lastBlock.number + 1, fullSyncFromBlock) ..< blk.number:
          debug "Obtaining block without deposits", blockNum = n
          let blockWithoutDeposits = awaitWithRetries(
            m.dataProvider.getBlockByNumber(n))

          m.eth1Chain.addBlock(
            lastBlock.makeSuccessorWithoutDeposits(blockWithoutDeposits))
          eth1_synced_head.set blockWithoutDeposits.number.toGaugeValue

      m.eth1Chain.addBlock blk
      eth1_synced_head.set blk.number.toGaugeValue

    if blocksWithDeposits.len > 0:
      let lastIdx = blocksWithDeposits.len - 1
      template lastBlock: auto = blocksWithDeposits[lastIdx]

      # Optionally cross-check our computed deposit root/count against the
      # contract's own view (compile-time gated; defaults to unavailable).
      let status = when hasDepositRootChecks:
        awaitWithRetries m.dataProvider.fetchDepositContractData(lastBlock)
      else:
        DepositRootUnavailable

      when hasDepositRootChecks:
        debug "Deposit contract state verified",
              status = $status,
              ourCount = lastBlock.voteData.deposit_count,
              ourRoot = lastBlock.voteData.deposit_root

        let depositContractState = DepositContractSnapshot(
          eth1Block: lastBlock.voteData.block_hash,
          depositContractState: merkleizer[].toDepositContractState)

        case status
        of DepositRootIncorrect, DepositCountIncorrect:
          # A disagreement means our log stream is corrupt — unrecoverable
          # for this provider, so escalate instead of retrying.
          raise newException(CorruptDataProvider,
            "The deposit log events disagree with the deposit contract state")
        of VerifiedCorrect:
          lastBlock.voteDataVerified = true
        else:
          discard

      notice "Eth1 sync progress",
        blockNumber = lastBlock.number,
        depositsProcessed = lastBlock.voteData.deposit_count

    when hasGenesisDetection:
      # Genesis-detection path: track validator registrations per block and
      # probe for the block satisfying the genesis trigger conditions.
      if m.genesisStateFut != nil:
        for blk in blocksWithDeposits:
          for deposit in blk.deposits:
            # NOTE(review): `skipBlsCheck`, `validatorKeyToIndex` and
            # `validators` are not defined anywhere in this proc's visible
            # scope — this branch likely does not compile as-is when
            # -d:has_genesis_detection is set; verify before enabling.
            if skipBlsCheck or verify_deposit_signature(m.preset, deposit):
              let pubkey = deposit.pubkey
              if pubkey notin validatorKeyToIndex:
                let idx = ValidatorIndex validators.len
                validators.add ImmutableValidatorData(
                  pubkey: pubkey,
                  withdrawal_credentials: deposit.withdrawal_credentials)
                validatorKeyToIndex.insert(pubkey, idx)

          blk.activeValidatorsCount = m.db.immutableValidatorData.lenu64

      if m.genesisStateFut != nil and m.chainHasEnoughValidators:
        let lastIdx = m.eth1Chain.blocks.len - 1
        template lastBlock: auto = m.eth1Chain.blocks[lastIdx]

        if maxBlockNumberRequested == toBlock and
           (m.eth1Chain.blocks.len == 0 or lastBlock.number != toBlock):
          let web3Block = awaitWithRetries(
            m.dataProvider.getBlockByNumber(toBlock))

          debug "Latest block doesn't hold deposits. Obtaining it",
                ts = web3Block.timestamp.uint64,
                number = web3Block.number.uint64

          m.eth1Chain.addBlock lastBlock.makeSuccessorWithoutDeposits(web3Block)
        else:
          awaitWithRetries m.dataProvider.fetchTimestamp(lastBlock)

        # Walk backwards from the tip to find the earliest block that is
        # still a valid genesis candidate.
        var genesisBlockIdx = m.eth1Chain.blocks.len - 1
        if m.isAfterMinGenesisTime(m.eth1Chain.blocks[genesisBlockIdx]):
          # NOTE(review): `eth1Blocks` is not defined in this scope —
          # presumably this should be `m.eth1Chain.blocks`; confirm.
          for i in 1 ..< eth1Blocks.len:
            let idx = (m.eth1Chain.blocks.len - 1) - i
            let blk = m.eth1Chain.blocks[idx]
            awaitWithRetries m.dataProvider.fetchTimestamp(blk)
            if m.isGenesisCandidate(blk):
              genesisBlockIdx = idx
            else:
              break
          # We have a candidate state on our hands, but our current Eth1Chain
          # may consist only of blocks that have deposits attached to them
          # while the real genesis may have happened in a block without any
          # deposits (triggered by MIN_GENESIS_TIME).
          #
          # This can happen when the beacon node is launched after the genesis
          # event. We take a short cut when constructing the initial Eth1Chain
          # by downloading only deposit log entries. Thus, we'll see all the
          # blocks with deposits, but not the regular blocks in between.
          #
          # We'll handle this special case below by examing whether we are in
          # this potential scenario and we'll use a fast guessing algorith to
          # discover the ETh1 block with minimal valid genesis time.
          var genesisBlock = m.eth1Chain.blocks[genesisBlockIdx]
          if genesisBlockIdx > 0:
            let genesisParent = m.eth1Chain.blocks[genesisBlockIdx - 1]
            if genesisParent.timestamp == 0:
              awaitWithRetries m.dataProvider.fetchTimestamp(genesisParent)
            if m.hasEnoughValidators(genesisParent) and
               genesisBlock.number - genesisParent.number > 1:
              genesisBlock = awaitWithRetries(
                m.findGenesisBlockInRange(genesisParent, genesisBlock))

          m.signalGenesis m.createGenesisState(genesisBlock)
|
|
|
|
|
|
|
|
proc startEth1Syncing(m: Eth1Monitor) {.async.} =
  ## Main sync loop: seeds `m.eth1Chain` from the last finalized deposit
  ## snapshot, then repeatedly syncs new block ranges whenever the header
  ## subscription (in `run`) signals `eth1Progress`, always staying
  ## ETH1_FOLLOW_DISTANCE behind the observed Eth1 head.
  # Prefer the deposit state the beacon chain has already finalized over
  # the configured starting snapshot.
  let eth2PreviouslyFinalizedTo = m.db.getEth2FinalizedTo()
  if eth2PreviouslyFinalizedTo.isOk:
    m.knownStart = eth2PreviouslyFinalizedTo.get

  m.eth2FinalizedDepositsMerkleizer = m.knownStart.createMerkleizer

  let startBlock = awaitWithRetries(
    m.dataProvider.getBlockByHash(m.knownStart.eth1Block.asBlockHash))

  # Seed the chain with a synthetic block carrying the snapshot's vote data.
  doAssert m.eth1Chain.blocks.len == 0
  m.eth1Chain.addBlock Eth1Block(
    number: Eth1BlockNumber startBlock.number,
    timestamp: Eth1BlockTimestamp startBlock.timestamp,
    voteData: eth1DataFromMerkleizer(
      m.knownStart.eth1Block,
      m.eth2FinalizedDepositsMerkleizer))

  var eth1SyncedTo = Eth1BlockNumber startBlock.number
  eth1_synced_head.set eth1SyncedTo.toGaugeValue
  eth1_finalized_head.set eth1SyncedTo.toGaugeValue
  eth1_finalized_deposits.set(
    m.eth2FinalizedDepositsMerkleizer.getChunkCount.toGaugeValue)

  # Working copy: the finalized merkleizer must stay untouched while the
  # scratch one advances over not-yet-finalized deposits.
  var scratchMerkleizer = newClone(copy m.eth2FinalizedDepositsMerkleizer)

  debug "Starting Eth1 syncing", `from` = shortLog(m.eth1Chain.blocks[0])

  while true:
    if bnStatus == BeaconNodeStatus.Stopping:
      when hasGenesisDetection:
        # Unblock any waitGenesis caller before shutting down.
        if not m.genesisStateFut.isNil:
          m.genesisStateFut.complete()
          m.genesisStateFut = nil
      m.stop()
      return

    # Wait for the header subscription to report a new Eth1 head.
    await m.eth1Progress.wait()
    m.eth1Progress.clear()

    if m.latestEth1BlockNumber <= m.preset.ETH1_FOLLOW_DISTANCE:
      continue

    let targetBlock = m.latestEth1BlockNumber - m.preset.ETH1_FOLLOW_DISTANCE
    if targetBlock <= eth1SyncedTo:
      continue

    let earliestBlockOfInterest = m.earliestBlockOfInterest()
    await m.syncBlockRange(scratchMerkleizer,
                           eth1SyncedTo + 1,
                           targetBlock,
                           earliestBlockOfInterest)
    eth1SyncedTo = targetBlock
    eth1_synced_head.set eth1SyncedTo.toGaugeValue
|
2019-11-22 13:16:07 +00:00
|
|
|
|
2020-11-03 01:21:07 +00:00
|
|
|
proc run(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} =
  ## Monitor entry point: subscribes to new Eth1 block headers (feeding
  ## `latestEth1BlockNumber`/`eth1Progress`) and then runs the sync loop.
  ## `delayBeforeStart` lets a restart back off before reconnecting.
  if delayBeforeStart != ZeroDuration:
    await sleepAsync(delayBeforeStart)

  info "Starting Eth1 deposit contract monitoring",
    contract = $m.depositContractAddress, url = m.web3Url

  # First `do` block: per-header callback; second: subscription error handler.
  await m.dataProvider.onBlockHeaders do (blk: Eth1BlockHeader)
      {.raises: [Defect], gcsafe.}:
    try:
      # Only advance on strictly newer heads; `fire` wakes the sync loop.
      if blk.number.uint64 > m.latestEth1BlockNumber:
        eth1_latest_head.set blk.number.toGaugeValue
        m.latestEth1BlockNumber = Eth1BlockNumber blk.number
        m.eth1Progress.fire()
    except Exception:
      # TODO Investigate why this exception is being raised
      raiseAssert "AsyncEvent.fire should not raise exceptions"
  do (err: CatchableError):
    debug "Error while processing Eth1 block headers subscription", err = err.msg

  await m.startEth1Syncing()
|
2019-09-09 15:59:02 +00:00
|
|
|
|
2020-11-03 01:21:07 +00:00
|
|
|
proc start(m: Eth1Monitor, delayBeforeStart: Duration) =
  ## Launches the monitor's `run` future (no-op if one is already active)
  ## and installs a completion callback that restarts the monitor after a
  ## 5-second delay on any recoverable failure. Non-CatchableError failures
  ## (i.e. Defects) terminate the process.
  if m.runFut.isNil:
    let runFut = m.run(delayBeforeStart)
    m.runFut = runFut
    runFut.addCallback do (p: pointer):
      if runFut.failed:
        if runFut.error[] of CatchableError:
          # Only react if we are still the current run future — `stop` may
          # have replaced us in the meantime.
          if runFut == m.runFut:
            error "Eth1 chain monitoring failure, restarting", err = runFut.error.msg
            m.stop()
        else:
          fatal "Fatal exception reached", err = runFut.error.msg
          quit 1

      # Clear before restarting so the recursive `start` sees a free slot.
      m.runFut = nil
      m.start(5.seconds)
|
|
|
|
|
2020-11-20 10:00:22 +00:00
|
|
|
proc start*(m: Eth1Monitor) =
  ## Public entry point: starts the monitor immediately (no start-up delay).
  m.start(0.seconds)
|
|
|
|
|
2020-07-28 13:36:11 +00:00
|
|
|
proc getEth1BlockHash*(url: string, blockId: RtBlockIdentifier): Future[BlockHash] {.async.} =
  ## Opens a one-shot connection to the Eth1 endpoint at `url`, fetches the
  ## block identified by `blockId` and returns its hash. The connection is
  ## closed on both the success and the failure path.
  let conn = await newWeb3(url)
  try:
    let blockObj = awaitWithRetries(
      conn.provider.eth_getBlockByNumber(blockId, false))
    return blockObj.hash
  finally:
    await conn.close()
|
|
|
|
|
2020-12-04 16:28:42 +00:00
|
|
|
proc testWeb3Provider*(web3Url: Uri,
                       depositContractAddress: Option[Eth1Address],
                       depositContractDeployedAt: Option[BlockHashOrNumber]) {.async.} =
  ## Diagnostic entry point: connects to the given web3 provider, prints its
  ## network id and latest block, and — when a deposit contract address is
  ## supplied — queries the current deposit root (which requires an archive
  ## node for historical states).
  ## NOTE(review): `depositContractDeployedAt` is currently unused here —
  ## confirm whether it should gate the deposit-root query.
  template mustSucceed(action: static string, expr: untyped): untyped =
    try: expr
    except CatchableError as err:
      fatal("Failed to " & action, err = err.msg)
      quit 1

  let
    web3 = mustSucceed "connect to web3 provider":
      await newWeb3($web3Url)
    network = mustSucceed "get network version":
      awaitWithRetries web3.provider.net_version()
    latestBlock = mustSucceed "get latest block":
      awaitWithRetries web3.provider.eth_getBlockByNumber(blockId("latest"), false)

  echo "Network: ", network
  echo "Latest block: ", latestBlock.number.uint64

  if depositContractAddress.isSome:
    let ns = web3.contractSender(DepositContract, depositContractAddress.get)
    try:
      let depositRoot = awaitWithRetries(
        ns.get_deposit_root.call(blockNumber = latestBlock.number.uint64))
      echo "Deposit root: ", depositRoot
    except CatchableError:
      # Dropped the unused `as err` binding; the failure itself is the signal.
      echo "Web3 provider is not archive mode"
|
|
|
|
|
2020-11-24 21:21:47 +00:00
|
|
|
when hasGenesisDetection:
|
|
|
|
proc init*(T: type Eth1Monitor,
|
|
|
|
db: BeaconChainDB,
|
|
|
|
preset: RuntimePreset,
|
|
|
|
web3Url: string,
|
|
|
|
depositContractAddress: Eth1Address,
|
|
|
|
depositContractDeployedAt: BlockHashOrNumber,
|
|
|
|
eth1Network: Option[Eth1Network]): Future[Result[T, string]] {.async.} =
|
|
|
|
try:
|
|
|
|
let dataProviderRes = Web3DataProvider.new(depositContractAddress, web3Url)
|
|
|
|
if dataProviderRes.isErr:
|
|
|
|
return err(dataProviderRes.error)
|
|
|
|
let dataProvider = dataProviderRes.get
|
|
|
|
|
|
|
|
let knownStartBlockHash =
|
|
|
|
if depositContractDeployedAt.isHash:
|
|
|
|
depositContractDeployedAt.hash
|
|
|
|
else:
|
|
|
|
var blk: BlockObject
|
|
|
|
while true:
|
|
|
|
try:
|
2020-12-01 21:20:28 +00:00
|
|
|
blk = awaitWithRetries(
|
|
|
|
dataProvider.getBlockByNumber(depositContractDeployedAt.number))
|
2020-11-24 21:21:47 +00:00
|
|
|
break
|
|
|
|
except CatchableError as err:
|
|
|
|
error "Failed to obtain details for the starting block " &
|
|
|
|
"of the deposit contract sync. The Web3 provider " &
|
|
|
|
"may still be not fully synced", error = err.msg
|
|
|
|
await sleepAsync(chronos.seconds(10))
|
|
|
|
# TODO: After a single failure, the web3 object may enter a state
|
|
|
|
# where it's no longer possible to make additional requests.
|
|
|
|
# Until this is fixed upstream, we'll just try to recreate
|
|
|
|
# the web3 provider before retrying. In case this fails,
|
|
|
|
# the Eth1Monitor will be restarted.
|
|
|
|
dataProvider = tryGet(
|
|
|
|
await Web3DataProvider.new(depositContractAddress, web3Url))
|
|
|
|
blk.hash.asEth2Digest
|
|
|
|
|
|
|
|
let depositContractSnapshot = DepositContractSnapshot(
|
|
|
|
eth1Block: knownStartBlockHash)
|
|
|
|
|
|
|
|
return await Eth1Monitor.init(
|
|
|
|
db,
|
|
|
|
preset,
|
|
|
|
web3Url,
|
|
|
|
depositContarctAddress,
|
|
|
|
depositContractSnapshot,
|
|
|
|
eth1Network)
|
|
|
|
|
|
|
|
except CatchableError as err:
|
|
|
|
return err("Failed to initialize the Eth1 monitor")
|
|
|
|
|
|
|
|
proc allGenesisDepositsUpTo(m: Eth1Monitor, totalDeposits: uint64): seq[DepositData] =
|
|
|
|
for i in 0'u64 ..< totalDeposits:
|
|
|
|
result.add m.db.genesisDeposits.get(i)
|
|
|
|
|
|
|
|
  proc createGenesisState(m: Eth1Monitor, eth1Block: Eth1Block): BeaconStateRef =
    ## Builds the genesis BeaconState from the deposits accumulated up to
    ## `eth1Block`, using it as the genesis Eth1 anchor.
    notice "Generating genesis state",
      blockNum = eth1Block.number,
      blockHash = eth1Block.voteData.block_hash,
      blockTimestamp = eth1Block.timestamp,
      totalDeposits = eth1Block.voteData.deposit_count,
      activeValidators = eth1Block.activeValidatorsCount

    var deposits = m.allGenesisDepositsUpTo(eth1Block.voteData.deposit_count)

    result = initialize_beacon_state_from_eth1(
      m.preset,
      eth1Block.voteData.block_hash,
      eth1Block.timestamp.uint64,
      deposits, {})

    # Sanity: the state's validator set must match the count tracked
    # incrementally while syncing deposits.
    doAssert result.validators.lenu64 == eth1Block.activeValidatorsCount
|
|
|
|
|
|
|
|
  proc signalGenesis(m: Eth1Monitor, genesisState: BeaconStateRef) =
    ## Records the computed genesis state and wakes any `waitGenesis` caller.
    # Set the state before completing the future so the waiter observes it.
    m.genesisState = genesisState

    if not m.genesisStateFut.isNil:
      m.genesisStateFut.complete()
      m.genesisStateFut = nil
|
|
|
|
|
|
|
|
  template hasEnoughValidators(m: Eth1Monitor, blk: Eth1Block): bool =
    ## True when `blk` has accumulated at least
    ## MIN_GENESIS_ACTIVE_VALIDATOR_COUNT active validators — one of the
    ## two genesis trigger conditions.
    blk.activeValidatorsCount >= m.preset.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
|
|
|
|
|
|
|
|
func chainHasEnoughValidators(m: Eth1Monitor): bool =
|
|
|
|
if m.eth1Chain.blocks.len > 0:
|
|
|
|
m.hasEnoughValidators(m.eth1Chain.blocks[^1])
|
|
|
|
else:
|
|
|
|
m.knownStart.depositContractState.depositCountU64 >=
|
|
|
|
m.preset.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
|
|
|
|
|
|
|
|
func isAfterMinGenesisTime(m: Eth1Monitor, blk: Eth1Block): bool =
|
|
|
|
doAssert blk.timestamp != 0
|
|
|
|
let t = genesis_time_from_eth1_timestamp(m.preset, uint64 blk.timestamp)
|
|
|
|
t >= m.preset.MIN_GENESIS_TIME
|
|
|
|
|
|
|
|
  func isGenesisCandidate(m: Eth1Monitor, blk: Eth1Block): bool =
    ## A block can trigger genesis when both conditions hold: enough active
    ## validators AND a late-enough resulting genesis time.
    m.hasEnoughValidators(blk) and m.isAfterMinGenesisTime(blk)
|
|
|
|
|
|
|
|
  proc findGenesisBlockInRange(m: Eth1Monitor, startBlock, endBlock: Eth1Block):
                               Future[Eth1Block] {.async.} =
    ## Interpolation search over `(startBlock, endBlock)` for the earliest
    ## block whose genesis time satisfies MIN_GENESIS_TIME. Preconditions:
    ## `startBlock` is strictly before the genesis time threshold,
    ## `endBlock` at/after it, and both already have enough validators.
    doAssert startBlock.timestamp != 0 and not m.isAfterMinGenesisTime(startBlock)
    doAssert endBlock.timestamp != 0 and m.isAfterMinGenesisTime(endBlock)
    doAssert m.hasEnoughValidators(startBlock)
    doAssert m.hasEnoughValidators(endBlock)

    var
      startBlock = startBlock
      endBlock = endBlock
      depositData = startBlock.voteData
      activeValidatorsCountDuringRange = startBlock.activeValidatorsCount

    # Narrow the window until start and end are adjacent; endBlock is then
    # the first block past the threshold.
    while startBlock.number + 1 < endBlock.number:
      let
        MIN_GENESIS_TIME = m.preset.MIN_GENESIS_TIME
        startBlockTime = genesis_time_from_eth1_timestamp(m.preset, startBlock.timestamp)
        # Estimate the average block time in the window and jump straight to
        # where the threshold should fall (at least one block forward).
        secondsPerBlock = float(endBlock.timestamp - startBlock.timestamp) /
                          float(endBlock.number - startBlock.number)
        blocksToJump = max(float(MIN_GENESIS_TIME - startBlockTime) / secondsPerBlock, 1.0)
        candidateNumber = min(endBlock.number - 1, startBlock.number + blocksToJump.uint64)
        candidateBlock = awaitWithRetries(
          m.dataProvider.getBlockByNumber(candidateNumber))

      # Candidates inherit the deposit data of the range's start — no new
      # deposits occur inside the probed window.
      var candidateAsEth1Block = Eth1Block(number: candidateBlock.number.uint64,
                                           timestamp: candidateBlock.timestamp.uint64,
                                           voteData: depositData)
      candidateAsEth1Block.voteData.block_hash = candidateBlock.hash.asEth2Digest

      let candidateGenesisTime = genesis_time_from_eth1_timestamp(
        m.preset, candidateBlock.timestamp.uint64)

      notice "Probing possible genesis block",
        `block` = candidateBlock.number.uint64,
        candidateGenesisTime

      if candidateGenesisTime < MIN_GENESIS_TIME:
        startBlock = candidateAsEth1Block
      else:
        endBlock = candidateAsEth1Block

    # Synthetic candidates carry no validator count; restore the one valid
    # throughout the range.
    if endBlock.activeValidatorsCount == 0:
      endBlock.activeValidatorsCount = activeValidatorsCountDuringRange

    return endBlock
|
|
|
|
|
|
|
|
  proc waitGenesis*(m: Eth1Monitor): Future[BeaconStateRef] {.async.} =
    ## Blocks until the monitor detects genesis (completed by
    ## `signalGenesis`) and returns the genesis state. During shutdown the
    ## future is completed without a state; a dummy state is returned then.
    if m.genesisState.isNil:
      # Ensure the monitor is running — genesis can only be detected while
      # the sync loop is active.
      m.start()

      if m.genesisStateFut.isNil:
        m.genesisStateFut = newFuture[void]("waitGenesis")

      info "Awaiting genesis event"
      await m.genesisStateFut
      m.genesisStateFut = nil

    if m.genesisState != nil:
      return m.genesisState
    else:
      doAssert bnStatus == BeaconNodeStatus.Stopping
      return new BeaconStateRef # cannot return nil...
|
|
|
|
|