nimbus-eth2/beacon_chain/validator_client/api.nim

# beacon_chain
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import std/strutils
import chronicles, stew/base10
import ".."/spec/eth2_apis/eth2_rest_serialization,
".."/spec/datatypes/[phase0, altair]
import "."/[common, fallback_service, scoring]
export eth2_rest_serialization, common
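# This module implements the validator client's beacon node API layer: a set
# of request-distribution strategies (`firstSuccessParallel`, `bestSuccess`,
# `onceToAll`, `firstSuccessSequential`) that fan requests out to the
# configured beacon nodes, score or filter the responses, and update each
# node's status on failure.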
const
ResponseContentTypeError = "Content-type is not supported"
ResponseInvalidError = "Received invalid request response"
ResponseInternalError = "Received internal error response"
ResponseUnexpectedError = "Received unexpected error response"
ResponseNotFoundError = "Received resource missing error response"
ResponseNoSyncError = "Received nosync error response"
ResponseDecodeError = "Received response could not be decoded"
ResponseECNotInSyncError* = "Execution client not in sync"
ResponseNotImplementedError =
"Received endpoint not implemented error response"
type
ApiResponse*[T] = Result[T, string]
ApiOperation = enum
Success, Timeout, Failure, Interrupt
ApiNodeResponse*[T] = object
node*: BeaconNodeServerRef
data*: ApiResponse[T]
ApiResponseSeq*[T] = object
status*: ApiOperation
data*: seq[ApiNodeResponse[T]]
ApiScore*[T] = object
index*: int
score*: Opt[T]
BestNodeResponse*[T, X] = object
node*: BeaconNodeServerRef
data*: ApiResponse[T]
score*: X
const
ViableNodeStatus* = {
RestBeaconNodeStatus.Compatible,
RestBeaconNodeStatus.NotSynced,
RestBeaconNodeStatus.OptSynced,
RestBeaconNodeStatus.Synced
}
proc `$`*[T](s: ApiScore[T]): string =
var res = Base10.toString(uint64(s.index))
res.add(": ")
if s.score.isSome():
res.add(shortScore(s.score.get()))
else:
res.add("<n/a>")
res
proc `$`*[T](ss: openArray[ApiScore[T]]): string =
"[" & ss.mapIt($it).join(",") & "]"
chronicles.formatIt(seq[ApiScore]):
$it
func init*(t: typedesc[ApiScore], node: BeaconNodeServerRef,
score: float64): ApiScore[float64] =
ApiScore[float64](index: node.index, score: Opt.some(score))
func init*(t: typedesc[ApiScore], node: BeaconNodeServerRef,
score: UInt256): ApiScore[UInt256] =
ApiScore[UInt256](index: node.index, score: Opt.some(score))
func init*(tt: typedesc[ApiScore],
node: BeaconNodeServerRef, T: typedesc): ApiScore[T] =
ApiScore[T](index: node.index, score: Opt.none(T))
func init*[T, X](t: typedesc[BestNodeResponse], node: BeaconNodeServerRef,
data: ApiResponse[T], score: X): BestNodeResponse[T, X] =
BestNodeResponse[T, X](node: node, data: data, score: score)
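# `lazyWaiter`/`lazyWait` await requests that are no longer needed for the
# result in the background: a node whose leftover request fails is marked
# Offline, and all remaining requests are cancelled once `timerFut` fires.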
proc lazyWaiter(
node: BeaconNodeServerRef,
request: FutureBase,
requestName: string,
strategy: ApiStrategyKind
) {.async: (raises: []).} =
try:
await allFutures(request)
if request.failed():
let failure = ApiNodeFailure.init(
ApiFailure.Communication, requestName, strategy, node,
$request.error.msg)
node.updateStatus(RestBeaconNodeStatus.Offline, failure)
except CancelledError:
await cancelAndWait(request)
proc lazyWait(
nodes: seq[BeaconNodeServerRef],
requests: seq[FutureBase],
timerFut: Future[void],
requestName: string,
strategy: ApiStrategyKind
) {.async: (raises: [CancelledError]).} =
doAssert(len(nodes) == len(requests))
if len(nodes) == 0:
return
var futures: seq[Future[void]]
for index in 0 ..< len(requests):
futures.add(lazyWaiter(nodes[index], requests[index], requestName,
strategy))
if not(isNil(timerFut)):
discard await race(allFutures(futures), timerFut)
if timerFut.finished():
let pending = futures.mapIt(it.cancelAndWait())
await allFutures(pending)
else:
await cancelAndWait(timerFut)
else:
await allFutures(futures)
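# Converts a (possibly unfinished) request future into an `ApiResponse[T]`,
# reporting `message` when the deadline in `timerFut` expired first and a
# generic "interrupted" error otherwise.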
proc apiResponseOr[T](future: FutureBase, timerFut: Future[void],
message: string): ApiResponse[T] =
if future.finished() and not(future.cancelled()):
if future.failed():
ApiResponse[T].err($future.error.msg)
else:
ApiResponse[T].ok(Future[T](future).value())
else:
if timerFut.finished():
ApiResponse[T].err(message)
else:
ApiResponse[T].err("Interrupted by the caller")
template firstSuccessParallel*(
vc: ValidatorClientRef,
responseType: typedesc,
handlerType: typedesc,
timeout: Duration,
statuses: set[RestBeaconNodeStatus],
roles: set[BeaconNodeRole],
body1, body2: untyped
): ApiResponse[handlerType] =
var
it {.inject.}: RestClientRef
iterations = 0
var timerFut =
if timeout != InfiniteDuration:
sleepAsync(timeout)
else:
nil
var retRes: ApiResponse[handlerType]
while true:
var resultReady = false
let onlineNodes =
try:
if iterations == 0:
# We are not going to wait for BNs if some are already available.
await vc.waitNodes(timerFut, statuses, roles, false)
else:
# We get here only if all the requests have failed. To avoid request spam
# we wait for changes in the BN statuses.
await vc.waitNodes(timerFut, statuses, roles, true)
vc.filterNodes(statuses, roles)
except CancelledError as exc:
if not(isNil(timerFut)) and not(timerFut.finished()):
await timerFut.cancelAndWait()
raise exc
if len(onlineNodes) == 0:
retRes = ApiResponse[handlerType].err("No online beacon node(s)")
resultReady = true
else:
var
(pendingRequests, pendingNodes) =
block:
var requests: seq[FutureBase]
var nodes: seq[BeaconNodeServerRef]
for node {.inject.} in onlineNodes:
it = node.client
let fut = FutureBase(body1)
requests.add(fut)
nodes.add(node)
(requests, nodes)
raceFut: Future[FutureBase]
requestsCancelled = false
while true:
try:
if len(pendingRequests) == 0:
if not(isNil(timerFut)) and not(timerFut.finished()):
await timerFut.cancelAndWait()
retRes = ApiResponse[handlerType].err(
"Beacon node(s) unable to satisfy request")
resultReady = true
break
else:
raceFut = race(pendingRequests)
if not(isNil(timerFut)):
discard await race(raceFut, timerFut)
else:
await allFutures(raceFut)
let
index =
if not(isNil(timerFut)) and timerFut.finished():
# Timeout exceeded first.
if not(requestsCancelled):
var pending: seq[Future[void]]
pending.add(raceFut.cancelAndWait())
for future in pendingRequests.items():
if not(future.finished()):
pending.add(future.cancelAndWait())
await allFutures(pending)
requestsCancelled = true
0
else:
let res = pendingRequests.find(raceFut.value)
doAssert(res >= 0)
res
requestFut = pendingRequests[index]
beaconNode = pendingNodes[index]
# Remove completed future from pending list.
pendingRequests.del(index)
pendingNodes.del(index)
let
node {.inject, used.} = beaconNode
apiResponse {.inject.} =
apiResponseOr[responseType](requestFut, timerFut,
"Timeout exceeded while awaiting for the response")
handlerResponse =
try:
body2
except CancelledError as exc:
raise exc
if handlerResponse.isOk():
retRes = handlerResponse
resultReady = true
asyncSpawn lazyWait(pendingNodes, pendingRequests, timerFut,
RequestName, strategy)
break
except CancelledError as exc:
var pendingCancel: seq[Future[void]]
if not(isNil(raceFut)) and not(raceFut.finished()):
pendingCancel.add(raceFut.cancelAndWait())
if not(isNil(timerFut)) and not(timerFut.finished()):
pendingCancel.add(timerFut.cancelAndWait())
for future in pendingRequests.items():
if not(future.finished()):
pendingCancel.add(future.cancelAndWait())
await noCancel allFutures(pendingCancel)
raise exc
if resultReady:
break
if resultReady:
break
inc(iterations)
retRes
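# `bestSuccess` queries every viable node with `bodyRequest`, decodes each
# reply with `bodyHandler` and rates it with `bodyScore`; the highest-scoring
# response wins, and polling stops early once `perfectScore()` is reached.
# Failed replies are recorded with an empty score so that the final debug
# log shows every node that was asked.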
template bestSuccess*(
vc: ValidatorClientRef,
responseType: typedesc,
handlerType: typedesc,
scoreType: typedesc,
timeout: Duration,
statuses: set[RestBeaconNodeStatus],
roles: set[BeaconNodeRole],
bodyRequest,
bodyScore,
bodyHandler: untyped): ApiResponse[handlerType] =
var
it {.inject.}: RestClientRef
iterations = 0
var timerFut =
if timeout != InfiniteDuration:
sleepAsync(timeout)
else:
nil
var
retRes: ApiResponse[handlerType]
scores: seq[ApiScore[scoreType]]
bestResponse: Opt[BestNodeResponse[handlerType, scoreType]]
block mainLoop:
while true:
let onlineNodes =
try:
if iterations == 0:
# We are not going to wait for BNs if some are already available.
await vc.waitNodes(timerFut, statuses, roles, false)
else:
# We get here only if all the requests have failed. To avoid request spam
# we wait for changes in the BN statuses.
await vc.waitNodes(timerFut, statuses, roles, true)
vc.filterNodes(statuses, roles)
except CancelledError as exc:
if not(isNil(timerFut)) and not(timerFut.finished()):
await timerFut.cancelAndWait()
raise exc
if len(onlineNodes) == 0:
retRes = ApiResponse[handlerType].err("No online beacon node(s)")
break mainLoop
else:
var
(pendingRequests, pendingNodes) =
block:
var requests: seq[FutureBase]
var nodes: seq[BeaconNodeServerRef]
for node {.inject.} in onlineNodes:
it = node.client
let fut = FutureBase(bodyRequest)
requests.add(fut)
nodes.add(node)
(requests, nodes)
perfectScoreFound = false
block innerLoop:
while len(pendingRequests) > 0:
var
finishedRequests: seq[FutureBase]
finishedNodes: seq[BeaconNodeServerRef]
raceFut: Future[FutureBase].Raising([ValueError, CancelledError])
try:
raceFut = race(pendingRequests)
if not(isNil(timerFut)):
discard await race(raceFut, timerFut)
else:
await allFutures(raceFut)
for index, future in pendingRequests.pairs():
if future.finished() or
(not(isNil(timerFut)) and timerFut.finished()):
finishedRequests.add(future)
finishedNodes.add(pendingNodes[index])
let
node {.inject.} = pendingNodes[index]
apiResponse {.inject.} =
apiResponseOr[responseType](future, timerFut,
"Timeout exceeded while awaiting for the response")
handlerResponse =
try:
bodyHandler
except CancelledError as exc:
raise exc
if handlerResponse.isOk():
let
itresponse {.inject.} = handlerResponse.get()
score =
try:
bodyScore
except CancelledError as exc:
raise exc
scores.add(ApiScore.init(node, score))
if bestResponse.isNone() or
(score > bestResponse.get().score):
bestResponse = Opt.some(
BestNodeResponse.init(node, handlerResponse, score))
if perfectScore(score):
perfectScoreFound = true
break
else:
scores.add(ApiScore.init(node, scoreType))
if perfectScoreFound:
# lazyWait will cancel `pendingRequests` on timeout.
asyncSpawn lazyWait(pendingNodes, pendingRequests, timerFut,
RequestName, strategy)
break innerLoop
if not(isNil(timerFut)) and timerFut.finished():
# If timeout is exceeded we need to cancel all the tasks which
# are still running.
var pendingCancel: seq[Future[void]]
for future in pendingRequests.items():
if not(future.finished()):
pendingCancel.add(future.cancelAndWait())
await allFutures(pendingCancel)
break innerLoop
pendingRequests.keepItIf(it notin finishedRequests)
pendingNodes.keepItIf(it notin finishedNodes)
except CancelledError as exc:
var pendingCancel: seq[Future[void]]
# The `or` operation does not cancel the Futures passed as arguments.
if not(isNil(raceFut)) and not(raceFut.finished()):
pendingCancel.add(raceFut.cancelAndWait())
if not(isNil(timerFut)) and not(timerFut.finished()):
pendingCancel.add(timerFut.cancelAndWait())
# We should cancel all the requests which are still pending.
for future in pendingRequests.items():
if not(future.finished()):
pendingCancel.add(future.cancelAndWait())
# Awaiting cancellations.
await noCancel allFutures(pendingCancel)
raise exc
if bestResponse.isSome():
retRes = bestResponse.get().data
break mainLoop
else:
if not(isNil(timerFut)) and timerFut.finished():
retRes = ApiResponse[handlerType].err(
"Timeout exceeded while awaiting for responses")
break mainLoop
else:
# When all requests failed
discard
inc(iterations)
if retRes.isOk():
debug "Best score result selected",
request = RequestName, available_scores = scores,
best_score = shortScore(bestResponse.get().score),
best_node = bestResponse.get().node
retRes
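# `onceToAll` sends the same request to every online node and waits (up to
# `timeout`) for all of them, returning a per-node `ApiResponseSeq` instead
# of a single winner; nodes that do not answer in time are marked Offline.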
template onceToAll*(
vc: ValidatorClientRef,
responseType: typedesc,
timeout: Duration,
statuses: set[RestBeaconNodeStatus],
roles: set[BeaconNodeRole],
body: untyped
): ApiResponseSeq[responseType] =
var it {.inject.}: RestClientRef
type BodyType = typeof(body)
var timerFut =
if timeout != InfiniteDuration:
sleepAsync(timeout)
else:
nil
let onlineNodes =
try:
await vc.waitNodes(timerFut, statuses, roles, false)
vc.filterNodes(statuses, roles)
except CancelledError as exc:
if not(isNil(timerFut)) and not(timerFut.finished()):
await timerFut.cancelAndWait()
raise exc
if len(onlineNodes) == 0:
# Timeout exceeded or operation was cancelled
ApiResponseSeq[responseType](status: ApiOperation.Timeout)
else:
let (pendingRequests, pendingNodes) =
block:
var requests: seq[BodyType]
var nodes: seq[BeaconNodeServerRef]
for node {.inject.} in onlineNodes:
it = node.client
let fut = body
requests.add(fut)
nodes.add(node)
(requests, nodes)
let status =
try:
if isNil(timerFut):
await allFutures(pendingRequests)
ApiOperation.Success
else:
let waitFut = allFutures(pendingRequests)
discard await race(waitFut, timerFut)
if not(waitFut.finished()):
await waitFut.cancelAndWait()
ApiOperation.Timeout
else:
if not(timerFut.finished()):
await timerFut.cancelAndWait()
ApiOperation.Success
except CancelledError as exc:
# We should cancel all the pending requests and the timer before we
# return the result.
var pendingCancel: seq[Future[void]]
for fut in pendingRequests:
if not(fut.finished()):
pendingCancel.add(fut.cancelAndWait())
if not(isNil(timerFut)) and not(timerFut.finished()):
pendingCancel.add(timerFut.cancelAndWait())
await noCancel allFutures(pendingCancel)
raise exc
let responses =
block:
var res: seq[ApiNodeResponse[responseType]]
for idx, pnode in pendingNodes.pairs():
let apiResponse =
block:
let fut = pendingRequests[idx]
if fut.finished():
if fut.failed() or fut.cancelled():
let exc = fut.error
ApiNodeResponse[responseType](
node: pnode,
data: ApiResponse[responseType].err("[" & $exc.name & "] " &
$exc.msg)
)
else:
ApiNodeResponse[responseType](
node: pnode,
data: ApiResponse[responseType].ok(fut.value)
)
else:
case status
of ApiOperation.Interrupt:
ApiNodeResponse[responseType](
node: pnode,
data: ApiResponse[responseType].err("Operation interrupted")
)
of ApiOperation.Timeout:
pendingNodes[idx].status = RestBeaconNodeStatus.Offline
ApiNodeResponse[responseType](
node: pnode,
data: ApiResponse[responseType].err(
"Operation timeout exceeded")
)
of ApiOperation.Success, ApiOperation.Failure:
# This should not happen, because all Futures should be finished here,
# and `Failure` is handled when the Future is finished.
ApiNodeResponse[responseType](
node: pnode,
data: ApiResponse[responseType].err("Unexpected error")
)
res.add(apiResponse)
res
ApiResponseSeq[responseType](status: status, data: responses)
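# `firstSuccessSequential` walks the online nodes one by one, awaiting `body`
# for each within the shared `timeout`; `handlers` decides whether a given
# `apiResponse` is good enough to stop, otherwise the next node is tried.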
template firstSuccessSequential*(
vc: ValidatorClientRef,
responseType: typedesc,
timeout: Duration,
statuses: set[RestBeaconNodeStatus],
roles: set[BeaconNodeRole],
body: untyped,
handlers: untyped
): untyped =
doAssert(timeout != ZeroDuration)
var
it {.inject.}: RestClientRef
iterations = 0
var timerFut =
if timeout != InfiniteDuration:
sleepAsync(timeout)
else:
nil
while true:
let onlineNodes =
try:
if iterations == 0:
# We are not going to wait for BNs if some are already available.
await vc.waitNodes(timerFut, statuses, roles, false)
else:
# We get here only if all the requests have failed. To avoid request spam
# we wait for changes in the BN statuses.
await vc.waitNodes(timerFut, statuses, roles, true)
vc.filterNodes(statuses, roles)
except CancelledError as exc:
# waitNodes() does not cancel `timerFut`.
if not(isNil(timerFut)) and not(timerFut.finished()):
await timerFut.cancelAndWait()
raise exc
if len(onlineNodes) == 0:
# The `onlineNodes` sequence is empty only if the operation timeout was exceeded.
break
if iterations != 0:
debug "Request got failed", iterations_count = iterations
var exitNow = false
for node {.inject.} in onlineNodes:
it = node.client
var bodyFut = body
let resOp =
block:
if isNil(timerFut):
try:
# We use `allFutures()` to keep result in `bodyFut`, but still
# be able to check errors.
await allFutures(bodyFut)
ApiOperation.Success
except CancelledError as exc:
# `allFutures()` does not cancel Futures.
if not(bodyFut.finished()):
await bodyFut.cancelAndWait()
raise exc
else:
try:
discard await race(bodyFut, timerFut)
if bodyFut.finished():
ApiOperation.Success
else:
await bodyFut.cancelAndWait()
ApiOperation.Timeout
except CancelledError as exc:
# `race()` does not cancel Futures.
var pending: seq[Future[void]]
if not(bodyFut.finished()):
pending.add(bodyFut.cancelAndWait())
if not(isNil(timerFut)) and not(timerFut.finished()):
pending.add(timerFut.cancelAndWait())
await noCancel allFutures(pending)
raise exc
var handlerStatus = false
block:
let apiResponse {.inject.} =
block:
if bodyFut.finished():
if bodyFut.failed() or bodyFut.cancelled():
let exc = bodyFut.error
ApiResponse[responseType].err("[" & $exc.name & "] " & $exc.msg)
else:
ApiResponse[responseType].ok(bodyFut.value)
else:
case resOp
of ApiOperation.Interrupt:
ApiResponse[responseType].err("Operation was interrupted")
of ApiOperation.Timeout:
ApiResponse[responseType].err("Operation timeout exceeded")
of ApiOperation.Success, ApiOperation.Failure:
# This should not happen, because all Futures should be finished here,
# and `Failure` is handled when the Future is finished.
ApiResponse[responseType].err("Unexpected error")
handlerStatus = handlers
if resOp == ApiOperation.Success:
if handlerStatus:
exitNow = true
break
else:
exitNow = true
break
if exitNow:
break
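# Decodes a plain REST error response and renders it as a human readable
# string, including any server supplied stacktraces.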
proc getErrorMessage*(response: RestPlainResponse): string =
let res =
decodeBytes(RestErrorMessage, response.data, response.contentType).valueOr:
return "Unable to decode error response: [" & $error & "]"
if res.stacktraces.isSome():
res.message & ": [" & res.stacktraces.get().join("; ") & "]"
else:
res.message
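# Decodes an indexed error response; when decoding fails a placeholder
# RestIndexedErrorMessage with code -1 and an explanatory message is returned.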
proc unpackErrorMessage*(response: RestPlainResponse): RestIndexedErrorMessage =
decodeBytes(RestIndexedErrorMessage, response.data,
response.contentType).valueOr:
let message = "Unable to decode error response: [" & $error & "]"
return RestIndexedErrorMessage(
code: -1,
message: message,
failures: default(seq[RestIndexedErrorMessageItem]))
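# Renders an indexed error message, appending the index and message of every
# reported failure.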
proc getErrorMessage*(msg: RestIndexedErrorMessage): string =
if len(msg.failures) > 0:
msg.message & ": [" &
msg.failures.mapIt($it.index & ":" & it.message).join("; ") & "]"
else:
msg.message
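# Shared failure-handling templates used by the API wrappers below. Each one
# builds an ApiNodeFailure for the current node, updates the node status
# accordingly and, with the exception of handleOptimistic, appends the
# failure to the surrounding `failures` list.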
template handleCommunicationError(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.Communication, RequestName,
strategy, node, apiResponse.error)
node.updateStatus(RestBeaconNodeStatus.Offline, failure)
failures.add(failure)
template handleUnexpectedCode(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.UnexpectedCode, RequestName,
strategy, node, response.status, response.getErrorMessage())
node.updateStatus(RestBeaconNodeStatus.UnexpectedCode, failure)
failures.add(failure)
template handleUnexpectedData(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.UnexpectedResponse, RequestName,
strategy, node, response.status, $res.error)
node.updateStatus(RestBeaconNodeStatus.UnexpectedResponse, failure)
failures.add(failure)
template handleOptimistic(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.OptSynced, RequestName,
strategy, node, response.status,
"Response was sent by optimistically synced node")
node.updateStatus(RestBeaconNodeStatus.OptSynced, failure)
template handle400(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.Invalid, RequestName,
strategy, node, response.status, response.getErrorMessage())
node.updateStatus(RestBeaconNodeStatus.Incompatible, failure)
failures.add(failure)
template handle400Indexed(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.Invalid, RequestName,
strategy, node, response.status,
response.unpackErrorMessage().getErrorMessage())
node.updateStatus(RestBeaconNodeStatus.Incompatible, failure)
failures.add(failure)
template handle404(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.NotFound, RequestName,
strategy, node, response.status, response.getErrorMessage())
node.updateStatus(RestBeaconNodeStatus.Incompatible, failure)
failures.add(failure)
template handle415(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.UnsupportedContentType,
RequestName, strategy, node, response.status, response.getErrorMessage())
node.updateStatus(RestBeaconNodeStatus.Incompatible, failure)
failures.add(failure)
template handle500(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.Internal, RequestName,
strategy, node, response.status, response.getErrorMessage())
node.updateStatus(RestBeaconNodeStatus.InternalError, failure)
failures.add(failure)
template handle501(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.NotImplemented, RequestName,
strategy, node, response.status, response.getErrorMessage())
node.updateStatus(RestBeaconNodeStatus.Incompatible, failure)
failures.add(failure)
template handle503(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.NotSynced, RequestName,
strategy, node, response.status, response.getErrorMessage())
node.updateStatus(RestBeaconNodeStatus.NotSynced, failure)
failures.add(failure)
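# Fetches proposer duties for `epoch` from beacon nodes with the Duties role,
# applying the requested fallback `strategy`. Raises ValidatorApiError with
# the accumulated per-node failures when no usable response is obtained.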
proc getProposerDuties*(
vc: ValidatorClientRef,
epoch: Epoch,
strategy: ApiStrategyKind
): Future[GetProposerDutiesResponse] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
const RequestName = "getProposerDuties"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(RestPlainResponse,
GetProposerDutiesResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
getProposerDutiesPlain(it, epoch)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[GetProposerDutiesResponse].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(GetProposerDutiesResponse, response.data,
response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[GetProposerDutiesResponse].err($res.error)
else:
let data = res.get()
if data.execution_optimistic.get(false):
handleOptimistic()
ApiResponse[GetProposerDutiesResponse].ok(data)
of 400:
handle400()
ApiResponse[GetProposerDutiesResponse].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[GetProposerDutiesResponse].err(ResponseInternalError)
of 503:
handle503()
ApiResponse[GetProposerDutiesResponse].err(ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[GetProposerDutiesResponse].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
getProposerDutiesPlain(it, epoch)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(GetProposerDutiesResponse, response.data,
response.contentType)
if res.isOk():
let data = res.get()
if data.execution_optimistic.get(false):
handleOptimistic()
return data
handleUnexpectedData()
false
of 400:
handle400()
false
of 500:
handle500()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to get proposer duties", data: failures)
proc getAttesterDuties*(
vc: ValidatorClientRef,
epoch: Epoch,
validators: seq[ValidatorIndex],
strategy: ApiStrategyKind
): Future[GetAttesterDutiesResponse] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
const RequestName = "getAttesterDuties"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(RestPlainResponse,
GetAttesterDutiesResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
getAttesterDutiesPlain(it, epoch,
validators)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[GetAttesterDutiesResponse].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(GetAttesterDutiesResponse, response.data,
response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[GetAttesterDutiesResponse].err($res.error)
else:
let data = res.get()
if data.execution_optimistic.get(false):
handleOptimistic()
ApiResponse[GetAttesterDutiesResponse].ok(data)
of 400:
handle400()
ApiResponse[GetAttesterDutiesResponse].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[GetAttesterDutiesResponse].err(ResponseInternalError)
of 503:
handle503()
ApiResponse[GetAttesterDutiesResponse].err(ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[GetAttesterDutiesResponse].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
getAttesterDutiesPlain(it, epoch, validators)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(GetAttesterDutiesResponse, response.data,
response.contentType)
if res.isOk():
let data = res.get()
if data.execution_optimistic.get(false):
handleOptimistic()
return data
handleUnexpectedData()
false
of 400:
handle400()
false
of 500:
handle500()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to get attester duties", data: failures)
proc getSyncCommitteeDuties*(
vc: ValidatorClientRef,
epoch: Epoch,
validators: seq[ValidatorIndex],
strategy: ApiStrategyKind
): Future[GetSyncCommitteeDutiesResponse] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
const RequestName = "getSyncCommitteeDuties"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(
RestPlainResponse,
GetSyncCommitteeDutiesResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
getSyncCommitteeDutiesPlain(it, epoch, validators)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[GetSyncCommitteeDutiesResponse].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(GetSyncCommitteeDutiesResponse, response.data,
response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[GetSyncCommitteeDutiesResponse].err($res.error)
else:
let data = res.get()
if data.execution_optimistic.get(false):
handleOptimistic()
ApiResponse[GetSyncCommitteeDutiesResponse].ok(data)
of 400:
handle400()
ApiResponse[GetSyncCommitteeDutiesResponse].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[GetSyncCommitteeDutiesResponse].err(ResponseInternalError)
of 503:
handle503()
ApiResponse[GetSyncCommitteeDutiesResponse].err(ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[GetSyncCommitteeDutiesResponse].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(
RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
getSyncCommitteeDutiesPlain(it, epoch, validators)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(GetSyncCommitteeDutiesResponse, response.data,
response.contentType)
if res.isOk():
let data = res.get()
if data.execution_optimistic.get(false):
handleOptimistic()
return data
handleUnexpectedData()
false
of 400:
handle400()
false
of 500:
handle500()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to get sync committee duties", data: failures)
proc getForkSchedule*(
vc: ValidatorClientRef
): Future[seq[Fork]] {.async: (raises: [CancelledError]).} =
const RequestName = "getForkSchedule"
let
resp = vc.onceToAll(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
getForkSchedulePlain(it))
case resp.status
of ApiOperation.Timeout:
debug "Unable to obtain fork schedule in time", timeout = SlotDuration
default(seq[Fork])
of ApiOperation.Interrupt:
debug "Fork schedule request was interrupted"
default(seq[Fork])
of ApiOperation.Failure:
debug "Unexpected error happened while trying to get fork schedule"
default(seq[Fork])
of ApiOperation.Success:
var
biggestForkSchedule: seq[Fork]
biggestNode: BeaconNodeServerRef
for apiResponse in resp.data:
if apiResponse.data.isErr():
debug "Unable to get fork schedule",
endpoint = apiResponse.node, reason = apiResponse.data.error
else:
let response = apiResponse.data.get()
case response.status
of 200:
let schedule = decodeBytes(GetForkScheduleResponse, response.data,
response.contentType).valueOr:
let failure = ApiNodeFailure.init(
ApiFailure.UnexpectedResponse, RequestName,
apiResponse.node, response.status, $error)
debug ResponseDecodeError, reason = getFailureReason(failure)
apiResponse.node.updateStatus(
RestBeaconNodeStatus.Incompatible, failure)
continue
if len(schedule.data) > len(biggestForkSchedule):
if biggestForkSchedule notin schedule.data:
let failure = ApiNodeFailure.init(
ApiFailure.UnexpectedResponse, RequestName,
apiResponse.node, response.status, "Incompatible fork schedule")
debug "Incompatible fork schedule", endpoint = biggestNode
biggestNode.updateStatus(
RestBeaconNodeStatus.Incompatible, failure)
biggestForkSchedule = schedule.data
biggestNode = apiResponse.node
else:
if schedule.data notin biggestForkSchedule:
let failure = ApiNodeFailure.init(
ApiFailure.UnexpectedResponse, RequestName,
apiResponse.node, response.status, "Incompatible fork schedule")
debug "Incompatible fork schedule", endpoint = apiResponse.node
apiResponse.node.updateStatus(
RestBeaconNodeStatus.Incompatible, failure)
continue
of 500:
let failure =
ApiNodeFailure.init(ApiFailure.Internal, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
apiResponse.node.updateStatus(
RestBeaconNodeStatus.InternalError, failure)
continue
else:
let failure =
ApiNodeFailure.init(ApiFailure.UnexpectedCode, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
debug ResponseUnexpectedError, reason = getFailureReason(failure)
apiResponse.node.updateStatus(
RestBeaconNodeStatus.Incompatible, failure)
continue
biggestForkSchedule
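# Obtains the head block root from nodes serving sync committee data. A
# response from an optimistically synced node is recorded as a failure rather
# than returned.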
proc getHeadBlockRoot*(
vc: ValidatorClientRef,
strategy: ApiStrategyKind
): Future[DataOptimisticObject[RestRoot]] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
const RequestName = "getHeadBlockRoot"
var failures: seq[ApiNodeFailure]
let blockIdent = BlockIdent.init(BlockIdentType.Head)
case strategy
of ApiStrategyKind.First:
let res = vc.firstSuccessParallel(RestPlainResponse,
GetBlockRootResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.SyncCommitteeData},
getBlockRootPlain(it, blockIdent)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[GetBlockRootResponse].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(GetBlockRootResponse, response.data,
response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[GetBlockRootResponse].err($res.error)
else:
let data = res.get()
if data.execution_optimistic.get(false):
handleOptimistic()
failures.add(failure)
ApiResponse[GetBlockRootResponse].err(ResponseECNotInSyncError)
else:
ApiResponse[GetBlockRootResponse].ok(data)
of 400:
handle400()
ApiResponse[GetBlockRootResponse].err(ResponseInvalidError)
of 404:
handle404()
ApiResponse[GetBlockRootResponse].err(ResponseNotFoundError)
of 500:
handle500()
ApiResponse[GetBlockRootResponse].err(ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[GetBlockRootResponse].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Best:
let res = vc.bestSuccess(
RestPlainResponse,
GetBlockRootResponse,
float64,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.SyncCommitteeData},
getBlockRootPlain(it, blockIdent),
getSyncCommitteeMessageDataScore(vc, itresponse)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[GetBlockRootResponse].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(GetBlockRootResponse, response.data,
response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[GetBlockRootResponse].err($res.error)
else:
let data = res.get()
if data.execution_optimistic.get(false):
handleOptimistic()
failures.add(failure)
ApiResponse[GetBlockRootResponse].err(ResponseECNotInSyncError)
else:
ApiResponse[GetBlockRootResponse].ok(data)
of 400:
handle400()
ApiResponse[GetBlockRootResponse].err(ResponseInvalidError)
of 404:
handle404()
ApiResponse[GetBlockRootResponse].err(ResponseNotFoundError)
of 500:
handle500()
ApiResponse[GetBlockRootResponse].err(ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[GetBlockRootResponse].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse, #RestResponse[GetBlockRootResponse],
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.SyncCommitteeData},
getBlockRootPlain(it, blockIdent)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(GetBlockRootResponse, response.data,
response.contentType)
if res.isErr():
handleUnexpectedData()
false
else:
let data = res.get()
if data.execution_optimistic.get(false):
handleOptimistic()
failures.add(failure)
false
else:
return data
of 400:
handle400()
false
of 404:
handle404()
false
of 500:
handle500()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to get head block root", data: failures)
proc getValidators*(
vc: ValidatorClientRef,
id: seq[ValidatorIdent],
strategy: ApiStrategyKind
): Future[seq[RestValidator]] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
const RequestName = "getStateValidators"
let stateIdent = StateIdent.init(StateIdentType.Head)
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(
RestPlainResponse,
GetStateValidatorsResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
getStateValidatorsPlain(it, stateIdent, id)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[GetStateValidatorsResponse].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(GetStateValidatorsResponse, response.data,
response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[GetStateValidatorsResponse].err($res.error)
else:
let data = res.get()
if data.execution_optimistic.get(false):
handleOptimistic()
ApiResponse[GetStateValidatorsResponse].ok(data)
of 400:
handle400()
ApiResponse[GetStateValidatorsResponse].err(ResponseInvalidError)
of 404:
handle404()
ApiResponse[GetStateValidatorsResponse].err(ResponseNotFoundError)
of 500:
handle500()
ApiResponse[GetStateValidatorsResponse].err(ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[GetStateValidatorsResponse].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get().data
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
getStateValidatorsPlain(it, stateIdent, id)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(GetStateValidatorsResponse, response.data,
response.contentType)
if res.isOk():
let data = res.get()
if data.execution_optimistic.get(false):
handleOptimistic()
return data.data
handleUnexpectedData()
false
of 400:
handle400()
false
of 404:
handle404()
false
of 500:
handle500()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to get state's validators", data: failures)
proc produceAttestationData*(
vc: ValidatorClientRef,
slot: Slot,
committee_index: CommitteeIndex,
strategy: ApiStrategyKind
): Future[AttestationData] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
const RequestName = "produceAttestationData"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First:
let res = vc.firstSuccessParallel(
RestPlainResponse,
ProduceAttestationDataResponse,
OneThirdDuration,
ViableNodeStatus,
{BeaconNodeRole.AttestationData},
produceAttestationDataPlain(it, slot, committee_index)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[ProduceAttestationDataResponse].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(ProduceAttestationDataResponse, response.data,
response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[ProduceAttestationDataResponse].err($res.error)
else:
ApiResponse[ProduceAttestationDataResponse].ok(res.get())
of 400:
handle400()
ApiResponse[ProduceAttestationDataResponse].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[ProduceAttestationDataResponse].err(ResponseInternalError)
of 503:
handle503()
ApiResponse[ProduceAttestationDataResponse].err(
ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[ProduceAttestationDataResponse].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get().data
of ApiStrategyKind.Best:
let res = vc.bestSuccess(
RestPlainResponse,
ProduceAttestationDataResponse,
float64,
OneThirdDuration,
ViableNodeStatus,
{BeaconNodeRole.AttestationData},
produceAttestationDataPlain(it, slot, committee_index),
getAttestationDataScore(vc, itresponse)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[ProduceAttestationDataResponse].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(ProduceAttestationDataResponse, response.data,
response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[ProduceAttestationDataResponse].err($res.error)
else:
ApiResponse[ProduceAttestationDataResponse].ok(res.get())
of 400:
handle400()
ApiResponse[ProduceAttestationDataResponse].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[ProduceAttestationDataResponse].err(ResponseInternalError)
of 503:
handle503()
ApiResponse[ProduceAttestationDataResponse].err(
ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[ProduceAttestationDataResponse].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get().data
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(
RestPlainResponse,
OneThirdDuration,
ViableNodeStatus,
{BeaconNodeRole.AttestationData},
produceAttestationDataPlain(it, slot, committee_index)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(ProduceAttestationDataResponse, response.data,
response.contentType)
if res.isOk(): return res.get().data
handleUnexpectedData()
false
of 400:
handle400()
false
of 500:
handle500()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to produce attestation data", data: failures)
proc submitPoolAttestationsV2*(
vc: ValidatorClientRef,
data: seq[ForkyAttestation],
strategy: ApiStrategyKind
): Future[bool] {.async: (raises: [CancelledError, ValidatorApiError]).} =
const
RequestName = "submitPoolAttestationsV2"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(RestPlainResponse,
bool,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.AttestationPublish},
submitPoolAttestationsV2(it, data)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[bool].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
ApiResponse[bool].ok(true)
of 400:
handle400Indexed()
ApiResponse[bool].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[bool].err(ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[bool].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.AttestationPublish},
submitPoolAttestationsV2(it, data)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
return true
of 400:
handle400Indexed()
false
of 500:
handle500()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to submit attestations", data: failures)
proc submitPoolAttestations*(
vc: ValidatorClientRef,
data: seq[phase0.Attestation],
strategy: ApiStrategyKind
): Future[bool] {.async: (raises: [CancelledError, ValidatorApiError]).} =
const
RequestName = "submitPoolAttestations"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(RestPlainResponse,
bool,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.AttestationPublish},
submitPoolAttestations(it, data)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[bool].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
ApiResponse[bool].ok(true)
of 400:
handle400Indexed()
ApiResponse[bool].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[bool].err(ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[bool].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.AttestationPublish},
submitPoolAttestations(it, data)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
return true
of 400:
handle400Indexed()
false
of 500:
handle500()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to submit attestations", data: failures)
proc submitPoolSyncCommitteeSignature*(
vc: ValidatorClientRef,
data: SyncCommitteeMessage,
strategy: ApiStrategyKind
): Future[bool] {.async: (raises: [CancelledError, ValidatorApiError]).} =
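## Submits a single sync committee message to beacon nodes serving the
## `SyncCommitteePublish` role, following the given fallback `strategy`.
## Returns `true` on success or raises `ValidatorApiError` with the
## collected per-node failures.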
const
RequestName = "submitPoolSyncCommitteeSignatures"
let restData = RestSyncCommitteeMessage.init(
data.slot,
data.beacon_block_root,
data.validator_index,
data.signature
)
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(
RestPlainResponse,
bool,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.SyncCommitteePublish},
submitPoolSyncCommitteeSignatures(it, @[restData])):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[bool].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
ApiResponse[bool].ok(true)
of 400:
handle400Indexed()
ApiResponse[bool].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[bool].err(ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[bool].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(
RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.SyncCommitteePublish},
submitPoolSyncCommitteeSignatures(it, @[restData])):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
return true
of 400:
handle400Indexed()
false
of 500:
handle500()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to submit sync committee message", data: failures)
proc getAggregatedAttestation*(
vc: ValidatorClientRef,
slot: Slot,
root: Eth2Digest,
strategy: ApiStrategyKind
): Future[phase0.Attestation] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
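## Requests an aggregated phase0 attestation for `slot` and attestation
## data `root` from beacon nodes serving the `AggregatedData` role.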
const
RequestName = "getAggregatedAttestation"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First:
let res = vc.firstSuccessParallel(
RestPlainResponse,
GetAggregatedAttestationResponse,
OneThirdDuration,
ViableNodeStatus,
{BeaconNodeRole.AggregatedData},
getAggregatedAttestationPlain(it, root, slot)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[GetAggregatedAttestationResponse].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
let res = decodeBytes(GetAggregatedAttestationResponse, response.data,
response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[GetAggregatedAttestationResponse].err($res.error)
else:
ApiResponse[GetAggregatedAttestationResponse].ok(res.get())
of 400:
handle400()
ApiResponse[GetAggregatedAttestationResponse].err(
ResponseInvalidError)
of 404:
handle404()
ApiResponse[GetAggregatedAttestationResponse].err(
ResponseNotFoundError)
of 500:
handle500()
ApiResponse[GetAggregatedAttestationResponse].err(
ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[GetAggregatedAttestationResponse].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
res.get().data
of ApiStrategyKind.Best:
let res = vc.bestSuccess(
RestPlainResponse,
GetAggregatedAttestationResponse,
float64,
OneThirdDuration,
ViableNodeStatus,
{BeaconNodeRole.AggregatedData},
getAggregatedAttestationPlain(it, root, slot),
getAggregatedAttestationDataScore(itresponse)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[GetAggregatedAttestationResponse].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
let res = decodeBytes(GetAggregatedAttestationResponse, response.data,
response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[GetAggregatedAttestationResponse].err($res.error)
else:
ApiResponse[GetAggregatedAttestationResponse].ok(res.get())
of 400:
handle400()
ApiResponse[GetAggregatedAttestationResponse].err(
ResponseInvalidError)
of 404:
# A 404 error must be returned if no attestation is available for the
# requested `attestation_data_root`. To address issue #6184, we use an
# empty GetAggregatedAttestationResponse instead of treating it as a failure.
ApiResponse[GetAggregatedAttestationResponse].ok(
GetAggregatedAttestationResponse(
data: LowestScoreAggregatedAttestation))
of 500:
handle500()
ApiResponse[GetAggregatedAttestationResponse].err(
ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[GetAggregatedAttestationResponse].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
res.get().data
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(
RestPlainResponse,
OneThirdDuration,
ViableNodeStatus,
{BeaconNodeRole.AggregatedData},
getAggregatedAttestationPlain(it, root, slot)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status:
of 200:
let res = decodeBytes(GetAggregatedAttestationResponse, response.data,
response.contentType)
if res.isOk(): return res.get().data
handleUnexpectedData()
false
of 400:
handle400()
false
of 404:
handle404()
false
of 500:
handle500()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to get aggregated attestation", data: failures)
proc getAggregatedAttestationV2*(
vc: ValidatorClientRef,
slot: Slot,
root: Eth2Digest,
committee_index: CommitteeIndex,
strategy: ApiStrategyKind
): Future[ForkedAttestation] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
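## Requests an aggregated attestation (fork-agnostic V2 endpoint) for
## `slot`, attestation data `root` and `committee_index` from beacon nodes
## serving the `AggregatedData` role.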
const
RequestName = "getAggregatedAttestationV2"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First:
let res = vc.firstSuccessParallel(
RestPlainResponse,
GetAggregatedAttestationV2Response,
OneThirdDuration,
ViableNodeStatus,
{BeaconNodeRole.AggregatedData},
getAggregatedAttestationPlainV2(it, root, slot, committee_index)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[GetAggregatedAttestationV2Response].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
let res = decodeBytes(GetAggregatedAttestationV2Response,
response.data, response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[GetAggregatedAttestationV2Response].err($res.error)
else:
ApiResponse[GetAggregatedAttestationV2Response].ok(res.get())
of 400:
handle400()
ApiResponse[GetAggregatedAttestationV2Response].err(
ResponseInvalidError)
of 404:
handle404()
ApiResponse[GetAggregatedAttestationV2Response].err(
ResponseNotFoundError)
of 500:
handle500()
ApiResponse[GetAggregatedAttestationV2Response].err(
ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[GetAggregatedAttestationV2Response].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
res.get()
of ApiStrategyKind.Best:
let res = vc.bestSuccess(
RestPlainResponse,
GetAggregatedAttestationV2Response,
float64,
OneThirdDuration,
ViableNodeStatus,
{BeaconNodeRole.AggregatedData},
getAggregatedAttestationPlainV2(it, root, slot, committee_index),
getAggregatedAttestationDataScore(itresponse)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[GetAggregatedAttestationV2Response].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
let res = decodeBytes(GetAggregatedAttestationV2Response,
response.data, response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[GetAggregatedAttestationV2Response].err($res.error)
else:
ApiResponse[GetAggregatedAttestationV2Response].ok(res.get())
of 400:
handle400()
ApiResponse[GetAggregatedAttestationV2Response].err(
ResponseInvalidError)
of 404:
# A 404 error must be returned if no attestation is available for the
# requested `attestation_data_root`.
ApiResponse[GetAggregatedAttestationV2Response].ok(
ForkedAttestation.init(
LowestScoreAggregatedElectraAttestation, ConsensusFork.Electra))
of 500:
handle500()
ApiResponse[GetAggregatedAttestationV2Response].err(
ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[GetAggregatedAttestationV2Response].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(
RestPlainResponse,
OneThirdDuration,
ViableNodeStatus,
{BeaconNodeRole.AggregatedData},
getAggregatedAttestationPlainV2(it, root, slot, committee_index)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status:
of 200:
let res = decodeBytes(GetAggregatedAttestationV2Response,
response.data, response.contentType)
if res.isOk(): return res.get()
handleUnexpectedData()
false
of 400:
handle400()
false
of 404:
handle404()
false
of 500:
handle500()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to get aggregated attestation", data: failures)
proc produceSyncCommitteeContribution*(
vc: ValidatorClientRef,
slot: Slot,
subcommitteeIndex: SyncSubcommitteeIndex,
root: Eth2Digest,
strategy: ApiStrategyKind
): Future[SyncCommitteeContribution] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
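## Requests a sync committee contribution for `slot`, `subcommitteeIndex`
## and beacon block `root` from beacon nodes serving the
## `SyncCommitteeData` role.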
const
RequestName = "produceSyncCommitteeContribution"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First:
let res = vc.firstSuccessParallel(
RestPlainResponse,
ProduceSyncCommitteeContributionResponse,
OneThirdDuration,
ViableNodeStatus,
{BeaconNodeRole.SyncCommitteeData},
produceSyncCommitteeContributionPlain(it, slot, subcommitteeIndex, root)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[ProduceSyncCommitteeContributionResponse].err(
apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
let res = decodeBytes(ProduceSyncCommitteeContributionResponse,
response.data, response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[ProduceSyncCommitteeContributionResponse].err(
$res.error)
else:
ApiResponse[ProduceSyncCommitteeContributionResponse].ok(res.get())
of 400:
handle400()
ApiResponse[ProduceSyncCommitteeContributionResponse].err(
ResponseInvalidError)
of 500:
handle500()
ApiResponse[ProduceSyncCommitteeContributionResponse].err(
ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[ProduceSyncCommitteeContributionResponse].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get().data
of ApiStrategyKind.Best:
let res = vc.bestSuccess(
RestPlainResponse,
ProduceSyncCommitteeContributionResponse,
float64,
OneThirdDuration,
ViableNodeStatus,
{BeaconNodeRole.SyncCommitteeData},
produceSyncCommitteeContributionPlain(it, slot, subcommitteeIndex, root),
getSyncCommitteeContributionDataScore(itresponse)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[ProduceSyncCommitteeContributionResponse].err(
apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
let res = decodeBytes(ProduceSyncCommitteeContributionResponse,
response.data, response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[ProduceSyncCommitteeContributionResponse].err(
$res.error)
else:
ApiResponse[ProduceSyncCommitteeContributionResponse].ok(res.get())
of 400:
handle400()
ApiResponse[ProduceSyncCommitteeContributionResponse].err(
ResponseInvalidError)
of 500:
handle500()
ApiResponse[ProduceSyncCommitteeContributionResponse].err(
ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[ProduceSyncCommitteeContributionResponse].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get().data
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(
RestPlainResponse,
OneThirdDuration,
ViableNodeStatus,
{BeaconNodeRole.SyncCommitteeData},
produceSyncCommitteeContributionPlain(it, slot, subcommitteeIndex, root)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status:
of 200:
let res = decodeBytes(ProduceSyncCommitteeContributionResponse,
response.data, response.contentType)
if res.isOk(): return res.get().data
handleUnexpectedData()
false
of 400:
handle400()
false
of 500:
handle500()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to produce sync committee contribution", data: failures)
proc publishAggregateAndProofsV2*(
vc: ValidatorClientRef,
data: seq[ForkySignedAggregateAndProof],
strategy: ApiStrategyKind
): Future[bool] {.async: (raises: [CancelledError, ValidatorApiError]).} =
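## Publishes signed aggregate and proofs (fork-agnostic V2 endpoint) to
## beacon nodes serving the `AggregatedPublish` role.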
const
RequestName = "publishAggregateAndProofsV2"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(RestPlainResponse,
bool,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.AggregatedPublish},
publishAggregateAndProofsV2(it, data)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[bool].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
ApiResponse[bool].ok(true)
of 400:
handle400()
ApiResponse[bool].err(ResponseInvalidError)
of 404:
handle404()
ApiResponse[bool].err(ResponseNotFoundError)
of 500:
handle500()
ApiResponse[bool].err(ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[bool].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.AggregatedPublish},
publishAggregateAndProofsV2(it, data)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status:
of 200:
return true
of 400:
handle400()
false
of 404:
handle404()
false
of 500:
handle500()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to publish aggregated attestation", data: failures)
proc publishAggregateAndProofs*(
vc: ValidatorClientRef,
data: seq[phase0.SignedAggregateAndProof],
strategy: ApiStrategyKind
): Future[bool] {.async: (raises: [CancelledError, ValidatorApiError]).} =
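## Publishes phase0 signed aggregate and proofs to beacon nodes serving the
## `AggregatedPublish` role.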
const
RequestName = "publishAggregateAndProofs"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(RestPlainResponse,
bool,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.AggregatedPublish},
publishAggregateAndProofs(it, data)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[bool].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
ApiResponse[bool].ok(true)
of 400:
handle400()
ApiResponse[bool].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[bool].err(ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[bool].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.AggregatedPublish},
publishAggregateAndProofs(it, data)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status:
of 200:
return true
of 400:
handle400()
false
of 500:
handle500()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to publish aggregated attestation", data: failures)
proc publishContributionAndProofs*(
vc: ValidatorClientRef,
data: seq[RestSignedContributionAndProof],
strategy: ApiStrategyKind
): Future[bool] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
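## Publishes signed sync committee contributions and proofs to beacon nodes
## serving the `SyncCommitteePublish` role.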
const
RequestName = "publishContributionAndProofs"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(RestPlainResponse,
bool,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.SyncCommitteePublish},
publishContributionAndProofs(it, data)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[bool].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
ApiResponse[bool].ok(true)
of 400:
handle400()
ApiResponse[bool].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[bool].err(ResponseInternalError)
else:
handleUnexpectedCode()
ApiResponse[bool].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.SyncCommitteePublish},
publishContributionAndProofs(it, data)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status:
of 200:
return true
of 400:
handle400()
false
of 500:
handle500()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to publish sync committee contribution", data: failures)
proc produceBlockV3*(
vc: ValidatorClientRef,
slot: Slot,
randao_reveal: ValidatorSig,
graffiti: GraffitiBytes,
builder_boost_factor: uint64,
strategy: ApiStrategyKind
): Future[ProduceBlockResponseV3] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
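## Requests a block proposal (V3 endpoint) for `slot` using the given
## `randao_reveal`, `graffiti` and `builder_boost_factor` from beacon nodes
## serving the `BlockProposalData` role.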
const
RequestName = "produceBlockV3"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.Best:
let res = vc.bestSuccess(
RestPlainResponse,
ProduceBlockResponseV3,
UInt256,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalData},
produceBlockV3Plain(it, slot, randao_reveal, graffiti,
builder_boost_factor),
getProduceBlockResponseV3Score(itresponse)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[ProduceBlockResponseV3].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let
version = response.headers.getString("eth-consensus-version")
blinded =
response.headers.getString("eth-execution-payload-blinded")
executionValue =
response.headers.getString("eth-execution-payload-value")
consensusValue =
response.headers.getString("eth-consensus-block-value")
res = decodeBytes(ProduceBlockResponseV3, response.data,
response.contentType, version, blinded,
executionValue, consensusValue)
if res.isErr():
handleUnexpectedData()
ApiResponse[ProduceBlockResponseV3].err($res.error)
else:
ApiResponse[ProduceBlockResponseV3].ok(res.get())
of 400:
handle400()
ApiResponse[ProduceBlockResponseV3].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[ProduceBlockResponseV3].err(ResponseInternalError)
of 503:
handle503()
ApiResponse[ProduceBlockResponseV3].err(
ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[ProduceBlockResponseV3].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.First:
let res = vc.firstSuccessParallel(
RestPlainResponse,
ProduceBlockResponseV3,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalData},
produceBlockV3Plain(it, slot, randao_reveal, graffiti,
builder_boost_factor)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[ProduceBlockResponseV3].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
let
version = response.headers.getString("eth-consensus-version")
blinded =
response.headers.getString("eth-execution-payload-blinded")
executionValue =
response.headers.getString("eth-execution-payload-value")
consensusValue =
response.headers.getString("eth-consensus-block-value")
res = decodeBytes(ProduceBlockResponseV3, response.data,
response.contentType, version, blinded,
executionValue, consensusValue)
if res.isErr():
handleUnexpectedData()
ApiResponse[ProduceBlockResponseV3].err($res.error)
else:
ApiResponse[ProduceBlockResponseV3].ok(res.get())
of 400:
handle400()
ApiResponse[ProduceBlockResponseV3].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[ProduceBlockResponseV3].err(ResponseInternalError)
of 503:
handle503()
ApiResponse[ProduceBlockResponseV3].err(ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[ProduceBlockResponseV3].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(
RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalData},
produceBlockV3Plain(it, slot, randao_reveal, graffiti,
builder_boost_factor)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status:
of 200:
let
version = response.headers.getString("eth-consensus-version")
blinded =
response.headers.getString("eth-execution-payload-blinded")
executionValue =
response.headers.getString("eth-execution-payload-value")
consensusValue =
response.headers.getString("eth-consensus-block-value")
res = decodeBytes(ProduceBlockResponseV3, response.data,
response.contentType, version, blinded,
executionValue, consensusValue)
if res.isOk(): return res.get()
handleUnexpectedData()
false
of 400:
handle400()
false
of 500:
handle500()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to produce block", data: failures)
proc publishBlockV2*(
vc: ValidatorClientRef,
data: RestPublishedSignedBlockContents,
broadcast_validation: BroadcastValidationType,
strategy: ApiStrategyKind
): Future[bool] {.async: (raises: [CancelledError, ValidatorApiError]).} =
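## Publishes a signed block (V2 endpoint) with the requested
## `broadcast_validation` level to beacon nodes serving the
## `BlockProposalPublish` role.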
const
RequestName = "publishBlockV2"
BlockBroadcasted = "Block did not pass validation, but was still published"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = block:
vc.firstSuccessParallel(RestPlainResponse,
bool,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalPublish}):
case data.kind
of ConsensusFork.Phase0:
publishBlockV2(it, some(broadcast_validation), data.phase0Data)
of ConsensusFork.Altair:
publishBlockV2(it, some(broadcast_validation), data.altairData)
of ConsensusFork.Bellatrix:
publishBlockV2(it, some(broadcast_validation), data.bellatrixData)
of ConsensusFork.Capella:
publishBlockV2(it, some(broadcast_validation), data.capellaData)
of ConsensusFork.Deneb:
publishBlockV2(it, some(broadcast_validation), data.denebData)
of ConsensusFork.Electra:
publishBlockV2(it, some(broadcast_validation), data.electraData)
of ConsensusFork.Fulu:
publishBlockV2(it, some(broadcast_validation), data.fuluData)
do:
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[bool].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
ApiResponse[bool].ok(true)
of 202:
debug BlockBroadcasted, node = node,
blck = shortLog(ForkedSignedBeaconBlock.init(data))
ApiResponse[bool].ok(true)
of 400:
handle400()
ApiResponse[bool].err(ResponseInvalidError)
of 415:
handle415()
ApiResponse[bool].err(ResponseContentTypeError)
of 500:
handle500()
ApiResponse[bool].err(ResponseInternalError)
of 503:
handle503()
ApiResponse[bool].err(ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[bool].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalPublish}):
case data.kind
of ConsensusFork.Phase0:
publishBlockV2(it, some(broadcast_validation), data.phase0Data)
of ConsensusFork.Altair:
publishBlockV2(it, some(broadcast_validation), data.altairData)
of ConsensusFork.Bellatrix:
publishBlockV2(it, some(broadcast_validation), data.bellatrixData)
of ConsensusFork.Capella:
publishBlockV2(it, some(broadcast_validation), data.capellaData)
of ConsensusFork.Deneb:
publishBlockV2(it, some(broadcast_validation), data.denebData)
of ConsensusFork.Electra:
publishBlockV2(it, some(broadcast_validation), data.electraData)
of ConsensusFork.Fulu:
publishBlockV2(it, some(broadcast_validation), data.fuluData)
do:
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status:
of 200:
return true
of 202:
debug BlockBroadcasted, node = node,
blck = shortLog(ForkedSignedBeaconBlock.init(data))
return true
of 400:
handle400()
false
of 415:
handle415()
false
of 500:
handle500()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to publish block", data: failures)
proc publishBlock*(
vc: ValidatorClientRef,
data: RestPublishedSignedBlockContents,
strategy: ApiStrategyKind
): Future[bool] {.async: (raises: [CancelledError, ValidatorApiError]).} =
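## Publishes a signed block to beacon nodes serving the
## `BlockProposalPublish` role.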
const
RequestName = "publishBlock"
BlockBroadcasted = "Block did not pass validation, but was still published"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = block:
vc.firstSuccessParallel(RestPlainResponse,
bool,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalPublish}):
case data.kind
of ConsensusFork.Phase0:
publishBlock(it, data.phase0Data)
of ConsensusFork.Altair:
publishBlock(it, data.altairData)
of ConsensusFork.Bellatrix:
publishBlock(it, data.bellatrixData)
of ConsensusFork.Capella:
publishBlock(it, data.capellaData)
of ConsensusFork.Deneb:
publishBlock(it, data.denebData)
of ConsensusFork.Electra:
publishBlock(it, data.electraData)
of ConsensusFork.Fulu:
publishBlock(it, data.fuluData)
do:
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[bool].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
ApiResponse[bool].ok(true)
of 202:
debug BlockBroadcasted, node = node,
blck = shortLog(ForkedSignedBeaconBlock.init(data))
ApiResponse[bool].ok(true)
of 400:
handle400()
ApiResponse[bool].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[bool].err(ResponseInternalError)
of 503:
handle503()
ApiResponse[bool].err(ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[bool].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalPublish}):
case data.kind
of ConsensusFork.Phase0:
publishBlock(it, data.phase0Data)
of ConsensusFork.Altair:
publishBlock(it, data.altairData)
of ConsensusFork.Bellatrix:
publishBlock(it, data.bellatrixData)
of ConsensusFork.Capella:
publishBlock(it, data.capellaData)
of ConsensusFork.Deneb:
publishBlock(it, data.denebData)
of ConsensusFork.Electra:
publishBlock(it, data.electraData)
of ConsensusFork.Fulu:
publishBlock(it, data.fuluData)
do:
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status:
of 200:
return true
of 202:
debug BlockBroadcasted, node = node,
blck = shortLog(ForkedSignedBeaconBlock.init(data))
return true
of 400:
handle400()
false
of 500:
handle500()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to publish block", data: failures)
proc publishBlindedBlockV2*(
vc: ValidatorClientRef,
data: ForkedSignedBlindedBeaconBlock,
broadcast_validation: BroadcastValidationType,
strategy: ApiStrategyKind
): Future[bool] {.async: (raises: [CancelledError, ValidatorApiError]).} =
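## Publishes a signed blinded block (V2 endpoint) with the requested
## `broadcast_validation` level to beacon nodes serving the
## `BlockProposalPublish` role.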
const
RequestName = "publishBlindedBlockV2"
BlockBroadcasted = "Block did not pass validation, but was still published"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = block:
vc.firstSuccessParallel(RestPlainResponse,
bool,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalPublish}):
case data.kind
of ConsensusFork.Phase0:
publishBlindedBlockV2(it, some(broadcast_validation),
data.phase0Data)
of ConsensusFork.Altair:
publishBlindedBlockV2(it, some(broadcast_validation),
data.altairData)
of ConsensusFork.Bellatrix:
publishBlindedBlockV2(it, some(broadcast_validation),
data.bellatrixData)
of ConsensusFork.Capella:
publishBlindedBlockV2(it, some(broadcast_validation),
data.capellaData)
of ConsensusFork.Deneb:
publishBlindedBlockV2(it, some(broadcast_validation),
data.denebData)
of ConsensusFork.Electra:
publishBlindedBlockV2(it, some(broadcast_validation),
data.electraData)
of ConsensusFork.Fulu:
publishBlindedBlockV2(it, some(broadcast_validation),
data.fuluData)
do:
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[bool].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
ApiResponse[bool].ok(true)
of 202:
debug BlockBroadcasted, node = node, blck = shortLog(data)
ApiResponse[bool].ok(true)
of 400:
handle400()
ApiResponse[bool].err(ResponseInvalidError)
of 415:
handle415()
ApiResponse[bool].err(ResponseContentTypeError)
of 500:
handle500()
ApiResponse[bool].err(ResponseInternalError)
of 503:
handle503()
ApiResponse[bool].err(ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[bool].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalPublish}):
case data.kind
of ConsensusFork.Phase0:
publishBlindedBlockV2(it, some(broadcast_validation),
data.phase0Data)
of ConsensusFork.Altair:
publishBlindedBlockV2(it, some(broadcast_validation),
data.altairData)
of ConsensusFork.Bellatrix:
publishBlindedBlockV2(it, some(broadcast_validation),
data.bellatrixData)
of ConsensusFork.Capella:
publishBlindedBlockV2(it, some(broadcast_validation),
data.capellaData)
of ConsensusFork.Deneb:
publishBlindedBlockV2(it, some(broadcast_validation),
data.denebData)
of ConsensusFork.Electra:
publishBlindedBlockV2(it, some(broadcast_validation),
data.electraData)
of ConsensusFork.Fulu:
publishBlindedBlockV2(it, some(broadcast_validation),
data.fuluData)
do:
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status:
of 200:
return true
of 202:
debug BlockBroadcasted, node = node, blck = shortLog(data)
return true
of 400:
handle400()
false
of 415:
handle415()
false
of 500:
handle500()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to publish blinded block", data: failures)
proc publishBlindedBlock*(
vc: ValidatorClientRef,
data: ForkedSignedBlindedBeaconBlock,
strategy: ApiStrategyKind
): Future[bool] {.async: (raises: [CancelledError, ValidatorApiError]).} =
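## Publishes a signed blinded block to beacon nodes serving the
## `BlockProposalPublish` role.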
const
RequestName = "publishBlindedBlock"
BlockBroadcasted = "Block did not pass validation, but was still published"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = block:
vc.firstSuccessParallel(RestPlainResponse,
bool,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalPublish}):
case data.kind
of ConsensusFork.Phase0:
publishBlindedBlock(it, data.phase0Data)
of ConsensusFork.Altair:
publishBlindedBlock(it, data.altairData)
of ConsensusFork.Bellatrix:
publishBlindedBlock(it, data.bellatrixData)
of ConsensusFork.Capella:
publishBlindedBlock(it, data.capellaData)
of ConsensusFork.Deneb:
publishBlindedBlock(it, data.denebData)
of ConsensusFork.Electra:
publishBlindedBlock(it, data.electraData)
of ConsensusFork.Fulu:
publishBlindedBlock(it, data.fuluData)
do:
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[bool].err(apiResponse.error)
else:
let response = apiResponse.get()
case response.status:
of 200:
ApiResponse[bool].ok(true)
of 202:
debug BlockBroadcasted, node = node, blck = shortLog(data)
ApiResponse[bool].ok(true)
of 400:
handle400()
ApiResponse[bool].err(ResponseInvalidError)
of 500:
handle500()
ApiResponse[bool].err(ResponseInternalError)
of 503:
handle503()
ApiResponse[bool].err(ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[bool].err(ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalPublish}):
case data.kind
of ConsensusFork.Phase0:
publishBlindedBlock(it, data.phase0Data)
of ConsensusFork.Altair:
publishBlindedBlock(it, data.altairData)
of ConsensusFork.Bellatrix:
publishBlindedBlock(it, data.bellatrixData)
of ConsensusFork.Capella:
publishBlindedBlock(it, data.capellaData)
of ConsensusFork.Deneb:
publishBlindedBlock(it, data.denebData)
of ConsensusFork.Electra:
publishBlindedBlock(it, data.electraData)
of ConsensusFork.Fulu:
publishBlindedBlock(it, data.fuluData)
do:
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status:
of 200:
return true
of 202:
debug BlockBroadcasted, node = node, blck = shortLog(data)
return true
of 400:
handle400()
false
of 500:
handle500()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to publish blinded block", data: failures)
proc prepareBeaconCommitteeSubnet*(
vc: ValidatorClientRef,
data: seq[RestCommitteeSubscription],
): Future[int] {.async: (raises: [CancelledError, ValidatorApiError]).} =
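## Sends beacon committee subnet subscriptions to every viable beacon node
## serving the `AggregatedData` role and returns the number of nodes that
## accepted the request.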
logScope: request = "prepareBeaconCommitteeSubnet"
let resp = vc.onceToAll(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.AggregatedData},
prepareBeaconCommitteeSubnet(it, data))
if len(resp.data) == 0:
# We did not get any response from beacon nodes.
case resp.status
of ApiOperation.Success:
# This should not happen; at least one successful response should be
# present.
return 0
of ApiOperation.Timeout:
debug "Unable to subscribe to beacon committee subnets in time",
timeout = SlotDuration
return 0
of ApiOperation.Interrupt:
debug "Beacon committee subscription request was interrupted"
return 0
of ApiOperation.Failure:
debug "Unexpected error happened while subscribing to beacon committee " &
"subnets"
return 0
else:
var count = 0
for apiResponse in resp.data:
if apiResponse.data.isErr():
debug "Unable to subscribe to beacon committee subnets",
endpoint = apiResponse.node, error = apiResponse.data.error
else:
let response = apiResponse.data.get()
if response.status == 200:
inc(count)
else:
debug "Subscription to beacon commitee subnets failed",
status = response.status, endpoint = apiResponse.node,
reason = response.getErrorMessage()
return count
proc prepareSyncCommitteeSubnets*(
vc: ValidatorClientRef,
data: seq[RestSyncCommitteeSubscription],
): Future[int] {.async: (raises: [CancelledError, ValidatorApiError]).} =
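## Sends sync committee subnet subscriptions to every viable beacon node
## serving the `SyncCommitteeData` role and returns the number of nodes
## that accepted the request.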
logScope: request = "prepareSyncCommitteeSubnet"
let resp = vc.onceToAll(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.SyncCommitteeData},
prepareSyncCommitteeSubnets(it, data))
if len(resp.data) == 0:
# We did not get any response from beacon nodes.
case resp.status
of ApiOperation.Success:
# This should not happen; at least one successful response should be
# present.
return 0
of ApiOperation.Timeout:
debug "Unable to prepare sync committee subnets in time",
timeout = SlotDuration
return 0
of ApiOperation.Interrupt:
debug "Sync committee subnets preparation request was interrupted"
return 0
of ApiOperation.Failure:
debug "Unexpected error happened while preparing sync committee subnets"
return 0
else:
var count = 0
for apiResponse in resp.data:
if apiResponse.data.isErr():
debug "Unable to prepare sync committee subnets",
endpoint = apiResponse.node, error = apiResponse.data.error
else:
let response = apiResponse.data.get()
if response.status == 200:
inc(count)
else:
debug "Sync committee subnets preparation failed",
status = response.status, endpoint = apiResponse.node,
message = response.getErrorMessage()
count
proc prepareBeaconProposer*(
vc: ValidatorClientRef,
data: seq[PrepareBeaconProposer]
): Future[int] {.async: (raises: [CancelledError, ValidatorApiError]).} =
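## Sends proposer preparation data to every viable beacon node serving the
## `BlockProposalPublish` role and returns the number of nodes that
## accepted the request.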
logScope: request = "prepareBeaconProposer"
let resp = vc.onceToAll(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalPublish},
prepareBeaconProposer(it, data))
if len(resp.data) == 0:
# We did not get any response from beacon nodes.
case resp.status
of ApiOperation.Success:
# This should not happen; at least one successful response should be
# present.
return 0
of ApiOperation.Timeout:
debug "Unable to perform beacon proposer preparation request in time",
timeout = SlotDuration
return 0
of ApiOperation.Interrupt:
debug "Beacon proposer's preparation request was interrupted"
return 0
of ApiOperation.Failure:
debug "Unexpected error happened while preparing beacon proposers"
return 0
else:
var count = 0
for apiResponse in resp.data:
if apiResponse.data.isErr():
debug "Unable to perform beacon proposer preparation request",
endpoint = apiResponse.node, error = apiResponse.data.error
else:
let response = apiResponse.data.get()
if response.status == 200:
inc(count)
else:
debug "Beacon proposer preparation failed", status = response.status,
endpoint = apiResponse.node, reason = response.getErrorMessage()
count
proc registerValidator*(
vc: ValidatorClientRef,
data: seq[SignedValidatorRegistrationV1]
): Future[int] {.async: (raises: [CancelledError, ValidatorApiError]).} =
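## Sends signed validator registrations to every viable beacon node serving
## the `BlockProposalPublish` role and returns the number of nodes that
## accepted the request.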
logScope: request = "registerValidators"
let resp = vc.onceToAll(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.BlockProposalPublish},
registerValidator(it, data))
if len(resp.data) == 0:
# We did not get any response from beacon nodes.
case resp.status
of ApiOperation.Success:
# This should not happen; at least one successful response should be
# present.
return 0
of ApiOperation.Timeout:
debug "Unable to register validators in time",
timeout = SlotDuration
return 0
of ApiOperation.Interrupt:
debug "Validator registration was interrupted"
return 0
of ApiOperation.Failure:
debug "Unexpected error happened while registering validators"
return 0
else:
var count = 0
for apiResponse in resp.data:
if apiResponse.data.isErr():
debug "Unable to register validator with beacon node",
endpoint = apiResponse.node, error = apiResponse.data.error
else:
let response = apiResponse.data.get()
if response.status == 200:
inc(count)
else:
debug "Unable to register validators with beacon node",
status = response.status, endpoint = apiResponse.node,
reason = response.getErrorMessage()
return count
proc getValidatorsLiveness*(
vc: ValidatorClientRef,
epoch: Epoch,
validators: seq[ValidatorIndex]
): Future[GetValidatorsLivenessResponse] {.async: (raises: [CancelledError]).} =
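## Queries every viable beacon node serving the `Duties` role for the
## liveness of `validators` in `epoch` and merges the responses; returns an
## empty response when the collected data is incomplete.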
const
RequestName = "getLiveness"
let resp = vc.onceToAll(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
getValidatorsLiveness(it, epoch, validators))
case resp.status
of ApiOperation.Timeout:
debug "Unable to perform validator's liveness request in time",
timeout = SlotDuration
return GetValidatorsLivenessResponse()
of ApiOperation.Interrupt:
debug "Validator's liveness request was interrupted"
return GetValidatorsLivenessResponse()
of ApiOperation.Failure:
debug "Unexpected error happened while receiving validator's liveness"
return GetValidatorsLivenessResponse()
of ApiOperation.Success:
let defaultLiveness = RestLivenessItem(index: ValidatorIndex(high(uint32)))
var activities: Table[ValidatorIndex, RestLivenessItem]
for apiResponse in resp.data:
if apiResponse.data.isErr():
debug "Unable to retrieve validators liveness data",
endpoint = apiResponse.node, error = apiResponse.data.error
else:
let response = apiResponse.data.get()
case response.status
of 200:
let res = decodeBytes(GetValidatorsLivenessResponse,
response.data, response.contentType)
if res.isOk():
let list = res.get().data
if len(list) != len(validators):
debug "Received incomplete validators liveness response",
endpoint = apiResponse.node,
validators_count = len(validators),
activities_count = len(list)
continue
else:
var updated = 0
for item in list:
activities.withValue(item.index, stored):
if item.is_live:
stored[].is_live = true
inc(updated)
do:
activities[item.index] = item
inc(updated)
debug "Received validators liveness response",
endpoint = apiResponse.node,
validators_count = len(validators),
activities_count = len(list),
updated_count = updated
else:
discard ApiNodeFailure.init(
ApiFailure.UnexpectedResponse, RequestName,
apiResponse.node, response.status, $res.error)
# We do not update beacon node's status anymore because of
# issue #5377.
continue
of 400:
discard ApiNodeFailure.init(
ApiFailure.Invalid, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
# We do not update beacon node's status anymore because of
# issue #5377.
continue
of 500:
discard ApiNodeFailure.init(
ApiFailure.Internal, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
# We do not update beacon node's status anymore because of
# issue #5377.
continue
of 503:
discard ApiNodeFailure.init(
ApiFailure.NotSynced, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
# We do not update beacon node's status anymore because of
# issue #5377.
continue
else:
discard ApiNodeFailure.init(
ApiFailure.UnexpectedCode, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
# We do not update beacon node's status anymore because of
# issue #5377.
continue
var response =
block:
var res: seq[RestLivenessItem]
for vindex in validators:
let item = activities.getOrDefault(vindex, defaultLiveness)
if item == defaultLiveness:
debug "Validator is missing in response",
validator_index = vindex
return GetValidatorsLivenessResponse()
else:
res.add(item)
res
return GetValidatorsLivenessResponse(data: response)
proc getFinalizedBlockHeader*(
vc: ValidatorClientRef,
): Future[Opt[GetBlockHeaderResponse]] {.async: (raises: [CancelledError]).} =
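## Queries every viable beacon node serving the `Duties` role for its
## finalized block header and returns the one with the lowest epoch, or
## `Opt.none` if no node responded successfully.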
const RequestName = "getFinalizedBlockHeader"
let
blockIdent = BlockIdent.init(BlockIdentType.Finalized)
resp = vc.onceToAll(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
getBlockHeaderPlain(it, blockIdent))
case resp.status
of ApiOperation.Timeout:
debug "Unable to obtain finalized block header in time",
timeout = SlotDuration
return Opt.none(GetBlockHeaderResponse)
of ApiOperation.Interrupt:
debug "Finalized block header request was interrupted"
return Opt.none(GetBlockHeaderResponse)
of ApiOperation.Failure:
debug "Unexpected error happened while trying to get finalized block header"
return Opt.none(GetBlockHeaderResponse)
of ApiOperation.Success:
var oldestBlockHeader: GetBlockHeaderResponse
var oldestEpoch: Opt[Epoch]
for apiResponse in resp.data:
if apiResponse.data.isErr():
debug "Unable to get finalized block header",
endpoint = apiResponse.node, error = apiResponse.data.error
else:
let response = apiResponse.data.get()
case response.status
of 200:
let res = decodeBytes(GetBlockHeaderResponse,
response.data, response.contentType)
if res.isOk():
let
rdata = res.get()
epoch = rdata.data.header.message.slot.epoch()
if oldestEpoch.get(FAR_FUTURE_EPOCH) > epoch:
oldestEpoch = Opt.some(epoch)
oldestBlockHeader = rdata
else:
let failure = ApiNodeFailure.init(
ApiFailure.UnexpectedResponse, RequestName,
apiResponse.node, response.status, $res.error)
# We do not update beacon node's status anymore because of
# issue #5377.
debug ResponseDecodeError, reason = getFailureReason(failure)
continue
of 400:
let failure = ApiNodeFailure.init(
ApiFailure.Invalid, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
# We do not update beacon node's status anymore because of
# issue #5377.
debug ResponseInvalidError, reason = getFailureReason(failure)
continue
of 404:
let failure = ApiNodeFailure.init(
ApiFailure.NotFound, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
# We do not update beacon node's status anymore because of
# issue #5377.
debug ResponseNotFoundError, reason = getFailureReason(failure)
continue
of 500:
let failure = ApiNodeFailure.init(
ApiFailure.Internal, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
# We do not update beacon node's status anymore because of
# issue #5377.
debug ResponseInternalError, reason = getFailureReason(failure)
continue
else:
let failure = ApiNodeFailure.init(
ApiFailure.UnexpectedCode, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
# We do not update beacon node's status anymore because of
# issue #5377.
debug ResponseUnexpectedError, reason = getFailureReason(failure)
continue
if oldestEpoch.isSome():
return Opt.some(oldestBlockHeader)
else:
return Opt.none(GetBlockHeaderResponse)
proc submitBeaconCommitteeSelections*(
vc: ValidatorClientRef,
data: seq[RestBeaconCommitteeSelection],
strategy: ApiStrategyKind
): Future[SubmitBeaconCommitteeSelectionsResponse] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
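## Submits beacon committee selection proofs to beacon nodes serving the
## `Duties` role, following the given fallback `strategy`.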
const
RequestName = "submitBeaconCommitteeSelections"
var failures: seq[ApiNodeFailure]
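  # Per-node failures are accumulated here and attached to the
  # `ValidatorApiError` raised when no beacon node returns a usable response.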
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(
RestPlainResponse,
SubmitBeaconCommitteeSelectionsResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
submitBeaconCommitteeSelectionsPlain(it, data)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err(
apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(SubmitBeaconCommitteeSelectionsResponse,
response.data, response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err($res.error)
else:
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].ok(res.get())
of 400:
handle400()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err(
ResponseInvalidError)
of 500:
handle500()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err(
ResponseInternalError)
of 501:
handle501()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err(
ResponseNotImplementedError)
of 503:
handle503()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err(
ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
submitBeaconCommitteeSelectionsPlain(it, data)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(SubmitBeaconCommitteeSelectionsResponse,
response.data, response.contentType)
if res.isOk(): return res.get()
handleUnexpectedData()
false
of 400:
handle400()
false
of 500:
handle500()
false
of 501:
handle501()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to submit beacon committee selections", data: failures)
proc submitSyncCommitteeSelections*(
vc: ValidatorClientRef,
data: seq[RestSyncCommitteeSelection],
strategy: ApiStrategyKind
): Future[SubmitSyncCommitteeSelectionsResponse] {.
async: (raises: [CancelledError, ValidatorApiError]).} =
const
RequestName = "submitBeaconCommitteeSelections"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(
RestPlainResponse,
SubmitSyncCommitteeSelectionsResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
submitSyncCommitteeSelectionsPlain(it, data)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err(
apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(SubmitSyncCommitteeSelectionsResponse,
response.data, response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err($res.error)
else:
ApiResponse[SubmitSyncCommitteeSelectionsResponse].ok(res.get())
of 400:
handle400()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err(
ResponseInvalidError)
of 500:
handle500()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err(
ResponseInternalError)
of 501:
handle501()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err(
ResponseNotImplementedError)
of 503:
handle503()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err(
ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
submitSyncCommitteeSelectionsPlain(it, data)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(SubmitSyncCommitteeSelectionsResponse,
response.data, response.contentType)
if res.isOk(): return res.get()
handleUnexpectedData()
false
of 400:
handle400()
false
of 500:
handle500()
false
of 501:
handle501()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to submit sync committee selections", data: failures)