VC: new scoring functions. (#5447)
* Initial commit.
* Fix issues and tests.
* Fix test compilation issue.
* Update AllTests.
* Rename the poorest score from `<lowest>` to `<bad>`. Split the sync committee message score into ranges so that lexicographic scores do not intersect with normal ones; lexicographic scores should sit below normal scores.
* Address review comments. Fix aggregated attestation scoring to use MAX_VALIDATORS_PER_COMMITTEE. Fix sync committee contribution scoring to use SYNC_SUBCOMMITTEE_SIZE. Add getUniqueVotes test vectors.
* Post-rebase fixes.
* Address review comments.
* Return to score calculation based on the actual bit length.
* AllTests modification.
parent 5f4bbd0a23
commit 9889b840ce
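The commit layers the new scores into non-overlapping bands, so that a slot-distance ("normal") score always beats a lexicographic fallback score. A minimal sketch of that layering, assuming the bands documented in the `scoring.nim` comments further down (the helper name is illustrative, not part of the commit):

```nim
# Score bands used by the new scoring functions (from the code comments):
#   -Inf       -> "<bad>"      (e.g. node is only optimistically synced)
#   (0.0, 1.0] -> lexicographic fallback (block monitoring disabled/missed)
#   (1.0, 2.0] -> slot-distance score (block monitoring enabled)
#   Inf        -> "<perfect>"
func describeScore(score: float64): string =
  if score == Inf: "perfect"
  elif score == -Inf: "bad"
  elif score > 1.0: "slot distance"
  elif score > 0.0: "lexicographic fallback"
  else: "unscored"
```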
@@ -599,11 +599,15 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
+ /eth/v1/validator/sync_committee_selections serialization/deserialization test OK
+ bestSuccess() API timeout test OK
+ firstSuccessParallel() API timeout test OK
+ getAggregatedAttestationDataScore() test vectors OK
+ getAttestationDataScore() test vectors OK
+ getLiveness() response deserialization test OK
+ getSyncCommitteeContributionDataScore() test vectors OK
+ getSyncCommitteeMessageDataScore() test vectors OK
+ getUniqueVotes() test vectors OK
+ normalizeUri() test vectors OK
```
OK: 7/7 Fail: 0/7 Skip: 0/7
OK: 11/11 Fail: 0/11 Skip: 0/11

## Validator change pool testing suite
```diff
+ addValidatorChangeMessage/getAttesterSlashingMessage OK
@@ -716,4 +720,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 9/9 Fail: 0/9 Skip: 0/9

---TOTAL---
OK: 405/410 Fail: 0/410 Skip: 5/410
OK: 409/414 Fail: 0/414 Skip: 5/414
@@ -940,3 +940,27 @@ func toValidatorIndex*(value: RestValidatorIndex): Result[ValidatorIndex,
      err(ValidatorIndexError.TooHighValue)
  else:
    doAssert(false, "ValidatorIndex type size is incorrect")

template withBlck*(x: ProduceBlockResponseV2,
                   body: untyped): untyped =
  case x.kind
  of ConsensusFork.Phase0:
    const consensusFork {.inject, used.} = ConsensusFork.Phase0
    template blck: untyped {.inject.} = x.phase0Data
    body
  of ConsensusFork.Altair:
    const consensusFork {.inject, used.} = ConsensusFork.Altair
    template blck: untyped {.inject.} = x.altairData
    body
  of ConsensusFork.Bellatrix:
    const consensusFork {.inject, used.} = ConsensusFork.Bellatrix
    template blck: untyped {.inject.} = x.bellatrixData
    body
  of ConsensusFork.Capella:
    const consensusFork {.inject, used.} = ConsensusFork.Capella
    template blck: untyped {.inject.} = x.capellaData
    body
  of ConsensusFork.Deneb:
    const consensusFork {.inject, used.} = ConsensusFork.Deneb
    template blck: untyped {.inject.} = x.denebData.blck
    body
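A minimal usage sketch for the new `withBlck` overload on `ProduceBlockResponseV2`; the wrapper proc and the `echo` output are illustrative, not part of the commit:

```nim
# `withBlck` injects `consensusFork` and `blck` for whichever fork the
# response carries, so fork-specific fields can be read without writing
# a manual `case` over `response.kind` at every call site.
proc logProducedBlock(response: ProduceBlockResponseV2) =
  withBlck(response):
    echo "produced ", consensusFork, " block at slot ", blck.slot
```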
@@ -1140,7 +1140,7 @@ proc getHeadBlockRoot*(
  let blockIdent = BlockIdent.init(BlockIdentType.Head)

  case strategy
  of ApiStrategyKind.First, ApiStrategyKind.Best:
  of ApiStrategyKind.First:
    let res = vc.firstSuccessParallel(RestPlainResponse,
                                      GetBlockRootResponse,
                                      SlotDuration,
@@ -1184,6 +1184,52 @@ proc getHeadBlockRoot*(
      raise (ref ValidatorApiError)(msg: res.error, data: failures)
    return res.get()

  of ApiStrategyKind.Best:
    let res = vc.bestSuccess(
      RestPlainResponse,
      GetBlockRootResponse,
      SlotDuration,
      ViableNodeStatus,
      {BeaconNodeRole.SyncCommitteeData},
      getBlockRootPlain(it, blockIdent),
      getSyncCommitteeMessageDataScore(vc, itresponse)):
      if apiResponse.isErr():
        handleCommunicationError()
        ApiResponse[GetBlockRootResponse].err(apiResponse.error)
      else:
        let response = apiResponse.get()
        case response.status
        of 200:
          let res = decodeBytes(GetBlockRootResponse, response.data,
                                response.contentType)
          if res.isErr():
            handleUnexpectedData()
            ApiResponse[GetBlockRootResponse].err($res.error)
          else:
            let data = res.get()
            if data.execution_optimistic.get(false):
              handleOptimistic()
              failures.add(failure)
              ApiResponse[GetBlockRootResponse].err(ResponseECNotInSyncError)
            else:
              ApiResponse[GetBlockRootResponse].ok(data)
        of 400:
          handle400()
          ApiResponse[GetBlockRootResponse].err(ResponseInvalidError)
        of 404:
          handle404()
          ApiResponse[GetBlockRootResponse].err(ResponseNotFoundError)
        of 500:
          handle500()
          ApiResponse[GetBlockRootResponse].err(ResponseInternalError)
        else:
          handleUnexpectedCode()
          ApiResponse[GetBlockRootResponse].err(ResponseUnexpectedError)

    if res.isErr():
      raise (ref ValidatorApiError)(msg: res.error, data: failures)
    return res.get()

  of ApiStrategyKind.Priority:
    vc.firstSuccessSequential(RestPlainResponse, #RestResponse[GetBlockRootResponse],
                              SlotDuration,
@@ -1603,7 +1649,7 @@ proc getAggregatedAttestation*(
  var failures: seq[ApiNodeFailure]

  case strategy
  of ApiStrategyKind.First, ApiStrategyKind.Best:
  of ApiStrategyKind.First:
    let res = vc.firstSuccessParallel(
      RestPlainResponse,
      GetAggregatedAttestationResponse,
@@ -1642,6 +1688,46 @@ proc getAggregatedAttestation*(
      raise (ref ValidatorApiError)(msg: res.error, data: failures)
    return res.get().data

  of ApiStrategyKind.Best:
    let res = vc.bestSuccess(
      RestPlainResponse,
      GetAggregatedAttestationResponse,
      OneThirdDuration,
      ViableNodeStatus,
      {BeaconNodeRole.AggregatedData},
      getAggregatedAttestationPlain(it, root, slot),
      getAggregatedAttestationDataScore(itresponse)):
      if apiResponse.isErr():
        handleCommunicationError()
        ApiResponse[GetAggregatedAttestationResponse].err(apiResponse.error)
      else:
        let response = apiResponse.get()
        case response.status:
        of 200:
          let res = decodeBytes(GetAggregatedAttestationResponse, response.data,
                                response.contentType)
          if res.isErr():
            handleUnexpectedData()
            ApiResponse[GetAggregatedAttestationResponse].err($res.error)
          else:
            ApiResponse[GetAggregatedAttestationResponse].ok(res.get())
        of 400:
          handle400()
          ApiResponse[GetAggregatedAttestationResponse].err(
            ResponseInvalidError)
        of 500:
          handle500()
          ApiResponse[GetAggregatedAttestationResponse].err(
            ResponseInternalError)
        else:
          handleUnexpectedCode()
          ApiResponse[GetAggregatedAttestationResponse].err(
            ResponseUnexpectedError)

    if res.isErr():
      raise (ref ValidatorApiError)(msg: res.error, data: failures)
    return res.get().data

  of ApiStrategyKind.Priority:
    vc.firstSuccessSequential(
      RestPlainResponse,
@@ -1687,7 +1773,7 @@ proc produceSyncCommitteeContribution*(
  var failures: seq[ApiNodeFailure]

  case strategy
  of ApiStrategyKind.First, ApiStrategyKind.Best:
  of ApiStrategyKind.First:
    let res = vc.firstSuccessParallel(
      RestPlainResponse,
      ProduceSyncCommitteeContributionResponse,
@@ -1728,6 +1814,48 @@ proc produceSyncCommitteeContribution*(
      raise (ref ValidatorApiError)(msg: res.error, data: failures)
    return res.get().data

  of ApiStrategyKind.Best:
    let res = vc.bestSuccess(
      RestPlainResponse,
      ProduceSyncCommitteeContributionResponse,
      OneThirdDuration,
      ViableNodeStatus,
      {BeaconNodeRole.SyncCommitteeData},
      produceSyncCommitteeContributionPlain(it, slot, subcommitteeIndex, root),
      getSyncCommitteeContributionDataScore(itresponse)):
      if apiResponse.isErr():
        handleCommunicationError()
        ApiResponse[ProduceSyncCommitteeContributionResponse].err(
          apiResponse.error)
      else:
        let response = apiResponse.get()
        case response.status:
        of 200:
          let res = decodeBytes(ProduceSyncCommitteeContributionResponse,
                                response.data, response.contentType)
          if res.isErr():
            handleUnexpectedData()
            ApiResponse[ProduceSyncCommitteeContributionResponse].err(
              $res.error)
          else:
            ApiResponse[ProduceSyncCommitteeContributionResponse].ok(res.get())
        of 400:
          handle400()
          ApiResponse[ProduceSyncCommitteeContributionResponse].err(
            ResponseInvalidError)
        of 500:
          handle500()
          ApiResponse[ProduceSyncCommitteeContributionResponse].err(
            ResponseInternalError)
        else:
          handleUnexpectedCode()
          ApiResponse[ProduceSyncCommitteeContributionResponse].err(
            ResponseUnexpectedError)

    if res.isErr():
      raise (ref ValidatorApiError)(msg: res.error, data: failures)
    return res.get().data

  of ApiStrategyKind.Priority:
    vc.firstSuccessSequential(
      RestPlainResponse,
@@ -6,15 +6,38 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import std/strutils
import ssz_serialization/[types, bitseqs]
import stew/endians2
import nimcrypto/hash
import "."/common

{.push raises: [].}

type
  CommitteeBitsArray = BitArray[int(MAX_VALIDATORS_PER_COMMITTEE)]
  CommitteeTable = Table[CommitteeIndex, CommitteeBitsArray]

const
  DefaultCommitteeTable = default(CommitteeTable)
  DefaultCommitteeBitsArray = default(CommitteeBitsArray)

func perfectScore*(score: float64): bool =
  score == Inf

proc shortScore*(score: float64): string =
  if score == Inf: "<perfect>" else: formatFloat(score, ffDecimal, 4)
  if score == Inf:
    "<perfect>"
  elif score == -Inf:
    "<bad>"
  else:
    formatFloat(score, ffDecimal, 4)

func getLexicographicScore(digest: Eth2Digest): float64 =
  # We calculate score on first 8 bytes of digest.
  let
    dvalue = uint64.fromBytesBE(digest.data.toOpenArray(0, sizeof(uint64) - 1))
    value = float64(dvalue) / float64(high(uint64))
  value
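A worked example for `getLexicographicScore`, assuming the sample digest below (illustrative only):

```nim
# A digest whose first eight bytes are 0x80 00 00 00 00 00 00 00 gives
#   dvalue = 0x8000000000000000
#   score  = dvalue / high(uint64) ≈ 0.5
# Lexicographic scores therefore never exceed 1.0, keeping them strictly
# below the slot-distance scores in (1, 2].
```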
proc getAttestationDataScore*(rootsSeen: Table[Eth2Digest, Slot],
                              adata: ProduceAttestationDataResponse): float64 =
@@ -47,3 +70,116 @@ proc getAttestationDataScore*(rootsSeen: Table[Eth2Digest, Slot],
proc getAttestationDataScore*(vc: ValidatorClientRef,
                              adata: ProduceAttestationDataResponse): float64 =
  getAttestationDataScore(vc.rootsSeen, adata)

proc getAggregatedAttestationDataScore*(
       adata: GetAggregatedAttestationResponse
     ): float64 =
  # This procedure returns score value in range [0.0000, 1.0000) and `Inf`.
  # It returns perfect score when all the bits was set to `1`, but this could
  # provide wrong expectation for some edge cases (when different attestations
  # has different committee sizes), but currently this is the only viable way
  # to return perfect score.
  const MaxLength = int(MAX_VALIDATORS_PER_COMMITTEE)
  doAssert(len(adata.data.aggregation_bits) <= MaxLength)
  let
    size = len(adata.data.aggregation_bits)
    ones = countOnes(adata.data.aggregation_bits)
    res =
      if ones == size:
        # We consider score perfect, when all bits was set to 1.
        Inf
      else:
        float64(ones) / float64(size)

  debug "Aggregated attestation score", attestation_data = shortLog(adata.data),
        block_slot = adata.data.data.slot, committee_size = size,
        ones_count = ones, score = shortScore(res)
  res

proc getSyncCommitteeContributionDataScore*(
       cdata: ProduceSyncCommitteeContributionResponse
     ): float64 =
  # This procedure returns score value in range [0.0000, 1.0000) and `Inf`.
  # It returns perfect score when all the bits was set to `1`, but this could
  # provide wrong expectation for some edge cases (when different contributions
  # has different committee sizes), but currently this is the only viable way
  # to return perfect score.
  const MaxLength = int(SYNC_SUBCOMMITTEE_SIZE)
  doAssert(len(cdata.data.aggregation_bits) <= MaxLength)
  let
    size = len(cdata.data.aggregation_bits)
    ones = countOnes(cdata.data.aggregation_bits)
    res =
      if ones == size:
        # We consider score perfect, when all bits was set to 1.
        Inf
      else:
        float64(ones) / float64(size)

  debug "Sync committee contribution score",
        contribution_data = shortLog(cdata.data), block_slot = cdata.data.slot,
        committee_size = size, ones_count = ones, score = shortScore(res)
  res

proc getSyncCommitteeMessageDataScore*(
       rootsSeen: Table[Eth2Digest, Slot],
       currentSlot: Slot,
       cdata: GetBlockRootResponse
     ): float64 =
  let
    slot = rootsSeen.getOrDefault(cdata.data.root, FAR_FUTURE_SLOT)
    res =
      if cdata.execution_optimistic.get(true):
        # Responses from the nodes which are optimistically synced only are
        # not suitable, score it with minimal possible score.
        -Inf
      else:
        if slot != FAR_FUTURE_SLOT:
          # When `slot` has been found score value will be in range of
          # `(1, 2]` or `Inf`.
          if slot == currentSlot:
            # Perfect score
            Inf
          else:
            float64(1) +
              float64(1) / (float64(1) + float64(currentSlot) - float64(slot))
        else:
          # Block monitoring is disabled or we missed a block, in this case
          # score value will be in range of `(0, 1]`
          getLexicographicScore(cdata.data.root)

  debug "Sync committee message score",
        head_block_root = shortLog(cdata.data.root), slot = slot,
        current_slot = currentSlot, score = shortScore(res)
  res
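A worked check of the slot-distance branch above, using slot pairs from the `SyncMessageDataVectors` test vectors further down:

```nim
# With block monitoring enabled and the head root seen `d` slots ago, the
# score is 1 + 1 / (1 + d):
#   d = 1  (6002797 seen, current 6002798) -> 1 + 1/2 = 1.5000
#   d = 2  (6002809 seen, current 6002811) -> 1 + 1/3 ≈ 1.3333
#   d = 10 (6002826 seen, current 6002836) -> 1 + 1/11 ≈ 1.0909
# so a stale-but-seen root always lands in (1, 2], above any lexicographic
# fallback score and below the perfect score Inf.
```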
proc getSyncCommitteeMessageDataScore*(
       vc: ValidatorClientRef,
       cdata: GetBlockRootResponse
     ): float64 =
  getSyncCommitteeMessageDataScore(
    vc.rootsSeen, vc.beaconClock.now().slotOrZero(), cdata)

proc processVotes(bits: var CommitteeBitsArray,
                  attestation: Attestation): int =
  doAssert(len(attestation.aggregation_bits) <= len(bits))
  var res = 0
  for index in 0 ..< len(attestation.aggregation_bits):
    if attestation.aggregation_bits[index]:
      if not(bits[index]):
        inc(res)
        bits[index] = true
  res

proc getUniqueVotes*(attestations: openArray[Attestation]): int =
  var
    res = 0
    attested: Table[Slot, CommitteeTable]
  for attestation in attestations:
    let count =
      attested.mgetOrPut(attestation.data.slot, DefaultCommitteeTable).
        mgetOrPut(CommitteeIndex(attestation.data.index),
                  DefaultCommitteeBitsArray).
        processVotes(attestation)
    res += count
  res
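A small usage sketch for `getUniqueVotes`, mirroring the first `AttestationBitsVectors` entry in the test suite below (the string-based `Attestation.init` helper is defined there, not in production code):

```nim
# Two identical attestations for the same slot and committee contribute the
# same eight set bits, so only 8 unique votes are counted; a different slot
# or committee index would make the bit sets independent again (16 votes).
let
  a1 = Attestation.init("0xff01", Slot(0), 0'u64)
  a2 = Attestation.init("0xff01", Slot(0), 0'u64)
doAssert getUniqueVotes([a1, a2]) == 8
```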
@@ -350,7 +350,7 @@ proc publishSyncMessagesAndContributions(service: SyncCommitteeServiceRef,
  let beaconBlockRoot =
    block:
      try:
        let res = await vc.getHeadBlockRoot(ApiStrategyKind.First)
        let res = await vc.getHeadBlockRoot(ApiStrategyKind.Best)
        if res.execution_optimistic.isNone():
          ## The `execution_optimistic` is missing from the response, we assume
          ## that the BN is unaware optimistic sync, so we consider the BN
@@ -96,7 +96,7 @@ func init(T: type ForkedBeaconBlock, contents: ProduceBlockResponseV2): T =
  of ConsensusFork.Capella:
    return ForkedBeaconBlock.init(contents.capellaData)
  of ConsensusFork.Deneb:
    return ForkedBeaconBlock.init(contents.denebData.block)
    return ForkedBeaconBlock.init(contents.denebData.`block`)

proc getBlock(fork: ConsensusFork,
              feeRecipient = SigningExpectedFeeRecipient): ForkedBeaconBlock =
@@ -11,7 +11,8 @@
import std/strutils
import httputils
import chronos/unittest2/asynctests
import ../beacon_chain/validator_client/[api, common, scoring, fallback_service]
import ../beacon_chain/spec/eth2_apis/eth2_rest_serialization,
       ../beacon_chain/validator_client/[api, common, scoring, fallback_service]

const
  HostNames = [
@@ -309,6 +310,12 @@ type
    target: uint64
  ]

  AttestationBitsObject = object
    data: CommitteeValidatorsBits

  SyncCommitteeBitsObject = object
    data: SyncCommitteeAggregationBits

const
  AttestationDataVectors = [
    # Attestation score with block monitoring enabled (perfect).
@@ -368,6 +375,71 @@ const
     ("00000000", 0'u64), "375197.0000"),
  ]

  AggregatedDataVectors = [
("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01", "<perfect>"),
("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", "0.2500"),
("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", "0.5000"),
("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", "0.7500"),
("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01", "0.9995"),
("0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101", "0.0005"),
  ]
  ContributionDataVectors = [
    ("0xffffffffffffffffffffffffffff7f7f", "0.9844"),
    ("0xffffffffffffffffffffffff7f7f7f7f", "0.9688"),
    ("0xffffffffffffffffffff7f7f7f7f7f7f", "0.9531"),
    ("0xffffffffffffffff7f7f7f7f7f7f7f7f", "0.9375"),
    ("0xffffffffffff7f7f7f7f7f7f7f7f7f7f", "0.9219"),
    ("0xffffffff7f7f7f7f7f7f7f7f7f7f7f7f", "0.9062"),
    ("0xffff7f7f7f7f7f7f7f7f7f7f7f7f7f7f", "0.8906"),
    ("0x7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f", "0.8750"),
    ("0xffffffffffffffffffffffffffffffff", "<perfect>")
  ]

  SyncMessageDataVectors = [
    # Sync committee messages score with block monitoring enabled (perfect)
    (6002798'u64, "22242212", "22242212", 6002798'u64, Opt.some(false),
     "<perfect>"),
    (6002811'u64, "26ec78d6", "26ec78d6", 6002811'u64, Opt.some(false),
     "<perfect>"),
    (6002836'u64, "42354ded", "42354ded", 6002836'u64, Opt.some(false),
     "<perfect>"),
    (6002859'u64, "97d8ac69", "97d8ac69", 6002859'u64, Opt.some(false),
     "<perfect>"),
    # Sync committee messages score when beacon node is optimistically synced
    (6002798'u64, "22242212", "22242212", 6002798'u64, Opt.some(true),
     "<bad>"),
    (6002811'u64, "26ec78d6", "26ec78d6", 6002811'u64, Opt.some(true),
     "<bad>"),
    (6002836'u64, "42354ded", "42354ded", 6002836'u64, Opt.some(true),
     "<bad>"),
    (6002859'u64, "97d8ac69", "97d8ac69", 6002859'u64, Opt.some(true),
     "<bad>"),
    # Sync committee messages score with block monitoring enabled (not perfect)
    (6002797'u64, "22242212", "22242212", 6002798'u64, Opt.some(false),
     "1.5000"),
    (6002809'u64, "26ec78d6", "26ec78d6", 6002811'u64, Opt.some(false),
     "1.3333"),
    (6002826'u64, "42354ded", "42354ded", 6002836'u64, Opt.some(false),
     "1.0909"),
    (6002819'u64, "97d8ac69", "97d8ac69", 6002859'u64, Opt.some(false),
     "1.0244"),
    # Sync committee messages score with block monitoring disabled
    (6002797'u64, "00000000", "22242212", 6002798'u64, Opt.some(false),
     "0.1334"),
    (6002809'u64, "00000000", "26ec78d6", 6002811'u64, Opt.some(false),
     "0.1520"),
    (6002826'u64, "00000000", "42354ded", 6002836'u64, Opt.some(false),
     "0.2586"),
    (6002819'u64, "00000000", "97d8ac69", 6002859'u64, Opt.some(false),
     "0.5931"),
  ]

  AttestationBitsVectors = [
    ([("0xff01", Slot(0), 0'u64), ("0xff01", Slot(0), 0'u64)], 8),
    ([("0xff01", Slot(0), 0'u64), ("0xff01", Slot(1), 0'u64)], 16),
    ([("0xff01", Slot(0), 0'u64), ("0xff01", Slot(0), 1'u64)], 16)
  ]

proc init(t: typedesc[Eth2Digest], data: string): Eth2Digest =
  let length = len(data)
  var dst = Eth2Digest()
@@ -378,8 +450,8 @@ proc init(t: typedesc[Eth2Digest], data: string): Eth2Digest =
      discard
  dst

proc init*(t: typedesc[ProduceAttestationDataResponse],
           ad: AttestationDataTuple): ProduceAttestationDataResponse =
proc init(t: typedesc[ProduceAttestationDataResponse],
          ad: AttestationDataTuple): ProduceAttestationDataResponse =
  ProduceAttestationDataResponse(data: AttestationData(
    slot: Slot(ad.slot), index: ad.index,
    beacon_block_root: Eth2Digest.init(ad.beacon_block_root),
@@ -387,6 +459,44 @@ proc init*(t: typedesc[ProduceAttestationDataResponse],
    target: Checkpoint(epoch: Epoch(ad.target))
  ))

proc init(t: typedesc[Attestation], bits: string,
          slot: Slot = GENESIS_SLOT, index: uint64 = 0'u64): Attestation =
  let
    jdata = "{\"data\":\"" & bits & "\"}"
    bits =
      try:
        RestJson.decode(jdata, AttestationBitsObject)
      except SerializationError as exc:
        raiseAssert "Serialization error from [" & $exc.name & "]: " & $exc.msg
  Attestation(aggregation_bits: bits.data,
              data: AttestationData(slot: slot, index: index))

proc init(t: typedesc[GetAggregatedAttestationResponse],
          bits: string): GetAggregatedAttestationResponse =
  GetAggregatedAttestationResponse(data: Attestation.init(bits))

proc init(t: typedesc[ProduceSyncCommitteeContributionResponse],
          bits: string): ProduceSyncCommitteeContributionResponse =
  let
    jdata = "{\"data\":\"" & bits & "\"}"
    bits =
      try:
        RestJson.decode(jdata, SyncCommitteeBitsObject)
      except SerializationError as exc:
        raiseAssert "Serialization error from [" & $exc.name & "]: " & $exc.msg
  ProduceSyncCommitteeContributionResponse(data: SyncCommitteeContribution(
    aggregation_bits: bits.data
  ))

proc init(t: typedesc[GetBlockRootResponse],
          optimistic: Opt[bool], root: Eth2Digest): GetBlockRootResponse =
  let optopt =
    if optimistic.isNone():
      none[bool]()
    else:
      some(optimistic.get())
  GetBlockRootResponse(data: RestRoot(root: root), execution_optimistic: optopt)

proc createRootsSeen(
    root: tuple[root: string, slot: uint64]): Table[Eth2Digest, Slot] =
  var res: Table[Eth2Digest, Slot]
@@ -623,6 +733,40 @@ suite "Validator Client test suite":
        score = shortScore(roots.getAttestationDataScore(adata))
      check score == vector[2]

  test "getAggregatedAttestationDataScore() test vectors":
    for vector in AggregatedDataVectors:
      let
        adata = GetAggregatedAttestationResponse.init(vector[0])
        score = shortScore(getAggregatedAttestationDataScore(adata))
      check score == vector[1]

  test "getSyncCommitteeContributionDataScore() test vectors":
    for vector in ContributionDataVectors:
      let
        adata = ProduceSyncCommitteeContributionResponse.init(vector[0])
        score = shortScore(getSyncCommitteeContributionDataScore(adata))
      check score == vector[1]

  test "getSyncCommitteeMessageDataScore() test vectors":
    for vector in SyncMessageDataVectors:
      let
        roots = createRootsSeen((vector[1], vector[0]))
        rdata = GetBlockRootResponse.init(vector[4], Eth2Digest.init(vector[2]))
        currentSlot = Slot(vector[3])
        score = shortScore(getSyncCommitteeMessageDataScore(roots, currentSlot,
                                                            rdata))
      check:
        score == vector[5]

  test "getUniqueVotes() test vectors":
    var data = CommitteeValidatorsBits.init(16)

    for vector in AttestationBitsVectors:
      let
        a1 = Attestation.init(vector[0][0][0], vector[0][0][1], vector[0][0][2])
        a2 = Attestation.init(vector[0][1][0], vector[0][1][1], vector[0][1][2])
      check getUniqueVotes([a1, a2]) == vector[1]

  asyncTest "firstSuccessParallel() API timeout test":
    let
      uri = parseUri("http://127.0.0.1/")
@@ -708,7 +852,6 @@ suite "Validator Client test suite":
        response.isErr()
        gotCancellation == true

  test "getLiveness() response deserialization test":
    proc generateLivenessResponse(T: typedesc[string],
                                  start, count, modv: int): string =