mirror of https://github.com/status-im/nimbus-eth2.git

commit 7fd8beb418 (parent: d669eef97b)

    rm unused code in {ncli,research,tests}/ (#5809)
@@ -64,48 +64,6 @@ func init*(T: type ValidatorDbAggregator, outputDir: string,
     participationEpochsCount: newSeqOfCap[uint](initialCapacity),
     inclusionDelaysCount: newSeqOfCap[uint](initialCapacity))
 
-var shouldShutDown = false
-
-proc determineStartAndEndEpochs(config: AggregatorConf):
-    tuple[startEpoch, endEpoch: Epoch] =
-  if config.startEpoch.isNone or config.endEpoch.isNone:
-    (result.startEpoch, result.endEpoch) = getUnaggregatedFilesEpochRange(
-      config.inputDir.string)
-  if config.startEpoch.isSome:
-    result.startEpoch = config.startEpoch.get.Epoch
-  if config.endEpoch.isSome:
-    result.endEpoch = config.endEpoch.get.Epoch
-  if result.startEpoch > result.endEpoch:
-    fatal "Start epoch cannot be bigger than the end epoch.",
-          startEpoch = result.startEpoch, endEpoch = result.endEpoch
-    quit QuitFailure
-
-proc checkIntegrity(startEpoch, endEpoch: Epoch, dir: string) =
-  for epoch in startEpoch .. endEpoch:
-    let filePath = getFilePathForEpoch(epoch, dir)
-    if not filePath.fileExists:
-      fatal "File for epoch does not exist.", epoch = epoch, filePath = filePath
-      quit QuitFailure
-
-func parseRow(csvRow: CsvRow): RewardsAndPenalties =
-  result = RewardsAndPenalties(
-    source_outcome: parseBiggestInt(csvRow[0]),
-    max_source_reward: parseBiggestUInt(csvRow[1]),
-    target_outcome: parseBiggestInt(csvRow[2]),
-    max_target_reward: parseBiggestUInt(csvRow[3]),
-    head_outcome: parseBiggestInt(csvRow[4]),
-    max_head_reward: parseBiggestUInt(csvRow[5]),
-    inclusion_delay_outcome: parseBiggestInt(csvRow[6]),
-    max_inclusion_delay_reward: parseBiggestUInt(csvRow[7]),
-    sync_committee_outcome: parseBiggestInt(csvRow[8]),
-    max_sync_committee_reward: parseBiggestUInt(csvRow[9]),
-    proposer_outcome: parseBiggestInt(csvRow[10]),
-    inactivity_penalty: parseBiggestUInt(csvRow[11]),
-    slashing_outcome: parseBiggestInt(csvRow[12]),
-    deposits: parseBiggestUInt(csvRow[13]))
-  if csvRow[14].len > 0:
-    result.inclusion_delay = some(parseBiggestUInt(csvRow[14]))
-
 func `+=`(lhs: var RewardsAndPenalties, rhs: RewardsAndPenalties) =
   lhs.source_outcome += rhs.source_outcome
   lhs.max_source_reward += rhs.max_source_reward
@@ -205,6 +163,48 @@ when isMainModule:
   when defined(posix):
     import system/ansi_c
 
+  var shouldShutDown = false
+
+  proc determineStartAndEndEpochs(config: AggregatorConf):
+      tuple[startEpoch, endEpoch: Epoch] =
+    if config.startEpoch.isNone or config.endEpoch.isNone:
+      (result.startEpoch, result.endEpoch) = getUnaggregatedFilesEpochRange(
+        config.inputDir.string)
+    if config.startEpoch.isSome:
+      result.startEpoch = config.startEpoch.get.Epoch
+    if config.endEpoch.isSome:
+      result.endEpoch = config.endEpoch.get.Epoch
+    if result.startEpoch > result.endEpoch:
+      fatal "Start epoch cannot be bigger than the end epoch.",
+            startEpoch = result.startEpoch, endEpoch = result.endEpoch
+      quit QuitFailure
+
+  proc checkIntegrity(startEpoch, endEpoch: Epoch, dir: string) =
+    for epoch in startEpoch .. endEpoch:
+      let filePath = getFilePathForEpoch(epoch, dir)
+      if not filePath.fileExists:
+        fatal "File for epoch does not exist.", epoch = epoch, filePath = filePath
+        quit QuitFailure
+
+  func parseRow(csvRow: CsvRow): RewardsAndPenalties =
+    result = RewardsAndPenalties(
+      source_outcome: parseBiggestInt(csvRow[0]),
+      max_source_reward: parseBiggestUInt(csvRow[1]),
+      target_outcome: parseBiggestInt(csvRow[2]),
+      max_target_reward: parseBiggestUInt(csvRow[3]),
+      head_outcome: parseBiggestInt(csvRow[4]),
+      max_head_reward: parseBiggestUInt(csvRow[5]),
+      inclusion_delay_outcome: parseBiggestInt(csvRow[6]),
+      max_inclusion_delay_reward: parseBiggestUInt(csvRow[7]),
+      sync_committee_outcome: parseBiggestInt(csvRow[8]),
+      max_sync_committee_reward: parseBiggestUInt(csvRow[9]),
+      proposer_outcome: parseBiggestInt(csvRow[10]),
+      inactivity_penalty: parseBiggestUInt(csvRow[11]),
+      slashing_outcome: parseBiggestInt(csvRow[12]),
+      deposits: parseBiggestUInt(csvRow[13]))
+    if csvRow[14].len > 0:
+      result.inclusion_delay = some(parseBiggestUInt(csvRow[14]))
+
   proc aggregateEpochs(startEpoch, endEpoch: Epoch, resolution: uint,
                        inputDir, outputDir: string) =
     if startEpoch > endEpoch:
@@ -49,9 +49,6 @@ type Timers = enum
   tSyncCommittees = "Produce sync committee actions"
   tReplay = "Replay all produced blocks"
 
-template seconds(x: uint64): timer.Duration =
-  timer.seconds(int(x))
-
 # TODO The rest of nimbus-eth2 uses only the forked version of these, and in
 # general it's better for the validator_duties caller to use the forkedstate
 # version, so isolate these here pending refactoring of block_sim to prefer,
@@ -132,11 +132,6 @@ proc block_for_next_slot(
   addTestBlock(
     forked, cache, attestations = attestations, cfg = cfg)
 
-let full_sync_committee_bits = block:
-  var res: BitArray[SYNC_COMMITTEE_SIZE]
-  res.bytes.fill(byte.high)
-  res
-
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#initialize_light_client_store
 func initialize_light_client_store(
     state: auto, storeDataFork: static LightClientDataFork): auto =
@@ -125,7 +125,7 @@ proc runTest(suiteName, path: string) =
   test "Light client - Sync - " & path.relativePath(SszTestsDir):
     # Reduce stack size by making this a `proc`
     proc loadTestMeta(): (RuntimeConfig, TestMeta) =
-      let (cfg, unknowns) = readRuntimeConfig(path/"config.yaml")
+      let (cfg, _) = readRuntimeConfig(path/"config.yaml")
 
       when false:
         # TODO evaluate whether this is useful and if so, fix it
@@ -265,5 +265,4 @@ suite "EF - Light client - Sync" & preset():
     if kind != pcDir or not dirExists(basePath):
       continue
     for kind, path in walkDir(basePath, relative = true, checkDir = true):
-      let combinedPath = basePath/path
       runTest(suiteName, basePath/path)
@@ -60,8 +60,6 @@ suite "BlockSlot and helpers":
       s4 = BlockRef(bid: BlockId(slot: Slot(4)), parent: s2)
       se1 = BlockRef(bid:
        BlockId(slot: Epoch(1).start_slot()), parent: s2)
-      se2 = BlockRef(bid:
-       BlockId(slot: Epoch(2).start_slot()), parent: se1)
 
     check:
       s0.atSlot(Slot(0)).blck == s0
@@ -40,13 +40,11 @@ suite "Block processor" & preset():
     validatorMonitor = newClone(ValidatorMonitor.init())
     dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
     taskpool = Taskpool.new()
-    verifier = BatchVerifier.init(rng, taskpool)
     quarantine = newClone(Quarantine.init())
     blobQuarantine = newClone(BlobQuarantine())
     attestationPool = newClone(AttestationPool.init(dag, quarantine))
     elManager = new ELManager # TODO: initialise this properly
     actionTracker: ActionTracker
-    keymanagerHost: ref KeymanagerHost
     consensusManager = ConsensusManager.new(
       dag, attestationPool, quarantine, elManager, actionTracker,
       newClone(DynamicFeeRecipientsStore.init()), "",
@@ -213,9 +213,8 @@ suite "Block pool processing" & preset():
 
     assign(state[], dag.epochRefState)
 
-    let
-      bnext = addTestBlock(state[], cache).phase0Data
-      bnextAdd = dag.addHeadBlock(verifier, bnext, nilPhase0Callback)
+    let bnext = addTestBlock(state[], cache).phase0Data
+    discard dag.addHeadBlock(verifier, bnext, nilPhase0Callback)
 
     check:
       # Getting an EpochRef should not result in states being stored
@@ -952,9 +951,7 @@ suite "Backfill":
       dag2.backfill == blocks[^2].phase0Data.message.toBeaconBlockSummary()
 
   test "Init without genesis / block":
-    let
-      tailBlock = blocks[^1]
-      genBlock = get_initial_beacon_block(genState[])
+    let genBlock = get_initial_beacon_block(genState[])
 
     ChainDAGRef.preInit(db, tailState[])
 
@@ -22,7 +22,6 @@ const ROOT = "342cecb5a18945fbbda7c62ede3016f3"
 
 template databaseRoot: string = getTempDir().joinPath(ROOT)
 template key1: array[1, byte] = [byte(kOldDepositContractSnapshot)]
-template key2: array[1, byte] = [byte(kDepositTreeSnapshot)]
 
 type
   DepositSnapshotUpgradeProc = proc(old: OldDepositContractSnapshot): DepositTreeSnapshot
@@ -157,7 +156,6 @@ suite "DepositTreeSnapshot":
     inspectDCS(snapshot, 11052984)
 
   test "depositCount":
-    let now = getTime()
     var rand = initRand(12345678)
     for i in 1..1000:
       let n = rand.next()
@@ -328,7 +328,7 @@ suite "Gossip validation - Altair":
       let
         (subcommitteeIdx, indexInSubcommittee) =
           dag.getFirstAggregator(signatureSlot)
-        (validator, expectedCount, msg) = dag.getSyncCommitteeMessage(
+        (validator, _, msg) = dag.getSyncCommitteeMessage(
           slot, subcommitteeIdx, indexInSubcommittee,
           signatureSlot = Opt.some(signatureSlot))
         msgVerdict = waitFor dag.validateSyncCommitteeMessage(
@@ -46,7 +46,7 @@ suite "Spec helpers":
 
   proc process(anchor: object, index: GeneralizedIndex) =
     var i = index
-    anchor.enumInstanceSerializedFields(fieldNameVar, fieldVar):
+    anchor.enumInstanceSerializedFields(_, fieldVar):
       let depth = log2trunc(i)
       var proof = newSeq[Eth2Digest](depth)
       state.build_proof(i, proof).get
@@ -336,7 +336,6 @@ const
   secretBytes = hexToSeqByte "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
   salt = hexToSeqByte "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
   iv = hexToSeqByte "264daa3f303d7259501c93d997d84fe6"
-  secretNetBytes = hexToSeqByte "08021220fe442379443d6e2d7d75d3a58f96fbb35f0a9c7217796825fc9040e3b89c5736"
 
 proc listLocalValidators(validatorsDir,
                          secretsDir: string): seq[ValidatorPubKey] {.
@@ -746,7 +745,7 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} =
         responseJson["message"].getStr() == InvalidAuthorizationError
 
       expect RestError:
-        let keystores = await client.listKeys("Invalid Token")
+        discard await client.listKeys("Invalid Token")
 
   suite "ImportKeystores requests" & testFlavour:
     asyncTest "ImportKeystores/ListKeystores/DeleteKeystores" & testFlavour:
@@ -936,7 +935,7 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} =
         responseJson["message"].getStr() == InvalidAuthorizationError
 
      expect RestError:
-        let keystores = await client.listKeys("Invalid Token")
+        discard await client.listKeys("Invalid Token")
 
   suite "Fee recipient management" & testFlavour:
     asyncTest "Missing Authorization header" & testFlavour:
@@ -60,39 +60,6 @@ const
         "version": 4
     }"""
 
-  scryptVector2 = """
-    {
-        "crypto": {
-            "kdf": {
-                "function": "scrypt",
-                "params": {
-                    "dklen": 32,
-                    "n": 262144,
-                    "p": 1,
-                    "r": 8,
-                    "salt": "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
-                },
-                "message": ""
-            },
-            "checksum": {
-                "function": "sha256",
-                "params": {},
-                "message": "d2217fe5f3e9a1e34581ef8a78f7c9928e436d36dacc5e846690a5581e8ea484"
-            },
-            "cipher": {
-                "function": "aes-128-ctr",
-                "params": {
-                    "iv": "264daa3f303d7259501c93d997d84fe6"
-                },
-                "message": "06ae90d55fe0a6e9c5c3bc5b170827b2e5cce3929ed3f116c2811e6366dfe20f"
-            }
-        },
-        "pubkey": "9612d7a727c9d0a22e185a1c768478dfe919cada9266988cb32359c11f2b7b27f4ae4040902382ae2910c15e2b420d07",
-        "path": "m/12381/60/3141592653/589793238",
-        "uuid": "1d85ae20-35c5-4611-98e8-aa14a633906f",
-        "version": 4
-    }"""
-
   pbkdf2Vector = """
     {
         "crypto": {
@@ -561,8 +561,9 @@ suite "createValidatorFiles()":
     # Creating `secrets` dir with `UserRead` permissions before
     # calling `createValidatorFiles` which should result in problem
     # with creating a secret file inside the dir:
+    discard createPath(testSecretsDir, 0o400)
+
     let
-      secretsDirNoPermissions = createPath(testSecretsDir, 0o400)
       res = createLocalValidatorFiles(testSecretsDir, testValidatorsDir,
                                       keystoreDir,
                                       secretFile, password,
@@ -582,8 +583,9 @@ suite "createValidatorFiles()":
     # Creating `validators` dir with `UserRead` permissions before
     # calling `createValidatorFiles` which should result in problems
     # creating `keystoreDir` inside the dir.
+    discard createPath(testValidatorsDir, 0o400)
+
     let
-      validatorsDirNoPermissions = createPath(testValidatorsDir, 0o400)
       res = createLocalValidatorFiles(testSecretsDir, testValidatorsDir,
                                       keystoreDir,
                                       secretFile, password,
@@ -603,9 +605,10 @@ suite "createValidatorFiles()":
     # Creating `keystore` dir with `UserRead` permissions before
     # calling `createValidatorFiles` which should result in problems
     # creating keystore file inside this dir:
+    discard createPath(testValidatorsDir, 0o700)
+    discard createPath(keystoreDir, 0o400)
+
     let
-      validatorsDir = createPath(testValidatorsDir, 0o700)
-      keystoreDirNoPermissions = createPath(keystoreDir, 0o400)
       res = createLocalValidatorFiles(testSecretsDir, testValidatorsDir,
                                       keystoreDir,
                                       secretFile, password,
@@ -622,22 +625,23 @@ suite "createValidatorFiles()":
   test "`createValidatorFiles` with already existing dirs and any error":
     # Generate deposits so we have files and dirs already existing
    # before testing `createValidatorFiles` failure
-    let
-      deposits = generateDeposits(
-        cfg,
-        rng[],
-        seed,
-        0, simulationDepositsCount,
-        testValidatorsDir,
-        testSecretsDir)
+    discard generateDeposits(
+      cfg,
+      rng[],
+      seed,
+      0, simulationDepositsCount,
+      testValidatorsDir,
+      testSecretsDir)
 
+    let
       validatorsCountBefore = directoryItemsCount(testValidatorsDir)
       secretsCountBefore = directoryItemsCount(testSecretsDir)
 
     # Creating `keystore` dir with `UserRead` permissions before calling
     # `createValidatorFiles` which will result in error
-      keystoreDirNoPermissions = createPath(keystoreDir, 0o400)
+    discard createPath(keystoreDir, 0o400)
 
+    let
       res = createLocalValidatorFiles(testSecretsDir, testValidatorsDir,
                                       keystoreDir,
                                       secretFile, password,
@@ -176,7 +176,6 @@ suite "Message signatures":
   test "Sync committee message signatures":
     let
       slot = default(Slot)
-      epoch = slot.epoch
      block_root = default(Eth2Digest)
 
     check:
@@ -765,8 +765,6 @@ suite "Validator Client test suite":
        score == vector[5]
 
  test "getUniqueVotes() test vectors":
-    var data = CommitteeValidatorsBits.init(16)
-
    for vector in AttestationBitsVectors:
      let
        a1 = Attestation.init(vector[0][0][0], vector[0][0][1], vector[0][0][2])