rm support for deprecated reqStep in req/resp (#6857)

* rm support for deprecated reqStep in req/resp

* work around / character in platform
This commit is contained in:
tersec 2025-01-18 05:11:06 +01:00 committed by GitHub
parent ce02b73f20
commit d791299262
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 45 additions and 94 deletions

View File

@ -207,7 +207,12 @@ jobs:
- name: Upload combined results
uses: actions/upload-artifact@v4
with:
name: Unit Test Results ${{ matrix.target.os }}-${{ matrix.target.cpu }}
# upload-artifact requires avoiding "/", because "To maintain file
# system agnostic behavior, these characters are intentionally not
# allowed to prevent potential problems with downloads on different
# file systems". However, GitHub Actions workflows do not support a
# usual assortment of string functions.
name: Unit Test Results ${{ matrix.target.os }}-${{ matrix.target.cpu }}-${{ matrix.branch == 'upstream/version-2-0' && 'version-2-0' || matrix.branch }}
path: build/*.xml
devbuild:

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -1639,11 +1639,11 @@ template forkAtEpoch*(dag: ChainDAGRef, epoch: Epoch): Fork =
forkAtEpoch(dag.cfg, epoch)
proc getBlockRange*(
dag: ChainDAGRef, startSlot: Slot, skipStep: uint64,
dag: ChainDAGRef, startSlot: Slot,
output: var openArray[BlockId]): Natural =
## This function populates an `output` buffer of blocks
## with slots ranging from `startSlot` up to, but not including,
## `startSlot + skipStep * output.len`, skipping any slots that don't have
## `startSlot + output.len`, skipping any slots that don't have
## a block.
##
## Blocks will be written to `output` from the end without gaps, even if
@ -1657,7 +1657,7 @@ proc getBlockRange*(
headSlot = dag.head.slot
trace "getBlockRange entered",
head = shortLog(dag.head.root), requestedCount, startSlot, skipStep, headSlot
head = shortLog(dag.head.root), requestedCount, startSlot, headSlot
if startSlot < dag.backfill.slot:
debug "Got request for pre-backfill slot",
@ -1671,11 +1671,9 @@ proc getBlockRange*(
runway = uint64(headSlot - startSlot)
# This is the number of blocks that will follow the start block
extraSlots = min(runway div skipStep, requestedCount - 1)
extraSlots = min(runway, requestedCount - 1)
# If `skipStep` is very large, `extraSlots` should be 0 from
# the previous line, so `endSlot` will be equal to `startSlot`:
endSlot = startSlot + extraSlots * skipStep
endSlot = startSlot + extraSlots
var
curSlot = endSlot
@ -1687,7 +1685,7 @@ proc getBlockRange*(
if bs.isSome and bs.get().isProposed():
o -= 1
output[o] = bs.get().bid
curSlot -= skipStep
curSlot -= 1
# Handle start slot separately (to avoid underflow when computing curSlot)
let bs = dag.getBlockIdAtSlot(startSlot)

View File

@ -1595,7 +1595,7 @@ proc pruneBlobs(node: BeaconNode, slot: Slot) =
var blocks: array[SLOTS_PER_EPOCH.int, BlockId]
var count = 0
let startIndex = node.dag.getBlockRange(
blobPruneEpoch.start_slot, 1, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1))
blobPruneEpoch.start_slot, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1))
for i in startIndex..<SLOTS_PER_EPOCH:
let blck = node.dag.getForkedBlock(blocks[int(i)]).valueOr: continue
withBlck(blck):

View File

@ -9,7 +9,6 @@
import
chronicles, chronos, snappy, snappy/codec,
../spec/datatypes/[phase0, altair, bellatrix, capella, deneb],
../spec/[helpers, forks, network],
".."/[beacon_clock],
../networking/eth2_network,
@ -167,7 +166,7 @@ template getBlobSidecarsByRange(
count = int min(reqCount, blockIds.lenu64)
endIndex = count - 1
startIndex =
dag.getBlockRange(startSlot, 1, blockIds.toOpenArray(0, endIndex))
dag.getBlockRange(startSlot, blockIds.toOpenArray(0, endIndex))
var
found = 0
@ -222,12 +221,11 @@ p2pProtocol BeaconSync(version = 1,
# client call that returns `seq[ref ForkedSignedBeaconBlock]` will
# be generated by the libp2p macro - we guarantee that seq items
# are `not-nil` in the implementation
# TODO reqStep is deprecated - future versions can remove support for
# values != 1: https://github.com/ethereum/consensus-specs/pull/2856
trace "got range request", peer, startSlot,
count = reqCount, step = reqStep
if reqCount == 0 or reqStep == 0:
trace "got range request", peer, startSlot, count = reqCount
# https://github.com/ethereum/consensus-specs/pull/2856
if reqStep != 1:
raise newException(InvalidInputsError, "Step size must be 1")
if reqCount == 0:
raise newException(InvalidInputsError, "Empty range requested")
var blocks: array[MAX_REQUEST_BLOCKS.int, BlockId]
@ -236,9 +234,8 @@ p2pProtocol BeaconSync(version = 1,
# Limit number of blocks in response
count = int min(reqCount, blocks.lenu64)
endIndex = count - 1
startIndex =
dag.getBlockRange(startSlot, reqStep,
blocks.toOpenArray(0, endIndex))
startIndex = dag.getBlockRange(
startSlot, blocks.toOpenArray(0, endIndex))
var
found = 0
@ -268,8 +265,7 @@ p2pProtocol BeaconSync(version = 1,
inc found
debug "Block range request done",
peer, startSlot, count, reqStep
debug "Block range request done", peer, startSlot, count
proc beaconBlocksByRoot_v2(
peer: Peer,
@ -459,7 +455,7 @@ p2pProtocol BeaconSync(version = 1,
count = int min(reqCount, blockIds.lenu64)
endIndex = count - 1
startIndex =
dag.getBlockRange(startSlot, 1, blockIds.toOpenArray(0, endIndex))
dag.getBlockRange(startSlot, blockIds.toOpenArray(0, endIndex))
var
found = 0

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2020-2024 Status Research & Development GmbH
# Copyright (c) 2020-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -8,7 +8,7 @@
{.push raises: [].}
import
std/[os, stats, tables],
std/tables,
snappy,
chronicles, confutils, stew/[byteutils, io2], eth/db/kvstore_sqlite3,
../beacon_chain/networking/network_metadata,
@ -21,6 +21,9 @@ import
../research/simutils,
./era, ./ncli_common, ./validator_db_aggregator
from std/os import createDir, dirExists, moveFile, `/`
from std/stats import RunningStat
when defined(posix):
import system/ansi_c
@ -641,7 +644,7 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
if firstSlot.isSome():
withTimer(timers[tBlocks]):
var blocks: array[SLOTS_PER_HISTORICAL_ROOT.int, BlockId]
for i in dag.getBlockRange(firstSlot.get(), 1, blocks)..<blocks.len:
for i in dag.getBlockRange(firstSlot.get(), blocks)..<blocks.len:
if not dag.getBlockSZ(blocks[i], tmp):
break writeFileBlock
group.update(e2, blocks[i].slot, tmp).get()
@ -901,11 +904,11 @@ proc createInsertValidatorProc(db: SqStoreRef): auto =
VALUES(?, ?);""",
(int64, array[48, byte]), void).expect("DB")
proc collectBalances(balances: var seq[uint64], forkedState: ForkedHashedBeaconState) =
func collectBalances(balances: var seq[uint64], forkedState: ForkedHashedBeaconState) =
withState(forkedState):
balances = seq[uint64](forkyState.data.balances.data)
proc calculateDelta(info: RewardsAndPenalties): int64 =
func calculateDelta(info: RewardsAndPenalties): int64 =
info.source_outcome +
info.target_outcome +
info.head_outcome +

View File

@ -1,16 +1,13 @@
#!/usr/bin/env bash
# Copyright (c) 2020-2024 Status Research & Development GmbH. Licensed under
# Copyright (c) 2020-2025 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
# Mostly a duplication of "tests/simulation/{start.sh,run_node.sh}", but with a focus on
# replicating testnets as closely as possible, which means following the Docker execution labyrinth.
set -euo pipefail
set -Eeuo pipefail
SCRIPTS_DIR="$(dirname "${BASH_SOURCE[0]}")"
cd "$SCRIPTS_DIR/.."
@ -881,7 +878,7 @@ fi
jq -r '.hash' "$EXECUTION_GENESIS_BLOCK_JSON" > "${DATA_DIR}/deposit_contract_block_hash.txt"
for NUM_NODE in $(seq 1 $NUM_NODES); do
for NUM_NODE in $(seq 1 "${NUM_NODES}"); do
NODE_DATA_DIR="${DATA_DIR}/node${NUM_NODE}"
rm -rf "${NODE_DATA_DIR}"
scripts/makedir.sh "${NODE_DATA_DIR}" 2>&1
@ -922,7 +919,7 @@ DIRECTPEER_ENR=$(
cp "$SCRIPTS_DIR/$CONST_PRESET-non-overriden-config.yaml" "$RUNTIME_CONFIG_FILE"
# TODO the runtime config file should be used during deposit generation as well!
echo Wrote $RUNTIME_CONFIG_FILE:
echo Wrote "${RUNTIME_CONFIG_FILE}":
tee -a "$RUNTIME_CONFIG_FILE" <<EOF
PRESET_BASE: ${CONST_PRESET}
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: ${TOTAL_VALIDATORS}
@ -1157,7 +1154,7 @@ for NUM_NODE in $(seq 1 "${NUM_NODES}"); do
fi
done
./build/${LH_BINARY} vc \
./build/"${LH_BINARY}" vc \
--debug-level "debug" \
--logfile-max-number 0 \
--log-format "JSON" \
@ -1171,7 +1168,7 @@ for NUM_NODE in $(seq 1 "${NUM_NODES}"); do
else
./build/nimbus_validator_client \
--log-level="${LOG_LEVEL}" \
${STOP_AT_EPOCH_FLAG} \
"${STOP_AT_EPOCH_FLAG}" \
--data-dir="${VALIDATOR_DATA_DIR}" \
--metrics \
--metrics-port=$(( BASE_VC_METRICS_PORT + NUM_NODE - 1 )) \
@ -1257,7 +1254,7 @@ if [ "$LC_NODES" -ge "1" ]; then
--trusted-block-root="${LC_TRUSTED_BLOCK_ROOT}" \
--jwt-secret="${JWT_FILE}" \
"${WEB3_ARG[@]}" \
${STOP_AT_EPOCH_FLAG} \
"${STOP_AT_EPOCH_FLAG}" \
&> "${DATA_DIR}/logs/nimbus_light_client.${NUM_LC}.jsonl" &
PID=$!
PIDS_TO_WAIT="${PIDS_TO_WAIT},${PID}"

View File

@ -1,24 +0,0 @@
if [ -z "${REPO_PATHS_SOURCED:-}" ]; then
REPO_PATHS_SOURCED=1
SCRIPTS_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd)
BUILD_DIR=$(cd "$SCRIPTS_DIR/../build" &> /dev/null && pwd)
data_dir_for_network() {
NETWORK_ID=$(cat "$NETWORK/genesis.json" | jq '.config.chainId')
echo "$BUILD_DIR/data/$NETWORK_ID"
}
create_data_dir_for_network() {
NETWORK_DIR=$(data_dir_for_network)
mkdir -p "$NETWORK_DIR"
echo "$NETWORK_DIR"
}
create_jwt_token() {
if [ ! -f "$1" ]; then
openssl rand -hex 32 | tr -d "\n" > "$1"
fi
}
fi

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -147,41 +147,17 @@ suite "Block pool processing" & preset():
var blocks: array[3, BlockId]
check:
dag.getBlockRange(Slot(0), 1, blocks.toOpenArray(0, 0)) == 0
dag.getBlockRange(Slot(0), blocks.toOpenArray(0, 0)) == 0
blocks[0..<1] == [dag.tail]
dag.getBlockRange(Slot(0), 1, blocks.toOpenArray(0, 1)) == 0
dag.getBlockRange(Slot(0), blocks.toOpenArray(0, 1)) == 0
blocks[0..<2] == [dag.tail, b1Add[].bid]
dag.getBlockRange(Slot(0), 2, blocks.toOpenArray(0, 1)) == 0
blocks[0..<2] == [dag.tail, b2Add[].bid]
dag.getBlockRange(Slot(0), 3, blocks.toOpenArray(0, 1)) == 1
blocks[1..<2] == [dag.tail] # block 3 is missing!
dag.getBlockRange(Slot(2), 2, blocks.toOpenArray(0, 1)) == 0
blocks[0..<2] == [b2Add[].bid, b4Add[].bid] # block 3 is missing!
# large skip step
dag.getBlockRange(Slot(0), uint64.high, blocks.toOpenArray(0, 2)) == 2
blocks[2..2] == [dag.tail]
# large skip step
dag.getBlockRange(Slot(2), uint64.high, blocks.toOpenArray(0, 1)) == 1
blocks[1..1] == [b2Add[].bid]
# empty length
dag.getBlockRange(Slot(2), 2, blocks.toOpenArray(0, -1)) == 0
# No blocks in sight
dag.getBlockRange(Slot(5), blocks.toOpenArray(0, 1)) == 2
# No blocks in sight
dag.getBlockRange(Slot(5), 1, blocks.toOpenArray(0, 1)) == 2
# No blocks in sight
dag.getBlockRange(Slot(uint64.high), 1, blocks.toOpenArray(0, 1)) == 2
# No blocks in sight either due to gaps
dag.getBlockRange(Slot(3), 2, blocks.toOpenArray(0, 1)) == 2
blocks[2..<2].len == 0
dag.getBlockRange(Slot(uint64.high), blocks.toOpenArray(0, 1)) == 2
# A fork forces the clearance state to a point where it cannot be advanced
let