Merge branch 'unstable' into dev/etan/rd-shufflingacc

This commit is contained in:
Zahary Karadjov 2023-05-19 14:46:23 +03:00
commit a6b3e02ce0
No known key found for this signature in database
GPG Key ID: C1F42EAFF38D570F
15 changed files with 342 additions and 96 deletions

View File

@ -37,7 +37,7 @@ jobs:
include:
- target:
os: linux
builder: ubuntu-20.04
builder: ubuntu-22.04
shell: bash
- target:
os: macos

View File

@ -576,6 +576,11 @@ OK: 24/24 Fail: 0/24 Skip: 0/24
+ BeaconBlockType OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Validator Client test suite
```diff
+ normalizeUri() test vectors OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Validator change pool testing suite
```diff
+ addValidatorChangeMessage/getAttesterSlashingMessage OK
@ -686,4 +691,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 9/9 Fail: 0/9 Skip: 0/9
---TOTAL---
OK: 391/396 Fail: 0/396 Skip: 5/396
OK: 392/397 Fail: 0/397 Skip: 5/397

View File

@ -1,3 +1,55 @@
2023-05-18 v23.5.1
==================
Nimbus `v23.5.1` is a `medium-urgency` point release improving the compatibility of Nimbus with 3rd party validator clients and beacon nodes and introducing support for incremental history pruning. If you are still not using the `--history:prune` option, we recommend testing it in a non-production environment, as it will be enabled by default in our next release.
### Breaking changes
* The Nimbus validator client no longer accepts under-specified beacon node URLs that don't include a port number or a protocol scheme. When a protocol scheme is specified, Nimbus now uses the default port for the selected protocol (80 for HTTP and 443 for HTTPS):
https://github.com/status-im/nimbus-eth2/pull/4921
### Improvements
* The history pruning is now incremental and no longer results in start-up delays when the `--history:prune` option is enabled on an existing node:
https://github.com/status-im/nimbus-eth2/pull/4887
* Nimbus now uses the withdrawal address of the validator as a default choice for the fee recipient address if the user has not provided any value in the configuration:
https://github.com/status-im/nimbus-eth2/pull/4968
* Nimbus now supports the upcoming Capella hard-fork in the Gnosis network:
https://github.com/status-im/nimbus-eth2/pull/4936
### Fixes
* The Capella-related properties `MAX_BLS_TO_EXECUTION_CHANGES`, `MAX_WITHDRAWALS_PER_PAYLOAD`, `MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP` and `DOMAIN_BLS_TO_EXECUTION_CHANGE` were missing from the `/eth/v1/config/spec` REST API end-point:
https://github.com/status-im/nimbus-eth2/pull/4925
* The `/eth/v1/validator/blinded_blocks/{slot}` end-point was supplying an incorrectly encoded response when requested to return SSZ data:
https://github.com/status-im/nimbus-eth2/pull/4943
* The safety checks associated with the `--weak-subjectivity-checkpoint` parameter are now compliant with the latest Ethereum specs:
https://github.com/status-im/nimbus-eth2/pull/4923
* The Nimbus validator client was using HTTP pipelining which is not supported by all beacon node implementations:
https://github.com/status-im/nimbus-eth2/pull/4950
* The "Connection to EL node degraded" warning is now printed only after sufficiently persistent connectivity issues with the EL client:
https://github.com/status-im/nimbus-eth2/pull/4960
* After being only briefly disconnected from the execution layer client, the Nimbus beacon node was prematurely setting the `execution_optimistic` flag when returning validator duties:
https://github.com/status-im/nimbus-eth2/pull/4955
* Nimbus now allows the builder to respond 500ms later than the spec-mandated timeout in order to account for possible additional delays introduced by proxies such as mev-boost:
https://github.com/status-im/nimbus-eth2/pull/4964
* During sync committee period transitions, for a brief period of time there was a low risk of producing an invalid sync committee contribution:
https://github.com/status-im/nimbus-eth2/pull/4953
* Nimbus `v23.5.0` introduced an unintended backwards-incompatible change in the parsing of remote keystores which is addressed in this release:
https://github.com/status-im/nimbus-eth2/pull/4967
2023-05-09 v23.5.0
==================

View File

@ -14,6 +14,7 @@ import
../sszdump
from std/deques import Deque, addLast, contains, initDeque, items, len, shrink
from std/sequtils import mapIt
from ../consensus_object_pools/consensus_manager import
ConsensusManager, checkNextProposer, optimisticExecutionPayloadHash,
runProposalForkchoiceUpdated, shouldSyncOptimistically, updateHead,
@ -33,6 +34,7 @@ from ../validators/validator_monitor import
MsgSource, ValidatorMonitor, registerAttestationInBlock, registerBeaconBlock,
registerSyncAggregateInBlock
from ../beacon_chain_db import putBlobSidecar
from ../spec/state_transition_block import validate_blobs
export sszdump, signatures_batch
@ -196,8 +198,20 @@ proc storeBackfillBlock(
# writing the block in case of blob error.
let blobsOk =
when typeof(signedBlock).toFork() >= ConsensusFork.Deneb:
blobs.len > 0 or true
# TODO: validate blobs
let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq
if blobs.len > 0 or kzgCommits.len > 0:
let r = validate_blobs(kzgCommits, blobs.mapIt(it.blob),
blobs.mapIt(it.kzg_proof))
if r.isErr():
debug "backfill blob validation failed",
blockRoot = shortLog(signedBlock.root),
blobs = shortLog(blobs),
blck = shortLog(signedBlock.message),
signature = shortLog(signedBlock.signature),
msg = r.error()
r.isOk()
else:
true
else:
true
if not blobsOk:
@ -222,7 +236,8 @@ proc storeBackfillBlock(
return res
# Only store blobs after successfully establishing block viability.
# TODO: store blobs in db
for b in blobs:
self.consensusManager.dag.db.putBlobSidecar(b[])
res
@ -450,9 +465,18 @@ proc storeBlock*(
# Establish blob viability before calling addHeadBlock to avoid
# writing the block in case of blob error.
when typeof(signedBlock).toFork() >= ConsensusFork.Deneb:
if blobs.len > 0:
discard
# TODO: validate blobs
let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq
if blobs.len > 0 or kzgCommits.len > 0:
let r = validate_blobs(kzgCommits, blobs.mapIt(it.blob),
blobs.mapIt(it.kzg_proof))
if r.isErr():
debug "blob validation failed",
blockRoot = shortLog(signedBlock.root),
blobs = shortLog(blobs),
blck = shortLog(signedBlock.message),
signature = shortLog(signedBlock.signature),
msg = r.error()
return err((VerifierError.Invalid, ProcessingStatus.completed))
type Trusted = typeof signedBlock.asTrusted()
let blck = dag.addHeadBlock(self.verifier, signedBlock, payloadValid) do (

View File

@ -31,6 +31,13 @@ import
export
ethtypes, conversions, RuntimeConfig
const
vendorDir = currentSourcePath.parentDir.replace('\\', '/') & "/../../vendor"
# TODO: Currently, this breaks the Linux/ARM packaging due
# to a toolchain incompatibility problem
incbinEnabled* = false
type
Eth1BlockHash* = ethtypes.BlockHash
@ -64,11 +71,21 @@ type
# `genesisData` will have `len == 0` for networks with a still
# unknown genesis state.
genesisData*: seq[byte]
genesisDepositsSnapshot*: seq[byte]
when incbinEnabled:
genesisData*: seq[byte]
else:
genesisData*: string
genesisDepositsSnapshot*: string
else:
incompatibilityDesc*: string
template genesisBytes*(metadata: Eth2NetworkMetadata): auto =
when incbinEnabled:
metadata.genesisData
else:
metadata.genesisData.toOpenArrayByte(0, metadata.genesisData.high)
proc readBootstrapNodes*(path: string): seq[string] {.raises: [IOError].} =
# Read a list of ENR values from a YAML file containing a flat list of entries
if fileExists(path):
@ -157,14 +174,17 @@ proc loadEth2NetworkMetadata*(
readBootEnr(bootEnrPath))
genesisData = if loadGenesis and fileExists(genesisPath):
toBytes(readFile(genesisPath))
when incbinEnabled:
toBytes readFile(genesisPath)
else:
readFile(genesisPath)
else:
@[]
""
genesisDepositsSnapshot = if fileExists(genesisDepositsSnapshotPath):
toBytes(readFile(genesisDepositsSnapshotPath))
readFile(genesisDepositsSnapshotPath)
else:
@[]
""
Eth2NetworkMetadata(
incompatible: false,
@ -196,21 +216,19 @@ proc loadCompileTimeNetworkMetadata(
else:
macros.error "config.yaml not found for network '" & path
const vendorDir =
currentSourcePath.parentDir.replace('\\', '/') & "/../../vendor"
when const_preset == "gnosis":
import stew/assign2
let
gnosisGenesis {.importc: "gnosis_mainnet_genesis".}: ptr UncheckedArray[byte]
gnosisGenesisSize {.importc: "gnosis_mainnet_genesis_size".}: int
{.compile: "network_metadata_gnosis.S".}
when incbinEnabled:
let
gnosisGenesis {.importc: "gnosis_mainnet_genesis".}: ptr UncheckedArray[byte]
gnosisGenesisSize {.importc: "gnosis_mainnet_genesis_size".}: int
{.compile: "network_metadata_gnosis.S".}
const
gnosisMetadata = loadCompileTimeNetworkMetadata(
vendorDir & "/gnosis-chain-configs/mainnet")
vendorDir & "/gnosis-chain-configs/mainnet",
none(Eth1Network), not incbinEnabled)
static:
checkForkConsistency(gnosisMetadata.cfg)
@ -220,30 +238,31 @@ when const_preset == "gnosis":
elif const_preset == "mainnet":
import stew/assign2
# Nim is very inefficient at loading large constants from binary files so we
# use this trick instead which saves significant amounts of compile time
let
mainnetGenesis {.importc: "eth2_mainnet_genesis".}: ptr UncheckedArray[byte]
mainnetGenesisSize {.importc: "eth2_mainnet_genesis_size".}: int
when incbinEnabled:
# Nim is very inefficient at loading large constants from binary files so we
# use this trick instead which saves significant amounts of compile time
let
mainnetGenesis {.importc: "eth2_mainnet_genesis".}: ptr UncheckedArray[byte]
mainnetGenesisSize {.importc: "eth2_mainnet_genesis_size".}: int
praterGenesis {.importc: "eth2_goerli_genesis".}: ptr UncheckedArray[byte]
praterGenesisSize {.importc: "eth2_goerli_genesis_size".}: int
praterGenesis {.importc: "eth2_goerli_genesis".}: ptr UncheckedArray[byte]
praterGenesisSize {.importc: "eth2_goerli_genesis_size".}: int
sepoliaGenesis {.importc: "eth2_sepolia_genesis".}: ptr UncheckedArray[byte]
sepoliaGenesisSize {.importc: "eth2_sepolia_genesis_size".}: int
sepoliaGenesis {.importc: "eth2_sepolia_genesis".}: ptr UncheckedArray[byte]
sepoliaGenesisSize {.importc: "eth2_sepolia_genesis_size".}: int
{.compile: "network_metadata_mainnet.S".}
{.compile: "network_metadata_mainnet.S".}
const
eth2NetworksDir = vendorDir & "/eth2-networks"
sepoliaDir = vendorDir & "/sepolia"
mainnetMetadata = loadCompileTimeNetworkMetadata(
vendorDir & "/eth2-networks/shared/mainnet", some mainnet, false)
vendorDir & "/eth2-networks/shared/mainnet", some mainnet, not incbinEnabled)
praterMetadata = loadCompileTimeNetworkMetadata(
vendorDir & "/eth2-networks/shared/prater", some goerli, false)
vendorDir & "/eth2-networks/shared/prater", some goerli, not incbinEnabled)
sepoliaMetadata = loadCompileTimeNetworkMetadata(
vendorDir & "/sepolia/bepolia", some sepolia, false)
vendorDir & "/sepolia/bepolia", some sepolia, not incbinEnabled)
static:
for network in [mainnetMetadata, praterMetadata, sepoliaMetadata]:
@ -272,9 +291,12 @@ proc getMetadataForNetwork*(
warn "Ropsten is unsupported; https://blog.ethereum.org/2022/11/30/ropsten-shutdown-announcement suggests migrating to Goerli or Sepolia"
template withGenesis(metadata, genesis: untyped): untyped =
var tmp = metadata
assign(tmp.genesisData, genesis.toOpenArray(0, `genesis Size` - 1))
tmp
when incbinEnabled:
var tmp = metadata
assign(tmp.genesisData, genesis.toOpenArray(0, `genesis Size` - 1))
tmp
else:
metadata
let metadata =
when const_preset == "gnosis":

View File

@ -542,7 +542,7 @@ proc init*(T: type BeaconNode,
var genesisState =
if metadata.genesisData.len > 0:
try:
newClone readSszForkedHashedBeaconState(cfg, metadata.genesisData)
newClone readSszForkedHashedBeaconState(cfg, metadata.genesisBytes)
except CatchableError as err:
raiseAssert "Invalid baked-in state: " & err.msg
else:
@ -2028,7 +2028,7 @@ proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [Defect, CatchableEr
stateId: "finalized")
genesis =
if network.genesisData.len > 0:
newClone(readSszForkedHashedBeaconState(cfg, network.genesisData))
newClone(readSszForkedHashedBeaconState(cfg, network.genesisBytes))
else: nil
if config.blockId.isSome():

View File

@ -11,7 +11,7 @@ import
eth/db/kvstore_sqlite3, eth/keys,
./el/el_manager,
./gossip_processing/optimistic_processor,
./networking/topic_params,
./networking/[topic_params, network_metadata],
./spec/beaconstate,
./spec/datatypes/[phase0, altair, bellatrix, capella, deneb],
"."/[filepath, light_client, light_client_db, nimbus_binary_common, version]
@ -65,7 +65,7 @@ programMain:
let
genesisState =
try:
newClone(readSszForkedHashedBeaconState(cfg, metadata.genesisData))
newClone(readSszForkedHashedBeaconState(cfg, metadata.genesisBytes))
except CatchableError as err:
raiseAssert "Invalid baked-in state: " & err.msg

View File

@ -544,6 +544,9 @@ func shortLog*(v: BlobSidecar): auto =
bloblen: v.blob.len(),
)
func shortLog*(v: seq[ref BlobSidecar]): auto =
"[" & v.mapIt(shortLog(it[])).join(", ") & "]"
func shortLog*(v: SignedBlobSidecar): auto =
(
blob: shortLog(v.message),

View File

@ -772,15 +772,6 @@ proc validate_blobs*(expected_kzg_commitments: seq[KzgCommitment],
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/fork-choice.md#is_data_available
func is_data_available(
slot: Slot, beacon_block_root: Eth2Digest,
blob_kzg_commitments: seq[deneb.KZGCommitment]): bool =
discard $denebImplementationMissing & ": state_transition_block.nim:is_data_available"
true
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#block-processing
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
# copy of datatypes/phase0.nim
type SomePhase0Block =
@ -926,9 +917,4 @@ proc process_block*(
? process_blob_kzg_commitments(state, blck.body) # [New in Deneb]
# New in EIP-4844
if not is_data_available(
blck.slot, hash_tree_root(blck), blck.body.blob_kzg_commitments.asSeq):
return err("not is_data_available")
ok()

View File

@ -501,44 +501,44 @@ proc parseRoles*(data: string): Result[set[BeaconNodeRole], cstring] =
return err("Invalid beacon node role string found")
ok(res)
proc normalizeUri*(remoteUri: Uri): Uri =
var r = remoteUri
if (len(r.scheme) == 0) and (len(r.username) == 0) and
(len(r.password) == 0) and (len(r.hostname) == 0) and
(len(r.port) == 0) and (len(r.path) > 0):
# When `scheme` is not specified and `port` is not specified - whole
# hostname is stored in `path`. `query` and `anchor` could still be present.
# test.com
# test.com?q=query
# test.com?q=query#anchor=anchor
parseUri("http://" & $remoteUri & ":" & $defaultEth2RestPort)
elif (len(r.scheme) > 0) and (len(r.username) == 0) and
(len(r.password) == 0) and (len(r.hostname) == 0) and
(len(r.port) == 0) and (len(r.path) > 0):
# When `scheme` is not specified but `port` is specified - whole
# hostname is stored in `scheme` and `port` is in `path`.
# 192.168.0.1:5052
# test.com:5052
# test.com:5052?q=query
# test.com:5052?q=query#anchor=anchor
parseUri("http://" & $remoteUri)
elif (len(r.scheme) > 0) and (len(r.hostname) > 0):
# When `scheme` is specified, but `port` is not we use default.
# http://192.168.0.1
# http://test.com
# http://test.com?q=query
# http://test.com?q=query#anchor=anchor
if len(r.port) == 0: r.port = $defaultEth2RestPort
r
else:
remoteUri
proc normalizeUri*(r: Uri): Result[Uri, cstring] =
const
MissingPortNumber = cstring("Missing port number")
MissingHostname = cstring("Missing hostname")
UnknownScheme = cstring("Unknown scheme value")
if ($r).toLowerAscii().startsWith("http://") or
($r).toLowerAscii().startsWith("https://"):
# When a scheme is provided, only a hostname is required
if len(r.hostname) == 0: return err(MissingHostname)
return ok(r)
# Check for unknown scheme
if ($r).contains("://"):
return err(UnknownScheme)
# Add the default scheme (http)
let normalized =
if ($r).startsWith("//"):
parseUri("http:" & $r)
else:
parseUri("http://" & $r)
if len(normalized.hostname) == 0:
return err(MissingHostname)
if len(normalized.port) == 0:
return err(MissingPortNumber)
ok(normalized)
proc init*(t: typedesc[BeaconNodeServerRef], remote: Uri,
index: int): Result[BeaconNodeServerRef, string] =
doAssert(index >= 0)
let
flags = {RestClientFlag.CommaSeparatedArray}
remoteUri = normalizeUri(remote)
remoteUri = normalizeUri(remote).valueOr:
return err("Invalid URL: " & $error)
client =
block:
let res = RestClientRef.new($remoteUri, flags = flags)

View File

@ -18,7 +18,7 @@ const
versionMajor* = 23
versionMinor* = 5
versionBuild* = 0
versionBuild* = 1
versionBlob* = "stateofus" # Single word - ends up in the default graffiti

View File

@ -8,20 +8,22 @@ Each validator can have its own fee recipient set or a single recipient may be u
!!! warning
The execution client is not required to follow the fee recipient suggestion and may instead send the fees to a different address — only use execution clients you trust!
If no fee recipient is set up, the transaction fees are sent to the zero address, effectively causing them to be lost.
## Setting the fee recipient
Nimbus supports setting fee recipient per validator, or using defaults in both the validator client and beacon node.
Per-validator fee recipients are set using the [keymanager API](./keymanager-api.md).
Any validator without a per-validator recipient set will fall back to the `--suggested-fee-recipient` option if configured.
Any validator without a per-validator recipient set will fall back to the `--suggested-fee-recipient` option if configured, or otherwise to the withdrawal address of the validator.
For each validator, it selects from the first available, in the following order:
1. the keymanager API per-validator suggested fee recipient
1. The keymanager API per-validator suggested fee recipient
2. `--suggested-fee-recipient` in the validator client
3. `--suggested-fee-recipient` in the beacon node
4. If the validator has an associated [withdrawal address](./withdrawals.md), it will be used as a final fallback option.
!!! warning
If none of the above are present, the transaction fees are sent to the zero address, effectively causing them to be lost.
For example, `nimbus_beacon_node --suggested-fee-recipient=0x70E47C843E0F6ab0991A3189c28F2957eb6d3842` suggests to the execution client that `0x70E47C843E0F6ab0991A3189c28F2957eb6d3842` might be the coinbase.
If this Nimbus node has two validators, one of which has its own suggested fee recipient via the keymanager API and the other does not, the former would use its own per-validator suggested fee recipient, while the latter would fall back to `0x70E47C843E0F6ab0991A3189c28F2957eb6d3842`.

View File

@ -49,7 +49,8 @@ import # Unit test
./test_signing_node,
./consensus_spec/all_tests as consensus_all_tests,
./slashing_protection/test_fixtures,
./slashing_protection/test_slashing_protection_db
./slashing_protection/test_slashing_protection_db,
./test_validator_client
when not defined(i386):
# Avoids "Out of memory" CI failures

View File

@ -17,7 +17,7 @@ template checkRoot(name, root) =
metadata = getMetadataForNetwork(name)
cfg = metadata.cfg
state = newClone(readSszForkedHashedBeaconState(
metadata.cfg, metadata.genesisData))
metadata.cfg, metadata.genesisBytes))
check:
$getStateRoot(state[]) == root

View File

@ -0,0 +1,151 @@
# beacon_chain
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
{.used.}
import std/strutils
import unittest2
import ../beacon_chain/validator_client/common
const
HostNames = [
"[2001:db8::1]",
"127.0.0.1",
"hostname.com",
"localhost",
"username:password@[2001:db8::1]",
"username:password@127.0.0.1",
"username:password@hostname.com",
"username:password@localhost",
]
GoodTestVectors = [
("http://$1",
"ok(http://$1)"),
("http://$1?q=query",
"ok(http://$1?q=query)"),
("http://$1?q=query#anchor",
"ok(http://$1?q=query#anchor)"),
("http://$1/subpath/",
"ok(http://$1/subpath/)"),
("http://$1/subpath/q=query",
"ok(http://$1/subpath/q=query)"),
("http://$1/subpath/q=query#anchor",
"ok(http://$1/subpath/q=query#anchor)"),
("http://$1/subpath",
"ok(http://$1/subpath)"),
("http://$1/subpath?q=query",
"ok(http://$1/subpath?q=query)"),
("http://$1/subpath?q=query#anchor",
"ok(http://$1/subpath?q=query#anchor)"),
("https://$1",
"ok(https://$1)"),
("https://$1?q=query",
"ok(https://$1?q=query)"),
("https://$1?q=query#anchor",
"ok(https://$1?q=query#anchor)"),
("https://$1/subpath/",
"ok(https://$1/subpath/)"),
("https://$1/subpath/q=query",
"ok(https://$1/subpath/q=query)"),
("https://$1/subpath/q=query#anchor",
"ok(https://$1/subpath/q=query#anchor)"),
("https://$1/subpath",
"ok(https://$1/subpath)"),
("https://$1/subpath?q=query",
"ok(https://$1/subpath?q=query)"),
("https://$1/subpath?q=query#anchor",
"ok(https://$1/subpath?q=query#anchor)"),
("$1:5052",
"ok(http://$1:5052)"),
("$1:5052?q=query",
"ok(http://$1:5052?q=query)"),
("$1:5052?q=query#anchor",
"ok(http://$1:5052?q=query#anchor)"),
("$1:5052/subpath/",
"ok(http://$1:5052/subpath/)"),
("$1:5052/subpath/q=query",
"ok(http://$1:5052/subpath/q=query)"),
("$1:5052/subpath/q=query#anchor",
"ok(http://$1:5052/subpath/q=query#anchor)"),
("$1:5052/subpath",
"ok(http://$1:5052/subpath)"),
("$1:5052/subpath?q=query",
"ok(http://$1:5052/subpath?q=query)"),
("$1:5052/subpath?q=query#anchor",
"ok(http://$1:5052/subpath?q=query#anchor)"),
("bnode://$1:5052",
"err(Unknown scheme value)"),
("bnode://$1:5052?q=query",
"err(Unknown scheme value)"),
("bnode://$1:5052?q=query#anchor",
"err(Unknown scheme value)"),
("bnode://$1:5052/subpath/",
"err(Unknown scheme value)"),
("bnode://$1:5052/subpath/q=query",
"err(Unknown scheme value)"),
("bnode://$1:5052/subpath/q=query#anchor",
"err(Unknown scheme value)"),
("bnode://$1:5052/subpath",
"err(Unknown scheme value)"),
("bnode://$1:5052/subpath?q=query",
"err(Unknown scheme value)"),
("bnode://$1:5052/subpath?q=query#anchor",
"err(Unknown scheme value)"),
("//$1:5052",
"ok(http://$1:5052)"),
("//$1:5052?q=query",
"ok(http://$1:5052?q=query)"),
("//$1:5052?q=query#anchor",
"ok(http://$1:5052?q=query#anchor)"),
("//$1:5052/subpath/",
"ok(http://$1:5052/subpath/)"),
("//$1:5052/subpath/q=query",
"ok(http://$1:5052/subpath/q=query)"),
("//$1:5052/subpath/q=query#anchor",
"ok(http://$1:5052/subpath/q=query#anchor)"),
("//$1:5052/subpath",
"ok(http://$1:5052/subpath)"),
("//$1:5052/subpath?q=query",
"ok(http://$1:5052/subpath?q=query)"),
("//$1:5052/subpath?q=query#anchor",
"ok(http://$1:5052/subpath?q=query#anchor)"),
("//$1", "err(Missing port number)"),
("//$1?q=query", "err(Missing port number)"),
("//$1?q=query#anchor", "err(Missing port number)"),
("//$1/subpath/", "err(Missing port number)"),
("//$1/subpath/q=query", "err(Missing port number)"),
("//$1/subpath/q=query#anchor", "err(Missing port number)"),
("//$1/subpath", "err(Missing port number)"),
("//$1/subpath?q=query", "err(Missing port number)"),
("//$1/subpath?q=query#anchor", "err(Missing port number)"),
("$1", "err(Missing port number)"),
("$1?q=query", "err(Missing port number)"),
("$1?q=query#anchor", "err(Missing port number)"),
("$1/subpath/", "err(Missing port number)"),
("$1/subpath/q=query", "err(Missing port number)"),
("$1/subpath/q=query#anchor", "err(Missing port number)"),
("$1/subpath", "err(Missing port number)"),
("$1/subpath?q=query", "err(Missing port number)"),
("$1/subpath?q=query#anchor", "err(Missing port number)"),
("", "err(Missing hostname)")
]
suite "Validator Client test suite":
test "normalizeUri() test vectors":
for hostname in HostNames:
for vector in GoodTestVectors:
let expect = vector[1] % (hostname)
check $normalizeUri(parseUri(vector[0] % (hostname))) == expect