chore: merge dev to master (#25)

This commit is contained in:
Hansie Odendaal 2026-02-05 08:23:14 +02:00 committed by GitHub
parent 70c40615d0
commit 59697f0830
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
46 changed files with 1278 additions and 365 deletions

155
Cargo.lock generated
View File

@ -801,9 +801,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
version = "1.11.0"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3"
checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"
dependencies = [
"serde",
]
@ -1254,7 +1254,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de"
dependencies = [
"data-encoding",
"syn 2.0.114",
"syn 1.0.109",
]
[[package]]
@ -3389,7 +3389,7 @@ checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
[[package]]
name = "logos-blockchain-api-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"bytes",
@ -3415,7 +3415,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-blend"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"logos-blockchain-blend-crypto",
"logos-blockchain-blend-message",
@ -3427,7 +3427,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-blend-crypto"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"blake2",
"logos-blockchain-groth16",
@ -3441,7 +3441,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-blend-message"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"blake2",
"derivative",
@ -3463,7 +3463,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-blend-network"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"either",
"futures",
@ -3481,7 +3481,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-blend-proofs"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"ed25519-dalek",
"generic-array 1.3.5",
@ -3496,7 +3496,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-blend-scheduling"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"derivative",
@ -3519,7 +3519,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-blend-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"fork_stream",
@ -3554,7 +3554,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-chain-broadcast-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"derivative",
@ -3570,16 +3570,17 @@ dependencies = [
[[package]]
name = "logos-blockchain-chain-leader-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"futures",
"logos-blockchain-blend-service",
"logos-blockchain-chain-network-service",
"logos-blockchain-chain-service",
"logos-blockchain-chain-service-common",
"logos-blockchain-core",
"logos-blockchain-cryptarchia-engine",
"logos-blockchain-key-management-system-keys",
"logos-blockchain-key-management-system-service",
"logos-blockchain-ledger",
"logos-blockchain-services-utils",
"logos-blockchain-time-service",
@ -3597,7 +3598,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-chain-network-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"futures",
@ -3625,7 +3626,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-chain-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"bytes",
@ -3655,7 +3656,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-chain-service-common"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"logos-blockchain-core",
"serde",
@ -3664,7 +3665,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-circuits-prover"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"logos-blockchain-circuits-utils",
"tempfile",
@ -3673,7 +3674,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-circuits-utils"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"dirs",
]
@ -3681,7 +3682,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-common-http-client"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"futures",
"logos-blockchain-chain-broadcast-service",
@ -3698,7 +3699,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-core"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"ark-ff 0.4.2",
"bincode",
@ -3706,15 +3707,16 @@ dependencies = [
"bytes",
"const-hex",
"futures",
"generic-array 1.3.5",
"hex",
"logos-blockchain-blend-proofs",
"logos-blockchain-cryptarchia-engine",
"logos-blockchain-groth16",
"logos-blockchain-key-management-system-keys",
"logos-blockchain-poc",
"logos-blockchain-pol",
"logos-blockchain-poseidon2",
"logos-blockchain-utils",
"logos-blockchain-utxotree",
"multiaddr",
"nom 8.0.0",
"num-bigint",
@ -3727,7 +3729,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-cryptarchia-engine"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"cfg_eval",
"logos-blockchain-utils",
@ -3742,7 +3744,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-cryptarchia-sync"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"bytes",
"futures",
@ -3761,7 +3763,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-groth16"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"ark-bn254 0.4.0",
"ark-ec 0.4.2",
@ -3779,7 +3781,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-http-api-common"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"axum",
"governor",
@ -3794,13 +3796,12 @@ dependencies = [
[[package]]
name = "logos-blockchain-key-management-system-keys"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"bytes",
"ed25519-dalek",
"generic-array 1.3.5",
"logos-blockchain-blend-proofs",
"logos-blockchain-groth16",
"logos-blockchain-key-management-system-macros",
"logos-blockchain-poseidon2",
@ -3820,21 +3821,38 @@ dependencies = [
[[package]]
name = "logos-blockchain-key-management-system-macros"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.114",
]
[[package]]
name = "logos-blockchain-key-management-system-operators"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"logos-blockchain-blend-proofs",
"logos-blockchain-core",
"logos-blockchain-groth16",
"logos-blockchain-key-management-system-keys",
"logos-blockchain-poseidon2",
"logos-blockchain-utxotree",
"tokio",
"tracing",
]
[[package]]
name = "logos-blockchain-key-management-system-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"log",
"logos-blockchain-key-management-system-keys",
"logos-blockchain-key-management-system-operators",
"overwatch",
"serde",
"thiserror 2.0.18",
@ -3845,7 +3863,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-ledger"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"derivative",
"logos-blockchain-blend-crypto",
@ -3855,7 +3873,7 @@ dependencies = [
"logos-blockchain-cryptarchia-engine",
"logos-blockchain-groth16",
"logos-blockchain-key-management-system-keys",
"logos-blockchain-mmr",
"logos-blockchain-pol",
"logos-blockchain-utils",
"logos-blockchain-utxotree",
"num-bigint",
@ -3869,7 +3887,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-libp2p"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"backon",
@ -3895,22 +3913,10 @@ dependencies = [
"zerocopy",
]
[[package]]
name = "logos-blockchain-mmr"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
dependencies = [
"ark-ff 0.4.2",
"logos-blockchain-groth16",
"logos-blockchain-poseidon2",
"rpds",
"serde",
]
[[package]]
name = "logos-blockchain-network-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"futures",
@ -3929,7 +3935,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-node"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"axum",
@ -3965,7 +3971,6 @@ dependencies = [
"logos-blockchain-tx-service",
"logos-blockchain-utils",
"logos-blockchain-wallet-service",
"num-bigint",
"overwatch",
"serde",
"serde_ignored",
@ -3983,10 +3988,25 @@ dependencies = [
"utoipa-swagger-ui",
]
[[package]]
name = "logos-blockchain-poc"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"logos-blockchain-circuits-prover",
"logos-blockchain-circuits-utils",
"logos-blockchain-groth16",
"logos-blockchain-witness-generator",
"num-bigint",
"serde",
"serde_json",
"thiserror 2.0.18",
]
[[package]]
name = "logos-blockchain-pol"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"logos-blockchain-circuits-prover",
"logos-blockchain-circuits-utils",
@ -4002,7 +4022,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-poq"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"logos-blockchain-circuits-prover",
"logos-blockchain-circuits-utils",
@ -4018,7 +4038,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-poseidon2"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"ark-bn254 0.4.0",
"ark-ff 0.4.2",
@ -4029,7 +4049,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-sdp-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"futures",
@ -4045,7 +4065,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-services-utils"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"futures",
@ -4060,7 +4080,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-storage-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"bytes",
@ -4078,7 +4098,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-system-sig-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-ctrlc",
"async-trait",
@ -4089,7 +4109,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-time-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"cfg_eval",
@ -4111,7 +4131,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-tracing"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"opentelemetry",
"opentelemetry-http",
@ -4134,7 +4154,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-tracing-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"logos-blockchain-tracing",
@ -4148,7 +4168,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-tx-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"futures",
@ -4170,7 +4190,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-utils"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"blake2",
@ -4187,10 +4207,9 @@ dependencies = [
[[package]]
name = "logos-blockchain-utxotree"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"ark-ff 0.4.2",
"logos-blockchain-core",
"logos-blockchain-groth16",
"logos-blockchain-poseidon2",
"num-bigint",
@ -4202,13 +4221,14 @@ dependencies = [
[[package]]
name = "logos-blockchain-wallet"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"logos-blockchain-core",
"logos-blockchain-key-management-system-keys",
"logos-blockchain-ledger",
"num-bigint",
"rpds",
"serde",
"thiserror 2.0.18",
"tracing",
]
@ -4216,7 +4236,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-wallet-service"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"async-trait",
"bytes",
@ -4229,6 +4249,7 @@ dependencies = [
"logos-blockchain-ledger",
"logos-blockchain-services-utils",
"logos-blockchain-storage-service",
"logos-blockchain-utxotree",
"logos-blockchain-wallet",
"overwatch",
"serde",
@ -4240,7 +4261,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-witness-generator"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"tempfile",
]
@ -4248,7 +4269,7 @@ dependencies = [
[[package]]
name = "logos-blockchain-zksign"
version = "0.1.0"
source = "git+https://github.com/logos-co/nomos-node.git?rev=3f15894f8b4df377e8d3cd9d92ddee9f648046dc#3f15894f8b4df377e8d3cd9d92ddee9f648046dc"
source = "git+https://github.com/logos-co/nomos-node.git?rev=2392190d88e8ae8271fa9321014ea33324be7c28#2392190d88e8ae8271fa9321014ea33324be7c28"
dependencies = [
"logos-blockchain-circuits-prover",
"logos-blockchain-circuits-utils",
@ -6437,7 +6458,6 @@ dependencies = [
"hex",
"logos-blockchain-api-service",
"logos-blockchain-blend-service",
"logos-blockchain-chain-leader-service",
"logos-blockchain-chain-network-service",
"logos-blockchain-chain-service",
"logos-blockchain-core",
@ -6569,6 +6589,7 @@ dependencies = [
"thiserror 2.0.18",
"tokio",
"tracing",
"tracing-subscriber 0.3.22",
]
[[package]]

View File

@ -40,39 +40,37 @@ testing-framework-runner-local = { default-features = false, path = "testing-f
testing-framework-workflows = { default-features = false, path = "testing-framework/workflows" }
# Logos git dependencies (pinned to latest master)
broadcast-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-broadcast-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
cfgsync_tf = { default-features = false, path = "testing-framework/tools/cfgsync_tf" }
chain-leader = { default-features = false, features = [
"pol-dev-mode",
], git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-leader-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
chain-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-network-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
chain-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
common-http-client = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-common-http-client", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
cryptarchia-engine = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cryptarchia-engine", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
cryptarchia-sync = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cryptarchia-sync", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
groth16 = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-groth16", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
key-management-system-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-key-management-system-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-api = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-api-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-blend-message = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-message", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-blend-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-core", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-http-api-common = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-http-api-common", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-ledger = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-ledger", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-libp2p = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-libp2p", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-network-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-node = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-node", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-sdp = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-sdp-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-time = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-time-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-tracing = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tracing", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-tracing-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tracing-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-utils = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-utils", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
nomos-wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-wallet-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
poc = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-poc", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
pol = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-pol", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
tests = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tests", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
tx-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tx-service", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-wallet", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
zksign = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-zksign", rev = "3f15894f8b4df377e8d3cd9d92ddee9f648046dc" }
broadcast-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-broadcast-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
cfgsync_tf = { default-features = false, path = "testing-framework/tools/cfgsync_tf" }
chain-leader = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-leader-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
chain-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-network-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
chain-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
common-http-client = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-common-http-client", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
cryptarchia-engine = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cryptarchia-engine", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
cryptarchia-sync = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cryptarchia-sync", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
groth16 = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-groth16", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
key-management-system-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-key-management-system-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-api = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-api-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-blend-message = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-message", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-blend-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-core", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-http-api-common = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-http-api-common", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-ledger = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-ledger", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-libp2p = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-libp2p", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-network-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-node = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-node", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-sdp = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-sdp-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-time = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-time-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-tracing = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tracing", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-tracing-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tracing-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-utils = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-utils", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
nomos-wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-wallet-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
poc = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-poc", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
pol = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-pol", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
tests = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tests", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
tx-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tx-service", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-wallet", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
zksign = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-zksign", rev = "2392190d88e8ae8271fa9321014ea33324be7c28" }
# External crates
async-trait = { default-features = false, version = "0.1" }

View File

@ -23,5 +23,7 @@ tracing-subscriber = { features = ["env-filter", "fmt"], version =
[dev-dependencies]
async-trait = { workspace = true }
[features]
[lints]
workspace = true

View File

@ -11,8 +11,8 @@ impl Workload for RestartWorkload {
async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
if let Some(control) = ctx.node_control() {
// Restart the first node (index 0) if supported.
control.restart_node(0).await?;
// Restart the first node by name if supported.
control.restart_node("node-0").await?;
}
Ok(())
}

View File

@ -3,5 +3,5 @@ use testing_framework_core::scenario::DynError;
#[async_trait]
pub trait NodeControlHandle: Send + Sync {
async fn restart_node(&self, index: usize) -> Result<(), DynError>;
async fn restart_node(&self, name: &str) -> Result<(), DynError>;
}

View File

@ -85,6 +85,7 @@ impl Workload for JoinNodeWithPeersWorkload {
let options = StartNodeOptions {
peers: PeerSelection::Named(self.peers.clone()),
config_patch: None,
};
let node = handle.start_node_with(&self.name, options).await?;
let client = node.api;

View File

@ -32,6 +32,7 @@ async fn manual_cluster_two_clusters_merge() -> Result<()> {
"a",
StartNodeOptions {
peers: PeerSelection::None,
config_patch: None,
},
)
.await?
@ -46,6 +47,7 @@ async fn manual_cluster_two_clusters_merge() -> Result<()> {
"c",
StartNodeOptions {
peers: PeerSelection::Named(vec!["node-a".to_owned()]),
config_patch: None,
},
)
.await?

View File

@ -0,0 +1,115 @@
use std::{
net::{SocketAddr, TcpListener},
time::Duration,
};
use anyhow::Result;
use testing_framework_core::{
nodes::ApiClient,
scenario::{Deployer, PeerSelection, ScenarioBuilder, StartNodeOptions},
topology::config::TopologyConfig,
};
use testing_framework_runner_local::LocalDeployer;
use tracing_subscriber::fmt::try_init;
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_api_port_override`"]
async fn manual_cluster_api_port_override() -> Result<()> {
let _ = try_init();
// Required env vars (set on the command line when running this test):
// - `POL_PROOF_DEV_MODE=true`
// - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
// - `LOGOS_BLOCKCHAIN_CIRCUITS=...`
// - `RUST_LOG=info` (optional)
let api_port = random_api_port();
let deployer = LocalDeployer::new();
let cluster = deployer.manual_cluster(TopologyConfig::with_node_numbers(1))?;
let node = cluster
.start_node_with(
"override-api",
StartNodeOptions {
peers: PeerSelection::None,
config_patch: None,
}
.create_patch(move |mut config| {
println!("overriding API port to {api_port}");
let current_addr = config.user.http.backend_settings.address;
config.user.http.backend_settings.address =
SocketAddr::new(current_addr.ip(), api_port);
Ok(config)
}),
)
.await?
.api;
node.consensus_info()
.await
.expect("consensus_info should succeed");
assert_eq!(resolved_port(&node), api_port);
Ok(())
}
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored scenario_builder_api_port_override`"]
async fn scenario_builder_api_port_override() -> Result<()> {
let _ = try_init();
// Required env vars (set on the command line when running this test):
// - `POL_PROOF_DEV_MODE=true`
// - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
// - `LOGOS_BLOCKCHAIN_CIRCUITS=...`
// - `RUST_LOG=info` (optional)
let api_port = random_api_port();
let mut scenario = ScenarioBuilder::topology_with(|t| {
t.network_star()
.nodes(1)
.node_config_patch_with(0, move |mut config| {
println!("overriding API port to {api_port}");
let current_addr = config.user.http.backend_settings.address;
config.user.http.backend_settings.address =
SocketAddr::new(current_addr.ip(), api_port);
Ok(config)
})
})
.with_run_duration(Duration::from_secs(1))
.build()?;
let deployer = LocalDeployer::default();
let runner = deployer.deploy(&scenario).await?;
let handle = runner.run(&mut scenario).await?;
let client = handle
.context()
.node_clients()
.any_client()
.ok_or_else(|| anyhow::anyhow!("scenario did not expose any node clients"))?;
client
.consensus_info()
.await
.expect("consensus_info should succeed");
assert_eq!(resolved_port(&client), api_port);
Ok(())
}
fn random_api_port() -> u16 {
let listener = TcpListener::bind("127.0.0.1:0").expect("bind random API port");
listener.local_addr().expect("read API port").port()
}
fn resolved_port(client: &ApiClient) -> u16 {
client.base_url().port().unwrap_or_default()
}

View File

@ -0,0 +1,111 @@
use std::time::Duration;
use anyhow::{Result, anyhow};
use testing_framework_core::{
scenario::StartNodeOptions,
topology::{
config::{TopologyBuilder, TopologyConfig},
configs::network::Libp2pNetworkLayout,
},
};
use testing_framework_runner_local::LocalDeployer;
use testing_framework_workflows::{start_node_with_timeout, wait_for_min_height};
use tokio::time::{sleep, timeout};
use tracing_subscriber::fmt::try_init;
const MIN_HEIGHT: u64 = 5;
const INITIAL_READY_TIMEOUT: Duration = Duration::from_secs(500);
const CATCH_UP_TIMEOUT: Duration = Duration::from_secs(300);
const START_NODE_TIMEOUT: Duration = Duration::from_secs(90);
const TEST_TIMEOUT: Duration = Duration::from_secs(600);
const POLL_INTERVAL: Duration = Duration::from_secs(1);
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored orphan_manual_cluster`"]
async fn orphan_manual_cluster() -> Result<()> {
let _ = try_init();
// Required env vars (set on the command line when running this test):
// - `POL_PROOF_DEV_MODE=true`
// - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
// - `NOMOS_KZGRS_PARAMS_PATH=...` (path to KZG params directory/file)
// - `RUST_LOG=info` (optional; better visibility)
let config = TopologyConfig::with_node_numbers(3);
timeout(TEST_TIMEOUT, async {
let builder = TopologyBuilder::new(config).with_network_layout(Libp2pNetworkLayout::Full);
let deployer = LocalDeployer::new();
let cluster = deployer.manual_cluster_with_builder(builder)?;
// Nodes are stopped automatically when the cluster is dropped.
let node_a = start_node_with_timeout(
&cluster,
"a",
StartNodeOptions::default(),
START_NODE_TIMEOUT,
)
.await?
.api;
let node_b = start_node_with_timeout(
&cluster,
"b",
StartNodeOptions::default(),
START_NODE_TIMEOUT,
)
.await?
.api;
wait_for_min_height(
&[node_a.clone(), node_b.clone()],
MIN_HEIGHT,
INITIAL_READY_TIMEOUT,
POLL_INTERVAL,
)
.await?;
let behind_node = start_node_with_timeout(
&cluster,
"c",
StartNodeOptions::default(),
START_NODE_TIMEOUT,
)
.await?
.api;
timeout(CATCH_UP_TIMEOUT, async {
loop {
let node_a_info = node_a
.consensus_info()
.await
.map_err(|err| anyhow!("node-a consensus_info failed: {err}"))?;
let node_b_info = node_b
.consensus_info()
.await
.map_err(|err| anyhow!("node-b consensus_info failed: {err}"))?;
let behind_info = behind_node
.consensus_info()
.await
.map_err(|err| anyhow!("node-c consensus_info failed: {err}"))?;
let initial_min_height = node_a_info.height.min(node_b_info.height);
if behind_info.height >= initial_min_height.saturating_sub(1) {
return Ok::<(), anyhow::Error>(());
}
sleep(POLL_INTERVAL).await;
}
})
.await
.map_err(|_| anyhow!("timeout waiting for behind node to catch up"))??;
Ok::<(), anyhow::Error>(())
})
.await
.map_err(|_| anyhow!("test timeout exceeded"))??;
Ok(())
}

View File

@ -309,11 +309,14 @@ build_bundle::prepare_circuits() {
}
build_bundle::build_binaries() {
BUILD_FEATURES_LABEL="all"
BUILD_FEATURES_LABEL="all,high-active-slot-coefficient,verification-keys"
echo "==> Building binaries (platform=${PLATFORM})"
mkdir -p "${NODE_SRC}"
(
cd "${NODE_SRC}"
if [ -d "${NODE_TARGET}" ]; then
rm -rf "${NODE_TARGET}"
fi
if [ -n "${LOGOS_BLOCKCHAIN_NODE_PATH}" ]; then
echo "Using local logos-blockchain-node checkout at ${NODE_SRC} (no fetch/checkout)"
else
@ -326,18 +329,16 @@ build_bundle::build_binaries() {
git clean -fdx
fi
if [ -z "${LOGOS_BLOCKCHAIN_NODE_PATH}" ]; then
build_bundle::apply_nomos_node_patches "${NODE_SRC}"
fi
unset CARGO_FEATURE_BUILD_VERIFICATION_KEY
if [ -n "${BUNDLE_RUSTUP_TOOLCHAIN}" ]; then
RUSTFLAGS='--cfg feature="pol-dev-mode"' \
RUSTFLAGS='--cfg feature="high-active-slot-coefficient" --cfg feature="build-verification-key"' \
CARGO_FEATURE_BUILD_VERIFICATION_KEY=1 \
RUSTUP_TOOLCHAIN="${BUNDLE_RUSTUP_TOOLCHAIN}" \
cargo build --all-features \
-p logos-blockchain-node \
--target-dir "${NODE_TARGET}"
else
RUSTFLAGS='--cfg feature="pol-dev-mode"' \
RUSTFLAGS='--cfg feature="high-active-slot-coefficient" --cfg feature="build-verification-key"' \
CARGO_FEATURE_BUILD_VERIFICATION_KEY=1 \
cargo build --all-features \
-p logos-blockchain-node \
--target-dir "${NODE_TARGET}"

View File

@ -60,6 +60,7 @@ Environment:
LOGOS_BLOCKCHAIN_TESTNET_IMAGE_PULL_POLICY K8s imagePullPolicy (default ${DEFAULT_PULL_POLICY_LOCAL}; set to ${DEFAULT_PULL_POLICY_ECR} for --ecr)
LOGOS_BLOCKCHAIN_BINARIES_TAR Path to prebuilt binaries tarball (default .tmp/nomos-binaries-<platform>-<version>.tar.gz)
LOGOS_BLOCKCHAIN_CIRCUITS Directory containing circuits assets (defaults to ~/.logos-blockchain-circuits)
CARGO_FEATURE_BUILD_VERIFICATION_KEY Build flag to embed Groth16 verification keys in node binaries (recommended for host)
LOGOS_BLOCKCHAIN_SKIP_IMAGE_BUILD Set to 1 to skip rebuilding the compose/k8s image
LOGOS_BLOCKCHAIN_FORCE_IMAGE_BUILD Set to 1 to force image rebuild even for k8s ECR mode
LOGOS_BLOCKCHAIN_METRICS_QUERY_URL PromQL base URL for the runner process (optional)
@ -301,8 +302,9 @@ run_examples::bundle_matches_expected() {
local tar_path="$1"
[ -f "${tar_path}" ] || return 1
[ -z "${LOGOS_BLOCKCHAIN_NODE_REV:-}" ] && return 0
local expected_features="${RUN_EXAMPLES_EXPECTED_BUNDLE_FEATURES:-all,high-active-slot-coefficient,verification-keys}"
local meta tar_rev tar_head
local meta tar_rev tar_head tar_features
meta="$(tar -xOzf "${tar_path}" artifacts/nomos-bundle-meta.env 2>/dev/null || true)"
if [ -z "${meta}" ]; then
echo "Bundle meta missing in ${tar_path}; treating as stale and rebuilding." >&2
@ -310,6 +312,11 @@ run_examples::bundle_matches_expected() {
fi
tar_rev="$(echo "${meta}" | sed -n 's/^nomos_node_rev=//p' | head -n 1)"
tar_head="$(echo "${meta}" | sed -n 's/^nomos_node_git_head=//p' | head -n 1)"
tar_features="$(echo "${meta}" | sed -n 's/^features=//p' | head -n 1)"
if [ -n "${expected_features}" ] && [ "${tar_features}" != "${expected_features}" ]; then
echo "Bundle ${tar_path} features '${tar_features}' do not match expected '${expected_features}'; rebuilding." >&2
return 1
fi
if [ -n "${tar_rev}" ] && [ "${tar_rev}" != "${LOGOS_BLOCKCHAIN_NODE_REV}" ]; then
echo "Bundle ${tar_path} is for logos-blockchain-node rev ${tar_rev}, expected ${LOGOS_BLOCKCHAIN_NODE_REV}; rebuilding." >&2
return 1
@ -501,6 +508,8 @@ run_examples::run() {
if [ "${MODE}" = "host" ]; then
run_examples::ensure_circuits
# Ensure Groth16 verification keys are embedded when building local node binaries.
export CARGO_FEATURE_BUILD_VERIFICATION_KEY=1
fi
echo "==> Running ${BIN} for ${RUN_SECS}s (mode=${MODE}, image=${IMAGE})"

View File

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
RUSTFLAGS='--cfg feature="pol-dev-mode"' \
RUSTFLAGS='--cfg feature="high-active-slot-coefficient"' \
cargo build --all-features --manifest-path /workspace/testing-framework/tools/cfgsync_tf/Cargo.toml --bins
cp /workspace/target/debug/cfgsync-server /workspace/artifacts/cfgsync-server

View File

@ -50,8 +50,8 @@ git checkout "${LOGOS_BLOCKCHAIN_NODE_REV}"
git reset --hard
git clean -fdx
# Enable pol-dev-mode via cfg to let POL_PROOF_DEV_MODE short-circuit proofs in tests.
RUSTFLAGS='--cfg feature="pol-dev-mode"' \
# Enable high-active-slot-coefficient via cfg to keep test blocks frequent.
RUSTFLAGS='--cfg feature="high-active-slot-coefficient"' \
cargo build --features "testing" -p logos-blockchain-node
cp /tmp/nomos-node/target/debug/logos-blockchain-node /workspace/artifacts/logos-blockchain-node

View File

@ -10,7 +10,6 @@ repository.workspace = true
version = "0.1.0"
[dependencies]
chain-leader = { workspace = true }
chain-network = { workspace = true }
chain-service = { workspace = true }
cryptarchia-engine = { features = ["serde"], workspace = true }

View File

@ -32,6 +32,7 @@ const EPOCH_TRANSITION_SLOTS: u64 = 2_600;
const SAFETY_BUFFER_INTERVALS: u64 = 100;
const MESSAGE_FREQUENCY_PER_ROUND: f64 = 1.0;
const MAX_RELEASE_DELAY_ROUNDS: u64 = 3;
const DATA_REPLICATION_FACTOR: u64 = 0;
pub(crate) fn build_blend_service_config(
config: &TopologyBlendConfig,
@ -106,6 +107,7 @@ fn build_blend_deployment_settings(
common: blend_deployment::CommonSettings {
num_blend_layers: unsafe { NonZeroU64::new_unchecked(BLEND_LAYERS_COUNT) },
minimum_network_size: unsafe { NonZeroU64::new_unchecked(MINIMUM_NETWORK_SIZE) },
data_replication_factor: DATA_REPLICATION_FACTOR,
timing: TimingSettings {
round_duration: Duration::from_secs(ROUND_DURATION_SECS),
rounds_per_interval: unsafe { NonZeroU64::new_unchecked(ROUNDS_PER_INTERVAL) },

View File

@ -1,8 +1,13 @@
use std::{collections::HashSet, num::NonZeroUsize, path::PathBuf, time::Duration};
use std::{
collections::{HashMap, HashSet},
num::NonZeroUsize,
path::PathBuf,
time::Duration,
};
use chain_leader::LeaderConfig as ChainLeaderConfig;
use chain_network::{BootstrapConfig as ChainBootstrapConfig, OrphanConfig, SyncConfig};
use chain_service::StartingState;
use key_management_system_service::keys::Key;
use nomos_api::ApiServiceSettings;
use nomos_node::{
api::backend::AxumBackendSettings as NodeAxumBackendSettings,
@ -22,7 +27,7 @@ use nomos_node::{
};
use nomos_wallet::WalletServiceSettings;
use crate::{timeouts, topology::configs::GeneralConfig};
use crate::{nodes::kms::key_id_for_preload_backend, timeouts, topology::configs::GeneralConfig};
// Configuration constants
const CRYPTARCHIA_GOSSIPSUB_PROTOCOL: &str = "/cryptarchia/proto";
@ -37,7 +42,11 @@ const API_MAX_CONCURRENT_REQUESTS: usize = 1000;
pub(crate) fn cryptarchia_deployment(config: &GeneralConfig) -> CryptarchiaDeploymentSettings {
CryptarchiaDeploymentSettings {
epoch_config: config.consensus_config.ledger_config.epoch_config,
consensus_config: config.consensus_config.ledger_config.consensus_config,
security_param: config
.consensus_config
.ledger_config
.consensus_config
.security_param(),
sdp_config: DeploymentSdpConfig {
service_params: config
.consensus_config
@ -69,9 +78,7 @@ pub(crate) fn cryptarchia_config(config: &GeneralConfig) -> CryptarchiaConfig {
starting_state: StartingState::Genesis {
genesis_tx: config.consensus_config.genesis_tx.clone(),
},
// Disable on-disk recovery in compose tests to avoid serde errors on
// non-string keys and keep services alive.
recovery_file: PathBuf::new(),
recovery_file: PathBuf::from("recovery/cryptarchia.json"),
bootstrap: chain_service::BootstrapConfig {
prolonged_bootstrap_period: config.bootstrapping_config.prolonged_bootstrap_period,
force_bootstrap: false,
@ -94,10 +101,6 @@ pub(crate) fn cryptarchia_config(config: &GeneralConfig) -> CryptarchiaConfig {
},
},
},
leader: ChainLeaderConfig {
pk: config.consensus_config.leader_config.pk,
sk: config.consensus_config.leader_config.sk.clone(),
},
}
}
@ -117,8 +120,7 @@ pub(crate) fn time_config(config: &GeneralConfig) -> TimeConfig {
pub(crate) fn mempool_config() -> nomos_node::config::mempool::serde::Config {
nomos_node::config::mempool::serde::Config {
// Disable mempool recovery for hermetic tests.
recovery_path: PathBuf::new(),
recovery_path: PathBuf::from("recovery/mempool.json"),
}
}
@ -160,19 +162,30 @@ fn wallet_settings_with_leader(
config: &GeneralConfig,
include_leader: bool,
) -> WalletServiceSettings {
let mut keys = HashSet::new();
let mut keys = HashMap::new();
if include_leader {
keys.insert(config.consensus_config.leader_config.pk);
let leader_key = Key::Zk(config.consensus_config.leader_sk.clone().into());
let leader_key_id = key_id_for_preload_backend(&leader_key);
keys.insert(leader_key_id, config.consensus_config.leader_pk);
}
keys.extend(
config
.consensus_config
.wallet_accounts
.iter()
.map(crate::topology::configs::wallet::WalletAccount::public_key),
let funding_key = Key::Zk(config.consensus_config.funding_sk.clone());
let funding_key_id = key_id_for_preload_backend(&funding_key);
keys.insert(
funding_key_id,
config.consensus_config.funding_sk.to_public_key(),
);
WalletServiceSettings { known_keys: keys }
// Note: wallet accounts are used by the transaction workload directly and
// don't need to be registered for leader eligibility.
let voucher_master_key_id =
key_id_for_preload_backend(&Key::Zk(config.consensus_config.leader_sk.clone().into()));
WalletServiceSettings {
known_keys: keys,
voucher_master_key_id,
recovery_path: PathBuf::from("recovery/wallet.json"),
}
}

View File

@ -5,7 +5,6 @@ use std::{
sync::Arc,
};
use chain_leader::LeaderConfig;
use cryptarchia_engine::EpochConfig;
use groth16::CompressedGroth16Proof;
use key_management_system_service::keys::{
@ -13,7 +12,7 @@ use key_management_system_service::keys::{
};
use nomos_core::{
mantle::{
MantleTx, Note, OpProof, Utxo,
GenesisTx as GenesisTxTrait, MantleTx, Note, OpProof, Utxo,
genesis_tx::GenesisTx,
ledger::Tx as LedgerTx,
ops::{
@ -39,6 +38,8 @@ pub enum ConsensusConfigError {
LedgerConfig { message: String },
#[error("failed to sign genesis declarations: {message}")]
DeclarationSignature { message: String },
#[error("genesis ledger is missing expected utxo note: {note}")]
MissingGenesisUtxo { note: String },
}
#[derive(Clone)]
@ -49,7 +50,7 @@ pub struct ConsensusParams {
}
impl ConsensusParams {
const DEFAULT_ACTIVE_SLOT_COEFF: f64 = 0.9;
const DEFAULT_ACTIVE_SLOT_COEFF: f64 = 1.0;
const CONSENSUS_ACTIVE_SLOT_COEFF_VAR: &str = "CONSENSUS_ACTIVE_SLOT_COEFF";
#[must_use]
@ -98,7 +99,8 @@ impl ProviderInfo {
/// be converted into a specific service or services configuration.
#[derive(Clone)]
pub struct GeneralConsensusConfig {
pub leader_config: LeaderConfig,
pub leader_pk: ZkPublicKey,
pub leader_sk: UnsecuredZkKey,
pub ledger_config: nomos_ledger::Config,
pub genesis_tx: GenesisTx,
pub utxos: Vec<Utxo>,
@ -115,7 +117,7 @@ pub struct ServiceNote {
pub output_index: usize,
}
fn create_genesis_tx(utxos: &[Utxo]) -> Result<GenesisTx, ConsensusConfigError> {
fn create_genesis_tx(utxos: &mut [Utxo]) -> Result<GenesisTx, ConsensusConfigError> {
// Create a genesis inscription op (similar to config.yaml)
let inscription = InscriptionOp {
channel_id: ChannelId::from([0; 32]),
@ -131,6 +133,12 @@ fn create_genesis_tx(utxos: &[Utxo]) -> Result<GenesisTx, ConsensusConfigError>
// Create ledger transaction with the utxos as outputs
let outputs: Vec<Note> = utxos.iter().map(|u| u.note).collect();
let ledger_tx = LedgerTx::new(vec![], outputs);
let ledger_tx_hash = ledger_tx.hash();
// Ensure utxo IDs match the ledger tx hash used at genesis.
for utxo in utxos {
utxo.tx_hash = ledger_tx_hash;
}
// Create the mantle transaction
let mantle_tx = MantleTx {
@ -160,10 +168,10 @@ fn build_ledger_config(
epoch_period_nonce_buffer: unsafe { NonZero::new_unchecked(3) },
epoch_period_nonce_stabilization: unsafe { NonZero::new_unchecked(4) },
},
consensus_config: cryptarchia_engine::Config {
security_param: consensus_params.security_param,
active_slot_coeff: consensus_params.active_slot_coeff,
},
consensus_config: cryptarchia_engine::Config::new(
consensus_params.security_param,
consensus_params.active_slot_coeff,
),
sdp_config: nomos_ledger::mantle::sdp::Config {
service_params: Arc::new(
[(
@ -192,6 +200,7 @@ fn build_ledger_config(
})?,
num_blend_layers: unsafe { NonZeroU64::new_unchecked(3) },
minimum_network_size: unsafe { NonZeroU64::new_unchecked(1) },
data_replication_factor: 0,
},
},
},
@ -208,21 +217,24 @@ pub fn create_consensus_configs(
let mut blend_notes = Vec::new();
let mut sdp_notes = Vec::new();
let leader_stake = leader_stake_amount(wallet, ids.len());
let utxos = create_utxos_for_leader_and_services(
ids,
&mut leader_keys,
&mut blend_notes,
&mut sdp_notes,
leader_stake,
);
let utxos = append_wallet_utxos(utxos, wallet);
let genesis_tx = create_genesis_tx(&utxos)?;
let mut utxos = append_wallet_utxos(utxos, wallet);
let genesis_tx = create_genesis_tx(&mut utxos)?;
let ledger_config = build_ledger_config(consensus_params)?;
Ok(leader_keys
.into_iter()
.enumerate()
.map(|(i, (pk, sk))| GeneralConsensusConfig {
leader_config: LeaderConfig { pk, sk },
leader_pk: pk,
leader_sk: sk,
ledger_config: ledger_config.clone(),
genesis_tx: genesis_tx.clone(),
utxos: utxos.clone(),
@ -233,20 +245,48 @@ pub fn create_consensus_configs(
.collect())
}
fn leader_stake_amount(wallet: &WalletConfig, n_participants: usize) -> u64 {
// Minimum leader stake (legacy baseline) so small test wallets still
// have a viable leader in low-fund scenarios.
const MIN_LEADER_STAKE: u64 = 100_000;
// Leader stake multiplier relative to average wallet allocation per validator.
// Keeps the leader stake competitive when wallet-funded UTXOs dominate total
// stake.
const LEADER_STAKE_MULTIPLIER: u64 = 10;
let total_wallet_funds: u64 = wallet.accounts.iter().map(|account| account.value).sum();
if total_wallet_funds == 0 {
return MIN_LEADER_STAKE;
}
let n = n_participants.max(1) as u64;
// Scale leader stake to stay competitive with large wallet-funded UTXOs.
// We use LEADER_STAKE_MULTIPLIER × (total_wallet_funds / n) to keep
// block production likely even when wallets dominate total stake.
let scaled = total_wallet_funds
.saturating_mul(LEADER_STAKE_MULTIPLIER)
.saturating_div(n)
.max(1);
// Floor to preserve the prior baseline leader stake and avoid too-small values.
scaled.max(MIN_LEADER_STAKE)
}
fn create_utxos_for_leader_and_services(
ids: &[[u8; 32]],
leader_keys: &mut Vec<(ZkPublicKey, UnsecuredZkKey)>,
blend_notes: &mut Vec<ServiceNote>,
sdp_notes: &mut Vec<ServiceNote>,
leader_stake: u64,
) -> Vec<Utxo> {
let mut utxos = Vec::new();
// Assume output index which will be set by the ledger tx.
let mut output_index = 0;
// Create notes for leader, Blend and DA declarations.
let mut output_index = 0;
for &id in ids {
output_index = push_leader_utxo(id, leader_keys, &mut utxos, output_index);
output_index = push_leader_utxo(id, leader_keys, &mut utxos, output_index, leader_stake);
output_index = push_service_note(b"bn", id, blend_notes, &mut utxos, output_index);
output_index = push_service_note(b"sdp", id, sdp_notes, &mut utxos, output_index);
}
@ -270,15 +310,16 @@ fn push_leader_utxo(
leader_keys: &mut Vec<(ZkPublicKey, UnsecuredZkKey)>,
utxos: &mut Vec<Utxo>,
output_index: usize,
leader_stake: u64,
) -> usize {
let sk_data = derive_key_material(b"ld", &id);
let sk = UnsecuredZkKey::from(BigUint::from_bytes_le(&sk_data));
let pk = sk.to_public_key();
leader_keys.push((pk, sk));
utxos.push(Utxo {
note: Note::new(1_000, pk),
note: Note::new(leader_stake, pk),
tx_hash: BigUint::from(0u8).into(),
output_index: 0,
output_index,
});
output_index + 1
}
@ -303,17 +344,18 @@ fn push_service_note(
utxos.push(Utxo {
note,
tx_hash: BigUint::from(0u8).into(),
output_index: 0,
output_index,
});
output_index + 1
}
fn append_wallet_utxos(mut utxos: Vec<Utxo>, wallet: &WalletConfig) -> Vec<Utxo> {
for account in &wallet.accounts {
let output_index = utxos.len();
utxos.push(Utxo {
note: Note::new(account.value, account.public_key()),
tx_hash: BigUint::from(0u8).into(),
output_index: 0,
output_index,
});
}
@ -420,3 +462,25 @@ fn build_genesis_tx(
message: err.to_string(),
})
}
pub fn sync_utxos_with_genesis(
utxos: &mut [Utxo],
genesis_tx: &GenesisTx,
) -> Result<(), ConsensusConfigError> {
let ledger_tx = genesis_tx.mantle_tx().ledger_tx.clone();
let ledger_tx_hash = ledger_tx.hash();
let outputs = &ledger_tx.outputs;
for utxo in utxos {
let output_index = outputs
.iter()
.position(|note| note == &utxo.note)
.ok_or_else(|| ConsensusConfigError::MissingGenesisUtxo {
note: format!("{:?}", utxo.note),
})?;
utxo.output_index = output_index;
utxo.tx_hash = ledger_tx_hash;
}
Ok(())
}

View File

@ -116,7 +116,7 @@ pub fn create_general_configs_with_blend_core_subset(
collect_blend_core_providers(first_consensus, &blend_configs, n_blend_core_nodes)?;
let ledger_tx = first_consensus.genesis_tx.mantle_tx().ledger_tx.clone();
let genesis_tx = create_genesis_tx_with_declarations(ledger_tx, providers)?;
apply_consensus_genesis_tx(&mut consensus_configs, &genesis_tx);
apply_consensus_genesis_tx(&mut consensus_configs, &genesis_tx)?;
// Set Blend and DA keys in KMS of each node config.
let kms_configs = build_kms_configs(&blend_configs);
@ -200,10 +200,13 @@ fn collect_blend_core_providers(
fn apply_consensus_genesis_tx(
consensus_configs: &mut [GeneralConsensusConfig],
genesis_tx: &nomos_core::mantle::genesis_tx::GenesisTx,
) {
) -> Result<(), ConsensusConfigError> {
for c in consensus_configs {
c.genesis_tx = genesis_tx.clone();
consensus::sync_utxos_with_genesis(&mut c.utxos, genesis_tx)?;
}
Ok(())
}
fn build_kms_configs(blend_configs: &[GeneralBlendConfig]) -> Vec<PreloadKMSBackendSettings> {

View File

@ -52,7 +52,7 @@ pub fn build_general_config_for_node(
.next()
.ok_or(GeneralConfigError::EmptyParticipants)?;
let kms_config = build_kms_config_for_node(&blend_config, wallet_config);
let kms_config = build_kms_config_for_node(&blend_config, wallet_config, &consensus_config);
Ok(GeneralConfig {
consensus_config,
@ -105,6 +105,7 @@ pub fn build_initial_peers(network_params: &NetworkParams, peer_ports: &[u16]) -
fn build_kms_config_for_node(
blend_config: &blend::GeneralBlendConfig,
wallet_config: &WalletConfig,
consensus_config: &GeneralConsensusConfig,
) -> PreloadKMSBackendSettings {
let mut keys = HashMap::from([
(
@ -115,6 +116,14 @@ fn build_kms_config_for_node(
key_id_for_preload_backend(&Key::Zk(blend_config.secret_zk_key.clone())),
Key::Zk(blend_config.secret_zk_key.clone()),
),
(
key_id_for_preload_backend(&Key::Zk(consensus_config.leader_sk.clone().into())),
Key::Zk(consensus_config.leader_sk.clone().into()),
),
(
key_id_for_preload_backend(&Key::Zk(consensus_config.funding_sk.clone())),
Key::Zk(consensus_config.funding_sk.clone()),
),
]);
for account in &wallet_config.accounts {

View File

@ -6,7 +6,7 @@ use std::{
use time::OffsetDateTime;
const DEFAULT_SLOT_TIME: u64 = 2;
const DEFAULT_SLOT_TIME: u64 = 1;
const CONSENSUS_SLOT_TIME_VAR: &str = "CONSENSUS_SLOT_TIME";
const DEFAULT_NTP_SERVER: &str = "pool.ntp.org";
const DEFAULT_NTP_TIMEOUT: Duration = Duration::from_secs(5);

View File

@ -1,10 +1,10 @@
use async_trait::async_trait;
use crate::scenario::{DynError, StartNodeOptions, StartedNode};
use crate::scenario::{DynError, NodeControlHandle, StartNodeOptions, StartedNode};
/// Interface for imperative, deployer-backed manual clusters.
#[async_trait]
pub trait ManualClusterHandle: Send + Sync {
pub trait ManualClusterHandle: NodeControlHandle {
async fn start_node_with(
&self,
name: &str,

View File

@ -11,6 +11,16 @@ pub fn ensure_recovery_paths(base_dir: &Path) -> io::Result<()> {
fs::write(&mempool_path, "{}")?;
}
let cryptarchia_path = recovery_dir.join("cryptarchia.json");
if !cryptarchia_path.exists() {
fs::write(&cryptarchia_path, "{}")?;
}
let wallet_path = recovery_dir.join("wallet.json");
if !wallet_path.exists() {
fs::write(&wallet_path, "{}")?;
}
let blend_core_path = recovery_dir.join("blend").join("core.json");
if let Some(parent) = blend_core_path.parent() {
fs::create_dir_all(parent)?;

View File

@ -195,7 +195,7 @@ fn write_node_config<C: Serialize>(config: &C, config_path: &Path) -> Result<(),
})
}
fn spawn_node_process(
pub(crate) fn spawn_node_process(
binary_path: &Path,
config_path: &Path,
workdir: &Path,
@ -213,7 +213,9 @@ fn spawn_node_process(
})
}
async fn wait_for_consensus_readiness(api: &ApiClient) -> Result<(), time::error::Elapsed> {
pub(crate) async fn wait_for_consensus_readiness(
api: &ApiClient,
) -> Result<(), time::error::Elapsed> {
time::timeout(STARTUP_TIMEOUT, async {
loop {
if api.consensus_info().await.is_ok() {

View File

@ -13,12 +13,18 @@ use crate::{
common::{
binary::{BinaryConfig, BinaryResolver},
lifecycle::{kill::kill_child, monitor::is_running},
node::{NodeAddresses, NodeConfigCommon, NodeHandle, SpawnNodeError, spawn_node},
node::{
NodeAddresses, NodeConfigCommon, NodeHandle, SpawnNodeError, spawn_node,
spawn_node_process, wait_for_consensus_readiness,
},
},
},
scenario::DynError,
topology::config::NodeConfigPatch,
};
const BIN_PATH: &str = "target/debug/logos-blockchain-node";
const RESTART_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10);
fn binary_path() -> PathBuf {
let cfg = BinaryConfig {
@ -34,6 +40,23 @@ pub struct Node {
handle: NodeHandle<RunConfig>,
}
pub fn apply_node_config_patches<'a>(
mut config: RunConfig,
patches: impl IntoIterator<Item = &'a NodeConfigPatch>,
) -> Result<RunConfig, DynError> {
for patch in patches {
config = patch(config)?;
}
Ok(config)
}
pub fn apply_node_config_patch(
config: RunConfig,
patch: &NodeConfigPatch,
) -> Result<RunConfig, DynError> {
apply_node_config_patches(config, [patch])
}
impl Deref for Node {
type Target = NodeHandle<RunConfig>;
@ -56,6 +79,12 @@ impl Drop for Node {
}
impl Node {
/// Return the current process id for the running node.
#[must_use]
pub fn pid(&self) -> u32 {
self.handle.child.id()
}
/// Check if the node process is still running
pub fn is_running(&mut self) -> bool {
is_running(&mut self.handle.child)
@ -82,6 +111,40 @@ impl Node {
Ok(Self { handle })
}
/// Restart the node process using the existing config and data directory.
pub async fn restart(&mut self) -> Result<(), SpawnNodeError> {
let old_pid = self.pid();
debug!(old_pid, "restarting node process");
kill_child(&mut self.handle.child);
let _ = self.wait_for_exit(RESTART_SHUTDOWN_TIMEOUT).await;
let config_path = self.handle.tempdir.path().join("node.yaml");
let child = spawn_node_process(&binary_path(), &config_path, self.handle.tempdir.path())?;
self.handle.child = child;
let new_pid = self.pid();
wait_for_consensus_readiness(&self.handle.api)
.await
.map_err(|source| SpawnNodeError::Readiness { source })?;
info!(
old_pid,
new_pid, "node restart readiness confirmed via consensus_info"
);
Ok(())
}
/// Stop the node process without restarting it.
pub async fn stop(&mut self) {
let pid = self.pid();
debug!(pid, "stopping node process");
kill_child(&mut self.handle.child);
let _ = self.wait_for_exit(RESTART_SHUTDOWN_TIMEOUT).await;
}
}
impl NodeConfigCommon for RunConfig {

View File

@ -1,8 +1,9 @@
use async_trait::async_trait;
use std::sync::Arc;
use reqwest::Url;
use super::DynError;
use crate::nodes::ApiClient;
use crate::{nodes::ApiClient, topology::config::NodeConfigPatch};
/// Marker type used by scenario builders to request node control support.
#[derive(Clone, Copy, Debug, Default)]
@ -34,20 +35,36 @@ pub enum PeerSelection {
}
/// Options for dynamically starting a node.
#[derive(Clone, Debug)]
#[derive(Clone)]
pub struct StartNodeOptions {
/// How to select initial peers on startup.
pub peers: PeerSelection,
/// Optional node config patch applied before spawn.
pub config_patch: Option<NodeConfigPatch>,
}
impl Default for StartNodeOptions {
fn default() -> Self {
Self {
peers: PeerSelection::DefaultLayout,
config_patch: None,
}
}
}
impl StartNodeOptions {
pub fn create_patch<F>(mut self, f: F) -> Self
where
F: Fn(nomos_node::config::RunConfig) -> Result<nomos_node::config::RunConfig, DynError>
+ Send
+ Sync
+ 'static,
{
self.config_patch = Some(Arc::new(f));
self
}
}
/// Trait implemented by scenario capability markers to signal whether node
/// control is required.
pub trait RequiresNodeControl {
@ -66,28 +83,6 @@ impl RequiresNodeControl for ObservabilityCapability {
const REQUIRED: bool = false;
}
/// Interface exposed by runners that can restart nodes at runtime.
#[async_trait]
pub trait NodeControlHandle: Send + Sync {
async fn restart_node(&self, index: usize) -> Result<(), DynError>;
async fn start_node(&self, _name: &str) -> Result<StartedNode, DynError> {
Err("start_node not supported by this deployer".into())
}
async fn start_node_with(
&self,
_name: &str,
_options: StartNodeOptions,
) -> Result<StartedNode, DynError> {
Err("start_node_with not supported by this deployer".into())
}
fn node_client(&self, _name: &str) -> Option<ApiClient> {
None
}
}
#[derive(Clone)]
pub struct StartedNode {
pub name: String,

View File

@ -0,0 +1,38 @@
use async_trait::async_trait;
use crate::{
nodes::ApiClient,
scenario::{DynError, StartNodeOptions, StartedNode},
};
/// Deployer-agnostic control surface for runtime node operations.
#[async_trait]
pub trait NodeControlHandle: Send + Sync {
async fn restart_node(&self, _name: &str) -> Result<(), DynError> {
Err("restart_node not supported by this deployer".into())
}
async fn start_node(&self, _name: &str) -> Result<StartedNode, DynError> {
Err("start_node not supported by this deployer".into())
}
async fn start_node_with(
&self,
_name: &str,
_options: StartNodeOptions,
) -> Result<StartedNode, DynError> {
Err("start_node_with not supported by this deployer".into())
}
async fn stop_node(&self, _name: &str) -> Result<(), DynError> {
Err("stop_node not supported by this deployer".into())
}
fn node_client(&self, _name: &str) -> Option<ApiClient> {
None
}
fn node_pid(&self, _name: &str) -> Option<u32> {
None
}
}

View File

@ -1,5 +1,6 @@
use std::{num::NonZeroUsize, sync::Arc, time::Duration};
use nomos_node::config::RunConfig;
use thiserror::Error;
use tracing::{debug, info};
@ -8,7 +9,7 @@ use super::{
workload::Workload,
};
use crate::topology::{
config::{TopologyBuildError, TopologyBuilder, TopologyConfig},
config::{NodeConfigPatch, TopologyBuildError, TopologyBuilder, TopologyConfig},
configs::{network::Libp2pNetworkLayout, wallet::WalletConfig},
generation::GeneratedTopology,
};
@ -302,16 +303,37 @@ impl<Caps> TopologyConfigurator<Caps> {
self
}
/// Apply a config patch for a specific node index.
#[must_use]
pub fn node_config_patch(mut self, index: usize, patch: NodeConfigPatch) -> Self {
self.builder.topology = self.builder.topology.with_node_config_patch(index, patch);
self
}
/// Apply a config patch for a specific node index.
#[must_use]
pub fn node_config_patch_with<F>(mut self, index: usize, f: F) -> Self
where
F: Fn(RunConfig) -> Result<RunConfig, DynError> + Send + Sync + 'static,
{
self.builder.topology = self
.builder
.topology
.with_node_config_patch(index, Arc::new(f));
self
}
/// Finalize and return the underlying scenario builder.
#[must_use]
pub fn apply(self) -> Builder<Caps> {
let mut config = TopologyConfig::with_node_numbers(self.nodes);
if self.network_star {
config.network_params.libp2p_network_layout = Libp2pNetworkLayout::Star;
}
let mut builder = self.builder;
builder.topology = TopologyBuilder::new(config);
builder.topology = builder.topology.with_node_count(self.nodes);
if self.network_star {
builder.topology = builder
.topology
.with_network_layout(Libp2pNetworkLayout::Star);
}
builder
}
}

View File

@ -2,6 +2,7 @@
mod capabilities;
pub mod cfgsync;
mod control;
mod definition;
mod expectation;
pub mod http_probe;
@ -12,9 +13,10 @@ mod workload;
pub type DynError = Box<dyn std::error::Error + Send + Sync + 'static>;
pub use capabilities::{
NodeControlCapability, NodeControlHandle, ObservabilityCapability, PeerSelection,
RequiresNodeControl, StartNodeOptions, StartedNode,
NodeControlCapability, ObservabilityCapability, PeerSelection, RequiresNodeControl,
StartNodeOptions, StartedNode,
};
pub use control::NodeControlHandle;
pub use definition::{
Builder, Scenario, ScenarioBuildError, ScenarioBuilder, TopologyConfigurator,
};

View File

@ -1,7 +1,10 @@
use std::{collections::HashMap, sync::Arc};
use nomos_core::{
mantle::GenesisTx as _,
sdp::{Locator, ServiceType},
};
use nomos_node::config::RunConfig;
use testing_framework_config::topology::{
configs::{
api::{ApiConfigError, create_api_configs},
@ -18,12 +21,18 @@ use testing_framework_config::topology::{
};
use thiserror::Error;
use crate::topology::{
configs::{GeneralConfig, time::default_time_config},
generation::{GeneratedNodeConfig, GeneratedTopology},
utils::{TopologyResolveError, create_kms_configs, resolve_ids, resolve_ports},
use crate::{
scenario::DynError,
topology::{
configs::{GeneralConfig, time::default_time_config},
generation::{GeneratedNodeConfig, GeneratedTopology},
utils::{TopologyResolveError, create_kms_configs, resolve_ids, resolve_ports},
},
};
/// Per-node config patch applied after the default node config is generated.
pub type NodeConfigPatch = Arc<dyn Fn(RunConfig) -> Result<RunConfig, DynError> + Send + Sync>;
#[derive(Debug, Error)]
pub enum TopologyBuildError {
#[error("topology must include at least one node")]
@ -55,6 +64,7 @@ pub struct TopologyConfig {
pub consensus_params: ConsensusParams,
pub network_params: NetworkParams,
pub wallet_config: WalletConfig,
pub node_config_patches: HashMap<usize, NodeConfigPatch>,
}
impl TopologyConfig {
@ -66,6 +76,7 @@ impl TopologyConfig {
consensus_params: ConsensusParams::default_for_participants(1),
network_params: NetworkParams::default(),
wallet_config: WalletConfig::default(),
node_config_patches: HashMap::new(),
}
}
@ -77,6 +88,7 @@ impl TopologyConfig {
consensus_params: ConsensusParams::default_for_participants(2),
network_params: NetworkParams::default(),
wallet_config: WalletConfig::default(),
node_config_patches: HashMap::new(),
}
}
@ -90,6 +102,7 @@ impl TopologyConfig {
consensus_params: ConsensusParams::default_for_participants(participants),
network_params: NetworkParams::default(),
wallet_config: WalletConfig::default(),
node_config_patches: HashMap::new(),
}
}
@ -97,6 +110,17 @@ impl TopologyConfig {
pub const fn wallet(&self) -> &WalletConfig {
&self.wallet_config
}
#[must_use]
pub fn node_config_patch(&self, index: usize) -> Option<&NodeConfigPatch> {
self.node_config_patches.get(&index)
}
#[must_use]
pub fn with_node_config_patch(mut self, index: usize, patch: NodeConfigPatch) -> Self {
self.node_config_patches.insert(index, patch);
self
}
}
/// Builder that produces `GeneratedTopology` instances from a `TopologyConfig`.
@ -132,6 +156,13 @@ impl TopologyBuilder {
self
}
#[must_use]
/// Apply a config patch for a specific node index.
pub fn with_node_config_patch(mut self, index: usize, patch: NodeConfigPatch) -> Self {
self.config.node_config_patches.insert(index, patch);
self
}
#[must_use]
/// Set node counts.
pub const fn with_node_count(mut self, nodes: usize) -> Self {
@ -187,9 +218,13 @@ impl TopologyBuilder {
let providers = collect_provider_infos(first_consensus, &blend_configs)?;
let genesis_tx = create_consensus_genesis_tx(first_consensus, providers)?;
apply_consensus_genesis_tx(&mut consensus_configs, &genesis_tx);
apply_consensus_genesis_tx(&mut consensus_configs, &genesis_tx)?;
let kms_configs = create_kms_configs(&blend_configs, &config.wallet_config.accounts);
let kms_configs = create_kms_configs(
&blend_configs,
&consensus_configs,
&config.wallet_config.accounts,
);
let nodes = build_node_descriptors(
&config,
@ -204,6 +239,7 @@ impl TopologyBuilder {
&tracing_configs,
&kms_configs,
&time_config,
&config.node_config_patches,
)?;
Ok(GeneratedTopology { config, nodes })
@ -271,10 +307,15 @@ fn create_consensus_genesis_tx(
fn apply_consensus_genesis_tx(
consensus_configs: &mut [testing_framework_config::topology::configs::consensus::GeneralConsensusConfig],
genesis_tx: &nomos_core::mantle::genesis_tx::GenesisTx,
) {
) -> Result<(), TopologyBuildError> {
for c in consensus_configs {
c.genesis_tx = genesis_tx.clone();
testing_framework_config::topology::configs::consensus::sync_utxos_with_genesis(
&mut c.utxos,
genesis_tx,
)?;
}
Ok(())
}
#[allow(clippy::too_many_arguments)]
@ -291,6 +332,7 @@ fn build_node_descriptors(
tracing_configs: &[testing_framework_config::topology::configs::tracing::GeneralTracingConfig],
kms_configs: &[key_management_system_service::backend::preload::PreloadKMSBackendSettings],
time_config: &testing_framework_config::topology::configs::time::GeneralTimeConfig,
node_config_patches: &HashMap<usize, NodeConfigPatch>,
) -> Result<Vec<GeneratedNodeConfig>, TopologyBuildError> {
let mut nodes = Vec::with_capacity(config.n_nodes);
@ -324,6 +366,7 @@ fn build_node_descriptors(
id,
general,
blend_port,
config_patch: node_config_patches.get(&i).cloned(),
};
nodes.push(descriptor);

View File

@ -1,15 +1,8 @@
use std::collections::HashSet;
use thiserror::Error;
use crate::{
nodes::{
common::node::SpawnNodeError,
node::{Node, create_node_config},
},
nodes::node::Node,
topology::{
config::{TopologyBuildError, TopologyBuilder, TopologyConfig},
configs::GeneralConfig,
generation::find_expected_peer_counts,
readiness::{NetworkReadiness, ReadinessCheck, ReadinessError},
utils::multiaddr_port,
@ -21,62 +14,11 @@ pub struct Topology {
pub(crate) nodes: Vec<Node>,
}
pub type DeployedNodes = Vec<Node>;
#[derive(Debug, Error)]
pub enum SpawnTopologyError {
#[error(transparent)]
Build(#[from] TopologyBuildError),
#[error(transparent)]
Node(#[from] SpawnNodeError),
}
impl Topology {
pub async fn spawn(config: TopologyConfig) -> Result<Self, SpawnTopologyError> {
let generated = TopologyBuilder::new(config.clone()).build()?;
let n_nodes = config.n_nodes;
let node_configs = generated
.iter()
.map(|node| node.general.clone())
.collect::<Vec<_>>();
let nodes = Self::spawn_nodes(node_configs, n_nodes).await?;
Ok(Self { nodes })
}
pub async fn spawn_with_empty_membership(
config: TopologyConfig,
ids: &[[u8; 32]],
blend_ports: &[u16],
) -> Result<Self, SpawnTopologyError> {
let generated = TopologyBuilder::new(config.clone())
.with_ids(ids.to_vec())
.with_blend_ports(blend_ports.to_vec())
.build()?;
let node_configs = generated
.iter()
.map(|node| node.general.clone())
.collect::<Vec<_>>();
let nodes = Self::spawn_nodes(node_configs, config.n_nodes).await?;
Ok(Self { nodes })
}
pub(crate) async fn spawn_nodes(
config: Vec<GeneralConfig>,
n_nodes: usize,
) -> Result<DeployedNodes, SpawnTopologyError> {
let mut nodes = Vec::new();
for i in 0..n_nodes {
let config = create_node_config(config[i].clone());
let label = format!("node-{i}");
nodes.push(Node::spawn(config, &label).await?);
}
Ok(nodes)
/// Construct a topology from already-spawned nodes.
#[must_use]
pub fn from_nodes(nodes: Vec<Node>) -> Self {
Self { nodes }
}
#[must_use]
@ -84,6 +26,11 @@ impl Topology {
&self.nodes
}
#[must_use]
pub fn into_nodes(self) -> Vec<Node> {
self.nodes
}
pub async fn wait_network_ready(&self) -> Result<(), ReadinessError> {
let listen_ports = self.node_listen_ports();
if listen_ports.len() <= 1 {

View File

@ -3,9 +3,8 @@ use std::{collections::HashSet, time::Duration};
use reqwest::{Client, Url};
use crate::topology::{
config::TopologyConfig,
config::{NodeConfigPatch, TopologyConfig},
configs::{GeneralConfig, wallet::WalletAccount},
deployment::{SpawnTopologyError, Topology},
readiness::{HttpNetworkReadiness, ReadinessCheck, ReadinessError},
};
@ -16,6 +15,7 @@ pub struct GeneratedNodeConfig {
pub id: [u8; 32],
pub general: GeneralConfig,
pub blend_port: u16,
pub config_patch: Option<NodeConfigPatch>,
}
impl GeneratedNodeConfig {
@ -81,17 +81,6 @@ impl GeneratedTopology {
&self.config.wallet_config.accounts
}
pub async fn spawn_local(&self) -> Result<Topology, SpawnTopologyError> {
let configs = self
.iter()
.map(|node| node.general.clone())
.collect::<Vec<_>>();
let nodes = Topology::spawn_nodes(configs, self.config.n_nodes).await?;
Ok(Topology { nodes })
}
pub async fn wait_remote_readiness(
&self,
// Node endpoints

View File

@ -6,17 +6,21 @@ use nomos_utils::net::get_available_udp_port;
use rand::{Rng, thread_rng};
use thiserror::Error;
use crate::topology::configs::{blend::GeneralBlendConfig, wallet::WalletAccount};
use crate::topology::configs::{
blend::GeneralBlendConfig, consensus::GeneralConsensusConfig, wallet::WalletAccount,
};
#[must_use]
/// Build preload KMS configs for blend/DA and wallet keys for every node.
pub fn create_kms_configs(
blend_configs: &[GeneralBlendConfig],
consensus_configs: &[GeneralConsensusConfig],
wallet_accounts: &[WalletAccount],
) -> Vec<PreloadKMSBackendSettings> {
blend_configs
.iter()
.map(|blend_conf| {
.zip(consensus_configs.iter())
.map(|(blend_conf, consensus_conf)| {
let mut keys = HashMap::from([
(
hex::encode(blend_conf.signer.public_key().to_bytes()),
@ -28,6 +32,18 @@ pub fn create_kms_configs(
)),
Key::Zk(blend_conf.secret_zk_key.clone()),
),
(
hex::encode(fr_to_bytes(
consensus_conf.leader_sk.to_public_key().as_fr(),
)),
Key::Zk(consensus_conf.leader_sk.clone().into()),
),
(
hex::encode(fr_to_bytes(
consensus_conf.funding_sk.to_public_key().as_fr(),
)),
Key::Zk(consensus_conf.funding_sk.clone()),
),
]);
for account in wallet_accounts {

View File

@ -45,13 +45,9 @@ pub struct ComposeNodeControl {
#[async_trait::async_trait]
impl NodeControlHandle for ComposeNodeControl {
async fn restart_node(&self, index: usize) -> Result<(), DynError> {
restart_compose_service(
&self.compose_file,
&self.project_name,
&format!("node-{index}"),
)
.await
.map_err(|err| format!("node restart failed: {err}").into())
async fn restart_node(&self, name: &str) -> Result<(), DynError> {
restart_compose_service(&self.compose_file, &self.project_name, name)
.await
.map_err(|err| format!("node restart failed: {err}").into())
}
}

View File

@ -24,3 +24,6 @@ testing-framework-core = { path = "../../core" }
thiserror = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
tracing-subscriber = "0.3"

View File

@ -3,5 +3,5 @@ mod node_control;
mod runner;
pub use manual::{LocalManualCluster, ManualClusterError};
pub use node_control::{LocalDynamicError, LocalDynamicNodes, LocalDynamicSeed};
pub use node_control::{LocalNodeManager, LocalNodeManagerError, LocalNodeManagerSeed};
pub use runner::{LocalDeployer, LocalDeployerError};

View File

@ -1,7 +1,7 @@
use testing_framework_core::{
manual::ManualClusterHandle,
nodes::ApiClient,
scenario::{DynError, StartNodeOptions, StartedNode},
scenario::{DynError, NodeControlHandle, StartNodeOptions, StartedNode},
topology::{
config::{TopologyBuildError, TopologyBuilder, TopologyConfig},
readiness::{ReadinessCheck, ReadinessError},
@ -9,7 +9,7 @@ use testing_framework_core::{
};
use thiserror::Error;
use crate::node_control::{LocalDynamicError, LocalDynamicNodes, ReadinessNode};
use crate::node_control::{LocalNodeManager, LocalNodeManagerError, ReadinessNode};
mod readiness;
@ -23,32 +23,43 @@ pub enum ManualClusterError {
source: TopologyBuildError,
},
#[error(transparent)]
Dynamic(#[from] LocalDynamicError),
Dynamic(#[from] LocalNodeManagerError),
}
/// Imperative, in-process cluster that can start nodes on demand.
pub struct LocalManualCluster {
nodes: LocalDynamicNodes,
nodes: LocalNodeManager,
}
impl LocalManualCluster {
pub(crate) fn from_config(config: TopologyConfig) -> Result<Self, ManualClusterError> {
let builder = TopologyBuilder::new(config);
pub(crate) fn from_builder(builder: TopologyBuilder) -> Result<Self, ManualClusterError> {
let descriptors = builder
.build()
.map_err(|source| ManualClusterError::Build { source })?;
let nodes = LocalDynamicNodes::new(
let nodes = LocalNodeManager::new(
descriptors,
testing_framework_core::scenario::NodeClients::default(),
);
Ok(Self { nodes })
}
pub(crate) fn from_config(config: TopologyConfig) -> Result<Self, ManualClusterError> {
let builder = TopologyBuilder::new(config);
Self::from_builder(builder)
}
#[must_use]
pub fn node_client(&self, name: &str) -> Option<ApiClient> {
self.nodes.node_client(name)
}
#[must_use]
pub fn node_pid(&self, name: &str) -> Option<u32> {
self.nodes.node_pid(name)
}
pub async fn start_node(&self, name: &str) -> Result<StartedNode, ManualClusterError> {
Ok(self
.nodes
@ -68,6 +79,14 @@ impl LocalManualCluster {
self.nodes.stop_all();
}
pub async fn restart_node(&self, name: &str) -> Result<(), ManualClusterError> {
Ok(self.nodes.restart_node(name).await?)
}
pub async fn stop_node(&self, name: &str) -> Result<(), ManualClusterError> {
Ok(self.nodes.stop_node(name).await?)
}
pub async fn wait_network_ready(&self) -> Result<(), ReadinessError> {
let nodes = self.nodes.readiness_nodes();
if self.is_singleton(&nodes) {
@ -92,6 +111,44 @@ impl Drop for LocalManualCluster {
}
}
#[async_trait::async_trait]
impl NodeControlHandle for LocalManualCluster {
async fn restart_node(&self, name: &str) -> Result<(), DynError> {
self.nodes
.restart_node(name)
.await
.map_err(|err| err.into())
}
async fn stop_node(&self, name: &str) -> Result<(), DynError> {
self.nodes.stop_node(name).await.map_err(|err| err.into())
}
async fn start_node(&self, name: &str) -> Result<StartedNode, DynError> {
self.start_node_with(name, StartNodeOptions::default())
.await
.map_err(|err| err.into())
}
async fn start_node_with(
&self,
name: &str,
options: StartNodeOptions,
) -> Result<StartedNode, DynError> {
self.start_node_with(name, options)
.await
.map_err(|err| err.into())
}
fn node_client(&self, name: &str) -> Option<ApiClient> {
self.node_client(name)
}
fn node_pid(&self, name: &str) -> Option<u32> {
self.node_pid(name)
}
}
#[async_trait::async_trait]
impl ManualClusterHandle for LocalManualCluster {
async fn start_node_with(

View File

@ -8,16 +8,16 @@ use testing_framework_config::topology::configs::{
runtime::{build_general_config_for_node, build_initial_peers},
time::GeneralTimeConfig,
};
use testing_framework_core::{
pub(crate) use testing_framework_core::{
scenario::{PeerSelection, StartNodeOptions},
topology::{
config::TopologyConfig,
config::{NodeConfigPatch, TopologyConfig},
configs::GeneralConfig,
generation::{GeneratedNodeConfig, GeneratedTopology},
},
};
use super::LocalDynamicError;
use super::LocalNodeManagerError;
pub(super) fn build_general_config_for(
descriptors: &GeneratedTopology,
@ -27,7 +27,7 @@ pub(super) fn build_general_config_for(
peer_ports_by_name: &HashMap<String, u16>,
options: &StartNodeOptions,
peer_ports: &[u16],
) -> Result<(GeneralConfig, u16), LocalDynamicError> {
) -> Result<(GeneralConfig, u16, Option<NodeConfigPatch>), LocalNodeManagerError> {
if let Some(node) = descriptor_for(descriptors, index) {
let mut config = node.general.clone();
let initial_peers = resolve_initial_peers(
@ -40,7 +40,7 @@ pub(super) fn build_general_config_for(
config.network_config.backend.initial_peers = initial_peers;
return Ok((config, node.network_port()));
return Ok((config, node.network_port(), node.config_patch.clone()));
}
let id = random_node_id();
@ -59,9 +59,9 @@ pub(super) fn build_general_config_for(
base_consensus,
base_time,
)
.map_err(|source| LocalDynamicError::Config { source })?;
.map_err(|source| LocalNodeManagerError::Config { source })?;
Ok((general_config, network_port))
Ok((general_config, network_port, None))
}
fn descriptor_for(descriptors: &GeneratedTopology, index: usize) -> Option<&GeneratedNodeConfig> {
@ -71,13 +71,13 @@ fn descriptor_for(descriptors: &GeneratedTopology, index: usize) -> Option<&Gene
fn resolve_peer_names(
peer_ports_by_name: &HashMap<String, u16>,
peer_names: &[String],
) -> Result<Vec<Multiaddr>, LocalDynamicError> {
) -> Result<Vec<Multiaddr>, LocalNodeManagerError> {
let mut peers = Vec::with_capacity(peer_names.len());
for name in peer_names {
let port =
peer_ports_by_name
.get(name)
.ok_or_else(|| LocalDynamicError::InvalidArgument {
.ok_or_else(|| LocalNodeManagerError::InvalidArgument {
message: format!("unknown peer name '{name}'"),
})?;
peers.push(testing_framework_config::node_address_from_port(*port));
@ -91,7 +91,7 @@ fn resolve_initial_peers(
default_peers: &[Multiaddr],
descriptors: &GeneratedTopology,
peer_ports: &[u16],
) -> Result<Vec<Multiaddr>, LocalDynamicError> {
) -> Result<Vec<Multiaddr>, LocalNodeManagerError> {
match &options.peers {
PeerSelection::Named(names) => resolve_peer_names(peer_ports_by_name, names),
PeerSelection::DefaultLayout => {
@ -112,8 +112,8 @@ fn random_node_id() -> [u8; 32] {
id
}
fn allocate_udp_port(label: &'static str) -> Result<u16, LocalDynamicError> {
get_available_udp_port().ok_or_else(|| LocalDynamicError::PortAllocation {
fn allocate_udp_port(label: &'static str) -> Result<u16, LocalNodeManagerError> {
get_available_udp_port().ok_or_else(|| LocalNodeManagerError::PortAllocation {
message: format!("failed to allocate free UDP port for {label}"),
})
}

View File

@ -3,15 +3,16 @@ use std::{
sync::Mutex,
};
use nomos_node::config::RunConfig as NodeConfig;
use nomos_node::config::RunConfig;
use testing_framework_config::topology::configs::{consensus, time};
use testing_framework_core::{
nodes::{
ApiClient,
node::{Node, create_node_config},
node::{Node, apply_node_config_patch, create_node_config},
},
scenario::{DynError, NodeControlHandle, StartNodeOptions, StartedNode},
topology::{
deployment::Topology,
generation::{GeneratedTopology, find_expected_peer_counts},
utils::multiaddr_port,
},
@ -22,11 +23,11 @@ mod config;
mod state;
use config::build_general_config_for;
use state::LocalDynamicState;
use state::LocalNodeManagerState;
use testing_framework_core::scenario::NodeClients;
#[derive(Debug, Error)]
pub enum LocalDynamicError {
pub enum LocalNodeManagerError {
#[error("failed to generate node config: {source}")]
Config {
#[source]
@ -41,25 +42,34 @@ pub enum LocalDynamicError {
InvalidArgument { message: String },
#[error("{message}")]
PortAllocation { message: String },
#[error("node config patch failed: {message}")]
ConfigPatch { message: String },
#[error("node name '{name}' is unknown")]
NodeName { name: String },
#[error("failed to restart node: {source}")]
Restart {
#[source]
source: testing_framework_core::nodes::common::node::SpawnNodeError,
},
}
pub struct LocalDynamicNodes {
pub struct LocalNodeManager {
descriptors: GeneratedTopology,
base_consensus: consensus::GeneralConsensusConfig,
base_time: time::GeneralTimeConfig,
node_clients: NodeClients,
seed: LocalDynamicSeed,
state: Mutex<LocalDynamicState>,
seed: LocalNodeManagerSeed,
state: Mutex<LocalNodeManagerState>,
}
#[derive(Clone, Default)]
pub struct LocalDynamicSeed {
pub struct LocalNodeManagerSeed {
pub node_count: usize,
pub peer_ports: Vec<u16>,
pub peer_ports_by_name: HashMap<String, u16>,
}
impl LocalDynamicSeed {
impl LocalNodeManagerSeed {
#[must_use]
pub fn from_topology(descriptors: &GeneratedTopology) -> Self {
let peer_ports = descriptors
@ -88,15 +98,39 @@ pub(crate) struct ReadinessNode {
pub(crate) api: ApiClient,
}
impl LocalDynamicNodes {
impl LocalNodeManager {
fn default_label(index: usize) -> String {
format!("node-{index}")
}
pub async fn spawn_initial_nodes(
descriptors: &GeneratedTopology,
) -> Result<Vec<Node>, testing_framework_core::nodes::common::node::SpawnNodeError> {
let mut nodes = Vec::with_capacity(descriptors.nodes().len());
for node in descriptors.nodes() {
let label = Self::default_label(node.index());
let config = create_node_config(node.general.clone());
let spawned = Node::spawn(config, &label).await?;
nodes.push(spawned);
}
Ok(nodes)
}
pub async fn spawn_initial_topology(
descriptors: &GeneratedTopology,
) -> Result<Topology, testing_framework_core::nodes::common::node::SpawnNodeError> {
let nodes = Self::spawn_initial_nodes(descriptors).await?;
Ok(Topology::from_nodes(nodes))
}
pub fn new(descriptors: GeneratedTopology, node_clients: NodeClients) -> Self {
Self::new_with_seed(descriptors, node_clients, LocalDynamicSeed::default())
Self::new_with_seed(descriptors, node_clients, LocalNodeManagerSeed::default())
}
pub fn new_with_seed(
descriptors: GeneratedTopology,
node_clients: NodeClients,
seed: LocalDynamicSeed,
seed: LocalNodeManagerSeed,
) -> Self {
let base_node = descriptors
.nodes()
@ -106,11 +140,12 @@ impl LocalDynamicNodes {
let base_consensus = base_node.general.consensus_config.clone();
let base_time = base_node.general.time_config.clone();
let state = LocalDynamicState {
let state = LocalNodeManagerState {
node_count: seed.node_count,
peer_ports: seed.peer_ports.clone(),
peer_ports_by_name: seed.peer_ports_by_name.clone(),
clients_by_name: HashMap::new(),
indices_by_name: HashMap::new(),
nodes: Vec::new(),
};
@ -134,6 +169,22 @@ impl LocalDynamicNodes {
state.clients_by_name.get(name).cloned()
}
#[must_use]
pub fn node_pid(&self, name: &str) -> Option<u32> {
let mut state = self
.state
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());
let index = *state.indices_by_name.get(name)?;
let node = state.nodes.get_mut(index)?;
if node.is_running() {
Some(node.pid())
} else {
None
}
}
pub fn stop_all(&self) {
let mut state = self
.state
@ -146,15 +197,46 @@ impl LocalDynamicNodes {
.peer_ports_by_name
.clone_from(&self.seed.peer_ports_by_name);
state.clients_by_name.clear();
state.indices_by_name.clear();
state.node_count = self.seed.node_count;
self.node_clients.clear();
}
pub fn initialize_with_nodes(&self, nodes: Vec<Node>) {
self.node_clients.clear();
let mut state = self
.state
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());
state.nodes.clear();
state.peer_ports.clear();
state.peer_ports_by_name.clear();
state.clients_by_name.clear();
state.indices_by_name.clear();
state.node_count = 0;
for (idx, node) in nodes.into_iter().enumerate() {
let name = Self::default_label(idx);
let port = node.config().user.network.backend.swarm.port;
let client = node.api().clone();
self.node_clients.add_node(client.clone());
state.register_node(&name, port, client, node);
}
}
#[must_use]
pub fn node_clients(&self) -> NodeClients {
self.node_clients.clone()
}
pub async fn start_node_with(
&self,
name: &str,
options: StartNodeOptions,
) -> Result<StartedNode, LocalDynamicError> {
) -> Result<StartedNode, LocalNodeManagerError> {
self.start_node(name, options).await
}
@ -206,7 +288,7 @@ impl LocalDynamicNodes {
&self,
name: &str,
options: StartNodeOptions,
) -> Result<StartedNode, LocalDynamicError> {
) -> Result<StartedNode, LocalNodeManagerError> {
let (peer_ports, peer_ports_by_name, node_name, index) = {
let state = self
.state
@ -215,13 +297,15 @@ impl LocalDynamicNodes {
let index = state.node_count;
let label = if name.trim().is_empty() {
format!("node-{index}")
Self::default_label(index)
} else if name.starts_with("node-") {
name.to_string()
} else {
format!("node-{name}")
};
if state.peer_ports_by_name.contains_key(&label) {
return Err(LocalDynamicError::InvalidArgument {
return Err(LocalNodeManagerError::InvalidArgument {
message: format!("node name '{label}' already exists"),
});
}
@ -234,7 +318,7 @@ impl LocalDynamicNodes {
)
};
let (general_config, network_port) = build_general_config_for(
let (general_config, network_port, descriptor_patch) = build_general_config_for(
&self.descriptors,
&self.base_consensus,
&self.base_time,
@ -244,7 +328,12 @@ impl LocalDynamicNodes {
&peer_ports,
)?;
let config = create_node_config(general_config);
let config = build_node_config(
general_config,
descriptor_patch.as_ref(),
options.config_patch.as_ref(),
)?;
let api_client = self
.spawn_and_register_node(&node_name, network_port, config)
.await?;
@ -255,15 +344,94 @@ impl LocalDynamicNodes {
})
}
pub async fn restart_node(&self, name: &str) -> Result<(), LocalNodeManagerError> {
let (index, mut node) = {
let mut state = self
.state
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());
let Some(index) = state.indices_by_name.get(name).copied() else {
return Err(LocalNodeManagerError::NodeName {
name: name.to_string(),
});
};
if index >= state.nodes.len() {
return Err(LocalNodeManagerError::NodeName {
name: name.to_string(),
});
}
let node = state.nodes.remove(index);
(index, node)
};
node.restart()
.await
.map_err(|source| LocalNodeManagerError::Restart { source })?;
let mut state = self
.state
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());
if index <= state.nodes.len() {
state.nodes.insert(index, node);
} else {
state.nodes.push(node);
}
Ok(())
}
/// Stop the node registered under `name`.
///
/// The node is detached from the shared state first so the mutex is not
/// held across `.await`, then re-attached once it has stopped.
pub async fn stop_node(&self, name: &str) -> Result<(), LocalNodeManagerError> {
    let (slot, mut target) = {
        let mut guard = self
            .state
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner());
        // Unknown names and out-of-range indices are reported identically.
        let slot = match guard.indices_by_name.get(name) {
            Some(&i) if i < guard.nodes.len() => i,
            _ => {
                return Err(LocalNodeManagerError::NodeName {
                    name: name.to_string(),
                });
            }
        };
        let target = guard.nodes.remove(slot);
        (slot, target)
    };

    target.stop().await;

    // Re-attach at the previous position, or append if the list shrank
    // while the lock was released.
    let mut guard = self
        .state
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner());
    if slot <= guard.nodes.len() {
        guard.nodes.insert(slot, target);
    } else {
        guard.nodes.push(target);
    }
    Ok(())
}
async fn spawn_and_register_node(
&self,
node_name: &str,
network_port: u16,
config: NodeConfig,
) -> Result<ApiClient, LocalDynamicError> {
config: RunConfig,
) -> Result<ApiClient, LocalNodeManagerError> {
let node = Node::spawn(config, node_name)
.await
.map_err(|source| LocalDynamicError::Spawn { source })?;
.map_err(|source| LocalNodeManagerError::Spawn { source })?;
let client = node.api().clone();
self.node_clients.add_node(client.clone());
@ -279,10 +447,39 @@ impl LocalDynamicNodes {
}
}
/// Produce the run configuration for a new node: generate the base config
/// from `general_config`, then layer the descriptor patch (if any) and the
/// caller-supplied patch (if any) on top, in that order.
fn build_node_config(
    general_config: testing_framework_config::topology::configs::GeneralConfig,
    descriptor_patch: Option<&config::NodeConfigPatch>,
    options_patch: Option<&config::NodeConfigPatch>,
) -> Result<RunConfig, LocalNodeManagerError> {
    let base = create_node_config(general_config);
    let with_descriptor = apply_patch_if_needed(base, descriptor_patch)?;
    apply_patch_if_needed(with_descriptor, options_patch)
}
/// Apply `patch` to `config` when present; a missing patch is a no-op.
/// Patch failures are flattened into `ConfigPatch` with the error text.
fn apply_patch_if_needed(
    config: RunConfig,
    patch: Option<&config::NodeConfigPatch>,
) -> Result<RunConfig, LocalNodeManagerError> {
    match patch {
        None => Ok(config),
        Some(patch) => apply_node_config_patch(config, patch).map_err(|err| {
            LocalNodeManagerError::ConfigPatch {
                message: err.to_string(),
            }
        }),
    }
}
#[async_trait::async_trait]
impl NodeControlHandle for LocalDynamicNodes {
async fn restart_node(&self, _index: usize) -> Result<(), DynError> {
Err("local deployer does not support restart_node".into())
impl NodeControlHandle for LocalNodeManager {
async fn restart_node(&self, name: &str) -> Result<(), DynError> {
self.restart_node(name).await.map_err(|err| err.into())
}
async fn stop_node(&self, name: &str) -> Result<(), DynError> {
self.stop_node(name).await.map_err(|err| err.into())
}
async fn start_node(&self, name: &str) -> Result<StartedNode, DynError> {
@ -304,4 +501,8 @@ impl NodeControlHandle for LocalDynamicNodes {
fn node_client(&self, name: &str) -> Option<ApiClient> {
self.node_client(name)
}
fn node_pid(&self, name: &str) -> Option<u32> {
self.node_pid(name)
}
}

View File

@ -2,15 +2,16 @@ use std::collections::HashMap;
use testing_framework_core::nodes::{ApiClient, node::Node};
pub(crate) struct LocalDynamicState {
pub(crate) struct LocalNodeManagerState {
pub(crate) node_count: usize,
pub(crate) peer_ports: Vec<u16>,
pub(crate) peer_ports_by_name: HashMap<String, u16>,
pub(crate) clients_by_name: HashMap<String, ApiClient>,
pub(crate) indices_by_name: HashMap<String, usize>,
pub(crate) nodes: Vec<Node>,
}
impl LocalDynamicState {
impl LocalNodeManagerState {
fn register_common(&mut self, node_name: &str, network_port: u16, client: ApiClient) {
self.peer_ports.push(network_port);
self.peer_ports_by_name
@ -26,6 +27,8 @@ impl LocalDynamicState {
node: Node,
) {
self.register_common(node_name, network_port, client);
let index = self.nodes.len();
self.indices_by_name.insert(node_name.to_string(), index);
self.node_count += 1;
self.nodes.push(node);
}

View File

@ -2,13 +2,14 @@ use std::sync::Arc;
use async_trait::async_trait;
use testing_framework_core::{
nodes::common::node::SpawnNodeError,
scenario::{
BlockFeed, BlockFeedTask, Deployer, DynError, Metrics, NodeClients, NodeControlCapability,
RunContext, Runner, Scenario, ScenarioError, spawn_block_feed,
},
topology::{
config::TopologyConfig,
deployment::{SpawnTopologyError, Topology},
config::{TopologyBuilder, TopologyConfig},
deployment::Topology,
readiness::ReadinessError,
},
};
@ -17,7 +18,7 @@ use tracing::{debug, info};
use crate::{
manual::{LocalManualCluster, ManualClusterError},
node_control::{LocalDynamicNodes, LocalDynamicSeed},
node_control::{LocalNodeManager, LocalNodeManagerSeed},
};
/// Spawns nodes as local processes, reusing the existing
/// integration harness.
@ -32,7 +33,7 @@ pub enum LocalDeployerError {
#[error("failed to spawn local topology: {source}")]
Spawn {
#[source]
source: SpawnTopologyError,
source: SpawnNodeError,
},
#[error("readiness probe failed: {source}")]
ReadinessFailed {
@ -103,19 +104,39 @@ impl Deployer<NodeControlCapability> for LocalDeployer {
"starting local deployment with node control"
);
let topology = Self::prepare_topology(scenario, self.membership_check).await?;
let node_clients = NodeClients::from_topology(scenario.topology(), &topology);
let node_control = Arc::new(LocalDynamicNodes::new_with_seed(
let mut nodes = LocalNodeManager::spawn_initial_nodes(scenario.topology())
.await
.map_err(|source| LocalDeployerError::Spawn { source })?;
if self.membership_check {
let topology = Topology::from_nodes(nodes);
wait_for_readiness(&topology).await.map_err(|source| {
debug!(error = ?source, "local readiness failed");
LocalDeployerError::ReadinessFailed { source }
})?;
nodes = topology.into_nodes();
info!("local nodes are ready");
} else {
info!("skipping local membership readiness checks");
}
let node_control = Arc::new(LocalNodeManager::new_with_seed(
scenario.topology().clone(),
node_clients.clone(),
LocalDynamicSeed::from_topology(scenario.topology()),
NodeClients::default(),
LocalNodeManagerSeed::from_topology(scenario.topology()),
));
node_control.initialize_with_nodes(nodes);
let node_clients = node_control.node_clients();
let (block_feed, block_feed_guard) = spawn_block_feed_with(&node_clients).await?;
let context = RunContext::new(
scenario.topology().clone(),
Some(topology),
None,
node_clients,
scenario.duration(),
Metrics::empty(),
@ -150,6 +171,14 @@ impl LocalDeployer {
LocalManualCluster::from_config(config)
}
/// Build a manual cluster from a pre-configured topology builder.
///
/// Unlike `manual_cluster`, which derives its topology from a
/// `TopologyConfig`, this accepts a caller-customised `TopologyBuilder`
/// directly and delegates to `LocalManualCluster::from_builder`.
pub fn manual_cluster_with_builder(
    &self,
    builder: TopologyBuilder,
) -> Result<LocalManualCluster, ManualClusterError> {
    LocalManualCluster::from_builder(builder)
}
async fn prepare_topology<Caps>(
scenario: &Scenario<Caps>,
membership_check: bool,
@ -158,9 +187,7 @@ impl LocalDeployer {
info!(nodes = descriptors.nodes().len(), "spawning local nodes");
let topology = descriptors
.clone()
.spawn_local()
let topology = LocalNodeManager::spawn_initial_topology(descriptors)
.await
.map_err(|source| LocalDeployerError::Spawn { source })?;

View File

@ -0,0 +1,67 @@
use std::time::Duration;
use testing_framework_core::{
scenario::{Deployer, ScenarioBuilder},
topology::config::TopologyConfig,
};
use testing_framework_runner_local::LocalDeployer;
use tracing_subscriber::fmt::try_init;
#[tokio::test]
#[ignore = "requires local node binary and open ports"]
async fn local_restart_node() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let _ = try_init();

    // Single-node topology with node control enabled and a short run window.
    let mut scenario = ScenarioBuilder::topology_with(|t| t.nodes(1))
        .enable_node_control()
        .with_run_duration(Duration::from_secs(1))
        .build()?;

    let runner = LocalDeployer::default().deploy(&scenario).await?;
    let context = runner.context();
    let control = context.node_control().ok_or("node control not available")?;
    let node_name = "node-0";

    // A restart must be observable as a change of OS process id.
    let pid_before = control.node_pid(node_name).ok_or("missing node pid")?;
    control.restart_node(node_name).await?;
    let pid_after = control.node_pid(node_name).ok_or("missing node pid")?;
    assert_ne!(pid_before, pid_after, "expected a new process after restart");

    // Stopping must clear the tracked pid.
    control.stop_node(node_name).await?;
    assert!(
        control.node_pid(node_name).is_none(),
        "expected node pid to be absent after stop"
    );

    let _handle = runner.run(&mut scenario).await?;
    Ok(())
}
#[tokio::test]
#[ignore = "requires local node binary and open ports"]
async fn manual_cluster_restart_node() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let _ = try_init();

    // Manual one-node cluster driven directly through the deployer.
    let deployer = LocalDeployer::default();
    let cluster = deployer.manual_cluster(TopologyConfig::with_node_numbers(1))?;
    let node_name = cluster.start_node("a").await?.name;

    // A restart must yield a different OS process.
    let pid_before = cluster.node_pid(&node_name).ok_or("missing node pid")?;
    cluster.restart_node(&node_name).await?;
    let pid_after = cluster.node_pid(&node_name).ok_or("missing node pid")?;
    assert_ne!(pid_before, pid_after, "expected a new process after restart");

    // After a stop the pid must no longer be tracked.
    cluster.stop_node(&node_name).await?;
    assert!(
        cluster.node_pid(&node_name).is_none(),
        "expected node pid to be absent after stop"
    );
    Ok(())
}

View File

@ -8,7 +8,10 @@ use testing_framework_config::topology::configs::{
GeneralConfig,
api::GeneralApiConfig,
base::{BaseConfigError, BaseConfigs, build_base_configs},
consensus::{ConsensusConfigError, ConsensusParams, create_genesis_tx_with_declarations},
consensus::{
ConsensusConfigError, ConsensusParams, create_genesis_tx_with_declarations,
sync_utxos_with_genesis,
},
network::NetworkParams,
time::default_time_config,
wallet::WalletConfig,
@ -131,6 +134,7 @@ pub fn try_create_node_configs(
for c in &mut consensus_configs {
c.genesis_tx = genesis_tx.clone();
sync_utxos_with_genesis(&mut c.utxos, &genesis_tx)?;
}
let kms_configs = create_kms_configs(&blend_configs);

View File

@ -1,8 +1,10 @@
pub mod builder;
pub mod expectations;
pub mod manual;
pub mod util;
pub mod workloads;
pub use builder::{ChaosBuilderExt, ObservabilityBuilderExt, ScenarioBuilderExt};
pub use expectations::ConsensusLiveness;
pub use manual::{start_node_with_timeout, wait_for_min_height};
pub use workloads::transaction::TxInclusionExpectation;

View File

@ -0,0 +1,76 @@
use std::time::Duration;
use testing_framework_core::{
nodes::ApiClient,
scenario::{DynError, NodeControlHandle, StartNodeOptions, StartedNode},
};
use thiserror::Error;
use tokio::time::{Instant, sleep, timeout};
/// Errors produced by the manual-cluster test helpers in this module.
#[derive(Debug, Error)]
pub enum ManualTestError {
    /// An operation did not complete within its allotted duration.
    #[error("timeout: {message}")]
    Timeout { message: String },
    /// `start_node_with` failed; only the error's text is retained.
    #[error("start node failed: {message}")]
    StartNode { message: String },
    /// A `consensus_info` request failed (wraps the `reqwest` error).
    #[error("consensus_info failed: {source}")]
    ConsensusInfo {
        #[from]
        source: reqwest::Error,
    },
}
/// Start node `name` via `handle`, bounding the whole operation by
/// `timeout_duration`.
///
/// An elapsed timeout maps to `ManualTestError::Timeout`; a failure from
/// the control handle maps to `ManualTestError::StartNode` with the
/// error's text.
pub async fn start_node_with_timeout<H: NodeControlHandle + ?Sized>(
    handle: &H,
    name: &str,
    options: StartNodeOptions,
    timeout_duration: Duration,
) -> Result<StartedNode, ManualTestError> {
    let outcome = timeout(timeout_duration, handle.start_node_with(name, options)).await;
    match outcome {
        Err(_elapsed) => Err(ManualTestError::Timeout {
            message: format!("starting node '{name}' exceeded timeout"),
        }),
        Ok(started) => started.map_err(|err: DynError| ManualTestError::StartNode {
            message: err.to_string(),
        }),
    }
}
/// Poll every client until all of them report a block height of at least
/// `min_height`, or until `timeout_duration` elapses.
///
/// One `consensus_info` sweep over all clients is attempted per
/// `poll_interval`. A request error aborts the current sweep and is only
/// surfaced to the caller once the timeout has elapsed, so transient
/// failures while nodes come up are tolerated. (The previous version
/// slept *inside* the per-client loop, so a single round could sleep up
/// to `clients.len()` times.)
pub async fn wait_for_min_height(
    clients: &[ApiClient],
    min_height: u64,
    timeout_duration: Duration,
    poll_interval: Duration,
) -> Result<(), ManualTestError> {
    let start = Instant::now();
    loop {
        let mut heights = Vec::with_capacity(clients.len());
        let mut sweep_error = None;
        for client in clients {
            match client.consensus_info().await {
                Ok(info) => heights.push(info.height),
                Err(err) => {
                    // Abort this sweep; retry after a single poll interval
                    // rather than sleeping once per failing client.
                    sweep_error = Some(err);
                    break;
                }
            }
        }

        if sweep_error.is_none()
            && heights.len() == clients.len()
            && heights.iter().all(|height| *height >= min_height)
        {
            return Ok(());
        }

        if start.elapsed() >= timeout_duration {
            return Err(match sweep_error {
                Some(source) => ManualTestError::ConsensusInfo { source },
                None => ManualTestError::Timeout {
                    message: format!(
                        "min height {min_height} not reached before timeout; heights={heights:?}"
                    ),
                },
            });
        }

        sleep(poll_interval).await;
    }
}

View File

@ -44,7 +44,7 @@ impl RandomRestartWorkload {
if self.include_nodes {
if node_count > 1 {
for index in 0..node_count {
targets.push(Target::Node(index));
targets.push(Target::Node(format!("node-{index}")));
}
} else if node_count == 1 {
info!("chaos restart skipping nodes: only one node configured");
@ -76,7 +76,7 @@ impl RandomRestartWorkload {
let ready = now.checked_sub(self.target_cooldown).unwrap_or(now);
targets
.iter()
.copied()
.cloned()
.map(|target| (target, ready))
.collect()
}
@ -111,16 +111,16 @@ impl RandomRestartWorkload {
let available: Vec<Target> = targets
.iter()
.copied()
.cloned()
.filter(|target| cooldowns.get(target).is_none_or(|ready| *ready <= now))
.collect();
if let Some(choice) = available.choose(&mut thread_rng()).copied() {
if let Some(choice) = available.choose(&mut thread_rng()).cloned() {
tracing::debug!(?choice, "chaos restart picked target");
return Ok(choice);
}
if let Some(choice) = targets.choose(&mut thread_rng()).copied() {
if let Some(choice) = targets.choose(&mut thread_rng()).cloned() {
return Ok(choice);
}
return Err("chaos restart workload has no eligible targets".into());
@ -158,10 +158,10 @@ impl Workload for RandomRestartWorkload {
let target = self.pick_target(&targets, &cooldowns).await?;
match target {
Target::Node(index) => {
tracing::info!(index, "chaos restarting node");
Target::Node(ref name) => {
tracing::info!(name, "chaos restarting node");
handle
.restart_node(index)
.restart_node(name)
.await
.map_err(|err| format!("node restart failed: {err}"))?
}
@ -172,7 +172,7 @@ impl Workload for RandomRestartWorkload {
}
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
enum Target {
Node(usize),
Node(String),
}

View File

@ -1,7 +1,7 @@
VERSION=v0.3.2
LOGOS_BLOCKCHAIN_BUNDLE_VERSION=v4
# Pinned logos-blockchain-node revision used for CI builds and binary bundles.
LOGOS_BLOCKCHAIN_NODE_REV=3f15894f8b4df377e8d3cd9d92ddee9f648046dc
LOGOS_BLOCKCHAIN_NODE_REV=2392190d88e8ae8271fa9321014ea33324be7c28
# Optional: local logos-blockchain-node checkout override (do not commit absolute paths).
# LOGOS_BLOCKCHAIN_NODE_PATH=