mirror of
https://github.com/logos-blockchain/logos-blockchain-testing.git
synced 2026-05-17 07:09:28 +00:00
Compare commits
1 Commits
tagged_ref
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
| e62976a2e8 |
@ -53,10 +53,9 @@ license-files = [{ hash = 0xcb90f5db, path = "LICENSE" }]
|
||||
name = "jsonpath-rust"
|
||||
|
||||
[sources]
|
||||
allow-git = [
|
||||
"https://github.com/EspressoSystems/jellyfish.git",
|
||||
"https://github.com/logos-blockchain/logos-blockchain.git",
|
||||
"https://github.com/logos-co/Overwatch",
|
||||
]
|
||||
unknown-git = "deny"
|
||||
allow-git = ["https://github.com/EspressoSystems/jellyfish.git"]
|
||||
unknown-git = "deny"
|
||||
unknown-registry = "deny"
|
||||
|
||||
[sources.allow-org]
|
||||
github = ["logos-co"]
|
||||
|
||||
35
.github/workflows/lint.yml
vendored
35
.github/workflows/lint.yml
vendored
@ -98,6 +98,41 @@ jobs:
|
||||
restore-keys: ${{ runner.os }}-target-clippy-
|
||||
- run: cargo +nightly-2025-09-14 clippy --all --all-targets --all-features -- -D warnings
|
||||
|
||||
doc_snippets:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Load versions
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if [ ! -f versions.env ]; then
|
||||
echo "versions.env missing; populate VERSION, LOGOS_BLOCKCHAIN_NODE_REV, LOGOS_BLOCKCHAIN_BUNDLE_VERSION" >&2
|
||||
exit 1
|
||||
fi
|
||||
set -a
|
||||
. versions.env
|
||||
set +a
|
||||
# $GITHUB_ENV does not accept comments/blank lines; keep only KEY=VALUE exports.
|
||||
grep -E '^[A-Za-z_][A-Za-z0-9_]*=' versions.env >> "$GITHUB_ENV"
|
||||
: "${VERSION:?Missing VERSION}"
|
||||
: "${LOGOS_BLOCKCHAIN_NODE_REV:?Missing LOGOS_BLOCKCHAIN_NODE_REV}"
|
||||
: "${LOGOS_BLOCKCHAIN_BUNDLE_VERSION:?Missing LOGOS_BLOCKCHAIN_BUNDLE_VERSION}"
|
||||
- name: Install nomos circuits
|
||||
run: |
|
||||
./scripts/setup/setup-logos-blockchain-circuits.sh "${VERSION}" "$HOME/.logos-blockchain-circuits"
|
||||
echo "LOGOS_BLOCKCHAIN_CIRCUITS=$HOME/.logos-blockchain-circuits" >> "$GITHUB_ENV"
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: nightly-2025-09-14
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: ${{ runner.os }}-cargo-
|
||||
- run: cargo +nightly-2025-09-14 check -p doc-snippets
|
||||
|
||||
deny:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@ -15,7 +15,7 @@ ci-artifacts/
|
||||
tests/kzgrs/circuits_bundle/
|
||||
NOMOS_RUST_SOURCES_ONLY.txt
|
||||
dump.zsh
|
||||
nomos/assets/stack/bin/
|
||||
testing-framework/assets/stack/bin/
|
||||
testing-framework/assets/stack/kzgrs_test_params/
|
||||
null
|
||||
|
||||
|
||||
@ -33,12 +33,6 @@ repos:
|
||||
- id: cargo-machete
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: node-rev-sync
|
||||
name: node rev sync
|
||||
language: system
|
||||
entry: sh scripts/checks/check-node-rev-sync.sh
|
||||
pass_filenames: false
|
||||
files: ^(versions\.env|Cargo\.toml)$
|
||||
- id: cargo-hack-check
|
||||
language: script
|
||||
name: cargo hack check
|
||||
|
||||
750
Cargo.lock
generated
750
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
139
Cargo.toml
139
Cargo.toml
@ -1,15 +1,15 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"logos/examples",
|
||||
"logos/runtime/env",
|
||||
"logos/runtime/ext",
|
||||
"logos/runtime/workloads",
|
||||
"examples",
|
||||
"examples/doc-snippets",
|
||||
"testing-framework/configs",
|
||||
"testing-framework/core",
|
||||
"testing-framework/deployers/compose",
|
||||
"testing-framework/deployers/k8s",
|
||||
"testing-framework/deployers/local",
|
||||
"testing-framework/tools/cfgsync-core",
|
||||
"testing-framework/tools/cfgsync-runtime",
|
||||
"testing-framework/env",
|
||||
"testing-framework/tools/cfgsync_tf",
|
||||
"testing-framework/workflows",
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
@ -31,84 +31,61 @@ all = "allow"
|
||||
|
||||
[workspace.dependencies]
|
||||
# Local testing framework crates
|
||||
cfgsync-core = { default-features = false, path = "testing-framework/tools/cfgsync-core" }
|
||||
lb-ext = { default-features = false, path = "logos/runtime/ext" }
|
||||
lb-framework = { default-features = false, package = "testing_framework", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
lb-workloads = { default-features = false, path = "logos/runtime/workloads" }
|
||||
testing-framework-config = { default-features = false, path = "testing-framework/configs" }
|
||||
testing-framework-core = { default-features = false, path = "testing-framework/core" }
|
||||
testing-framework-env = { default-features = false, path = "logos/runtime/env" }
|
||||
testing-framework-env = { default-features = false, path = "testing-framework/env" }
|
||||
testing-framework-runner-compose = { default-features = false, path = "testing-framework/deployers/compose" }
|
||||
testing-framework-runner-k8s = { default-features = false, path = "testing-framework/deployers/k8s" }
|
||||
testing-framework-runner-local = { default-features = false, path = "testing-framework/deployers/local" }
|
||||
testing-framework-workflows = { default-features = false, path = "testing-framework/workflows" }
|
||||
|
||||
# Logos git dependencies (pinned to latest master)
|
||||
cfgsync_tf = { default-features = false, path = "testing-framework/tools/cfgsync_tf" }
|
||||
lb-api-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-api-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-blend-message = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-message", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-blend-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-blend-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-chain-broadcast-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-broadcast-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-chain-leader-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-leader-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-chain-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-network-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-chain-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-chain-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-common-http-client = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-common-http-client", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-core", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-cryptarchia-engine = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cryptarchia-engine", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-cryptarchia-sync = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-cryptarchia-sync", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-groth16 = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-groth16", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-http-api-common = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-http-api-common", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-key-management-system-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-key-management-system-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-ledger = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-ledger", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-libp2p = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-libp2p", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-network-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-network-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-node = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-node", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-poc = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-poc", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-pol = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-pol", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-sdp-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-sdp-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-tests = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tests", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-time-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-time-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-tracing = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tracing", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-tracing-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tracing-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-tx-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-tx-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-utils = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-utils", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-wallet", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-wallet-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-wallet-service", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
lb-zksign = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", package = "logos-blockchain-zksign", rev = "feac5ab97ef6dfcebcf6536363a5f330cb79b5e0" }
|
||||
|
||||
# Logos dependencies (from logos-blockchain master @ deccbb2d2)
|
||||
broadcast-service = { default-features = false, package = "logos-blockchain-chain-broadcast-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
cfgsync_runtime = { default-features = false, package = "cfgsync-runtime", path = "testing-framework/tools/cfgsync-runtime" }
|
||||
chain-leader = { default-features = false, features = [
|
||||
"pol-dev-mode",
|
||||
], package = "logos-blockchain-chain-leader-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
chain-network = { default-features = false, package = "logos-blockchain-chain-network-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
chain-service = { default-features = false, package = "logos-blockchain-chain-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
common-http-client = { default-features = false, package = "logos-blockchain-common-http-client", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
cryptarchia-engine = { default-features = false, package = "logos-blockchain-cryptarchia-engine", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
cryptarchia-sync = { default-features = false, package = "logos-blockchain-cryptarchia-sync", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
groth16 = { default-features = false, package = "logos-blockchain-groth16", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
key-management-system-service = { default-features = false, package = "logos-blockchain-key-management-system-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-api = { default-features = false, package = "logos-blockchain-api-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-blend-message = { default-features = false, package = "logos-blockchain-blend-message", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-blend-service = { default-features = false, package = "logos-blockchain-blend-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-core = { default-features = false, package = "logos-blockchain-core", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-http-api-common = { default-features = false, package = "logos-blockchain-http-api-common", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-ledger = { default-features = false, package = "logos-blockchain-ledger", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-libp2p = { default-features = false, package = "logos-blockchain-libp2p", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-network = { default-features = false, package = "logos-blockchain-network-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-node = { default-features = false, features = [
|
||||
"testing",
|
||||
], package = "logos-blockchain-node", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-sdp = { default-features = false, package = "logos-blockchain-sdp-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-time = { default-features = false, package = "logos-blockchain-time-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-tracing = { default-features = false, package = "logos-blockchain-tracing", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-tracing-service = { default-features = false, package = "logos-blockchain-tracing-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-utils = { default-features = false, package = "logos-blockchain-utils", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
nomos-wallet = { default-features = false, package = "logos-blockchain-wallet-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
poc = { default-features = false, package = "logos-blockchain-poc", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
pol = { default-features = false, package = "logos-blockchain-pol", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
tx-service = { default-features = false, package = "logos-blockchain-tx-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
wallet = { default-features = false, package = "logos-blockchain-wallet", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
zksign = { default-features = false, package = "logos-blockchain-zksign", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
# lb_* aliases (nomos-node repo naming)
|
||||
lb_http_api_common = { default-features = false, package = "logos-blockchain-http-api-common", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
lb_tracing = { default-features = false, package = "logos-blockchain-tracing", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
lb_tracing_service = { default-features = false, package = "logos-blockchain-tracing-service", git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "5ebe88a6e89ec6d7dd89e123c46f6b26dd1e4667" }
|
||||
# External crates
|
||||
async-trait = { default-features = false, version = "0.1" }
|
||||
bytes = { default-features = false, version = "1.3" }
|
||||
hex = { default-features = false, version = "0.4.3" }
|
||||
libp2p = { default-features = false, version = "0.55" }
|
||||
num-bigint = { default-features = false, version = "0.4" }
|
||||
overwatch = { default-features = false, git = "https://github.com/logos-co/Overwatch" }
|
||||
overwatch-derive = { default-features = false, git = "https://github.com/logos-co/Overwatch" }
|
||||
parking_lot = { default-features = false, version = "0.12" }
|
||||
rand = { default-features = false, features = ["std", "std_rng"], version = "0.8" }
|
||||
reqwest = { default-features = false, version = "0.12" }
|
||||
serde = { default-features = true, features = ["derive"], version = "1.0" }
|
||||
serde_json = { default-features = false, version = "1.0" }
|
||||
serde_path_to_error = { default-features = false, version = "0.1" }
|
||||
serde_with = { default-features = false, version = "3.14.0" }
|
||||
serde_yaml = { default-features = false, version = "0.9.33" }
|
||||
tempfile = { default-features = false, version = "3" }
|
||||
thiserror = { default-features = false, version = "2.0" }
|
||||
time = { default-features = false, version = "0.3" }
|
||||
tokio = { default-features = false, version = "1" }
|
||||
tracing = { default-features = false, version = "0.1" }
|
||||
uuid = { default-features = false, version = "1", features = ["v4"] }
|
||||
|
||||
[patch."https://github.com/logos-blockchain/logos-blockchain-testing.git"]
|
||||
testing-framework-core = { path = "testing-framework/core" }
|
||||
testing-framework-env = { path = "logos/runtime/env" }
|
||||
testing-framework-runner-local = { path = "testing-framework/deployers/local" }
|
||||
|
||||
[patch."https://github.com/logos-blockchain/logos-blockchain.git"]
|
||||
testing-framework-core = { path = "testing-framework/core" }
|
||||
testing-framework-env = { path = "logos/runtime/env" }
|
||||
testing-framework-runner-local = { path = "testing-framework/deployers/local" }
|
||||
async-trait = { default-features = false, version = "0.1" }
|
||||
bytes = { default-features = false, version = "1.3" }
|
||||
hex = { default-features = false, version = "0.4.3" }
|
||||
libp2p = { default-features = false, version = "0.55" }
|
||||
overwatch = { default-features = false, git = "https://github.com/logos-co/Overwatch", rev = "f5a9902" }
|
||||
overwatch-derive = { default-features = false, git = "https://github.com/logos-co/Overwatch", rev = "f5a9902" }
|
||||
rand = { default-features = false, version = "0.8" }
|
||||
reqwest = { default-features = false, version = "0.12" }
|
||||
serde = { default-features = true, features = ["derive"], version = "1.0" }
|
||||
serde_json = { default-features = false, version = "1.0" }
|
||||
serde_with = { default-features = false, version = "3.14.0" }
|
||||
serde_yaml = { default-features = false, version = "0.9.33" }
|
||||
tempfile = { default-features = false, version = "3" }
|
||||
thiserror = { default-features = false, version = "2.0" }
|
||||
tokio = { default-features = false, version = "1" }
|
||||
tracing = { default-features = false, version = "0.1" }
|
||||
|
||||
201
LICENSE-APACHE-v2
Normal file
201
LICENSE-APACHE-v2
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2025-2026 Logos
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
21
LICENSE-MIT
Normal file
21
LICENSE-MIT
Normal file
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright © 2025-2026 Logos
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
@ -11,13 +11,11 @@ version = "0.1.0"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1"
|
||||
lb-ext = { workspace = true }
|
||||
lb-framework = { workspace = true }
|
||||
lb-workloads = { workspace = true }
|
||||
testing-framework-core = { workspace = true }
|
||||
testing-framework-runner-compose = { workspace = true }
|
||||
testing-framework-runner-k8s = { workspace = true }
|
||||
testing-framework-runner-local = { workspace = true }
|
||||
testing-framework-workflows = { workspace = true }
|
||||
tokio = { features = ["macros", "net", "rt-multi-thread", "time"], workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { features = ["env-filter", "fmt"], version = "0.3" }
|
||||
0
examples/cucumber/features/compose_smoke.feature
Normal file
0
examples/cucumber/features/compose_smoke.feature
Normal file
0
examples/cucumber/features/local_smoke.feature
Normal file
0
examples/cucumber/features/local_smoke.feature
Normal file
116
examples/src/bin/compose_runner.rs
Normal file
116
examples/src/bin/compose_runner.rs
Normal file
@ -0,0 +1,116 @@
|
||||
use std::{process, time::Duration};
|
||||
|
||||
use anyhow::{Context as _, Result};
|
||||
use runner_examples::{
|
||||
ChaosBuilderExt as _, DeployerKind, ScenarioBuilderExt as _, demo, read_env_any,
|
||||
};
|
||||
use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder};
|
||||
use testing_framework_runner_compose::{ComposeDeployer, ComposeRunnerError};
|
||||
use tracing::{info, warn};
|
||||
|
||||
const MIXED_TXS_PER_BLOCK: u64 = 5;
|
||||
const TOTAL_WALLETS: usize = 1000;
|
||||
const TRANSACTION_WALLETS: usize = 500;
|
||||
|
||||
// Chaos Testing Constants
|
||||
const CHAOS_MIN_DELAY_SECS: u64 = 120;
|
||||
const CHAOS_MAX_DELAY_SECS: u64 = 180;
|
||||
const CHAOS_COOLDOWN_SECS: u64 = 240;
|
||||
const CHAOS_DELAY_HEADROOM_SECS: u64 = 1;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
runner_examples::defaults::init_node_log_dir_defaults(DeployerKind::Compose);
|
||||
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
let nodes = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_NODES"], demo::DEFAULT_NODES);
|
||||
|
||||
let run_secs = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
|
||||
|
||||
info!(nodes, run_secs, "starting compose runner demo");
|
||||
|
||||
if let Err(err) = run_compose_case(nodes, Duration::from_secs(run_secs)).await {
|
||||
warn!("compose runner demo failed: {err:#}");
|
||||
process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
async fn run_compose_case(nodes: usize, run_duration: Duration) -> Result<()> {
|
||||
info!(
|
||||
nodes,
|
||||
duration_secs = run_duration.as_secs(),
|
||||
"building scenario plan"
|
||||
);
|
||||
|
||||
let scenario =
|
||||
ScenarioBuilder::topology_with(|t| t.network_star().nodes(nodes)).enable_node_control();
|
||||
|
||||
let scenario = if let Some((chaos_min_delay, chaos_max_delay, chaos_target_cooldown)) =
|
||||
chaos_timings(run_duration)
|
||||
{
|
||||
scenario.chaos_with(|c| {
|
||||
c.restart()
|
||||
.min_delay(chaos_min_delay)
|
||||
.max_delay(chaos_max_delay)
|
||||
.target_cooldown(chaos_target_cooldown)
|
||||
.apply()
|
||||
})
|
||||
} else {
|
||||
scenario
|
||||
};
|
||||
|
||||
let mut plan = scenario
|
||||
.wallets(TOTAL_WALLETS)
|
||||
.transactions_with(|txs| txs.rate(MIXED_TXS_PER_BLOCK).users(TRANSACTION_WALLETS))
|
||||
.with_run_duration(run_duration)
|
||||
.expect_consensus_liveness()
|
||||
.build()?;
|
||||
|
||||
let deployer = ComposeDeployer::new();
|
||||
info!("deploying compose stack");
|
||||
|
||||
let runner: Runner = match deployer.deploy(&plan).await {
|
||||
Ok(runner) => runner,
|
||||
Err(ComposeRunnerError::DockerUnavailable) => {
|
||||
warn!("Docker is unavailable; cannot run compose demo");
|
||||
return Ok(());
|
||||
}
|
||||
Err(err) => return Err(anyhow::Error::new(err)).context("deploying compose stack failed"),
|
||||
};
|
||||
|
||||
if !runner.context().telemetry().is_configured() {
|
||||
warn!(
|
||||
"metrics querying is disabled; set LOGOS_BLOCKCHAIN_METRICS_QUERY_URL to enable PromQL queries"
|
||||
);
|
||||
}
|
||||
|
||||
info!("running scenario");
|
||||
runner
|
||||
.run(&mut plan)
|
||||
.await
|
||||
.context("running compose scenario failed")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn chaos_timings(run_duration: Duration) -> Option<(Duration, Duration, Duration)> {
|
||||
let headroom = Duration::from_secs(CHAOS_DELAY_HEADROOM_SECS);
|
||||
let Some(max_allowed_delay) = run_duration.checked_sub(headroom) else {
|
||||
return None;
|
||||
};
|
||||
|
||||
let chaos_min_delay = Duration::from_secs(CHAOS_MIN_DELAY_SECS);
|
||||
if max_allowed_delay <= chaos_min_delay {
|
||||
return None;
|
||||
}
|
||||
|
||||
let chaos_max_delay = Duration::from_secs(CHAOS_MAX_DELAY_SECS)
|
||||
.min(max_allowed_delay)
|
||||
.max(chaos_min_delay);
|
||||
|
||||
let chaos_target_cooldown = Duration::from_secs(CHAOS_COOLDOWN_SECS)
|
||||
.min(max_allowed_delay)
|
||||
.max(chaos_max_delay);
|
||||
|
||||
Some((chaos_min_delay, chaos_max_delay, chaos_target_cooldown))
|
||||
}
|
||||
@ -1,15 +1,18 @@
|
||||
use std::{process, time::Duration};
|
||||
use std::{env, process, time::Duration};
|
||||
|
||||
use anyhow::{Context as _, Result};
|
||||
use lb_ext::{
|
||||
CoreBuilderExt as _, LbcExtEnv, LbcK8sDeployer as K8sDeployer, ScenarioBuilder,
|
||||
ScenarioBuilderExt as _, configs::network::Libp2pNetworkLayout,
|
||||
use runner_examples::{ScenarioBuilderExt as _, demo, read_env_any};
|
||||
use testing_framework_core::scenario::{
|
||||
Deployer as _, ObservabilityCapability, Runner, ScenarioBuilder,
|
||||
};
|
||||
use runner_examples::{demo, read_env_any, read_topology_seed_or_default};
|
||||
use testing_framework_core::scenario::{Deployer as _, Runner};
|
||||
use testing_framework_runner_k8s::K8sRunnerError;
|
||||
use testing_framework_runner_k8s::{K8sDeployer, K8sRunnerError};
|
||||
use testing_framework_workflows::ObservabilityBuilderExt as _;
|
||||
use tracing::{info, warn};
|
||||
|
||||
const MIXED_TXS_PER_BLOCK: u64 = 2;
|
||||
const TOTAL_WALLETS: usize = 200;
|
||||
const TRANSACTION_WALLETS: usize = 50;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
tracing_subscriber::fmt::init();
|
||||
@ -31,31 +34,31 @@ async fn run_k8s_case(nodes: usize, run_duration: Duration) -> Result<()> {
|
||||
"building scenario plan"
|
||||
);
|
||||
|
||||
let seed = read_topology_seed_or_default();
|
||||
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(nodes))
|
||||
.with_capabilities(ObservabilityCapability::default())
|
||||
.wallets(TOTAL_WALLETS)
|
||||
.transactions_with(|txs| txs.rate(MIXED_TXS_PER_BLOCK).users(TRANSACTION_WALLETS))
|
||||
.with_run_duration(run_duration)
|
||||
.expect_consensus_liveness();
|
||||
|
||||
let scenario = ScenarioBuilder::deployment_with(|t| {
|
||||
t.with_network_layout(Libp2pNetworkLayout::Star)
|
||||
.with_node_count(nodes)
|
||||
})
|
||||
.enable_observability()
|
||||
.with_run_duration(run_duration)
|
||||
.with_deployment_seed(seed)
|
||||
.initialize_wallet(
|
||||
demo::DEFAULT_TOTAL_WALLETS as u64 * 100,
|
||||
demo::DEFAULT_TOTAL_WALLETS,
|
||||
)
|
||||
.transactions_with(|txs| {
|
||||
txs.rate(demo::DEFAULT_MIXED_TXS_PER_BLOCK)
|
||||
.users(demo::DEFAULT_TRANSACTION_WALLETS)
|
||||
})
|
||||
.expect_consensus_liveness();
|
||||
if let Ok(url) = env::var("LOGOS_BLOCKCHAIN_METRICS_QUERY_URL") {
|
||||
if !url.trim().is_empty() {
|
||||
scenario = scenario.with_metrics_query_url_str(url.trim());
|
||||
}
|
||||
}
|
||||
|
||||
if let Ok(url) = env::var("LOGOS_BLOCKCHAIN_METRICS_OTLP_INGEST_URL") {
|
||||
if !url.trim().is_empty() {
|
||||
scenario = scenario.with_metrics_otlp_ingest_url_str(url.trim());
|
||||
}
|
||||
}
|
||||
|
||||
let mut plan = scenario.build()?;
|
||||
|
||||
let deployer = K8sDeployer::new();
|
||||
info!("deploying k8s stack");
|
||||
|
||||
let runner: Runner<LbcExtEnv> = match deployer.deploy(&plan).await {
|
||||
let runner: Runner = match deployer.deploy(&plan).await {
|
||||
Ok(runner) => runner,
|
||||
Err(K8sRunnerError::ClientInit { source }) => {
|
||||
warn!("Kubernetes cluster unavailable ({source}); skipping");
|
||||
@ -1,14 +1,16 @@
|
||||
use std::{process, time::Duration};
|
||||
|
||||
use anyhow::{Context as _, Result};
|
||||
use lb_framework::{
|
||||
CoreBuilderExt as _, LbcEnv, LbcLocalDeployer, ScenarioBuilder, ScenarioBuilderExt as _,
|
||||
configs::network::Libp2pNetworkLayout,
|
||||
};
|
||||
use runner_examples::{DeployerKind, demo, read_env_any, read_topology_seed_or_default};
|
||||
use testing_framework_core::scenario::{Deployer as _, Runner};
|
||||
use runner_examples::{DeployerKind, ScenarioBuilderExt as _, demo, read_env_any};
|
||||
use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder};
|
||||
use testing_framework_runner_local::LocalDeployer;
|
||||
use tracing::{info, warn};
|
||||
|
||||
const MIXED_TXS_PER_BLOCK: u64 = 5;
|
||||
const TOTAL_WALLETS: usize = 1000;
|
||||
const TRANSACTION_WALLETS: usize = 500;
|
||||
const SMOKE_RUN_SECS_MAX: u64 = 30;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
runner_examples::defaults::init_node_log_dir_defaults(DeployerKind::Local);
|
||||
@ -33,30 +35,24 @@ async fn run_local_case(nodes: usize, run_duration: Duration) -> Result<()> {
|
||||
"building scenario plan"
|
||||
);
|
||||
|
||||
let seed = read_topology_seed_or_default();
|
||||
let scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(nodes))
|
||||
.wallets(TOTAL_WALLETS)
|
||||
.with_run_duration(run_duration);
|
||||
|
||||
let scenario = ScenarioBuilder::deployment_with(|t| {
|
||||
t.with_network_layout(Libp2pNetworkLayout::Star)
|
||||
.with_node_count(nodes)
|
||||
})
|
||||
.with_run_duration(run_duration)
|
||||
.with_deployment_seed(seed)
|
||||
.initialize_wallet(
|
||||
demo::DEFAULT_TOTAL_WALLETS as u64 * 100,
|
||||
demo::DEFAULT_TOTAL_WALLETS,
|
||||
)
|
||||
.transactions_with(|txs| {
|
||||
txs.rate(demo::DEFAULT_MIXED_TXS_PER_BLOCK)
|
||||
.users(demo::DEFAULT_TRANSACTION_WALLETS)
|
||||
})
|
||||
.expect_consensus_liveness();
|
||||
let scenario = if run_duration.as_secs() <= SMOKE_RUN_SECS_MAX {
|
||||
scenario
|
||||
} else {
|
||||
scenario
|
||||
.transactions_with(|txs| txs.rate(MIXED_TXS_PER_BLOCK).users(TRANSACTION_WALLETS))
|
||||
.expect_consensus_liveness()
|
||||
};
|
||||
|
||||
let mut plan = scenario.build()?;
|
||||
|
||||
let deployer = LbcLocalDeployer::default();
|
||||
let deployer = LocalDeployer::default();
|
||||
info!("deploying local nodes");
|
||||
|
||||
let runner: Runner<LbcEnv> = deployer
|
||||
let runner: Runner = deployer
|
||||
.deploy(&plan)
|
||||
.await
|
||||
.context("deploying local nodes failed")?;
|
||||
@ -11,17 +11,17 @@ const DEFAULT_NODE_LOG_DIR_REL: &str = ".tmp/node-logs";
|
||||
const DEFAULT_CONTAINER_NODE_LOG_DIR: &str = "/tmp/node-logs";
|
||||
|
||||
fn set_default_env(key: &str, value: &str) {
|
||||
if env::var_os(key).is_none() {
|
||||
if std::env::var_os(key).is_none() {
|
||||
// SAFETY: Used as an early-run default. Prefer setting env vars in the
|
||||
// shell for multi-threaded runs.
|
||||
unsafe {
|
||||
env::set_var(key, value);
|
||||
std::env::set_var(key, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn init_logging_defaults() {
|
||||
set_default_env("TF_KEEP_LOGS", "1");
|
||||
set_default_env("LOGOS_BLOCKCHAIN_TESTS_KEEP_LOGS", "1");
|
||||
set_default_env("LOGOS_BLOCKCHAIN_LOG_LEVEL", "info");
|
||||
set_default_env("RUST_LOG", "info");
|
||||
}
|
||||
2
examples/src/demo.rs
Normal file
2
examples/src/demo.rs
Normal file
@ -0,0 +1,2 @@
|
||||
pub const DEFAULT_NODES: usize = 2;
|
||||
pub const DEFAULT_RUN_SECS: u64 = 60;
|
||||
10
examples/src/env.rs
Normal file
10
examples/src/env.rs
Normal file
@ -0,0 +1,10 @@
|
||||
use std::{env, str::FromStr};
|
||||
|
||||
pub fn read_env_any<T>(keys: &[&str], default: T) -> T
|
||||
where
|
||||
T: FromStr + Copy,
|
||||
{
|
||||
keys.iter()
|
||||
.find_map(|key| env::var(key).ok().and_then(|raw| raw.parse::<T>().ok()))
|
||||
.unwrap_or(default)
|
||||
}
|
||||
13
examples/src/lib.rs
Normal file
13
examples/src/lib.rs
Normal file
@ -0,0 +1,13 @@
|
||||
pub mod defaults;
|
||||
pub mod demo;
|
||||
pub mod env;
|
||||
|
||||
pub use env::read_env_any;
|
||||
pub use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
|
||||
pub enum DeployerKind {
|
||||
#[default]
|
||||
Local,
|
||||
Compose,
|
||||
}
|
||||
@ -2,13 +2,11 @@ use std::time::Duration;
|
||||
|
||||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use lb_framework::{
|
||||
CoreBuilderExt as _, LbcEnv, LbcLocalDeployer, ScenarioBuilder,
|
||||
configs::network::Libp2pNetworkLayout,
|
||||
};
|
||||
use testing_framework_core::scenario::{
|
||||
Deployer, DynError, PeerSelection, RunContext, StartNodeOptions, Workload,
|
||||
Deployer, DynError, PeerSelection, RunContext, ScenarioBuilder, StartNodeOptions, Workload,
|
||||
};
|
||||
use testing_framework_runner_local::LocalDeployer;
|
||||
use testing_framework_workflows::ScenarioBuilderExt;
|
||||
use tokio::time::{sleep, timeout};
|
||||
use tracing_subscriber::fmt::try_init;
|
||||
|
||||
@ -27,12 +25,12 @@ impl JoinNodeWorkload {
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Workload<LbcEnv> for JoinNodeWorkload {
|
||||
impl Workload for JoinNodeWorkload {
|
||||
fn name(&self) -> &str {
|
||||
"dynamic_join"
|
||||
}
|
||||
|
||||
async fn start(&self, ctx: &RunContext<LbcEnv>) -> Result<(), DynError> {
|
||||
async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
|
||||
let handle = ctx
|
||||
.node_control()
|
||||
.ok_or_else(|| "dynamic join workload requires node control".to_owned())?;
|
||||
@ -40,7 +38,7 @@ impl Workload<LbcEnv> for JoinNodeWorkload {
|
||||
sleep(START_DELAY).await;
|
||||
|
||||
let node = handle.start_node(&self.name).await?;
|
||||
let client = node.client;
|
||||
let client = node.api;
|
||||
|
||||
timeout(READY_TIMEOUT, async {
|
||||
loop {
|
||||
@ -73,22 +71,25 @@ impl JoinNodeWithPeersWorkload {
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Workload<LbcEnv> for JoinNodeWithPeersWorkload {
|
||||
impl Workload for JoinNodeWithPeersWorkload {
|
||||
fn name(&self) -> &str {
|
||||
"dynamic_join_with_peers"
|
||||
}
|
||||
|
||||
async fn start(&self, ctx: &RunContext<LbcEnv>) -> Result<(), DynError> {
|
||||
async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
|
||||
let handle = ctx
|
||||
.node_control()
|
||||
.ok_or_else(|| "dynamic join workload requires node control".to_owned())?;
|
||||
|
||||
sleep(START_DELAY).await;
|
||||
|
||||
let mut options = StartNodeOptions::<LbcEnv>::default();
|
||||
options.peers = PeerSelection::Named(self.peers.clone());
|
||||
let options = StartNodeOptions {
|
||||
peers: PeerSelection::Named(self.peers.clone()),
|
||||
config_patch: None,
|
||||
persist_dir: None,
|
||||
};
|
||||
let node = handle.start_node_with(&self.name, options).await?;
|
||||
let client = node.client;
|
||||
let client = node.api;
|
||||
|
||||
timeout(READY_TIMEOUT, async {
|
||||
loop {
|
||||
@ -111,17 +112,14 @@ impl Workload<LbcEnv> for JoinNodeWithPeersWorkload {
|
||||
async fn dynamic_join_reaches_consensus_liveness() -> Result<()> {
|
||||
let _ = try_init();
|
||||
|
||||
let mut scenario = ScenarioBuilder::deployment_with(|t| {
|
||||
t.with_network_layout(Libp2pNetworkLayout::Star)
|
||||
.with_node_count(2)
|
||||
})
|
||||
.enable_node_control()
|
||||
.with_workload(JoinNodeWorkload::new("joiner"))
|
||||
.with_expectation(lb_framework::workloads::ConsensusLiveness::<LbcEnv>::default())
|
||||
.with_run_duration(Duration::from_secs(60))
|
||||
.build()?;
|
||||
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
|
||||
.enable_node_control()
|
||||
.with_workload(JoinNodeWorkload::new("joiner"))
|
||||
.expect_consensus_liveness()
|
||||
.with_run_duration(Duration::from_secs(60))
|
||||
.build()?;
|
||||
|
||||
let deployer = LbcLocalDeployer::default();
|
||||
let deployer = LocalDeployer::default();
|
||||
let runner = deployer.deploy(&scenario).await?;
|
||||
let _handle = runner.run(&mut scenario).await?;
|
||||
|
||||
@ -131,20 +129,17 @@ async fn dynamic_join_reaches_consensus_liveness() -> Result<()> {
|
||||
#[tokio::test]
|
||||
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored`"]
|
||||
async fn dynamic_join_with_peers_reaches_consensus_liveness() -> Result<()> {
|
||||
let mut scenario = ScenarioBuilder::deployment_with(|t| {
|
||||
t.with_network_layout(Libp2pNetworkLayout::Star)
|
||||
.with_node_count(2)
|
||||
})
|
||||
.enable_node_control()
|
||||
.with_workload(JoinNodeWithPeersWorkload::new(
|
||||
"joiner",
|
||||
vec!["node-0".to_string()],
|
||||
))
|
||||
.with_expectation(lb_framework::workloads::ConsensusLiveness::<LbcEnv>::default())
|
||||
.with_run_duration(Duration::from_secs(60))
|
||||
.build()?;
|
||||
let mut scenario = ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
|
||||
.enable_node_control()
|
||||
.with_workload(JoinNodeWithPeersWorkload::new(
|
||||
"joiner",
|
||||
vec!["node-0".to_string()],
|
||||
))
|
||||
.expect_consensus_liveness()
|
||||
.with_run_duration(Duration::from_secs(60))
|
||||
.build()?;
|
||||
|
||||
let deployer = LbcLocalDeployer::default();
|
||||
let deployer = LocalDeployer::default();
|
||||
let runner = deployer.deploy(&scenario).await?;
|
||||
let _handle = runner.run(&mut scenario).await?;
|
||||
|
||||
139
examples/tests/manual_cluster.rs
Normal file
139
examples/tests/manual_cluster.rs
Normal file
@ -0,0 +1,139 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Result;
|
||||
use testing_framework_core::{
|
||||
scenario::{PeerSelection, StartNodeOptions},
|
||||
topology::config::TopologyConfig,
|
||||
};
|
||||
use testing_framework_runner_local::LocalDeployer;
|
||||
use tokio::time::sleep;
|
||||
use tracing_subscriber::fmt::try_init;
|
||||
|
||||
const MAX_HEIGHT_DIFF: u64 = 5;
|
||||
const CONVERGENCE_TIMEOUT: Duration = Duration::from_secs(60);
|
||||
const CONVERGENCE_POLL: Duration = Duration::from_secs(2);
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_two_clusters_merge`"]
|
||||
async fn manual_cluster_two_clusters_merge() -> Result<()> {
|
||||
let _ = try_init();
|
||||
// Required env vars (set on the command line when running this test):
|
||||
// - `RUST_LOG=info` (optional)
|
||||
let config = TopologyConfig::with_node_numbers(2);
|
||||
let deployer = LocalDeployer::new();
|
||||
let cluster = deployer.manual_cluster(config)?;
|
||||
// Nodes are stopped automatically when the cluster is dropped.
|
||||
|
||||
println!("starting node a");
|
||||
|
||||
let node_a = cluster
|
||||
.start_node_with(
|
||||
"a",
|
||||
StartNodeOptions {
|
||||
peers: PeerSelection::None,
|
||||
config_patch: None,
|
||||
persist_dir: None,
|
||||
},
|
||||
)
|
||||
.await?
|
||||
.api;
|
||||
|
||||
println!("waiting briefly before starting c");
|
||||
sleep(Duration::from_secs(30)).await;
|
||||
|
||||
println!("starting node c -> a");
|
||||
let node_c = cluster
|
||||
.start_node_with(
|
||||
"c",
|
||||
StartNodeOptions {
|
||||
peers: PeerSelection::Named(vec!["node-a".to_owned()]),
|
||||
config_patch: None,
|
||||
persist_dir: None,
|
||||
},
|
||||
)
|
||||
.await?
|
||||
.api;
|
||||
|
||||
println!("waiting for network readiness: cluster a,c");
|
||||
cluster.wait_network_ready().await?;
|
||||
|
||||
let start = tokio::time::Instant::now();
|
||||
|
||||
loop {
|
||||
let a_info = node_a.consensus_info().await?;
|
||||
let c_info = node_c.consensus_info().await?;
|
||||
let a_height = a_info.height;
|
||||
let c_height = c_info.height;
|
||||
let diff = a_height.abs_diff(c_height);
|
||||
|
||||
if diff <= MAX_HEIGHT_DIFF {
|
||||
println!(
|
||||
"final heights: node-a={}, node-c={}, diff={}",
|
||||
a_height, c_height, diff
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if start.elapsed() >= CONVERGENCE_TIMEOUT {
|
||||
return Err(anyhow::anyhow!(
|
||||
"height diff too large after timeout: {diff} > {MAX_HEIGHT_DIFF} (node-a={a_height}, node-c={c_height})"
|
||||
));
|
||||
}
|
||||
|
||||
sleep(CONVERGENCE_POLL).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_with_persist_dir`"]
|
||||
async fn manual_cluster_with_persist_dir() -> Result<()> {
|
||||
use std::path::PathBuf;
|
||||
|
||||
let _ = try_init();
|
||||
// Required env vars (set on the command line when running this test):
|
||||
// - `RUST_LOG=info` (optional)
|
||||
let config = TopologyConfig::with_node_numbers(1);
|
||||
let deployer = LocalDeployer::new();
|
||||
let cluster = deployer.manual_cluster(config)?;
|
||||
|
||||
let persist_dir = PathBuf::from("/tmp/test-node-persist-dir");
|
||||
|
||||
println!("starting validator with persist_dir: {:?}", persist_dir);
|
||||
|
||||
let _node = cluster
|
||||
.start_node_with(
|
||||
"test",
|
||||
StartNodeOptions {
|
||||
peers: PeerSelection::None,
|
||||
config_patch: None,
|
||||
persist_dir: Some(persist_dir.clone()),
|
||||
},
|
||||
)
|
||||
.await?
|
||||
.api;
|
||||
|
||||
println!("validator started, waiting briefly");
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
|
||||
// Drop the cluster to trigger the persist logic
|
||||
drop(cluster);
|
||||
|
||||
println!("cluster dropped, checking if persist_dir exists");
|
||||
|
||||
// Verify the persist_dir was created
|
||||
if !persist_dir.exists() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"persist_dir was not created: {:?}",
|
||||
persist_dir
|
||||
));
|
||||
}
|
||||
|
||||
println!("persist_dir verified: {:?}", persist_dir);
|
||||
|
||||
// Clean up
|
||||
if persist_dir.exists() {
|
||||
std::fs::remove_dir_all(&persist_dir)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
114
examples/tests/node_config_override.rs
Normal file
114
examples/tests/node_config_override.rs
Normal file
@ -0,0 +1,114 @@
|
||||
use std::{
|
||||
net::{SocketAddr, TcpListener},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::Result;
|
||||
use testing_framework_core::{
|
||||
nodes::ApiClient,
|
||||
scenario::{Deployer, PeerSelection, ScenarioBuilder, StartNodeOptions},
|
||||
topology::config::TopologyConfig,
|
||||
};
|
||||
use testing_framework_runner_local::LocalDeployer;
|
||||
use tracing_subscriber::fmt::try_init;
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_api_port_override`"]
|
||||
async fn manual_cluster_api_port_override() -> Result<()> {
|
||||
let _ = try_init();
|
||||
// Required env vars (set on the command line when running this test):
|
||||
// - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
|
||||
// - `LOGOS_BLOCKCHAIN_CIRCUITS=...`
|
||||
// - `RUST_LOG=info` (optional)
|
||||
|
||||
let api_port = random_api_port();
|
||||
|
||||
let deployer = LocalDeployer::new();
|
||||
let cluster = deployer.manual_cluster(TopologyConfig::with_node_numbers(1))?;
|
||||
|
||||
let node = cluster
|
||||
.start_node_with(
|
||||
"override-api",
|
||||
StartNodeOptions {
|
||||
peers: PeerSelection::None,
|
||||
config_patch: None,
|
||||
persist_dir: None,
|
||||
}
|
||||
.create_patch(move |mut config| {
|
||||
println!("overriding API port to {api_port}");
|
||||
|
||||
let current_addr = config.user.http.backend_settings.address;
|
||||
|
||||
config.user.http.backend_settings.address =
|
||||
SocketAddr::new(current_addr.ip(), api_port);
|
||||
|
||||
Ok(config)
|
||||
}),
|
||||
)
|
||||
.await?
|
||||
.api;
|
||||
|
||||
node.consensus_info()
|
||||
.await
|
||||
.expect("consensus_info should succeed");
|
||||
|
||||
assert_eq!(resolved_port(&node), api_port);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored scenario_builder_api_port_override`"]
|
||||
async fn scenario_builder_api_port_override() -> Result<()> {
|
||||
let _ = try_init();
|
||||
// Required env vars (set on the command line when running this test):
|
||||
// - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
|
||||
// - `LOGOS_BLOCKCHAIN_CIRCUITS=...`
|
||||
// - `RUST_LOG=info` (optional)
|
||||
let api_port = random_api_port();
|
||||
|
||||
let mut scenario = ScenarioBuilder::topology_with(|t| {
|
||||
t.network_star()
|
||||
.nodes(1)
|
||||
.node_config_patch_with(0, move |mut config| {
|
||||
println!("overriding API port to {api_port}");
|
||||
|
||||
let current_addr = config.user.http.backend_settings.address;
|
||||
|
||||
config.user.http.backend_settings.address =
|
||||
SocketAddr::new(current_addr.ip(), api_port);
|
||||
|
||||
Ok(config)
|
||||
})
|
||||
})
|
||||
.with_run_duration(Duration::from_secs(1))
|
||||
.build()?;
|
||||
|
||||
let deployer = LocalDeployer::default();
|
||||
let runner = deployer.deploy(&scenario).await?;
|
||||
let handle = runner.run(&mut scenario).await?;
|
||||
|
||||
let client = handle
|
||||
.context()
|
||||
.node_clients()
|
||||
.any_client()
|
||||
.ok_or_else(|| anyhow::anyhow!("scenario did not expose any node clients"))?;
|
||||
|
||||
client
|
||||
.consensus_info()
|
||||
.await
|
||||
.expect("consensus_info should succeed");
|
||||
|
||||
assert_eq!(resolved_port(&client), api_port);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn random_api_port() -> u16 {
|
||||
let listener = TcpListener::bind("127.0.0.1:0").expect("bind random API port");
|
||||
listener.local_addr().expect("read API port").port()
|
||||
}
|
||||
|
||||
fn resolved_port(client: &ApiClient) -> u16 {
|
||||
client.base_url().port().unwrap_or_default()
|
||||
}
|
||||
110
examples/tests/orphan_manual_cluster.rs
Normal file
110
examples/tests/orphan_manual_cluster.rs
Normal file
@ -0,0 +1,110 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Result, anyhow};
|
||||
use testing_framework_core::{
|
||||
scenario::StartNodeOptions,
|
||||
topology::{
|
||||
config::{TopologyBuilder, TopologyConfig},
|
||||
configs::network::Libp2pNetworkLayout,
|
||||
},
|
||||
};
|
||||
use testing_framework_runner_local::LocalDeployer;
|
||||
use testing_framework_workflows::{start_node_with_timeout, wait_for_min_height};
|
||||
use tokio::time::{sleep, timeout};
|
||||
use tracing_subscriber::fmt::try_init;
|
||||
|
||||
const MIN_HEIGHT: u64 = 5;
|
||||
const INITIAL_READY_TIMEOUT: Duration = Duration::from_secs(500);
|
||||
const CATCH_UP_TIMEOUT: Duration = Duration::from_secs(300);
|
||||
const START_NODE_TIMEOUT: Duration = Duration::from_secs(90);
|
||||
const TEST_TIMEOUT: Duration = Duration::from_secs(600);
|
||||
const POLL_INTERVAL: Duration = Duration::from_secs(1);
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored orphan_manual_cluster`"]
|
||||
async fn orphan_manual_cluster() -> Result<()> {
|
||||
let _ = try_init();
|
||||
// Required env vars (set on the command line when running this test):
|
||||
// - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
|
||||
// - `NOMOS_KZGRS_PARAMS_PATH=...` (path to KZG params directory/file)
|
||||
// - `RUST_LOG=info` (optional; better visibility)
|
||||
|
||||
let config = TopologyConfig::with_node_numbers(3);
|
||||
timeout(TEST_TIMEOUT, async {
|
||||
let builder = TopologyBuilder::new(config).with_network_layout(Libp2pNetworkLayout::Full);
|
||||
|
||||
let deployer = LocalDeployer::new();
|
||||
let cluster = deployer.manual_cluster_with_builder(builder)?;
|
||||
// Nodes are stopped automatically when the cluster is dropped.
|
||||
|
||||
let node_a = start_node_with_timeout(
|
||||
&cluster,
|
||||
"a",
|
||||
StartNodeOptions::default(),
|
||||
START_NODE_TIMEOUT,
|
||||
)
|
||||
.await?
|
||||
.api;
|
||||
|
||||
let node_b = start_node_with_timeout(
|
||||
&cluster,
|
||||
"b",
|
||||
StartNodeOptions::default(),
|
||||
START_NODE_TIMEOUT,
|
||||
)
|
||||
.await?
|
||||
.api;
|
||||
|
||||
wait_for_min_height(
|
||||
&[node_a.clone(), node_b.clone()],
|
||||
MIN_HEIGHT,
|
||||
INITIAL_READY_TIMEOUT,
|
||||
POLL_INTERVAL,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let behind_node = start_node_with_timeout(
|
||||
&cluster,
|
||||
"c",
|
||||
StartNodeOptions::default(),
|
||||
START_NODE_TIMEOUT,
|
||||
)
|
||||
.await?
|
||||
.api;
|
||||
|
||||
timeout(CATCH_UP_TIMEOUT, async {
|
||||
loop {
|
||||
let node_a_info = node_a
|
||||
.consensus_info()
|
||||
.await
|
||||
.map_err(|err| anyhow!("node-a consensus_info failed: {err}"))?;
|
||||
|
||||
let node_b_info = node_b
|
||||
.consensus_info()
|
||||
.await
|
||||
.map_err(|err| anyhow!("node-b consensus_info failed: {err}"))?;
|
||||
|
||||
let behind_info = behind_node
|
||||
.consensus_info()
|
||||
.await
|
||||
.map_err(|err| anyhow!("node-c consensus_info failed: {err}"))?;
|
||||
|
||||
let initial_min_height = node_a_info.height.min(node_b_info.height);
|
||||
|
||||
if behind_info.height >= initial_min_height.saturating_sub(1) {
|
||||
return Ok::<(), anyhow::Error>(());
|
||||
}
|
||||
|
||||
sleep(POLL_INTERVAL).await;
|
||||
}
|
||||
})
|
||||
.await
|
||||
.map_err(|_| anyhow!("timeout waiting for behind node to catch up"))??;
|
||||
|
||||
Ok::<(), anyhow::Error>(())
|
||||
})
|
||||
.await
|
||||
.map_err(|_| anyhow!("test timeout exceeded"))??;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -1,33 +0,0 @@
|
||||
# Logos Testing Framework Extension
|
||||
|
||||
This directory contains the **Logos-specific extension layer** that plugs into the generic
|
||||
`testing-framework` core. The goal is to keep all Nomos logic in one place with a clear
|
||||
structure so it can be reviewed and moved into the `logos-blockchain-node` repo cleanly.
|
||||
|
||||
## Layout
|
||||
|
||||
- `runtime/env`
|
||||
Logos implementation of the core `Application` trait and runtime wiring.
|
||||
|
||||
- `runtime/ext`
|
||||
Logos extension glue for compose/k8s/cfgsync integration and scenario helpers.
|
||||
|
||||
- `runtime/workloads`
|
||||
Logos workloads and expectations (e.g., transaction workload, consensus liveness).
|
||||
|
||||
- `runtime/cfgsync`
|
||||
Logos cfgsync server/client and config bundling.
|
||||
|
||||
- `infra/assets/stack`
|
||||
Docker stack assets, scripts, and monitoring bundles.
|
||||
|
||||
- `infra/helm/logos-runner`
|
||||
Helm chart used by the k8s deployer.
|
||||
|
||||
## Extension Boundary
|
||||
|
||||
The **core** (`testing-framework/*`) remains Logos-agnostic. All app assumptions should
|
||||
live under `logos/runtime/*` and expose only the minimal surface needed by the core.
|
||||
|
||||
If you need to introduce new core capabilities, add them to the core and keep the Logos
|
||||
implementation in `logos/runtime/*`.
|
||||
@ -1,83 +0,0 @@
|
||||
use std::{process, time::Duration};
|
||||
|
||||
use anyhow::{Context as _, Result};
|
||||
use lb_ext::{
|
||||
CoreBuilderExt as _, LbcComposeDeployer as ComposeDeployer, LbcExtEnv, ScenarioBuilder,
|
||||
ScenarioBuilderExt as _, configs::network::Libp2pNetworkLayout,
|
||||
};
|
||||
use runner_examples::{DeployerKind, demo, read_env_any, read_topology_seed_or_default};
|
||||
use testing_framework_core::scenario::{Deployer as _, Runner};
|
||||
use testing_framework_runner_compose::ComposeRunnerError;
|
||||
use tracing::{info, warn};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
runner_examples::defaults::init_node_log_dir_defaults(DeployerKind::Compose);
|
||||
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
let nodes = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_NODES"], demo::DEFAULT_NODES);
|
||||
|
||||
let run_secs = read_env_any(&["LOGOS_BLOCKCHAIN_DEMO_RUN_SECS"], demo::DEFAULT_RUN_SECS);
|
||||
|
||||
info!(nodes, run_secs, "starting compose runner demo");
|
||||
|
||||
if let Err(err) = run_compose_case(nodes, Duration::from_secs(run_secs)).await {
|
||||
warn!("compose runner demo failed: {err:#}");
|
||||
process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
async fn run_compose_case(nodes: usize, run_duration: Duration) -> Result<()> {
|
||||
info!(
|
||||
nodes,
|
||||
duration_secs = run_duration.as_secs(),
|
||||
"building scenario plan"
|
||||
);
|
||||
|
||||
let seed = read_topology_seed_or_default();
|
||||
|
||||
let scenario = ScenarioBuilder::deployment_with(|t| {
|
||||
t.with_network_layout(Libp2pNetworkLayout::Star)
|
||||
.with_node_count(nodes)
|
||||
})
|
||||
.enable_node_control()
|
||||
.with_run_duration(run_duration)
|
||||
.with_deployment_seed(seed)
|
||||
.initialize_wallet(
|
||||
demo::DEFAULT_TOTAL_WALLETS as u64 * 100,
|
||||
demo::DEFAULT_TOTAL_WALLETS,
|
||||
)
|
||||
.transactions_with(|txs| {
|
||||
txs.rate(demo::DEFAULT_MIXED_TXS_PER_BLOCK)
|
||||
.users(demo::DEFAULT_TRANSACTION_WALLETS)
|
||||
})
|
||||
.expect_consensus_liveness();
|
||||
|
||||
let mut plan = scenario.build()?;
|
||||
|
||||
let deployer = ComposeDeployer::new();
|
||||
info!("deploying compose stack");
|
||||
|
||||
let runner: Runner<LbcExtEnv> = match deployer.deploy(&plan).await {
|
||||
Ok(runner) => runner,
|
||||
Err(ComposeRunnerError::DockerUnavailable) => {
|
||||
warn!("Docker is unavailable; cannot run compose demo");
|
||||
return Ok(());
|
||||
}
|
||||
Err(err) => return Err(anyhow::Error::new(err)).context("deploying compose stack failed"),
|
||||
};
|
||||
|
||||
if !runner.context().telemetry().is_configured() {
|
||||
warn!(
|
||||
"metrics querying is disabled; set LOGOS_BLOCKCHAIN_METRICS_QUERY_URL to enable PromQL queries"
|
||||
);
|
||||
}
|
||||
|
||||
info!("running scenario");
|
||||
runner
|
||||
.run(&mut plan)
|
||||
.await
|
||||
.context("running compose scenario failed")?;
|
||||
Ok(())
|
||||
}
|
||||
@ -1,6 +0,0 @@
|
||||
/// Default node count for the demo runners.
pub const DEFAULT_NODES: usize = 2;
/// Default scenario run duration, in seconds.
pub const DEFAULT_RUN_SECS: u64 = 60;

/// Wallets funded during scenario initialization.
pub const DEFAULT_TOTAL_WALLETS: usize = 200;
/// Subset of wallets that actively submit transactions.
pub const DEFAULT_TRANSACTION_WALLETS: usize = 20;
/// Transaction rate passed to the mixed-transaction workload builder.
pub const DEFAULT_MIXED_TXS_PER_BLOCK: u64 = 3;
|
||||
@ -1,41 +0,0 @@
|
||||
use std::{
|
||||
env,
|
||||
str::{self, FromStr},
|
||||
};
|
||||
|
||||
use testing_framework_core::topology::DeploymentSeed;
|
||||
|
||||
/// Fallback deployment seed: 32 zero bytes with the final byte set to 1,
/// giving a fixed, deterministic default topology.
const DEFAULT_TOPOLOGY_SEED: [u8; 32] = {
    let mut bytes = [0u8; 32];
    bytes[31] = 1;
    bytes
};
|
||||
|
||||
/// Return the first successfully parsed value among the given environment
/// variables, or `default` if none of them is set and parseable.
///
/// Unset keys and unparseable values are treated identically (skipped).
pub fn read_env_any<T>(keys: &[&str], default: T) -> T
where
    T: FromStr + Copy,
{
    for key in keys {
        let parsed = env::var(key).ok().and_then(|raw| raw.parse::<T>().ok());
        if let Some(value) = parsed {
            return value;
        }
    }
    default
}
|
||||
|
||||
pub fn read_topology_seed() -> Option<DeploymentSeed> {
|
||||
let raw = env::var("LOGOS_BLOCKCHAIN_TOPOLOGY_SEED").ok()?;
|
||||
let raw = raw.strip_prefix("0x").unwrap_or(&raw);
|
||||
if raw.len() != 64 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut bytes = [0u8; 32];
|
||||
for (idx, chunk) in raw.as_bytes().chunks(2).enumerate() {
|
||||
let chunk = str::from_utf8(chunk).ok()?;
|
||||
bytes[idx] = u8::from_str_radix(chunk, 16).ok()?;
|
||||
}
|
||||
|
||||
Some(DeploymentSeed::new(bytes))
|
||||
}
|
||||
|
||||
pub fn read_topology_seed_or_default() -> DeploymentSeed {
|
||||
read_topology_seed().unwrap_or_else(|| DeploymentSeed::new(DEFAULT_TOPOLOGY_SEED))
|
||||
}
|
||||
@ -1,14 +0,0 @@
|
||||
pub mod defaults;
|
||||
pub mod demo;
|
||||
pub mod env;
|
||||
|
||||
pub use env::{read_env_any, read_topology_seed, read_topology_seed_or_default};
|
||||
pub use lb_framework::ScenarioBuilderExt as NodeScenarioBuilderExt;
|
||||
pub use lb_workloads::{ChaosBuilderExt, ScenarioBuilderExt};
|
||||
|
||||
/// Which deployment backend an example binary targets.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum DeployerKind {
    /// Run nodes as local processes (the default).
    #[default]
    Local,
    /// Run the stack through the compose deployer.
    Compose,
}
|
||||
@ -1,120 +0,0 @@
|
||||
use std::{env, time::Duration};
|
||||
|
||||
use anyhow::Result;
|
||||
use lb_ext::{
|
||||
CoreBuilderExt as _, LbcComposeDeployer, LbcExtEnv, LbcK8sDeployer,
|
||||
ScenarioBuilder as ExtScenarioBuilder, ScenarioBuilderExt as _,
|
||||
};
|
||||
use lb_framework::{
|
||||
CoreBuilderExt as _, LbcEnv, LbcLocalDeployer, ScenarioBuilder as LocalScenarioBuilder,
|
||||
ScenarioBuilderExt as _, configs::network::NetworkLayout,
|
||||
};
|
||||
use testing_framework_core::{
|
||||
scenario::{Deployer as _, Runner},
|
||||
topology::DeploymentDescriptor,
|
||||
};
|
||||
|
||||
/// One shared set of scenario knobs, used to build the local and ext
/// variants identically so their shapes can be compared.
#[derive(Clone, Copy)]
struct ScenarioSpec {
    // Number of nodes in the deployment.
    nodes: usize,
    // Scenario run duration, in seconds.
    run_secs: u64,
    // Transaction rate handed to `txs.rate(...)`.
    tx_rate: u64,
    // Number of transacting users handed to `txs.users(...)`.
    tx_users: usize,
    // Wallets funded at initialization.
    total_wallets: usize,
}
|
||||
|
||||
/// Canonical spec used by the parity build test and the opt-in smoke tests.
fn shared_spec() -> ScenarioSpec {
    ScenarioSpec {
        nodes: 2,
        run_secs: 30,
        tx_rate: 5,
        tx_users: 500,
        total_wallets: 1000,
    }
}
|
||||
|
||||
/// Build the local-environment (`LbcEnv`) scenario from `spec`.
///
/// Intentionally mirrors `build_ext_scenario` call-for-call; keep the two
/// builder chains in sync when editing either.
fn build_local_scenario(
    spec: ScenarioSpec,
) -> Result<testing_framework_core::scenario::Scenario<LbcEnv>> {
    LocalScenarioBuilder::deployment_with(|d| {
        d.with_network_layout(NetworkLayout::Star)
            .with_node_count(spec.nodes)
    })
    .with_run_duration(Duration::from_secs(spec.run_secs))
    // First argument presumably total funds (wallets * 100) — confirm units.
    .initialize_wallet(spec.total_wallets as u64 * 100, spec.total_wallets)
    .transactions_with(|txs| txs.rate(spec.tx_rate).users(spec.tx_users))
    .expect_consensus_liveness()
    .build()
    .map_err(Into::into)
}
|
||||
|
||||
/// Build the extension-environment (`LbcExtEnv`) scenario from `spec`.
///
/// Intentionally mirrors `build_local_scenario` call-for-call; keep the two
/// builder chains in sync when editing either.
fn build_ext_scenario(
    spec: ScenarioSpec,
) -> Result<testing_framework_core::scenario::Scenario<LbcExtEnv>> {
    ExtScenarioBuilder::deployment_with(|d| {
        d.with_network_layout(NetworkLayout::Star)
            .with_node_count(spec.nodes)
    })
    .with_run_duration(Duration::from_secs(spec.run_secs))
    // First argument presumably total funds (wallets * 100) — confirm units.
    .initialize_wallet(spec.total_wallets as u64 * 100, spec.total_wallets)
    .transactions_with(|txs| txs.rate(spec.tx_rate).users(spec.tx_users))
    .expect_consensus_liveness()
    .build()
    .map_err(Into::into)
}
|
||||
|
||||
/// Pure build-time parity check: the local and ext scenarios produced from
/// the same spec must agree on node count, duration, and the number of
/// workloads/expectations. No nodes are started.
#[test]
fn parity_builds_have_same_shape() -> Result<()> {
    let spec = shared_spec();
    let local = build_local_scenario(spec)?;
    let ext = build_ext_scenario(spec)?;

    assert_eq!(
        local.deployment().node_count(),
        ext.deployment().node_count()
    );
    assert_eq!(local.duration(), ext.duration());
    assert_eq!(local.workloads().len(), ext.workloads().len());
    assert_eq!(local.expectations().len(), ext.expectations().len());

    Ok(())
}
|
||||
|
||||
/// Opt-in smoke run of the local scenario; no-op unless `TF_RUN_LOCAL_PARITY`
/// is set (requires a runnable local node environment).
#[tokio::test]
async fn local_parity_smoke_opt_in() -> Result<()> {
    if env::var("TF_RUN_LOCAL_PARITY").is_err() {
        return Ok(());
    }

    let mut scenario = build_local_scenario(shared_spec())?;
    let deployer = LbcLocalDeployer::default();
    let runner: Runner<LbcEnv> = deployer.deploy(&scenario).await?;
    runner.run(&mut scenario).await?;
    Ok(())
}
|
||||
|
||||
/// Opt-in smoke run of the ext scenario on compose; no-op unless
/// `TF_RUN_COMPOSE_PARITY` is set (requires Docker).
#[tokio::test]
async fn compose_parity_smoke_opt_in() -> Result<()> {
    if env::var("TF_RUN_COMPOSE_PARITY").is_err() {
        return Ok(());
    }

    let mut scenario = build_ext_scenario(shared_spec())?;
    let deployer = LbcComposeDeployer::default();
    let runner: Runner<LbcExtEnv> = deployer.deploy(&scenario).await?;
    runner.run(&mut scenario).await?;
    Ok(())
}
|
||||
|
||||
/// Opt-in smoke run of the ext scenario on k8s; no-op unless
/// `TF_RUN_K8S_PARITY` is set (requires a reachable cluster).
#[tokio::test]
async fn k8s_parity_smoke_opt_in() -> Result<()> {
    if env::var("TF_RUN_K8S_PARITY").is_err() {
        return Ok(());
    }

    let mut scenario = build_ext_scenario(shared_spec())?;
    let deployer = LbcK8sDeployer::default();
    let runner: Runner<LbcExtEnv> = deployer.deploy(&scenario).await?;
    runner.run(&mut scenario).await?;
    Ok(())
}
|
||||
@ -1,65 +0,0 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Result, anyhow};
|
||||
use async_trait::async_trait;
|
||||
use lb_framework::{CoreBuilderExt as _, LbcEnv, LbcLocalDeployer, ScenarioBuilder};
|
||||
use testing_framework_core::scenario::{
|
||||
Deployer, DynError, Expectation, RunContext, ScenarioError,
|
||||
};
|
||||
use tracing_subscriber::fmt::try_init;
|
||||
|
||||
/// Number of `check_during_capture` calls after which the expectation fails.
const FAIL_AFTER_CHECKS: usize = 2;

/// Expectation that deliberately errors during the capture phase, used to
/// verify the runner fails fast instead of waiting for final evaluation.
///
/// The hand-written `Default` impl (`checks: 0`) was identical to the
/// derived one, so it is replaced by `#[derive(Default)]`
/// (clippy::derivable_impls).
#[derive(Default)]
struct FailFastDuringCaptureExpectation {
    // Capture checks performed so far; starts at 0.
    checks: usize,
}
|
||||
|
||||
#[async_trait]
impl Expectation<LbcEnv> for FailFastDuringCaptureExpectation {
    fn name(&self) -> &str {
        "fail_fast_during_capture"
    }

    // Counts invocations; once FAIL_AFTER_CHECKS is reached it returns an
    // error, which should abort the run mid-capture.
    async fn check_during_capture(&mut self, _ctx: &RunContext<LbcEnv>) -> Result<(), DynError> {
        self.checks += 1;
        if self.checks >= FAIL_AFTER_CHECKS {
            return Err(format!(
                "intentional fail-fast trigger after {} capture checks",
                self.checks
            )
            .into());
        }

        Ok(())
    }

    // Final evaluation always passes: the failure is injected during capture
    // only, so reaching evaluate means fail-fast did not trigger.
    async fn evaluate(&mut self, _ctx: &RunContext<LbcEnv>) -> Result<(), DynError> {
        Ok(())
    }
}
|
||||
|
||||
/// End-to-end check that a failing capture-phase expectation surfaces as
/// `ScenarioError::ExpectationFailedDuringCapture` rather than completing
/// the run or failing with another error.
#[tokio::test]
#[ignore = "requires local node binary and open ports"]
async fn expectation_can_fail_fast_during_capture() -> Result<()> {
    let _ = try_init();

    let mut scenario = ScenarioBuilder::deployment_with(|topology| topology.with_node_count(1))
        .with_run_duration(Duration::from_secs(30))
        .with_expectation(FailFastDuringCaptureExpectation::default())
        .build()?;

    let deployer = LbcLocalDeployer::default();
    let runner = deployer.deploy(&scenario).await?;

    match runner.run(&mut scenario).await {
        Err(ScenarioError::ExpectationFailedDuringCapture(_)) => Ok(()),
        Err(other) => Err(anyhow!("unexpected scenario error: {other}")),
        Ok(_) => Err(anyhow!("expected fail-fast capture error, run succeeded")),
    }
}
|
||||
@ -1,284 +0,0 @@
|
||||
use std::{collections::HashSet, time::Duration};
|
||||
|
||||
use anyhow::Result;
|
||||
use lb_ext::{LbcExtEnv, ScenarioBuilder};
|
||||
use lb_framework::{
|
||||
DeploymentBuilder, LbcEnv, LbcLocalDeployer, LbcManualCluster, NodeHttpClient, TopologyConfig,
|
||||
configs::build_node_run_config,
|
||||
};
|
||||
use testing_framework_core::scenario::{
|
||||
Deployer as _, ExternalNodeSource, PeerSelection, StartNodeOptions,
|
||||
};
|
||||
use testing_framework_runner_local::ProcessDeployer;
|
||||
use tokio::time::sleep;
|
||||
|
||||
/// Two-node bootstrap cluster whose nodes act as external sources for a
/// second, separately managed cluster.
struct SeedCluster {
    // Kept only to keep the seed nodes alive; dropping it stops them.
    _cluster: LbcManualCluster,
    node_a: NodeHttpClient,
    node_b: NodeHttpClient,
    // Peer address strings (loopback-preferred) collected from the seed
    // nodes' reported listen addresses.
    bootstrap_peer_addresses: Vec<String>,
}
|
||||
|
||||
impl SeedCluster {
    /// External-node descriptors pointing at the two seed nodes' HTTP APIs.
    fn external_sources(&self) -> [ExternalNodeSource; 2] {
        [
            ExternalNodeSource::new("external-a".to_owned(), self.node_a.base_url().to_string()),
            ExternalNodeSource::new("external-b".to_owned(), self.node_b.base_url().to_string()),
        ]
    }
}
|
||||
|
||||
/// Manual-cluster path: start a second two-node cluster peered with the
/// seed cluster, register the seed nodes as external clients, and assert
/// the orchestrated client set covers all four endpoints and answers
/// `consensus_info`.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples --test external_sources_local -- --ignored`"]
async fn managed_local_plus_external_sources_are_orchestrated() -> Result<()> {
    let seed_cluster = start_seed_cluster().await?;
    let second_cluster_bootstrap_peers =
        parse_peer_addresses(&seed_cluster.bootstrap_peer_addresses)?;

    let second_topology = DeploymentBuilder::new(TopologyConfig::with_node_numbers(2)).build()?;
    let second_cluster = LbcLocalDeployer::new().manual_cluster_from_descriptors(second_topology);
    // Node "c": no managed peers; connect to the seed cluster via the
    // patched initial_peers list instead.
    let second_c = second_cluster
        .start_node_with(
            "c",
            StartNodeOptions::<LbcEnv>::default()
                .with_peers(PeerSelection::None)
                .create_patch({
                    let peers = second_cluster_bootstrap_peers.clone();
                    move |mut run_config| {
                        run_config
                            .user
                            .network
                            .backend
                            .initial_peers
                            .extend(peers.clone());
                        Ok(run_config)
                    }
                }),
        )
        .await?
        .client;

    // Node "d": peers with "node-c" plus the same seed-cluster bootstrap
    // peers. NOTE(review): same patch closure as "c"; a shared helper
    // would avoid the duplication.
    let second_d = second_cluster
        .start_node_with(
            "d",
            StartNodeOptions::<LbcEnv>::default()
                .with_peers(PeerSelection::Named(vec!["node-c".to_owned()]))
                .create_patch({
                    let peers = second_cluster_bootstrap_peers.clone();
                    move |mut run_config| {
                        run_config
                            .user
                            .network
                            .backend
                            .initial_peers
                            .extend(peers.clone());
                        Ok(run_config)
                    }
                }),
        )
        .await?
        .client;

    second_cluster.wait_network_ready().await?;

    wait_until_has_peers(&second_c, Duration::from_secs(30)).await?;
    wait_until_has_peers(&second_d, Duration::from_secs(30)).await?;

    // Fold the seed nodes into the second cluster's client registry.
    second_cluster.add_external_clients([seed_cluster.node_a.clone(), seed_cluster.node_b.clone()]);
    let orchestrated = second_cluster.node_clients();

    assert_eq!(
        orchestrated.len(),
        4,
        "expected 2 managed + 2 external clients"
    );

    let expected_endpoints: HashSet<String> = [
        seed_cluster.node_a.base_url().to_string(),
        seed_cluster.node_b.base_url().to_string(),
        second_c.base_url().to_string(),
        second_d.base_url().to_string(),
    ]
    .into_iter()
    .collect();

    let actual_endpoints: HashSet<String> = orchestrated
        .snapshot()
        .into_iter()
        .map(|client| client.base_url().to_string())
        .collect();

    assert_eq!(actual_endpoints, expected_endpoints);

    // Every orchestrated client (managed or external) must be reachable.
    for client in orchestrated.snapshot() {
        let _ = client.consensus_info().await?;
    }

    Ok(())
}
|
||||
|
||||
/// Scenario-builder path: patch every managed node's config with the seed
/// cluster's bootstrap peers, attach the seed nodes as external sources,
/// run the scenario, and assert the run context exposes managed plus
/// external clients.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples --test external_sources_local -- --ignored`"]
async fn scenario_managed_plus_external_sources_are_orchestrated() -> Result<()> {
    let seed_cluster = start_seed_cluster().await?;

    // Build once to obtain descriptors, then re-use the same builder with
    // per-node config overrides layered on.
    let base_builder = DeploymentBuilder::new(TopologyConfig::with_node_numbers(2));
    let base_descriptors = base_builder.clone().build()?;
    let mut deployment_builder = base_builder;
    let parsed_peers = parse_peer_addresses(&seed_cluster.bootstrap_peer_addresses)?;

    for node in base_descriptors.nodes() {
        let mut run_config = build_node_run_config(
            &base_descriptors,
            node,
            base_descriptors.config().node_config_override(node.index()),
        )
        .map_err(|error| anyhow::anyhow!(error.to_string()))?;
        // Inject the seed cluster's addresses as initial peers.
        run_config
            .user
            .network
            .backend
            .initial_peers
            .extend(parsed_peers.clone());
        deployment_builder = deployment_builder.with_node_config_override(node.index(), run_config);
    }

    let mut scenario = ScenarioBuilder::new(Box::new(deployment_builder))
        .with_run_duration(Duration::from_secs(5))
        .with_external_node(seed_cluster.external_sources()[0].clone())
        .with_external_node(seed_cluster.external_sources()[1].clone())
        .build()?;

    let deployer = ProcessDeployer::<LbcExtEnv>::default();
    let runner = deployer.deploy(&scenario).await?;
    let run_handle = runner.run(&mut scenario).await?;

    let clients = run_handle.context().node_clients().snapshot();

    assert_eq!(clients.len(), 4, "expected 2 managed + 2 external clients");

    let first_a_endpoint = seed_cluster.node_a.base_url().to_string();
    let first_b_endpoint = seed_cluster.node_b.base_url().to_string();

    // Only the managed (non-seed) clients are expected to *gain* peers here.
    for client in clients.iter().filter(|client| {
        let endpoint = client.base_url().to_string();
        endpoint != first_a_endpoint && endpoint != first_b_endpoint
    }) {
        wait_until_has_peers(client, Duration::from_secs(30)).await?;
    }

    let expected_endpoints: HashSet<String> = [
        seed_cluster.node_a.base_url().to_string(),
        seed_cluster.node_b.base_url().to_string(),
    ]
    .into_iter()
    .collect();

    let actual_endpoints: HashSet<String> = clients
        .iter()
        .map(|client| client.base_url().to_string())
        .collect();

    // Subset check (not equality): managed endpoints are also present.
    assert!(
        expected_endpoints.is_subset(&actual_endpoints),
        "scenario context should include external endpoints"
    );

    for client in clients {
        let _ = client.consensus_info().await?;
    }

    Ok(())
}
|
||||
|
||||
/// Start the two-node seed cluster ("a" standalone, "b" peered with
/// "node-a"), wait for network readiness, and collect the loopback peer
/// addresses other clusters can bootstrap from.
async fn start_seed_cluster() -> Result<SeedCluster> {
    let topology = DeploymentBuilder::new(TopologyConfig::with_node_numbers(2)).build()?;
    let cluster = LbcLocalDeployer::new().manual_cluster_from_descriptors(topology);
    let node_a = cluster
        .start_node_with("a", node_start_options(PeerSelection::None))
        .await?
        .client;
    let node_b = cluster
        .start_node_with(
            "b",
            node_start_options(PeerSelection::Named(vec!["node-a".to_owned()])),
        )
        .await?
        .client;
    cluster.wait_network_ready().await?;
    let bootstrap_peer_addresses = collect_loopback_peer_addresses(&node_a, &node_b).await?;

    Ok(SeedCluster {
        _cluster: cluster,
        node_a,
        node_b,
        bootstrap_peer_addresses,
    })
}
|
||||
|
||||
fn node_start_options(peers: PeerSelection) -> StartNodeOptions<LbcEnv> {
|
||||
let mut options = StartNodeOptions::<LbcEnv>::default();
|
||||
options.peers = peers;
|
||||
options
|
||||
}
|
||||
|
||||
/// Gather listen addresses from both seed nodes, preferring loopback
/// (`/127.0.0.1/`) entries; if a node reports no loopback listeners, all
/// of its addresses are used instead.
async fn collect_loopback_peer_addresses(
    node_a: &lb_framework::NodeHttpClient,
    node_b: &lb_framework::NodeHttpClient,
) -> Result<Vec<String>> {
    let mut peers = Vec::new();

    for info in [node_a.network_info().await?, node_b.network_info().await?] {
        let addresses: Vec<String> = info
            .listen_addresses
            .into_iter()
            .map(|addr| addr.to_string())
            .collect();

        let mut loopback: Vec<String> = addresses
            .iter()
            .filter(|addr| addr.contains("/127.0.0.1/"))
            .cloned()
            .collect();

        // Fallback: keep everything rather than returning nothing for
        // nodes that only listen on non-loopback interfaces.
        if loopback.is_empty() {
            loopback = addresses;
        }

        peers.extend(loopback);
    }

    Ok(peers)
}
|
||||
|
||||
fn parse_peer_addresses<T>(addresses: &[String]) -> Result<Vec<T>>
|
||||
where
|
||||
T: std::str::FromStr,
|
||||
T::Err: std::error::Error + Send + Sync + 'static,
|
||||
{
|
||||
addresses
|
||||
.iter()
|
||||
.map(|address| address.parse::<T>().map_err(Into::into))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Poll `network_info` every 500 ms until the node reports at least one
/// peer; fails once `timeout` has elapsed. Transient query errors are
/// swallowed and retried.
async fn wait_until_has_peers(client: &NodeHttpClient, timeout: Duration) -> Result<()> {
    let start = tokio::time::Instant::now();
    loop {
        if let Ok(network_info) = client.network_info().await {
            if network_info.n_peers > 0 {
                return Ok(());
            }
        }

        if start.elapsed() >= timeout {
            anyhow::bail!(
                "node {} did not report non-zero peer count within {:?}",
                client.base_url(),
                timeout
            );
        }

        sleep(Duration::from_millis(500)).await;
    }
}
|
||||
@ -1,90 +0,0 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Result, anyhow};
|
||||
use lb_framework::{
|
||||
CoreBuilderExt as _, DeploymentBuilder, LbcLocalDeployer, ScenarioBuilder, TopologyConfig,
|
||||
};
|
||||
use testing_framework_core::scenario::Deployer;
|
||||
use tracing_subscriber::fmt::try_init;
|
||||
|
||||
/// Restart and stop a single locally-deployed node through the runner's
/// node-control handle: the PID must change across restart and be absent
/// after stop. The scenario is then run briefly to confirm the runner
/// still works after manual control operations.
#[tokio::test]
#[ignore = "requires local node binary and open ports"]
async fn local_restart_node() -> Result<()> {
    let _ = try_init();
    let mut scenario = ScenarioBuilder::deployment_with(|t| t.with_node_count(1))
        .enable_node_control()
        .with_run_duration(Duration::from_secs(1))
        .build()?;

    let deployer = LbcLocalDeployer::default();
    let runner = deployer.deploy(&scenario).await?;
    let context = runner.context();

    let control = context
        .node_control()
        .ok_or_else(|| anyhow!("node control not available"))?;

    let node_name = "node-0";
    let old_pid = control
        .node_pid(node_name)
        .ok_or_else(|| anyhow!("missing node pid"))?;

    control
        .restart_node(node_name)
        .await
        .map_err(|error| anyhow!("failed to restart {node_name}: {error}"))?;

    let new_pid = control
        .node_pid(node_name)
        .ok_or_else(|| anyhow!("missing node pid"))?;
    assert_ne!(old_pid, new_pid, "expected a new process after restart");

    control
        .stop_node(node_name)
        .await
        .map_err(|error| anyhow!("failed to stop {node_name}: {error}"))?;
    assert!(
        control.node_pid(node_name).is_none(),
        "expected node pid to be absent after stop"
    );

    // The run itself is expected to succeed even after the manual stop.
    let _handle = runner.run(&mut scenario).await?;

    Ok(())
}
|
||||
|
||||
/// Same restart/stop PID assertions as `local_restart_node`, but driven
/// through the manual-cluster API instead of a scenario runner.
#[tokio::test]
#[ignore = "requires local node binary and open ports"]
async fn manual_cluster_restart_node() -> Result<()> {
    let _ = try_init();
    let deployer = LbcLocalDeployer::default();
    let descriptors = DeploymentBuilder::new(TopologyConfig::with_node_numbers(1)).build()?;
    let cluster = deployer.manual_cluster_from_descriptors(descriptors);

    let node_name = cluster.start_node("a").await?.name;

    let old_pid = cluster
        .node_pid(&node_name)
        .ok_or_else(|| anyhow!("missing node pid"))?;

    cluster
        .restart_node(&node_name)
        .await
        .map_err(|error| anyhow!("failed to restart {node_name}: {error}"))?;

    let new_pid = cluster
        .node_pid(&node_name)
        .ok_or_else(|| anyhow!("missing node pid"))?;
    assert_ne!(old_pid, new_pid, "expected a new process after restart");

    cluster
        .stop_node(&node_name)
        .await
        .map_err(|error| anyhow!("failed to stop {node_name}: {error}"))?;
    assert!(
        cluster.node_pid(&node_name).is_none(),
        "expected node pid to be absent after stop"
    );

    Ok(())
}
|
||||
@ -1,78 +0,0 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Result, anyhow};
|
||||
use lb_framework::{DeploymentBuilder, LbcEnv, LbcLocalDeployer, NodeHttpClient, TopologyConfig};
|
||||
use testing_framework_core::scenario::{PeerSelection, StartNodeOptions};
|
||||
use tokio::time::sleep;
|
||||
use tracing_subscriber::fmt::try_init;
|
||||
|
||||
/// Maximum height difference at which the two nodes count as converged.
const MAX_HEIGHT_DIFF: u64 = 5;
/// How long to wait for heights to converge before failing.
const CONVERGENCE_TIMEOUT: Duration = Duration::from_secs(60);
/// Interval between height polls while waiting for convergence.
const CONVERGENCE_POLL: Duration = Duration::from_secs(2);
|
||||
|
||||
/// Start node "a" alone, wait 30 s so it builds a chain, then start "c"
/// peered with it and verify both heights converge within
/// `MAX_HEIGHT_DIFF`.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_two_clusters_merge`"]
async fn manual_cluster_two_clusters_merge() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `POL_PROOF_DEV_MODE=true`
    // - `RUST_LOG=info` (optional)
    let config = TopologyConfig::with_node_numbers(2);
    let deployer = LbcLocalDeployer::new();
    let descriptors = DeploymentBuilder::new(config).build()?;
    let cluster = deployer.manual_cluster_from_descriptors(descriptors);
    // Nodes are stopped automatically when the cluster is dropped.

    println!("starting node a");

    let node_a = cluster
        .start_node_with("a", node_start_options(PeerSelection::None))
        .await?
        .client;

    // Give node "a" a head start so "c" genuinely has to catch up.
    println!("waiting briefly before starting c");
    sleep(Duration::from_secs(30)).await;

    println!("starting node c -> a");
    let node_c = cluster
        .start_node_with(
            "c",
            node_start_options(PeerSelection::Named(vec!["node-a".to_owned()])),
        )
        .await?
        .client;

    println!("waiting for network readiness: cluster a,c");
    cluster.wait_network_ready().await?;

    wait_for_convergence(&node_a, &node_c).await
}
|
||||
|
||||
/// Poll both nodes' heights every `CONVERGENCE_POLL` until they differ by
/// at most `MAX_HEIGHT_DIFF`, failing after `CONVERGENCE_TIMEOUT`.
async fn wait_for_convergence(node_a: &NodeHttpClient, node_c: &NodeHttpClient) -> Result<()> {
    let start = tokio::time::Instant::now();

    loop {
        let a_height = node_a.consensus_info().await?.height;
        let c_height = node_c.consensus_info().await?.height;
        let diff = a_height.abs_diff(c_height);

        if diff <= MAX_HEIGHT_DIFF {
            println!("final heights: node-a={a_height}, node-c={c_height}, diff={diff}");
            return Ok(());
        }

        if start.elapsed() >= CONVERGENCE_TIMEOUT {
            return Err(anyhow!(
                "height diff too large after timeout: {diff} > {MAX_HEIGHT_DIFF} (node-a={a_height}, node-c={c_height})"
            ));
        }

        sleep(CONVERGENCE_POLL).await;
    }
}
|
||||
|
||||
fn node_start_options(peers: PeerSelection) -> StartNodeOptions<LbcEnv> {
|
||||
let mut options = StartNodeOptions::<LbcEnv>::default();
|
||||
options.peers = peers;
|
||||
options
|
||||
}
|
||||
@ -1,128 +0,0 @@
|
||||
use std::{
|
||||
net::{SocketAddr, TcpListener},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::Result;
|
||||
use lb_framework::{
|
||||
DeploymentBuilder, LbcEnv, LbcLocalDeployer, NodeHttpClient, ScenarioBuilder, TopologyConfig,
|
||||
configs::build_node_run_config,
|
||||
};
|
||||
use testing_framework_core::scenario::{Deployer, PeerSelection, StartNodeOptions};
|
||||
use tracing_subscriber::fmt::try_init;
|
||||
|
||||
/// Override a node's API listen port through a manual-cluster config patch
/// and assert the client ends up talking to exactly that port.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored manual_cluster_api_port_override`"]
async fn manual_cluster_api_port_override() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `POL_PROOF_DEV_MODE=true`
    // - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
    // - `LOGOS_BLOCKCHAIN_CIRCUITS=...`
    // - `RUST_LOG=info` (optional)

    let api_port = random_api_port();

    let deployer = LbcLocalDeployer::new();
    let descriptors = DeploymentBuilder::new(TopologyConfig::with_node_numbers(1)).build()?;
    let cluster = deployer.manual_cluster_from_descriptors(descriptors.clone());

    let node = cluster
        .start_node_with(
            "override-api",
            StartNodeOptions::<LbcEnv>::default()
                .with_peers(PeerSelection::None)
                // Rewrite only the port, keeping the configured listen IP.
                .create_patch(move |mut run_config| {
                    println!("overriding API port to {api_port}");
                    let current_addr = run_config.user.api.backend.listen_address;
                    run_config.user.api.backend.listen_address =
                        SocketAddr::new(current_addr.ip(), api_port);
                    Ok(run_config)
                }),
        )
        .await?
        .client;

    cluster.wait_network_ready().await?;

    wait_until_consensus_ready(&node).await?;

    assert_eq!(resolved_port(&node), api_port);

    Ok(())
}
|
||||
|
||||
/// Same API-port override as the manual-cluster test, but applied through
/// a scenario-builder node-config override instead of a start patch.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored scenario_builder_api_port_override`"]
async fn scenario_builder_api_port_override() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `POL_PROOF_DEV_MODE=true`
    // - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
    // - `LOGOS_BLOCKCHAIN_CIRCUITS=...`
    // - `RUST_LOG=info` (optional)
    let api_port = random_api_port();

    // Build once to get descriptors, derive node 0's run config, then feed
    // the patched config back into the builder as an override.
    let base_builder = DeploymentBuilder::new(TopologyConfig::with_node_numbers(1));
    let base_descriptors = base_builder.clone().build()?;
    let base_node = base_descriptors.nodes().first().expect("node 0 descriptor");
    let mut run_config = build_node_run_config(
        &base_descriptors,
        base_node,
        base_descriptors
            .config()
            .node_config_override(base_node.index()),
    )
    .expect("build run config");
    println!("overriding API port to {api_port}");
    let current_addr = run_config.user.api.backend.listen_address;
    run_config.user.api.backend.listen_address = SocketAddr::new(current_addr.ip(), api_port);

    let mut scenario = ScenarioBuilder::new(Box::new(
        base_builder.with_node_config_override(0, run_config),
    ))
    .with_run_duration(Duration::from_secs(1))
    .build()?;

    let deployer = LbcLocalDeployer::default();
    let runner = deployer.deploy(&scenario).await?;
    let handle = runner.run(&mut scenario).await?;

    let client = handle
        .context()
        .random_node_client()
        .ok_or_else(|| anyhow::anyhow!("scenario did not expose any node clients"))?;

    client
        .consensus_info()
        .await
        .expect("consensus_info should succeed");

    assert_eq!(resolved_port(&client), api_port);

    Ok(())
}
|
||||
|
||||
/// Pick a currently-free loopback TCP port by binding port 0 and reading
/// back the OS-assigned port. The listener is dropped on return, so the
/// port is only *likely* still free when it is later reused.
fn random_api_port() -> u16 {
    TcpListener::bind("127.0.0.1:0")
        .expect("bind random API port")
        .local_addr()
        .expect("read API port")
        .port()
}
|
||||
|
||||
/// Port the client's base URL carries; 0 when the URL has no explicit port.
fn resolved_port(client: &NodeHttpClient) -> u16 {
    client.base_url().port().unwrap_or_default()
}
|
||||
|
||||
/// Retry `consensus_info` up to 120 times at 500 ms intervals (≈60 s)
/// until it succeeds; bail out with an error afterwards.
async fn wait_until_consensus_ready(client: &NodeHttpClient) -> Result<()> {
    const RETRIES: usize = 120;
    const DELAY_MS: u64 = 500;

    for _ in 0..RETRIES {
        if client.consensus_info().await.is_ok() {
            return Ok(());
        }

        tokio::time::sleep(Duration::from_millis(DELAY_MS)).await;
    }

    anyhow::bail!("consensus_info did not become ready in time")
}
|
||||
@ -1,111 +0,0 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Result, anyhow};
|
||||
use lb_framework::{
|
||||
DeploymentBuilder, LbcEnv, LbcLocalDeployer, NodeHttpClient, TopologyConfig,
|
||||
configs::network::NetworkLayout,
|
||||
};
|
||||
use lb_workloads::{start_node_with_timeout, wait_for_min_height};
|
||||
use testing_framework_core::scenario::StartNodeOptions;
|
||||
use tokio::time::{sleep, timeout};
|
||||
use tracing_subscriber::fmt::try_init;
|
||||
|
||||
/// Height the first two nodes must reach before the late node starts.
const MIN_HEIGHT: u64 = 5;
/// Budget for the initial two nodes to reach `MIN_HEIGHT`.
const INITIAL_READY_TIMEOUT: Duration = Duration::from_secs(500);
/// Budget for the late node to catch up with the others.
const CATCH_UP_TIMEOUT: Duration = Duration::from_secs(300);
/// Per-node startup budget.
const START_NODE_TIMEOUT: Duration = Duration::from_secs(90);
/// Overall budget for the whole test.
const TEST_TIMEOUT: Duration = Duration::from_secs(600);
/// Interval between height polls.
const POLL_INTERVAL: Duration = Duration::from_secs(1);
|
||||
|
||||
/// Entry point for the orphan-node flow, wrapped in an overall
/// `TEST_TIMEOUT` so a hung cluster cannot stall the test forever.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored orphan_manual_cluster`"]
async fn orphan_manual_cluster() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
    // - `NOMOS_KZGRS_PARAMS_PATH=...` (path to KZG params directory/file)
    // - `RUST_LOG=info` (optional; better visibility)

    let config = TopologyConfig::with_node_numbers(3);
    timeout(TEST_TIMEOUT, run_orphan_flow(config))
        .await
        .map_err(|_| anyhow!("test timeout exceeded"))??;

    Ok(())
}
|
||||
|
||||
async fn run_orphan_flow(config: TopologyConfig) -> Result<()> {
|
||||
let builder = DeploymentBuilder::new(config).with_network_layout(NetworkLayout::Full);
|
||||
let deployer = LbcLocalDeployer::new();
|
||||
let descriptors = builder.build()?;
|
||||
let cluster = deployer.manual_cluster_from_descriptors(descriptors);
|
||||
|
||||
let node_a = start_node_with_timeout(
|
||||
&cluster,
|
||||
"a",
|
||||
StartNodeOptions::<LbcEnv>::default(),
|
||||
START_NODE_TIMEOUT,
|
||||
)
|
||||
.await?
|
||||
.client;
|
||||
|
||||
let node_b = start_node_with_timeout(
|
||||
&cluster,
|
||||
"b",
|
||||
StartNodeOptions::<LbcEnv>::default(),
|
||||
START_NODE_TIMEOUT,
|
||||
)
|
||||
.await?
|
||||
.client;
|
||||
|
||||
wait_for_min_height(
|
||||
&[node_a.clone(), node_b.clone()],
|
||||
MIN_HEIGHT,
|
||||
INITIAL_READY_TIMEOUT,
|
||||
POLL_INTERVAL,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let behind_node = start_node_with_timeout(
|
||||
&cluster,
|
||||
"c",
|
||||
StartNodeOptions::<LbcEnv>::default(),
|
||||
START_NODE_TIMEOUT,
|
||||
)
|
||||
.await?
|
||||
.client;
|
||||
|
||||
wait_for_catch_up(&node_a, &node_b, &behind_node).await
|
||||
}
|
||||
|
||||
async fn wait_for_catch_up(
|
||||
node_a: &NodeHttpClient,
|
||||
node_b: &NodeHttpClient,
|
||||
behind_node: &NodeHttpClient,
|
||||
) -> Result<()> {
|
||||
timeout(CATCH_UP_TIMEOUT, async {
|
||||
loop {
|
||||
let node_a_height = node_height(node_a, "node-a").await?;
|
||||
let node_b_height = node_height(node_b, "node-b").await?;
|
||||
let behind_height = node_height(behind_node, "node-c").await?;
|
||||
|
||||
let initial_min_height = node_a_height.min(node_b_height);
|
||||
if behind_height >= initial_min_height.saturating_sub(1) {
|
||||
return Ok::<(), anyhow::Error>(());
|
||||
}
|
||||
|
||||
sleep(POLL_INTERVAL).await;
|
||||
}
|
||||
})
|
||||
.await
|
||||
.map_err(|_| anyhow!("timeout waiting for behind node to catch up"))?
|
||||
}
|
||||
|
||||
async fn node_height(node: &NodeHttpClient, name: &str) -> Result<u64> {
|
||||
let info = node
|
||||
.consensus_info()
|
||||
.await
|
||||
.map_err(|error| anyhow!("{name} consensus_info failed: {error}"))?;
|
||||
|
||||
Ok(info.height)
|
||||
}
|
||||
@ -1,106 +0,0 @@
|
||||
# syntax=docker/dockerfile:1
# check=skip=SecretsUsedInArgOrEnv
# Ignore warnings about sensitive information as this is test data.

# Build args declared before the first FROM so they can parameterise stages.
ARG VERSION
ARG LOGOS_BLOCKCHAIN_NODE_REV
ARG LOGOS_BLOCKCHAIN_NODE_USE_LOCAL_CONTEXT

# ===========================
# BUILD IMAGE
# ===========================

FROM rust:1.91.0-slim-bookworm AS builder

# Re-declare args inside the stage (global ARGs are not visible in stages).
ARG VERSION
ARG LOGOS_BLOCKCHAIN_NODE_REV
ARG LOGOS_BLOCKCHAIN_FORCE_BUILD
ARG LOGOS_BLOCKCHAIN_NODE_USE_LOCAL_CONTEXT

LABEL maintainer="augustinas@status.im" \
      source="https://github.com/logos-co/nomos-node" \
      description="Logos testnet build image"

WORKDIR /workspace
COPY . .

# Reduce debug artifact size.
ENV CARGO_PROFILE_DEV_DEBUG=0
ENV LOGOS_BLOCKCHAIN_NODE_REV=${LOGOS_BLOCKCHAIN_NODE_REV}
ENV LOGOS_BLOCKCHAIN_FORCE_BUILD=${LOGOS_BLOCKCHAIN_FORCE_BUILD}
ENV LOGOS_BLOCKCHAIN_NODE_USE_LOCAL_CONTEXT=${LOGOS_BLOCKCHAIN_NODE_USE_LOCAL_CONTEXT}

# Install dependencies needed for building RocksDB and for circuit tooling.
RUN apt-get update && apt-get install -yq \
    git gcc g++ clang make cmake m4 xz-utils libgmp-dev libssl-dev pkg-config ca-certificates curl wget file \
    && rm -rf /var/lib/apt/lists/*

# `|| true`: not every checkout ships all three scripts; missing ones are skipped.
RUN chmod +x \
    /workspace/logos/infra/assets/stack/scripts/docker/prepare_binaries.sh \
    /workspace/logos/infra/assets/stack/scripts/docker/build_cfgsync.sh \
    /workspace/logos/infra/assets/stack/scripts/setup-logos-blockchain-circuits.sh \
    || true

RUN /workspace/logos/infra/assets/stack/scripts/setup-logos-blockchain-circuits.sh "${VERSION}" /opt/circuits

ENV LOGOS_BLOCKCHAIN_CIRCUITS=/opt/circuits

RUN /workspace/scripts/build/build-rapidsnark.sh /opt/circuits

# Either vendor a local nomos-node checkout from the bind-mounted build
# context (rewriting host-local `file://` and `../nomos-testing` manifest
# entries so they resolve inside the container), or clone the pinned revision.
RUN --mount=type=bind,from=nomos_node,source=.,target=/nomos-node-local,ro \
    if [ "${LOGOS_BLOCKCHAIN_NODE_USE_LOCAL_CONTEXT}" = "1" ]; then \
    rm -rf /nomos-node && mkdir -p /nomos-node && cp -a /nomos-node-local/. /nomos-node/ ; \
    if grep -q 'file:///Users/.*nomos-node' /workspace/Cargo.toml; then \
    sed -i "s#git = \\\"file:///Users/[^\\\"]*nomos-node\\\"#path = \\\"/nomos-node\\\"#g" /workspace/Cargo.toml; \
    fi; \
    # Local checkout may reference ../nomos-testing paths; remap them to /workspace in container.
    if [ -f /nomos-node/Cargo.toml ]; then \
    sed -i 's#\.\./nomos-testing/#../workspace/#g' /nomos-node/Cargo.toml; \
    fi; \
    if [ -f /nomos-node/tests/Cargo.toml ]; then \
    sed -i 's#\.\./\.\./nomos-testing/#../../workspace/#g' /nomos-node/tests/Cargo.toml; \
    fi; \
    else \
    if grep -q 'file:///Users/.*nomos-node' /workspace/Cargo.toml; then \
    sed -i "s#git = \\\"file:///Users/[^\\\"]*nomos-node\\\"#git = \\\"https://github.com/logos-co/nomos-node.git\\\", rev = \\\"${LOGOS_BLOCKCHAIN_NODE_REV}\\\"#g" /workspace/Cargo.toml; \
    fi; \
    rm -rf /nomos-node; \
    git clone https://github.com/logos-co/nomos-node.git /nomos-node; \
    cd /nomos-node; \
    git fetch origin "${LOGOS_BLOCKCHAIN_NODE_REV}"; \
    git checkout "${LOGOS_BLOCKCHAIN_NODE_REV}"; \
    fi

RUN /workspace/logos/infra/assets/stack/scripts/docker/prepare_binaries.sh

# Strip host-local patches to avoid unresolved absolute paths inside containers.
RUN sed -i '/^\[patch\."https:\/\/github.com\/logos-co\/nomos-node"\]/,/^$/d' /workspace/Cargo.toml

RUN /workspace/logos/infra/assets/stack/scripts/docker/build_cfgsync.sh

# ===========================
# BASE RUNTIME IMAGE
# ===========================

FROM ubuntu:24.04 AS base

LABEL maintainer="augustinas@status.im" \
      source="https://github.com/logos-co/nomos-node" \
      description="Logos base runtime image (testing)"

# Runtime shared libraries only; the build toolchain stays in the builder stage.
RUN apt-get update && apt-get install -yq \
    libstdc++6 \
    libgmp10 \
    libgomp1 \
    libssl3 \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/*

COPY --from=builder /workspace/artifacts/logos-blockchain-node /usr/bin/logos-blockchain-node
COPY --from=builder /workspace/artifacts/cfgsync-server /usr/bin/cfgsync-server
COPY --from=builder /workspace/artifacts/cfgsync-client /usr/bin/cfgsync-client
COPY --from=builder /opt/circuits /opt/circuits

ENV LOGOS_BLOCKCHAIN_CIRCUITS=/opt/circuits

# NOTE(review): exact port-to-service mapping not evident from this file;
# presumably API / cfgsync / metrics / p2p -- confirm against run scripts.
EXPOSE 3000 8080 9000 60000
|
||||
@ -1,13 +0,0 @@
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Build the cfgsync server and client with the pol-dev-mode cfg enabled and
# stage both binaries into /workspace/artifacts.
#
# Both binaries live in the same manifest, so build them in a single cargo
# invocation (cargo accepts repeated --bin flags); the previous two separate
# invocations compiled the shared dependency graph twice.
RUSTFLAGS='--cfg feature="pol-dev-mode"' \
    cargo build --manifest-path /workspace/testing-framework/tools/cfgsync-runtime/Cargo.toml \
    --bin cfgsync-server --bin cfgsync-client

cp /workspace/target/debug/cfgsync-server /workspace/artifacts/cfgsync-server
cp /workspace/target/debug/cfgsync-client /workspace/artifacts/cfgsync-client

# Incremental-compilation caches are large and useless inside the image.
rm -rf /workspace/target/debug/incremental
|
||||
@ -1,67 +0,0 @@
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Stage the logos-blockchain-node binary into /workspace/artifacts, preferring
# a bundled prebuilt binary when it matches the container architecture and
# LOGOS_BLOCKCHAIN_FORCE_BUILD is unset; otherwise build from source.

LOGOS_BLOCKCHAIN_NODE_REV="${LOGOS_BLOCKCHAIN_NODE_REV:?LOGOS_BLOCKCHAIN_NODE_REV build arg missing}"

mkdir -p /workspace/artifacts

TARGET_ARCH="$(uname -m)"

PREBUILT_BIN="logos/infra/assets/stack/bin/logos-blockchain-node"

# True when a prebuilt node binary is bundled with the stack assets.
# (Previously this tested the SAME path twice — a copy-paste duplicate.)
have_prebuilt() {
    [ -f "${PREBUILT_BIN}" ]
}

# True when the prebuilt binary is an ELF whose architecture matches the
# container's CPU architecture.
bin_matches_arch() {
    local info
    info="$(file -b "${PREBUILT_BIN}" 2>/dev/null || true)"
    case "${info}" in
        *ELF*) : ;;
        *) return 1 ;;
    esac

    local pattern
    case "${TARGET_ARCH}" in
        x86_64) pattern="x86-64|x86_64" ;;
        aarch64|arm64) pattern="arm64|aarch64" ;;
        *) pattern="${TARGET_ARCH}" ;;
    esac

    echo "${info}" | grep -Eqi "${pattern}"
}

# Single decision chain: the old version fell through to a second `if` that
# printed a misleading "do not match target architecture" message even when
# LOGOS_BLOCKCHAIN_FORCE_BUILD was the actual reason for rebuilding.
if [ -n "${LOGOS_BLOCKCHAIN_FORCE_BUILD:-}" ]; then
    echo "LOGOS_BLOCKCHAIN_FORCE_BUILD is set; rebuilding logos-blockchain binaries from source"
elif have_prebuilt && bin_matches_arch; then
    echo "Using prebuilt logos-blockchain binaries from logos/infra/assets/stack/bin"
    cp "${PREBUILT_BIN}" /workspace/artifacts/logos-blockchain-node
    exit 0
elif have_prebuilt; then
    echo "Prebuilt logos-blockchain binaries do not match target architecture (${TARGET_ARCH}); rebuilding from source"
else
    echo "Prebuilt logos-blockchain binaries missing; building from source"
fi

echo "Building logos-blockchain binaries from source (rev ${LOGOS_BLOCKCHAIN_NODE_REV})"
if [ "${LOGOS_BLOCKCHAIN_NODE_USE_LOCAL_CONTEXT:-0}" = "1" ] && [ -d /nomos-node ]; then
    echo "Using local nomos-node checkout from Docker build context"
    cd /nomos-node
else
    git clone https://github.com/logos-co/nomos-node.git /tmp/nomos-node
    cd /tmp/nomos-node
    git fetch --depth 1 origin "${LOGOS_BLOCKCHAIN_NODE_REV}"
    git checkout "${LOGOS_BLOCKCHAIN_NODE_REV}"
    git reset --hard
    git clean -fdx
fi

# Enable pol-dev-mode and embed verification keys for proof validation.
RUSTFLAGS='--cfg feature="pol-dev-mode" --cfg feature="build-verification-key"' \
CARGO_FEATURE_BUILD_VERIFICATION_KEY=1 \
cargo build --all-features -p logos-blockchain-node

cp target/debug/logos-blockchain-node /workspace/artifacts/logos-blockchain-node

rm -rf target/debug/incremental
|
||||
@ -1,6 +0,0 @@
|
||||
#!/bin/sh

set -e

# Run from /etc/logos so relative paths in cfgsync.yaml (e.g. the bundle
# file name) resolve against the mounted stack assets, then replace this
# shell with the server process.
cd /etc/logos
exec /usr/bin/cfgsync-server /etc/logos/cfgsync.yaml
|
||||
@ -1,2 +0,0 @@
|
||||
#!/bin/sh
# Thin wrapper: delegate to the shared run_logos.sh entrypoint in "node" mode.
exec /etc/logos/scripts/run_logos.sh node
|
||||
@ -1,34 +0,0 @@
|
||||
{{- /* Chart name; base for the other name helpers. */ -}}
{{- define "logos-runner.chart" -}}
{{- .Chart.Name -}}
{{- end -}}

{{- /* Alias of the chart name. */ -}}
{{- define "logos-runner.name" -}}
{{- include "logos-runner.chart" . -}}
{{- end -}}

{{- /* Release-scoped fullname: fullnameOverride when set, otherwise the
release name; truncated to 63 chars with any trailing "-" removed. */ -}}
{{- define "logos-runner.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- printf "%s" .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}

{{- /* Common labels stamped onto every rendered object. */ -}}
{{- define "logos-runner.labels" -}}
app.kubernetes.io/name: {{ include "logos-runner.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}

{{- /* Labels used for pod selection. */ -}}
{{- define "logos-runner.selectorLabels" -}}
app.kubernetes.io/name: {{ include "logos-runner.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}

{{- /* Per-node labels; invoked with a dict carrying "root" (the top-level
context) and "index" (the node's ordinal). */ -}}
{{- define "logos-runner.nodeLabels" -}}
{{- $root := index . "root" -}}
{{- $index := index . "index" -}}
app.kubernetes.io/name: {{ include "logos-runner.chart" $root }}
app.kubernetes.io/instance: {{ $root.Release.Name }}
logos/logical-role: node
logos/node-index: "{{ $index }}"
{{- end -}}
|
||||
@ -1,15 +0,0 @@
|
||||
{{- /* ClusterIP Service exposing the cfgsync server inside the cluster,
selecting pods labelled logos/component=cfgsync. */ -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "logos-runner.fullname" . }}-cfgsync
  labels:
    {{- include "logos-runner.labels" . | nindent 4 }}
spec:
  type: ClusterIP
  selector:
    {{- include "logos-runner.selectorLabels" . | nindent 4 }}
    logos/component: cfgsync
  ports:
    - name: http
      port: {{ .Values.cfgsync.port }}
      targetPort: http
|
||||
@ -1,37 +0,0 @@
|
||||
{{- /* ConfigMap bundling the cfgsync config/bundle YAML and the runtime
shell scripts. Every key falls back to an empty (indented) document when
the corresponding value is unset, so the keys always exist. */ -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "logos-runner.fullname" . }}-assets
  labels:
    {{- include "logos-runner.labels" . | nindent 4 }}
data:
  cfgsync.yaml: |
{{- if .Values.cfgsync.config }}
{{ .Values.cfgsync.config | indent 4 }}
{{- else }}
{{ "" | indent 4 }}
{{- end }}
  cfgsync.bundle.yaml: |
{{- if .Values.cfgsync.bundle }}
{{ .Values.cfgsync.bundle | indent 4 }}
{{- else }}
{{ "" | indent 4 }}
{{- end }}
  run_cfgsync.sh: |
{{- if .Values.scripts.runCfgsyncSh }}
{{ .Values.scripts.runCfgsyncSh | indent 4 }}
{{- else }}
{{ "" | indent 4 }}
{{- end }}
  run_logos.sh: |
{{- if .Values.scripts.runLogosSh }}
{{ .Values.scripts.runLogosSh | indent 4 }}
{{- else }}
{{ "" | indent 4 }}
{{- end }}
  run_logos_node.sh: |
{{- if .Values.scripts.runLogosNodeSh }}
{{ .Values.scripts.runLogosNodeSh | indent 4 }}
{{- else }}
{{ "" | indent 4 }}
{{- end }}
|
||||
@ -1,32 +0,0 @@
|
||||
[package]
|
||||
description = { workspace = true }
|
||||
edition = { workspace = true }
|
||||
license = { workspace = true }
|
||||
name = "lb-ext"
|
||||
version = { workspace = true }
|
||||
|
||||
[dependencies]
|
||||
# Workspace crates
|
||||
cfgsync_runtime = { workspace = true }
|
||||
lb-framework = { workspace = true }
|
||||
testing-framework-core = { workspace = true }
|
||||
testing-framework-env = { workspace = true }
|
||||
testing-framework-runner-compose = { workspace = true }
|
||||
testing-framework-runner-k8s = { workspace = true }
|
||||
testing-framework-runner-local = { workspace = true }
|
||||
|
||||
# Logos / Nomos deps
|
||||
lb_http_api_common = { workspace = true }
|
||||
|
||||
# External
|
||||
anyhow = "1"
|
||||
async-trait = { workspace = true }
|
||||
kube = { default-features = false, features = ["client", "rustls-tls"], version = "0.87" }
|
||||
reqwest = { features = ["json"], workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_yaml = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { features = ["macros", "process", "rt-multi-thread", "time"], workspace = true }
|
||||
tracing = { workspace = true }
|
||||
uuid = { features = ["v4"], workspace = true }
|
||||
@ -1,85 +0,0 @@
|
||||
use anyhow::Result;
|
||||
pub(crate) use cfgsync_runtime::render::CfgsyncOutputPaths;
|
||||
use cfgsync_runtime::{
|
||||
bundle::build_cfgsync_bundle_with_hostnames,
|
||||
render::{
|
||||
CfgsyncConfigOverrides, RenderedCfgsync, ensure_bundle_path,
|
||||
render_cfgsync_yaml_from_template, write_rendered_cfgsync,
|
||||
},
|
||||
};
|
||||
use reqwest::Url;
|
||||
use serde_yaml::{Mapping, Value};
|
||||
use testing_framework_core::cfgsync::CfgsyncEnv;
|
||||
|
||||
/// Caller-tunable knobs applied when rendering the cfgsync server config.
pub(crate) struct CfgsyncRenderOptions {
    // Port override for the cfgsync server; `None` keeps the template value.
    pub port: Option<u16>,
    // Bundle file location; filled in by `ensure_bundle_path` when left unset.
    pub bundle_path: Option<String>,
    // Maps to `timeout_floor_secs` in the rendered overrides.
    pub min_timeout_secs: Option<u64>,
    // OTLP metrics ingest URL forwarded (stringified) into the rendered config.
    pub metrics_otlp_ingest_url: Option<Url>,
}
|
||||
|
||||
pub(crate) fn render_cfgsync_from_template<E: CfgsyncEnv>(
|
||||
topology: &E::Deployment,
|
||||
hostnames: &[String],
|
||||
options: CfgsyncRenderOptions,
|
||||
) -> Result<RenderedCfgsync> {
|
||||
let cfg = build_cfgsync_server_config();
|
||||
let overrides = build_overrides::<E>(topology, options);
|
||||
let config_yaml = render_cfgsync_yaml_from_template(cfg, &overrides)?;
|
||||
let bundle = build_cfgsync_bundle_with_hostnames::<E>(topology, hostnames)?;
|
||||
let bundle_yaml = serde_yaml::to_string(&bundle)?;
|
||||
|
||||
Ok(RenderedCfgsync {
|
||||
config_yaml,
|
||||
bundle_yaml,
|
||||
})
|
||||
}
|
||||
|
||||
fn build_cfgsync_server_config() -> Value {
|
||||
let mut root = Mapping::new();
|
||||
root.insert(
|
||||
Value::String("port".to_string()),
|
||||
Value::Number(4400_u64.into()),
|
||||
);
|
||||
|
||||
root.insert(
|
||||
Value::String("bundle_path".to_string()),
|
||||
Value::String("cfgsync.bundle.yaml".to_string()),
|
||||
);
|
||||
|
||||
Value::Mapping(root)
|
||||
}
|
||||
|
||||
pub(crate) fn render_and_write_cfgsync_from_template<E: CfgsyncEnv>(
|
||||
topology: &E::Deployment,
|
||||
hostnames: &[String],
|
||||
mut options: CfgsyncRenderOptions,
|
||||
output: CfgsyncOutputPaths<'_>,
|
||||
) -> Result<RenderedCfgsync> {
|
||||
ensure_bundle_path(&mut options.bundle_path, output.bundle_path);
|
||||
|
||||
let rendered = render_cfgsync_from_template::<E>(topology, hostnames, options)?;
|
||||
write_rendered_cfgsync(&rendered, output)?;
|
||||
|
||||
Ok(rendered)
|
||||
}
|
||||
|
||||
fn build_overrides<E: CfgsyncEnv>(
|
||||
topology: &E::Deployment,
|
||||
options: CfgsyncRenderOptions,
|
||||
) -> CfgsyncConfigOverrides {
|
||||
let CfgsyncRenderOptions {
|
||||
port,
|
||||
bundle_path,
|
||||
min_timeout_secs,
|
||||
metrics_otlp_ingest_url,
|
||||
} = options;
|
||||
|
||||
CfgsyncConfigOverrides {
|
||||
port,
|
||||
n_hosts: Some(E::nodes(topology).len()),
|
||||
timeout_floor_secs: min_timeout_secs,
|
||||
bundle_path,
|
||||
metrics_otlp_ingest_url: metrics_otlp_ingest_url.map(|url| url.to_string()),
|
||||
}
|
||||
}
|
||||
@ -1,393 +0,0 @@
|
||||
use std::{
|
||||
env,
|
||||
path::{Path, PathBuf},
|
||||
process::Command as StdCommand,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::anyhow;
|
||||
use async_trait::async_trait;
|
||||
use lb_framework::{
|
||||
NodeHttpClient,
|
||||
internal::{DeploymentPlan, NodePlan},
|
||||
};
|
||||
use lb_http_api_common::paths;
|
||||
use reqwest::Url;
|
||||
use testing_framework_core::{adjust_timeout, scenario::DynError};
|
||||
use testing_framework_env as tf_env;
|
||||
use testing_framework_runner_compose::{
|
||||
ComposeDeployEnv, ComposeDescriptor, ConfigServerHandle, EnvEntry, NodeDescriptor,
|
||||
docker::commands::run_docker_command,
|
||||
infrastructure::{ports::NodeHostPorts, template::repository_root},
|
||||
};
|
||||
use tokio::process::Command;
|
||||
use tracing::{debug, info, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
LbcExtEnv,
|
||||
cfgsync::{CfgsyncOutputPaths, CfgsyncRenderOptions, render_and_write_cfgsync_from_template},
|
||||
constants::DEFAULT_CFGSYNC_PORT,
|
||||
};
|
||||
|
||||
const NODE_ENTRYPOINT: &str = "/etc/logos/scripts/run_logos_node.sh";
|
||||
const CFGSYNC_START_TIMEOUT: Duration = Duration::from_secs(180);
|
||||
const DEFAULT_COMPOSE_RUNNER_HOST: &str = "127.0.0.1";
|
||||
const DEFAULT_COMPOSE_TEST_IMAGE: &str = "logos-blockchain-testing:local";
|
||||
const GHCR_TESTNET_IMAGE: &str = "ghcr.io/logos-co/nomos:testnet";
|
||||
const DEFAULT_CFGSYNC_HOST: &str = "cfgsync";
|
||||
|
||||
#[derive(Debug)]
/// Handle to the dockerised cfgsync server container; removal happens in
/// `ConfigServerHandle::shutdown`.
pub struct LbcCfgsyncHandle {
    // Docker container name, unique per run (see `cfgsync_container_name`).
    name: String,
    // Set once the container was removed (or deliberately preserved), making
    // shutdown idempotent.
    stopped: bool,
}

impl LbcCfgsyncHandle {
    /// Wrap a freshly started container; `shutdown` has not run yet.
    fn new(name: String) -> Self {
        Self {
            name,
            stopped: false,
        }
    }
}
|
||||
|
||||
impl ConfigServerHandle for LbcCfgsyncHandle {
    /// Force-remove the cfgsync container with `docker rm -f`. Idempotent:
    /// returns immediately once `stopped` is set. Failures are logged via
    /// `warn!` rather than propagated.
    fn shutdown(&mut self) {
        if self.stopped {
            return;
        }
        let name = self.name.clone();
        // NOTE(review): blocking `std::process::Command` here, unlike the
        // async start path -- presumably so shutdown also works outside an
        // async runtime; confirm against callers.
        let status = StdCommand::new("docker")
            .arg("rm")
            .arg("-f")
            .arg(&name)
            .status();
        match status {
            Ok(status) if status.success() => {
                debug!(container = name, "removed cfgsync container");
            }
            // `docker rm` ran but reported a non-zero exit status.
            Ok(status) => {
                warn!(container = name, status = ?status, "failed to remove cfgsync container");
            }
            // The docker binary could not even be spawned.
            Err(err) => {
                warn!(container = name, error = ?err, "failed to spawn docker rm for cfgsync container");
            }
        }
        self.stopped = true;
    }

    /// Mark the container as preserved: `shutdown` becomes a no-op and the
    /// container keeps running.
    fn mark_preserved(&mut self) {
        self.stopped = true;
    }

    /// Name of the Docker container backing this handle.
    fn container_name(&self) -> Option<&str> {
        Some(self.name.as_str())
    }
}
|
||||
|
||||
#[async_trait]
impl ComposeDeployEnv for LbcExtEnv {
    type ConfigHandle = LbcCfgsyncHandle;

    /// Build compose service descriptors for every node in the topology,
    /// sharing one image/platform resolution across all of them.
    fn compose_descriptor(topology: &Self::Deployment, cfgsync_port: u16) -> ComposeDescriptor {
        // Port 0 means "use the crate default" (see `normalized_cfgsync_port`).
        let cfgsync_port = normalized_cfgsync_port(cfgsync_port);
        let (image, platform) = resolve_image();
        let nodes = topology
            .nodes()
            .iter()
            .enumerate()
            .map(|(index, node)| {
                build_compose_node_descriptor(index, node, cfgsync_port, &image, platform.clone())
            })
            .collect();

        ComposeDescriptor::new(nodes)
    }

    /// Re-render the cfgsync config for `topology` and write it to `path`,
    /// with the bundle file written alongside it.
    fn update_cfgsync_config(
        path: &Path,
        topology: &Self::Deployment,
        port: u16,
        metrics_otlp_ingest_url: Option<&Url>,
    ) -> Result<(), DynError> {
        debug!(
            path = %path.display(),
            port,
            nodes = topology.nodes().len(),
            "updating cfgsync template"
        );
        let bundle_path = cfgsync_bundle_path(path);
        let hostnames = topology_hostnames(topology);
        let options = cfgsync_render_options(port, metrics_otlp_ingest_url);

        render_and_write_cfgsync_from_template::<lb_framework::LbcEnv>(
            topology,
            &hostnames,
            options,
            CfgsyncOutputPaths {
                config_path: path,
                bundle_path: &bundle_path,
            },
        )?;
        Ok(())
    }

    /// Start the cfgsync server in its own container on `network`, mounting
    /// the directory containing `cfgsync_path` read-only at /etc/logos.
    async fn start_cfgsync(
        cfgsync_path: &Path,
        port: u16,
        network: &str,
    ) -> Result<Self::ConfigHandle, DynError> {
        let testnet_dir = cfgsync_dir(cfgsync_path)?;
        let (image, _) = resolve_image();
        let container_name = cfgsync_container_name();

        debug!(
            container = %container_name,
            image,
            cfgsync = %cfgsync_path.display(),
            port,
            "starting cfgsync container"
        );

        let command =
            build_cfgsync_docker_run_command(&container_name, network, port, testnet_dir, &image);

        run_docker_command(
            command,
            adjust_timeout(CFGSYNC_START_TIMEOUT),
            "docker run cfgsync server",
        )
        .await
        .map_err(|err| anyhow!(err.to_string()))?;

        info!(container = %container_name, port, "cfgsync container started");

        Ok(LbcCfgsyncHandle::new(container_name))
    }

    /// HTTP client for a node reachable through its host-published ports.
    fn node_client_from_ports(
        ports: &NodeHostPorts,
        host: &str,
    ) -> Result<Self::NodeClient, DynError> {
        api_client_from_host_ports(ports, host)
    }

    /// HTTP path polled to decide that a node is ready.
    fn readiness_path() -> &'static str {
        paths::CRYPTARCHIA_INFO
    }

    /// Host used to reach compose-published ports.
    fn compose_runner_host() -> String {
        // NOTE: this unqualified call resolves to the module-level free
        // function `compose_runner_host` (associated functions are not in
        // scope without `Self::`), so this delegates rather than recursing.
        compose_runner_host()
    }
}
|
||||
|
||||
/// Compose service name for the node at `index`, e.g. "node-0".
fn node_instance_name(index: usize) -> String {
    let mut name = String::from("node-");
    name.push_str(&index.to_string());
    name
}
|
||||
|
||||
/// Path of the bundle file placed next to the cfgsync config file; if the
/// config path has no parent, the bundle name is joined onto the path itself.
fn cfgsync_bundle_path(config_path: &Path) -> PathBuf {
    let dir = config_path.parent().unwrap_or(config_path);
    dir.join("cfgsync.bundle.yaml")
}
|
||||
|
||||
fn topology_hostnames(topology: &DeploymentPlan) -> Vec<String> {
|
||||
topology
|
||||
.nodes()
|
||||
.iter()
|
||||
.map(|node| format!("node-{}", node.index()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn cfgsync_render_options(
|
||||
port: u16,
|
||||
metrics_otlp_ingest_url: Option<&Url>,
|
||||
) -> CfgsyncRenderOptions {
|
||||
CfgsyncRenderOptions {
|
||||
port: Some(port),
|
||||
bundle_path: None,
|
||||
min_timeout_secs: None,
|
||||
metrics_otlp_ingest_url: metrics_otlp_ingest_url.cloned(),
|
||||
}
|
||||
}
|
||||
|
||||
fn cfgsync_dir(cfgsync_path: &Path) -> Result<&Path, DynError> {
|
||||
cfgsync_path
|
||||
.parent()
|
||||
.ok_or_else(|| anyhow!("cfgsync path {cfgsync_path:?} has no parent directory").into())
|
||||
}
|
||||
|
||||
fn normalized_cfgsync_port(port: u16) -> u16 {
|
||||
if port == 0 {
|
||||
DEFAULT_CFGSYNC_PORT
|
||||
} else {
|
||||
port
|
||||
}
|
||||
}
|
||||
|
||||
fn build_compose_node_descriptor(
|
||||
index: usize,
|
||||
node: &NodePlan,
|
||||
cfgsync_port: u16,
|
||||
image: &str,
|
||||
platform: Option<String>,
|
||||
) -> NodeDescriptor {
|
||||
let mut environment = base_environment(cfgsync_port);
|
||||
environment.push(EnvEntry::new(
|
||||
"CFG_HOST_IDENTIFIER",
|
||||
node_instance_name(index),
|
||||
));
|
||||
|
||||
let api_port = node.general.api_config.address.port();
|
||||
let testing_port = node.general.api_config.testing_http_address.port();
|
||||
let ports = vec![
|
||||
format!("127.0.0.1::{api_port}"),
|
||||
format!("127.0.0.1::{testing_port}"),
|
||||
];
|
||||
|
||||
NodeDescriptor::new(
|
||||
node_instance_name(index),
|
||||
image.to_owned(),
|
||||
NODE_ENTRYPOINT,
|
||||
base_volumes(),
|
||||
default_extra_hosts(),
|
||||
ports,
|
||||
environment,
|
||||
platform,
|
||||
)
|
||||
}
|
||||
|
||||
fn cfgsync_container_name() -> String {
|
||||
format!("nomos-cfgsync-{}", Uuid::new_v4())
|
||||
}
|
||||
|
||||
/// Docker `-v` argument mounting the stack assets read-only at /etc/logos;
/// falls back to the raw path when canonicalisation fails.
fn cfgsync_stack_volume_arg(testnet_dir: &Path) -> String {
    let stack_dir = match testnet_dir.canonicalize() {
        Ok(resolved) => resolved,
        Err(_) => testnet_dir.to_path_buf(),
    };
    format!("{}:/etc/logos:ro", stack_dir.display())
}
|
||||
|
||||
fn maybe_add_circuits_mount(command: &mut Command) {
|
||||
let circuits_dir = env::var("LOGOS_BLOCKCHAIN_CIRCUITS_DOCKER")
|
||||
.ok()
|
||||
.or_else(|| env::var("LOGOS_BLOCKCHAIN_CIRCUITS").ok());
|
||||
|
||||
let Some(circuits_dir) = circuits_dir else {
|
||||
return;
|
||||
};
|
||||
|
||||
let host_path = PathBuf::from(&circuits_dir);
|
||||
if !host_path.exists() {
|
||||
return;
|
||||
}
|
||||
|
||||
let resolved_host_path = host_path.canonicalize().unwrap_or(host_path);
|
||||
command
|
||||
.arg("-e")
|
||||
.arg(format!("LOGOS_BLOCKCHAIN_CIRCUITS={circuits_dir}"))
|
||||
.arg("-v")
|
||||
.arg(format!(
|
||||
"{}:{circuits_dir}:ro",
|
||||
resolved_host_path.display()
|
||||
));
|
||||
}
|
||||
|
||||
fn build_cfgsync_docker_run_command(
|
||||
container_name: &str,
|
||||
network: &str,
|
||||
port: u16,
|
||||
testnet_dir: &Path,
|
||||
image: &str,
|
||||
) -> Command {
|
||||
let mut command = Command::new("docker");
|
||||
command
|
||||
.arg("run")
|
||||
.arg("-d")
|
||||
.arg("--name")
|
||||
.arg(container_name)
|
||||
.arg("--network")
|
||||
.arg(network)
|
||||
.arg("--network-alias")
|
||||
.arg("cfgsync")
|
||||
.arg("--workdir")
|
||||
.arg("/etc/logos")
|
||||
.arg("--entrypoint")
|
||||
.arg("cfgsync-server")
|
||||
.arg("-p")
|
||||
.arg(format!("{port}:{port}"))
|
||||
.arg("-v")
|
||||
.arg(cfgsync_stack_volume_arg(testnet_dir));
|
||||
|
||||
maybe_add_circuits_mount(&mut command);
|
||||
command.arg(image).arg("/etc/logos/cfgsync.yaml");
|
||||
command
|
||||
}
|
||||
|
||||
fn resolve_image() -> (String, Option<String>) {
|
||||
let image =
|
||||
tf_env::nomos_testnet_image().unwrap_or_else(|| String::from(DEFAULT_COMPOSE_TEST_IMAGE));
|
||||
let platform = (image == GHCR_TESTNET_IMAGE).then(|| "linux/amd64".to_owned());
|
||||
debug!(image, platform = ?platform, "resolved compose image");
|
||||
(image, platform)
|
||||
}
|
||||
|
||||
fn base_volumes() -> Vec<String> {
|
||||
let mut volumes = vec!["./stack:/etc/logos".into()];
|
||||
if let Some(host_log_dir) = repository_root()
|
||||
.ok()
|
||||
.map(|root| root.join("tmp").join("node-logs"))
|
||||
.map(|dir| dir.display().to_string())
|
||||
{
|
||||
volumes.push(format!("{host_log_dir}:/tmp/node-logs"));
|
||||
}
|
||||
volumes
|
||||
}
|
||||
|
||||
fn default_extra_hosts() -> Vec<String> {
|
||||
testing_framework_runner_compose::docker::platform::host_gateway_entry()
|
||||
.into_iter()
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn base_environment(cfgsync_port: u16) -> Vec<EnvEntry> {
|
||||
let rust_log = env_value_or_default(tf_env::rust_log, "info");
|
||||
let nomos_log_level = env_value_or_default(tf_env::nomos_log_level, "info");
|
||||
let time_backend = env_value_or_default(tf_env::lb_time_service_backend, "monotonic");
|
||||
let cfgsync_host = env::var("LOGOS_BLOCKCHAIN_CFGSYNC_HOST")
|
||||
.unwrap_or_else(|_| String::from(DEFAULT_CFGSYNC_HOST));
|
||||
vec![
|
||||
EnvEntry::new("RUST_LOG", rust_log),
|
||||
EnvEntry::new("LOGOS_BLOCKCHAIN_LOG_LEVEL", nomos_log_level),
|
||||
EnvEntry::new("LOGOS_BLOCKCHAIN_TIME_BACKEND", time_backend),
|
||||
EnvEntry::new(
|
||||
"CFG_SERVER_ADDR",
|
||||
format!("http://{cfgsync_host}:{cfgsync_port}"),
|
||||
),
|
||||
EnvEntry::new("OTEL_METRIC_EXPORT_INTERVAL", "5000"),
|
||||
]
|
||||
}
|
||||
|
||||
fn compose_runner_host() -> String {
|
||||
env::var("COMPOSE_RUNNER_HOST").unwrap_or_else(|_| DEFAULT_COMPOSE_RUNNER_HOST.to_string())
|
||||
}
|
||||
|
||||
fn api_client_from_host_ports(
|
||||
ports: &NodeHostPorts,
|
||||
host: &str,
|
||||
) -> Result<NodeHttpClient, DynError> {
|
||||
let base_url = url_for_host_port(host, ports.api)?;
|
||||
let testing_url = url_for_host_port(host, ports.testing)?;
|
||||
Ok(NodeHttpClient::from_urls(base_url, Some(testing_url)))
|
||||
}
|
||||
|
||||
/// Resolve an env-backed setting via `getter`, falling back to `default`
/// when it yields nothing.
fn env_value_or_default(getter: impl Fn() -> Option<String>, default: &'static str) -> String {
    match getter() {
        Some(value) => value,
        None => String::from(default),
    }
}
|
||||
|
||||
fn url_for_host_port(host: &str, port: u16) -> Result<Url, DynError> {
|
||||
let url = Url::parse(&format!("http://{host}:{port}/"))?;
|
||||
Ok(url)
|
||||
}
|
||||
@ -1,13 +0,0 @@
|
||||
use testing_framework_env as tf_env;
|
||||
|
||||
/// Default cfgsync port used across extension runners.
|
||||
pub const DEFAULT_CFGSYNC_PORT: u16 = 4400;
|
||||
|
||||
/// Default stack assets directory used by k8s compose assets discovery.
|
||||
pub const DEFAULT_ASSETS_STACK_DIR: &str = "logos/infra/assets/stack";
|
||||
|
||||
/// Resolve cfgsync port from `LOGOS_BLOCKCHAIN_CFGSYNC_PORT`, falling back to
|
||||
/// the default.
|
||||
pub fn cfgsync_port() -> u16 {
|
||||
tf_env::nomos_cfgsync_port().unwrap_or(DEFAULT_CFGSYNC_PORT)
|
||||
}
|
||||
@ -1,648 +0,0 @@
|
||||
use std::{
|
||||
collections::BTreeMap,
|
||||
env, fs, io,
|
||||
path::{Path, PathBuf},
|
||||
process::Output,
|
||||
};
|
||||
|
||||
use anyhow::{Result as AnyhowResult, anyhow};
|
||||
use async_trait::async_trait;
|
||||
use kube::Client;
|
||||
use lb_framework::{
|
||||
NodeHttpClient,
|
||||
internal::{DeploymentPlan, NodePlan},
|
||||
};
|
||||
use lb_http_api_common::paths;
|
||||
use reqwest::Url;
|
||||
use serde::Serialize;
|
||||
use tempfile::TempDir;
|
||||
use testing_framework_core::scenario::DynError;
|
||||
use testing_framework_env as tf_env;
|
||||
use testing_framework_runner_k8s::{K8sDeployEnv, PortSpecs, RunnerCleanup, wait::NodeConfigPorts};
|
||||
use thiserror::Error;
|
||||
use tokio::process::Command;
|
||||
use tracing::{debug, info};
|
||||
|
||||
use crate::{
|
||||
LbcExtEnv,
|
||||
cfgsync::{CfgsyncOutputPaths, CfgsyncRenderOptions, render_and_write_cfgsync_from_template},
|
||||
constants::{DEFAULT_ASSETS_STACK_DIR, cfgsync_port},
|
||||
};
|
||||
|
||||
const CFGSYNC_K8S_TIMEOUT_SECS: u64 = 300;
|
||||
const K8S_FULLNAME_OVERRIDE: &str = "logos-runner";
|
||||
const DEFAULT_K8S_TESTNET_IMAGE: &str = "public.ecr.aws/r4s5t9y4/logos/logos-blockchain:test";
|
||||
|
||||
/// Paths and image metadata required to deploy the Helm chart.
pub struct K8sAssets {
    /// Container image reference passed to Helm as `image=...`.
    pub image: String,
    /// Directory of the Helm chart to install.
    pub chart_path: PathBuf,
    /// Rendered cfgsync configuration file (passed via `--set-file`).
    pub cfgsync_file: PathBuf,
    /// `run_cfgsync.sh` script path.
    pub run_cfgsync_script: PathBuf,
    /// Shared `run_logos.sh` script path.
    pub run_logos_script: PathBuf,
    /// `run_logos_node.sh` script path.
    pub run_logos_node_script: PathBuf,
    /// Rendered Helm `values.yaml` file.
    pub values_file: PathBuf,
    // Keeps the temporary directory (which holds the rendered files above)
    // alive for the lifetime of this struct; dropped => files are removed.
    _tempdir: TempDir,
}
|
||||
|
||||
#[derive(Debug, Error)]
/// Failures preparing Helm assets and rendered cfgsync configuration.
pub enum AssetsError {
    /// The workspace root could not be resolved (see `workspace_root`).
    #[error("failed to locate workspace root: {source}")]
    WorkspaceRoot {
        #[source]
        source: anyhow::Error,
    },
    /// Rendering the cfgsync template failed.
    #[error("failed to render cfgsync configuration: {source}")]
    Cfgsync {
        #[source]
        source: anyhow::Error,
    },
    /// A required runner script was not found on disk.
    #[error("missing required script at {path}")]
    MissingScript { path: PathBuf },
    /// The Helm chart directory was not found on disk.
    #[error("missing Helm chart at {path}; ensure the repository is up-to-date")]
    MissingChart { path: PathBuf },
    /// Creating the temp dir that holds rendered assets failed.
    #[error("failed to create temporary directory for rendered assets: {source}")]
    TempDir {
        #[source]
        source: io::Error,
    },
    /// Writing a rendered asset to disk failed.
    #[error("failed to write asset at {path}: {source}")]
    Io {
        path: PathBuf,
        #[source]
        source: io::Error,
    },
    /// Serializing Helm values to YAML failed.
    #[error("failed to render Helm values: {source}")]
    Values {
        #[source]
        source: serde_yaml::Error,
    },
}
|
||||
|
||||
#[derive(Debug, Error)]
/// Errors returned from Helm invocations.
pub enum HelmError {
    /// The `helm` process could not be spawned (e.g. binary not on PATH).
    #[error("failed to spawn {command}: {source}")]
    Spawn {
        command: String,
        #[source]
        source: io::Error,
    },
    /// The `helm` process ran but exited non-zero; captures both streams.
    #[error("{command} exited with status {status:?}\nstderr:\n{stderr}\nstdout:\n{stdout}")]
    Failed {
        command: String,
        // `None` when the process was terminated by a signal.
        status: Option<i32>,
        stdout: String,
        stderr: String,
    },
}
|
||||
|
||||
#[async_trait]
impl K8sDeployEnv for LbcExtEnv {
    type Assets = K8sAssets;

    /// Extract the API and testing-HTTP ports each node's config binds to,
    /// in topology order, so the runner can wire port-forwards.
    fn collect_port_specs(topology: &Self::Deployment) -> PortSpecs {
        let nodes = topology
            .nodes()
            .iter()
            .map(|node| NodeConfigPorts {
                api: node.general.api_config.address.port(),
                testing: node.general.api_config.testing_http_address.port(),
            })
            .collect();
        PortSpecs { nodes }
    }

    /// Delegates to the free function `prepare_assets`, boxing the error.
    fn prepare_assets(
        topology: &Self::Deployment,
        metrics_otlp_ingest_url: Option<&Url>,
    ) -> Result<Self::Assets, DynError> {
        prepare_assets(topology, metrics_otlp_ingest_url).map_err(|err| err.into())
    }

    /// Install the Helm release and return the cleanup handle. Setting the
    /// `K8S_RUNNER_PRESERVE` env var (any value) skips teardown on cleanup.
    async fn install_stack(
        client: &Client,
        assets: &Self::Assets,
        namespace: &str,
        release: &str,
        nodes: usize,
    ) -> Result<RunnerCleanup, DynError> {
        install_release(assets, release, namespace, nodes)
            .await
            .map_err(|err| -> DynError { Box::new(err) })?;

        let preserve = env::var("K8S_RUNNER_PRESERVE").is_ok();
        Ok(RunnerCleanup::new(
            client.clone(),
            namespace.to_owned(),
            release.to_owned(),
            preserve,
        ))
    }

    /// Build an HTTP client for a node; the testing URL is optional and is
    /// silently dropped if it fails to parse (`.ok()`).
    fn node_client_from_ports(
        host: &str,
        api_port: u16,
        testing_port: u16,
    ) -> Result<Self::NodeClient, DynError> {
        let base_url = node_url(host, api_port)?;
        let testing_url = Url::parse(&format!("http://{host}:{testing_port}")).ok();
        Ok(NodeHttpClient::from_urls(base_url, testing_url))
    }

    /// HTTP path polled to decide a node is ready.
    fn readiness_path() -> &'static str {
        paths::CRYPTARCHIA_INFO
    }

    fn node_base_url(client: &Self::NodeClient) -> Option<String> {
        Some(client.base_url().to_string())
    }

    // Deployment and service names are both derived from the chart's
    // fullnameOverride; the release name is intentionally ignored.
    fn node_deployment_name(_release: &str, index: usize) -> String {
        format!("{K8S_FULLNAME_OVERRIDE}-node-{index}")
    }

    fn node_service_name(_release: &str, index: usize) -> String {
        format!("{K8S_FULLNAME_OVERRIDE}-node-{index}")
    }
}
|
||||
|
||||
fn node_url(host: &str, port: u16) -> Result<Url, DynError> {
|
||||
let url = Url::parse(&format!("http://{host}:{port}"))?;
|
||||
Ok(url)
|
||||
}
|
||||
|
||||
/// Render cfgsync config, Helm values, and locate scripts for a topology.
|
||||
pub fn prepare_assets(
|
||||
topology: &DeploymentPlan,
|
||||
metrics_otlp_ingest_url: Option<&Url>,
|
||||
) -> Result<K8sAssets, AssetsError> {
|
||||
log_assets_prepare_start(topology);
|
||||
let root = workspace_root().map_err(|source| AssetsError::WorkspaceRoot { source })?;
|
||||
let tempdir = create_assets_tempdir()?;
|
||||
|
||||
let (cfgsync_file, cfgsync_yaml, bundle_yaml) =
|
||||
render_and_write_cfgsync(topology, metrics_otlp_ingest_url, &tempdir)?;
|
||||
let scripts = validate_scripts(&root)?;
|
||||
let chart_path = helm_chart_path()?;
|
||||
let values_file = render_and_write_values(topology, &tempdir, &cfgsync_yaml, &bundle_yaml)?;
|
||||
let image = testnet_image();
|
||||
|
||||
log_assets_prepare_done(&cfgsync_file, &values_file, &chart_path, &image);
|
||||
|
||||
Ok(K8sAssets {
|
||||
image,
|
||||
chart_path,
|
||||
cfgsync_file,
|
||||
run_logos_script: scripts.run_shared,
|
||||
run_cfgsync_script: scripts.run_cfgsync,
|
||||
run_logos_node_script: scripts.run_node,
|
||||
values_file,
|
||||
_tempdir: tempdir,
|
||||
})
|
||||
}
|
||||
|
||||
fn log_assets_prepare_start(topology: &DeploymentPlan) {
|
||||
info!(
|
||||
nodes = topology.nodes().len(),
|
||||
"preparing k8s runner assets"
|
||||
);
|
||||
}
|
||||
|
||||
fn log_assets_prepare_done(
|
||||
cfgsync_file: &Path,
|
||||
values_file: &Path,
|
||||
chart_path: &Path,
|
||||
image: &str,
|
||||
) {
|
||||
debug!(
|
||||
cfgsync = %cfgsync_file.display(),
|
||||
values = %values_file.display(),
|
||||
image,
|
||||
chart = %chart_path.display(),
|
||||
"k8s runner assets prepared"
|
||||
);
|
||||
}
|
||||
|
||||
async fn install_release(
|
||||
assets: &K8sAssets,
|
||||
release: &str,
|
||||
namespace: &str,
|
||||
nodes: usize,
|
||||
) -> Result<(), HelmError> {
|
||||
info!(
|
||||
release,
|
||||
namespace,
|
||||
nodes,
|
||||
image = %assets.image,
|
||||
cfgsync_port = cfgsync_port(),
|
||||
values = %assets.values_file.display(),
|
||||
"installing helm release"
|
||||
);
|
||||
|
||||
let command = format!("helm install {release}");
|
||||
let cmd = build_install_command(assets, release, namespace, nodes);
|
||||
let output = run_helm_command(cmd, &command).await?;
|
||||
|
||||
maybe_log_install_output(&command, &output);
|
||||
|
||||
info!(release, namespace, "helm install completed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn build_install_command(
|
||||
assets: &K8sAssets,
|
||||
release: &str,
|
||||
namespace: &str,
|
||||
nodes: usize,
|
||||
) -> Command {
|
||||
let mut cmd = Command::new("helm");
|
||||
cmd.arg("install").arg(release).arg(&assets.chart_path);
|
||||
add_install_scoping_args(&mut cmd, namespace);
|
||||
add_install_settings(&mut cmd, assets, nodes);
|
||||
add_script_file_settings(&mut cmd, assets);
|
||||
|
||||
if let Ok(root) = workspace_root() {
|
||||
cmd.current_dir(root);
|
||||
}
|
||||
|
||||
cmd
|
||||
}
|
||||
|
||||
fn add_install_scoping_args(cmd: &mut Command, namespace: &str) {
|
||||
cmd.arg("--namespace")
|
||||
.arg(namespace)
|
||||
.arg("--create-namespace")
|
||||
.arg("--wait")
|
||||
.arg("--timeout")
|
||||
.arg("5m");
|
||||
}
|
||||
|
||||
fn add_install_settings(cmd: &mut Command, assets: &K8sAssets, nodes: usize) {
|
||||
cmd.arg("--set")
|
||||
.arg(format!("image={}", assets.image))
|
||||
.arg("--set")
|
||||
.arg(format!("nodes.count={nodes}"))
|
||||
.arg("--set")
|
||||
.arg(format!("cfgsync.port={}", cfgsync_port()))
|
||||
.arg("-f")
|
||||
.arg(&assets.values_file)
|
||||
.arg("--set-file")
|
||||
.arg(format!("cfgsync.config={}", assets.cfgsync_file.display()));
|
||||
}
|
||||
|
||||
fn add_script_file_settings(cmd: &mut Command, assets: &K8sAssets) {
|
||||
add_set_file_arg(cmd, "scripts.runCfgsyncSh", &assets.run_cfgsync_script);
|
||||
add_set_file_arg(cmd, "scripts.runLogosNodeSh", &assets.run_logos_node_script);
|
||||
add_set_file_arg(cmd, "scripts.runLogosSh", &assets.run_logos_script);
|
||||
}
|
||||
|
||||
/// Append a single `--set-file key=path` pair to the command.
fn add_set_file_arg(cmd: &mut Command, key: &str, value: &Path) {
    let setting = format!("{key}={}", value.display());
    cmd.arg("--set-file").arg(setting);
}
|
||||
|
||||
fn maybe_log_install_output(command: &str, output: &Output) {
|
||||
if env::var("K8S_RUNNER_DEBUG").is_err() {
|
||||
return;
|
||||
}
|
||||
|
||||
debug!(
|
||||
command,
|
||||
stdout = %String::from_utf8_lossy(&output.stdout),
|
||||
"helm install stdout"
|
||||
);
|
||||
debug!(
|
||||
command,
|
||||
stderr = %String::from_utf8_lossy(&output.stderr),
|
||||
"helm install stderr"
|
||||
);
|
||||
}
|
||||
|
||||
async fn run_helm_command(mut cmd: Command, command: &str) -> Result<Output, HelmError> {
|
||||
let output = cmd.output().await.map_err(|source| HelmError::Spawn {
|
||||
command: command.to_owned(),
|
||||
source,
|
||||
})?;
|
||||
|
||||
if output.status.success() {
|
||||
Ok(output)
|
||||
} else {
|
||||
Err(HelmError::Failed {
|
||||
command: command.to_owned(),
|
||||
status: output.status.code(),
|
||||
stdout: String::from_utf8_lossy(&output.stdout).into_owned(),
|
||||
stderr: String::from_utf8_lossy(&output.stderr).into_owned(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn create_assets_tempdir() -> Result<TempDir, AssetsError> {
|
||||
tempfile::Builder::new()
|
||||
.prefix("nomos-helm-")
|
||||
.tempdir()
|
||||
.map_err(|source| AssetsError::TempDir { source })
|
||||
}
|
||||
|
||||
fn render_and_write_cfgsync(
|
||||
topology: &DeploymentPlan,
|
||||
metrics_otlp_ingest_url: Option<&Url>,
|
||||
tempdir: &TempDir,
|
||||
) -> Result<(PathBuf, String, String), AssetsError> {
|
||||
let cfgsync_file = tempdir.path().join("cfgsync.yaml");
|
||||
let bundle_file = tempdir.path().join("cfgsync.bundle.yaml");
|
||||
let (cfgsync_yaml, bundle_yaml) = render_cfgsync_config(
|
||||
topology,
|
||||
metrics_otlp_ingest_url,
|
||||
&cfgsync_file,
|
||||
&bundle_file,
|
||||
)?;
|
||||
|
||||
Ok((cfgsync_file, cfgsync_yaml, bundle_yaml))
|
||||
}
|
||||
|
||||
fn render_and_write_values(
|
||||
topology: &DeploymentPlan,
|
||||
tempdir: &TempDir,
|
||||
cfgsync_yaml: &str,
|
||||
bundle_yaml: &str,
|
||||
) -> Result<PathBuf, AssetsError> {
|
||||
let values_yaml = render_values_yaml(topology, cfgsync_yaml, bundle_yaml)?;
|
||||
write_temp_file(tempdir.path(), "values.yaml", values_yaml)
|
||||
}
|
||||
|
||||
fn testnet_image() -> String {
|
||||
tf_env::nomos_testnet_image().unwrap_or_else(|| String::from(DEFAULT_K8S_TESTNET_IMAGE))
|
||||
}
|
||||
|
||||
fn render_cfgsync_config(
|
||||
topology: &DeploymentPlan,
|
||||
metrics_otlp_ingest_url: Option<&Url>,
|
||||
cfgsync_file: &Path,
|
||||
bundle_file: &Path,
|
||||
) -> Result<(String, String), AssetsError> {
|
||||
let hostnames = k8s_node_hostnames(topology);
|
||||
let rendered = render_and_write_cfgsync_from_template::<lb_framework::LbcEnv>(
|
||||
topology,
|
||||
&hostnames,
|
||||
CfgsyncRenderOptions {
|
||||
port: Some(cfgsync_port()),
|
||||
bundle_path: Some("cfgsync.bundle.yaml".to_string()),
|
||||
min_timeout_secs: Some(CFGSYNC_K8S_TIMEOUT_SECS),
|
||||
metrics_otlp_ingest_url: metrics_otlp_ingest_url.cloned(),
|
||||
},
|
||||
CfgsyncOutputPaths {
|
||||
config_path: cfgsync_file,
|
||||
bundle_path: bundle_file,
|
||||
},
|
||||
)
|
||||
.map_err(|source| AssetsError::Cfgsync { source })?;
|
||||
|
||||
Ok((rendered.config_yaml, rendered.bundle_yaml))
|
||||
}
|
||||
|
||||
fn k8s_node_hostnames(topology: &DeploymentPlan) -> Vec<String> {
|
||||
topology
|
||||
.nodes()
|
||||
.iter()
|
||||
.map(|node| format!("{K8S_FULLNAME_OVERRIDE}-node-{}", node.index()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
// Resolved on-disk locations of the three runner shell scripts.
struct ScriptPaths {
    // run_cfgsync.sh
    run_cfgsync: PathBuf,
    // run_logos.sh (shared helper sourced by the others)
    run_shared: PathBuf,
    // run_logos_node.sh
    run_node: PathBuf,
}
|
||||
|
||||
fn validate_scripts(root: &Path) -> Result<ScriptPaths, AssetsError> {
|
||||
let scripts_dir = stack_scripts_root(root);
|
||||
let run_cfgsync = scripts_dir.join("run_cfgsync.sh");
|
||||
let run_shared = scripts_dir.join("run_logos.sh");
|
||||
let run_node = scripts_dir.join("run_logos_node.sh");
|
||||
|
||||
for path in [&run_cfgsync, &run_shared, &run_node] {
|
||||
if !path.exists() {
|
||||
return Err(AssetsError::MissingScript { path: path.clone() });
|
||||
}
|
||||
}
|
||||
|
||||
debug!(
|
||||
run_cfgsync = %run_cfgsync.display(),
|
||||
run_shared = %run_shared.display(),
|
||||
run_node = %run_node.display(),
|
||||
"validated runner scripts exist"
|
||||
);
|
||||
|
||||
Ok(ScriptPaths {
|
||||
run_cfgsync,
|
||||
run_shared,
|
||||
run_node,
|
||||
})
|
||||
}
|
||||
|
||||
fn helm_chart_path() -> Result<PathBuf, AssetsError> {
|
||||
let root = workspace_root().map_err(|source| AssetsError::WorkspaceRoot { source })?;
|
||||
let path = if let Some(override_dir) = helm_override_dir(&root) {
|
||||
override_dir
|
||||
} else {
|
||||
root.join("logos/infra/helm/logos-runner")
|
||||
};
|
||||
if path.exists() {
|
||||
Ok(path)
|
||||
} else {
|
||||
Err(AssetsError::MissingChart { path })
|
||||
}
|
||||
}
|
||||
|
||||
fn render_values_yaml(
|
||||
topology: &DeploymentPlan,
|
||||
cfgsync_yaml: &str,
|
||||
bundle_yaml: &str,
|
||||
) -> Result<String, AssetsError> {
|
||||
let values = build_values(topology, cfgsync_yaml, bundle_yaml);
|
||||
serde_yaml::to_string(&values).map_err(|source| AssetsError::Values { source })
|
||||
}
|
||||
|
||||
fn write_temp_file(
|
||||
dir: &Path,
|
||||
name: &str,
|
||||
contents: impl AsRef<[u8]>,
|
||||
) -> Result<PathBuf, AssetsError> {
|
||||
let path = dir.join(name);
|
||||
fs::write(&path, contents).map_err(|source| AssetsError::Io {
|
||||
path: path.clone(),
|
||||
source,
|
||||
})?;
|
||||
Ok(path)
|
||||
}
|
||||
|
||||
/// Locate the workspace root, honoring `CARGO_WORKSPACE_DIR` overrides.
|
||||
pub fn workspace_root() -> AnyhowResult<PathBuf> {
|
||||
if let Ok(var) = env::var("CARGO_WORKSPACE_DIR") {
|
||||
return Ok(PathBuf::from(var));
|
||||
}
|
||||
let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR"));
|
||||
let candidate_roots = [
|
||||
manifest_dir
|
||||
.parent()
|
||||
.and_then(Path::parent)
|
||||
.and_then(Path::parent),
|
||||
manifest_dir.parent().and_then(Path::parent),
|
||||
];
|
||||
|
||||
for candidate in candidate_roots.iter().flatten() {
|
||||
let stack_root = if let Some(override_dir) = assets_override_dir(candidate) {
|
||||
override_dir
|
||||
} else {
|
||||
candidate.join(DEFAULT_ASSETS_STACK_DIR)
|
||||
};
|
||||
if stack_root.exists() {
|
||||
return Ok(candidate.to_path_buf());
|
||||
}
|
||||
}
|
||||
|
||||
Err(anyhow!(
|
||||
"resolving workspace root from manifest dir: {manifest_dir:?}"
|
||||
))
|
||||
}
|
||||
|
||||
fn stack_scripts_root(root: &Path) -> PathBuf {
|
||||
if let Some(scripts) = override_scripts_dir(root)
|
||||
&& scripts.exists()
|
||||
{
|
||||
return scripts;
|
||||
}
|
||||
root.join(DEFAULT_ASSETS_STACK_DIR).join("scripts")
|
||||
}
|
||||
|
||||
/// Stack assets override from `REL_ASSETS_STACK_DIR`, resolved against
/// `root` when the value is relative; `None` when the variable is unset.
fn assets_override_dir(root: &Path) -> Option<PathBuf> {
    let raw = env::var("REL_ASSETS_STACK_DIR").ok()?;
    let path = PathBuf::from(raw);
    Some(if path.is_absolute() {
        path
    } else {
        root.join(path)
    })
}
|
||||
|
||||
fn override_scripts_dir(root: &Path) -> Option<PathBuf> {
|
||||
assets_override_dir(root).map(|dir| dir.join("scripts"))
|
||||
}
|
||||
|
||||
/// Helm chart override from `REL_HELM_CHART_DIR`, resolved against `root`
/// when relative; `None` when the variable is unset.
fn helm_override_dir(root: &Path) -> Option<PathBuf> {
    let raw = env::var("REL_HELM_CHART_DIR").ok()?;
    let path = PathBuf::from(raw);
    if path.is_absolute() {
        Some(path)
    } else {
        Some(root.join(path))
    }
}
|
||||
|
||||
// Top-level shape of the chart's values.yaml, serialized via serde_yaml.
#[derive(Serialize)]
struct HelmValues {
    #[serde(rename = "imagePullPolicy")]
    image_pull_policy: String,
    // Fixed to K8S_FULLNAME_OVERRIDE so resource names are predictable.
    #[serde(rename = "fullnameOverride")]
    fullname_override: String,
    kzg: KzgValues,
    cfgsync: CfgsyncValues,
    nodes: NodeGroup,
}
|
||||
|
||||
// KZG cache settings in values.yaml; see `KzgValues::disabled` for the
// values used by this runner.
#[derive(Serialize)]
struct KzgValues {
    mode: String,
    #[serde(rename = "storageSize")]
    storage_size: String,
    #[serde(rename = "hostPath")]
    host_path: String,
    #[serde(rename = "hostPathType")]
    host_path_type: String,
}
|
||||
|
||||
// cfgsync section of values.yaml: listening port plus the rendered config
// and bundle YAML embedded as strings.
#[derive(Serialize)]
struct CfgsyncValues {
    port: u16,
    config: String,
    bundle: String,
}
|
||||
|
||||
// `nodes` section of values.yaml: total count plus per-node settings.
#[derive(Serialize)]
struct NodeGroup {
    count: usize,
    nodes: Vec<NodeValues>,
}
|
||||
|
||||
// Per-node entry in values.yaml: exposed ports and extra environment
// variables (BTreeMap keeps serialized key order deterministic).
#[derive(Serialize)]
struct NodeValues {
    #[serde(rename = "apiPort")]
    api_port: u16,
    #[serde(rename = "testingHttpPort")]
    testing_http_port: u16,
    #[serde(rename = "networkPort")]
    network_port: u16,
    env: BTreeMap<String, String>,
}
|
||||
|
||||
fn build_values(topology: &DeploymentPlan, cfgsync_yaml: &str, bundle_yaml: &str) -> HelmValues {
|
||||
let cfgsync = CfgsyncValues {
|
||||
port: cfgsync_port(),
|
||||
config: cfgsync_yaml.to_string(),
|
||||
bundle: bundle_yaml.to_string(),
|
||||
};
|
||||
let kzg = KzgValues::disabled();
|
||||
let image_pull_policy =
|
||||
tf_env::nomos_testnet_image_pull_policy().unwrap_or_else(|| "IfNotPresent".into());
|
||||
debug!("rendering Helm values for k8s stack");
|
||||
let nodes = build_node_group("node", topology.nodes());
|
||||
|
||||
HelmValues {
|
||||
image_pull_policy,
|
||||
fullname_override: K8S_FULLNAME_OVERRIDE.to_string(),
|
||||
kzg,
|
||||
cfgsync,
|
||||
nodes,
|
||||
}
|
||||
}
|
||||
|
||||
impl KzgValues {
|
||||
fn disabled() -> Self {
|
||||
Self {
|
||||
mode: "disabled".to_string(),
|
||||
storage_size: "1Gi".to_string(),
|
||||
host_path: "/tmp/nomos-kzg".to_string(),
|
||||
host_path_type: "DirectoryOrCreate".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn build_node_group(kind: &'static str, nodes: &[NodePlan]) -> NodeGroup {
|
||||
let node_values = nodes
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(index, node)| build_node_values(kind, index, node))
|
||||
.collect();
|
||||
|
||||
NodeGroup {
|
||||
count: nodes.len(),
|
||||
nodes: node_values,
|
||||
}
|
||||
}
|
||||
|
||||
fn build_node_values(kind: &'static str, index: usize, node: &NodePlan) -> NodeValues {
|
||||
let mut env = BTreeMap::new();
|
||||
env.insert("CFG_HOST_KIND".into(), kind.to_string());
|
||||
env.insert("CFG_HOST_IDENTIFIER".into(), format!("{kind}-{index}"));
|
||||
|
||||
NodeValues {
|
||||
api_port: node.general.api_config.address.port(),
|
||||
testing_http_port: node.general.api_config.testing_http_address.port(),
|
||||
network_port: node.general.network_config.backend.swarm.port,
|
||||
env,
|
||||
}
|
||||
}
|
||||
@ -1,140 +0,0 @@
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use async_trait::async_trait;
|
||||
pub use lb_framework::*;
|
||||
use reqwest::Url;
|
||||
pub use scenario::{
|
||||
CoreBuilderExt, ObservabilityBuilderExt, ScenarioBuilder, ScenarioBuilderExt,
|
||||
ScenarioBuilderWith,
|
||||
};
|
||||
use testing_framework_core::scenario::{
|
||||
Application, DynError, ExternalNodeSource, FeedRuntime, NodeClients, RunContext,
|
||||
StartNodeOptions,
|
||||
};
|
||||
use testing_framework_runner_local::{
|
||||
BuiltNodeConfig, LocalDeployerEnv, NodeConfigEntry,
|
||||
process::{LaunchSpec, NodeEndpoints, ProcessSpawnError},
|
||||
};
|
||||
use tokio::sync::broadcast;
|
||||
use workloads::{LbcBlockFeedEnv, LbcScenarioEnv};
|
||||
|
||||
pub mod cfgsync;
mod compose_env;
pub mod constants;
mod k8s_env;
pub mod scenario;

// Deployer aliases binding the generic compose/k8s runners to this env.
pub type LbcComposeDeployer = testing_framework_runner_compose::ComposeDeployer<LbcExtEnv>;
pub type LbcK8sDeployer = testing_framework_runner_k8s::K8sDeployer<LbcExtEnv>;

/// Extension environment marker; bridges `LbcEnv` behavior to the
/// compose/k8s/local runner traits implemented in this crate.
pub struct LbcExtEnv;
|
||||
|
||||
#[async_trait]
impl Application for LbcExtEnv {
    // All associated types are borrowed from the upstream LbcEnv so this
    // extension env stays wire-compatible with lb_framework.
    type Deployment = <LbcEnv as Application>::Deployment;

    type NodeClient = <LbcEnv as Application>::NodeClient;

    type NodeConfig = <LbcEnv as Application>::NodeConfig;

    type FeedRuntime = <LbcEnv as Application>::FeedRuntime;

    /// Client for an externally-managed node; no testing URL is configured.
    fn external_node_client(source: &ExternalNodeSource) -> Result<Self::NodeClient, DynError> {
        let base_url = Url::parse(&source.endpoint)?;
        Ok(NodeHttpClient::from_urls(base_url, None))
    }

    /// Delegate feed preparation to the upstream env by re-wrapping the
    /// client snapshot in upstream-typed `NodeClients`.
    async fn prepare_feed(
        node_clients: NodeClients<Self>,
    ) -> Result<(<Self::FeedRuntime as FeedRuntime>::Feed, Self::FeedRuntime), DynError> {
        let clients = node_clients.snapshot();
        let upstream_clients = NodeClients::<lb_framework::LbcEnv>::new(clients);

        <LbcEnv as Application>::prepare_feed(upstream_clients).await
    }
}
|
||||
|
||||
// Marker impl: the extension env opts into the scenario environment with
// the trait's default behavior.
impl LbcScenarioEnv for LbcExtEnv {}
|
||||
|
||||
impl LbcBlockFeedEnv for LbcExtEnv {
    /// New broadcast receiver over the run context's block feed.
    fn block_feed_subscription(ctx: &RunContext<Self>) -> broadcast::Receiver<Arc<BlockRecord>> {
        ctx.feed().subscribe()
    }

    /// Handle to the run context's block feed itself.
    fn block_feed(ctx: &RunContext<Self>) -> BlockFeed {
        ctx.feed()
    }
}
|
||||
|
||||
#[async_trait]
impl LocalDeployerEnv for LbcExtEnv {
    /// Bridge to the upstream env's config builder after translating the
    /// extension-typed start options (see `map_start_options`); rejects
    /// options carrying a `config_patch`, which the bridge cannot forward.
    fn build_node_config(
        topology: &Self::Deployment,
        index: usize,
        peer_ports_by_name: &HashMap<String, u16>,
        options: &StartNodeOptions<Self>,
        peer_ports: &[u16],
    ) -> Result<BuiltNodeConfig<<Self as Application>::NodeConfig>, DynError> {
        let mapped_options = map_start_options(options)?;
        <LbcEnv as LocalDeployerEnv>::build_node_config(
            topology,
            index,
            peer_ports_by_name,
            &mapped_options,
            peer_ports,
        )
    }

    // The remaining methods delegate directly to the upstream LbcEnv impl.

    fn build_initial_node_configs(
        topology: &Self::Deployment,
    ) -> Result<Vec<NodeConfigEntry<<Self as Application>::NodeConfig>>, ProcessSpawnError> {
        <LbcEnv as LocalDeployerEnv>::build_initial_node_configs(topology)
    }

    fn initial_persist_dir(
        topology: &Self::Deployment,
        node_name: &str,
        index: usize,
    ) -> Option<PathBuf> {
        <LbcEnv as LocalDeployerEnv>::initial_persist_dir(topology, node_name, index)
    }

    fn build_launch_spec(
        config: &<Self as Application>::NodeConfig,
        dir: &Path,
        label: &str,
    ) -> Result<LaunchSpec, DynError> {
        <LbcEnv as LocalDeployerEnv>::build_launch_spec(config, dir, label)
    }

    fn node_endpoints(config: &<Self as Application>::NodeConfig) -> NodeEndpoints {
        <LbcEnv as LocalDeployerEnv>::node_endpoints(config)
    }

    fn node_client(endpoints: &NodeEndpoints) -> Self::NodeClient {
        <LbcEnv as LocalDeployerEnv>::node_client(endpoints)
    }

    fn readiness_endpoint_path() -> &'static str {
        <LbcEnv as LocalDeployerEnv>::readiness_endpoint_path()
    }
}
|
||||
|
||||
fn map_start_options(
|
||||
options: &StartNodeOptions<LbcExtEnv>,
|
||||
) -> Result<StartNodeOptions<LbcEnv>, DynError> {
|
||||
if options.config_patch.is_some() {
|
||||
return Err("LbcExtEnv local deployer bridge does not support config_patch yet".into());
|
||||
}
|
||||
|
||||
let mut mapped = StartNodeOptions::<LbcEnv>::default();
|
||||
mapped.peers = options.peers.clone();
|
||||
mapped.config_override = options.config_override.clone();
|
||||
mapped.persist_dir = options.persist_dir.clone();
|
||||
|
||||
Ok(mapped)
|
||||
}
|
||||
@ -1,212 +0,0 @@
|
||||
use std::num::{NonZeroU64, NonZeroUsize};
|
||||
|
||||
use lb_framework::{
|
||||
configs::{
|
||||
deployment::{DeploymentBuilder, TopologyConfig},
|
||||
wallet::{WalletConfig, wallet_config_for_users},
|
||||
},
|
||||
internal::{DeploymentPlan, apply_wallet_config_to_deployment},
|
||||
};
|
||||
pub use testing_framework_core::scenario::ObservabilityBuilderExt;
|
||||
use testing_framework_core::{
|
||||
scenario::{NodeControlScenarioBuilder, ObservabilityScenarioBuilder},
|
||||
topology::{DeploymentProvider, DeploymentSeed, DynTopologyError},
|
||||
};
|
||||
use tracing::warn;
|
||||
|
||||
use crate::LbcExtEnv;
|
||||
|
||||
// Scenario builder aliases specialized to the extension environment.
pub type ScenarioBuilder = testing_framework_core::scenario::ScenarioBuilder<LbcExtEnv>;
// Core builder carrying capability markers `Caps` (defaults to none).
pub type ScenarioBuilderWith<Caps = ()> =
    testing_framework_core::scenario::CoreBuilder<LbcExtEnv, Caps>;
|
||||
|
||||
/// Construction and wallet-configuration helpers shared by all scenario
/// builder flavors in this module.
pub trait CoreBuilderExt: Sized {
    /// Start a builder from a deployment produced by `f` over an empty
    /// topology config.
    fn deployment_with(f: impl FnOnce(DeploymentBuilder) -> DeploymentBuilder) -> Self;

    /// Install an explicit wallet configuration on the deployment provider.
    fn with_wallet_config(self, wallet: WalletConfig) -> Self;

    /// Convenience over `with_wallet_config` for `users` uniform wallets;
    /// invalid input is warned about and ignored (builder returned as-is).
    fn wallets(self, users: usize) -> Self;
}
|
||||
|
||||
/// Workload/expectation helpers layered on top of [`CoreBuilderExt`].
pub trait ScenarioBuilderExt: Sized {
    /// Begin configuring a transaction workload.
    fn transactions(self) -> TransactionFlowBuilder<Self>;

    /// Configure and apply a transaction workload in one call.
    fn transactions_with(
        self,
        f: impl FnOnce(TransactionFlowBuilder<Self>) -> TransactionFlowBuilder<Self>,
    ) -> Self;

    /// Add the consensus-liveness expectation.
    fn expect_consensus_liveness(self) -> Self;

    /// Seed a wallet splitting `total_funds` across `users`; invalid input
    /// is warned about and ignored.
    fn initialize_wallet(self, total_funds: u64, users: usize) -> Self;
}
|
||||
|
||||
impl CoreBuilderExt for ScenarioBuilder {
|
||||
fn deployment_with(f: impl FnOnce(DeploymentBuilder) -> DeploymentBuilder) -> Self {
|
||||
let topology = f(DeploymentBuilder::new(TopologyConfig::empty()));
|
||||
ScenarioBuilder::new(Box::new(topology))
|
||||
}
|
||||
|
||||
fn with_wallet_config(self, wallet: WalletConfig) -> Self {
|
||||
self.map_deployment_provider(|provider| {
|
||||
Box::new(WalletConfigProvider {
|
||||
inner: provider,
|
||||
wallet,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
fn wallets(self, users: usize) -> Self {
|
||||
with_wallets_or_warn(self, users, CoreBuilderExt::with_wallet_config)
|
||||
}
|
||||
}
|
||||
|
||||
impl CoreBuilderExt for NodeControlScenarioBuilder<LbcExtEnv> {
|
||||
fn deployment_with(f: impl FnOnce(DeploymentBuilder) -> DeploymentBuilder) -> Self {
|
||||
ScenarioBuilder::deployment_with(f).enable_node_control()
|
||||
}
|
||||
|
||||
fn with_wallet_config(self, wallet: WalletConfig) -> Self {
|
||||
self.map_deployment_provider(|provider| {
|
||||
Box::new(WalletConfigProvider {
|
||||
inner: provider,
|
||||
wallet,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
fn wallets(self, users: usize) -> Self {
|
||||
with_wallets_or_warn(self, users, CoreBuilderExt::with_wallet_config)
|
||||
}
|
||||
}
|
||||
|
||||
impl CoreBuilderExt for ObservabilityScenarioBuilder<LbcExtEnv> {
|
||||
fn deployment_with(f: impl FnOnce(DeploymentBuilder) -> DeploymentBuilder) -> Self {
|
||||
ScenarioBuilder::deployment_with(f).enable_observability()
|
||||
}
|
||||
|
||||
fn with_wallet_config(self, wallet: WalletConfig) -> Self {
|
||||
self.map_deployment_provider(|provider| {
|
||||
Box::new(WalletConfigProvider {
|
||||
inner: provider,
|
||||
wallet,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
fn wallets(self, users: usize) -> Self {
|
||||
with_wallets_or_warn(self, users, CoreBuilderExt::with_wallet_config)
|
||||
}
|
||||
}
|
||||
|
||||
impl<B> ScenarioBuilderExt for B
|
||||
where
|
||||
B: CoreBuilderExt + testing_framework_core::scenario::CoreBuilderExt<Env = LbcExtEnv> + Sized,
|
||||
{
|
||||
fn transactions(self) -> TransactionFlowBuilder<Self> {
|
||||
TransactionFlowBuilder {
|
||||
builder: self,
|
||||
rate: NonZeroU64::MIN,
|
||||
users: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn transactions_with(
|
||||
self,
|
||||
f: impl FnOnce(TransactionFlowBuilder<Self>) -> TransactionFlowBuilder<Self>,
|
||||
) -> Self {
|
||||
f(self.transactions()).apply()
|
||||
}
|
||||
|
||||
fn expect_consensus_liveness(self) -> Self {
|
||||
self.with_expectation(lb_framework::workloads::ConsensusLiveness::<LbcExtEnv>::default())
|
||||
}
|
||||
|
||||
fn initialize_wallet(self, total_funds: u64, users: usize) -> Self {
|
||||
let Some(user_count) = NonZeroUsize::new(users) else {
|
||||
warn!(
|
||||
users,
|
||||
"wallet user count must be non-zero; ignoring initialize_wallet"
|
||||
);
|
||||
return self;
|
||||
};
|
||||
|
||||
match WalletConfig::uniform(total_funds, user_count) {
|
||||
Ok(wallet) => self.with_wallet_config(wallet),
|
||||
Err(error) => {
|
||||
warn!(
|
||||
users,
|
||||
total_funds,
|
||||
error = %error,
|
||||
"invalid initialize_wallet input; ignoring initialize_wallet"
|
||||
);
|
||||
self
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Fluent configuration for a transaction workload; finalized by `apply`.
pub struct TransactionFlowBuilder<B> {
    // Underlying scenario builder the workload is attached to.
    builder: B,
    // Transaction submission rate; starts at NonZeroU64::MIN.
    rate: NonZeroU64,
    // Optional cap on the number of users driving transactions.
    users: Option<NonZeroUsize>,
}
|
||||
|
||||
impl<B> TransactionFlowBuilder<B>
|
||||
where
|
||||
B: testing_framework_core::scenario::CoreBuilderExt<Env = LbcExtEnv> + Sized,
|
||||
{
|
||||
pub fn rate(mut self, rate: u64) -> Self {
|
||||
match NonZeroU64::new(rate) {
|
||||
Some(rate) => self.rate = rate,
|
||||
None => warn!(
|
||||
rate,
|
||||
"transaction rate must be non-zero; keeping previous rate"
|
||||
),
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
pub fn users(mut self, users: usize) -> Self {
|
||||
match NonZeroUsize::new(users) {
|
||||
Some(value) => self.users = Some(value),
|
||||
None => warn!(
|
||||
users,
|
||||
"transaction user count must be non-zero; keeping previous setting"
|
||||
),
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
pub fn apply(self) -> B {
|
||||
let workload = lb_framework::workloads::transaction::Workload::<LbcExtEnv>::new(self.rate)
|
||||
.with_user_limit(self.users);
|
||||
self.builder.with_workload(workload)
|
||||
}
|
||||
}
|
||||
|
||||
// Deployment provider decorator that applies a wallet config to every
// deployment built by the wrapped provider.
struct WalletConfigProvider {
    inner: Box<dyn DeploymentProvider<DeploymentPlan>>,
    wallet: WalletConfig,
}
|
||||
|
||||
impl DeploymentProvider<DeploymentPlan> for WalletConfigProvider {
|
||||
fn build(&self, seed: Option<&DeploymentSeed>) -> Result<DeploymentPlan, DynTopologyError> {
|
||||
let mut deployment = self.inner.build(seed)?;
|
||||
apply_wallet_config_to_deployment(&mut deployment, &self.wallet);
|
||||
Ok(deployment)
|
||||
}
|
||||
}
|
||||
|
||||
fn with_wallets_or_warn<B>(builder: B, users: usize, apply: impl FnOnce(B, WalletConfig) -> B) -> B
|
||||
where
|
||||
B: CoreBuilderExt,
|
||||
{
|
||||
match wallet_config_for_users(users) {
|
||||
Ok(wallet) => apply(builder, wallet),
|
||||
Err(error) => {
|
||||
warn!(users, error = %error, "invalid wallets input; ignoring wallets");
|
||||
builder
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,19 +0,0 @@
|
||||
# Manifest for the lb-workloads crate; shared fields inherit from the
# workspace-level definitions.
[package]
description = { workspace = true }
edition = { workspace = true }
license = { workspace = true }
name = "lb-workloads"
version = { workspace = true }

[lints]
workspace = true

[dependencies]
# Workspace crates
lb-ext = { workspace = true }
lb-framework = { workspace = true }
testing-framework-core = { workspace = true }

# External
thiserror = { workspace = true }
tokio = { features = ["time"], workspace = true }
|
||||
@ -1,9 +0,0 @@
|
||||
// Crate root for lb-workloads: exposes workflow/workload modules and
// re-exports the common scenario surface under stable names.
pub mod workflows;
pub mod workloads;

// Alias: downstream code refers to the extension env as `LbcEnv`.
pub use lb_ext::LbcExtEnv as LbcEnv;
pub use lb_framework::workloads::{ConsensusLiveness, transaction::TxInclusionExpectation};
pub use testing_framework_core::{scenario::BuilderInputError, workloads::RandomRestartWorkload};
pub use workflows::{
    ChaosBuilderExt, ScenarioBuilderExt, start_node_with_timeout, wait_for_min_height,
};
|
||||
@ -1,5 +0,0 @@
|
||||
// Workflow helpers: manual node-control utilities plus builder extension
// traits re-exported from the framework crates.
pub mod manual;

pub use lb_framework::ScenarioBuilderExt;
pub use manual::{start_node_with_timeout, wait_for_min_height};
pub use testing_framework_core::workloads::ChaosBuilderExt;
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user