commit 7f13a12a3d142493af802fb533e125b417c0b11c
Author: andrussal
Date:   Tue Nov 25 09:13:17 2025 +0100

    Initial import of Nomos testing framework

diff --git a/.cargo-deny.toml b/.cargo-deny.toml
new file mode 100644
index 0000000..73163b4
--- /dev/null
+++ b/.cargo-deny.toml
@@ -0,0 +1,50 @@
+# Config file reference can be found at https://embarkstudios.github.io/cargo-deny/checks/cfg.html.
+
+[graph]
+all-features = true
+exclude-dev = true
+no-default-features = true
+
+[advisories]
+ignore = [
+  # Keep local ignores in sync with nomos-node if needed. Unused entries removed.
+]
+yanked = "deny"
+
+[bans]
+allow-wildcard-paths = false
+multiple-versions = "allow"
+
+[licenses]
+allow = [
+  "Apache-2.0 WITH LLVM-exception",
+  "Apache-2.0",
+  "BSD-2-Clause",
+  "BSD-3-Clause",
+  "BSL-1.0",
+  "BlueOak-1.0.0",
+  "CC0-1.0",
+  "CDDL-1.0",
+  "CDLA-Permissive-2.0",
+  "ISC",
+  "MIT",
+  "MPL-2.0",
+  "NCSA",
+  "Unicode-3.0",
+  "Zlib",
+]
+private = { ignore = false }
+unused-allowed-license = "deny"
+
+[[licenses.clarify]]
+expression = "MIT AND ISC"
+license-files = [{ hash = 0xbd0eed23, path = "LICENSE" }]
+name = "ring"
+
+[sources]
+allow-git = ["https://github.com/EspressoSystems/jellyfish.git"]
+unknown-git = "deny"
+unknown-registry = "deny"
+
+[sources.allow-org]
+github = ["logos-co"]
diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644
index 0000000..4c21fff
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,4 @@
+[target.'cfg(target_os = "macos")']
+# On macOS we need to link against some Go libraries; the build does not link without these extra flags.
+# from: https://github.com/golang/go/issues/42459
+rustflags = ["-C", "link-args=-framework CoreFoundation -framework Security -framework CoreServices -lresolv"]
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..882bc58
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,10 @@
+/target
+**/target
+.tmp/
+# IDE / OS cruft
+.idea/
+.DS_Store
+
+# Local test artifacts (kept when NOMOS_TESTS_KEEP_LOGS=1)
+tests/workflows/.tmp*
+tests/workflows/.tmp*/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..f1c365f
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,40 @@
+repos:
+  - repo: https://github.com/doublify/pre-commit-rust
+    rev: v1.0
+    hooks:
+      - id: fmt
+        # We're running `fmt` with `--all` and `pass_filenames: false` to format the entire workspace at once.
+        # Otherwise, `pre-commit` passes staged files one by one, which can lead to inconsistent results
+        # due to, presumably, the lack of full workspace context.
+        entry: cargo +nightly-2025-09-14 fmt
+        pass_filenames: false
+      - id: clippy
+        args: ["--all", "--all-targets", "--all-features", "--", "-D", "warnings"]
+  - repo: https://github.com/EmbarkStudios/cargo-deny
+    rev: 0.18.2
+    hooks:
+      - id: cargo-deny
+        args:
+          - check
+          - --hide-inclusion-graph
+          - -c
+          - .cargo-deny.toml
+          - --show-stats
+          - -D
+          - warnings
+  - repo: https://github.com/ComPWA/taplo-pre-commit
+    rev: v0.9.3
+    hooks:
+      - id: taplo-format
+      - id: taplo-lint
+  - repo: https://github.com/bnjbvr/cargo-machete
+    rev: ba1bcd4 # No tag yet with .pre-commit-hooks.yml
+    hooks:
+      - id: cargo-machete
+  - repo: local
+    hooks:
+      - id: cargo-hack-check
+        language: script
+        name: cargo hack check
+        entry: ./hooks/cargo-hack.sh
+        stages: [manual]
diff --git a/.taplo.toml b/.taplo.toml
new file mode 100644
index 0000000..5bd77ee
--- /dev/null
+++ b/.taplo.toml
@@ -0,0 +1,10 @@
+exclude = ["target/**"]
+
+[formatting]
+align_entries = true
+allowed_blank_lines = 1
+column_width = 120
+keys = ["build-dependencies", "dependencies", "dev-dependencies"]
+reorder_arrays = true
+reorder_inline_tables = true
+reorder_keys = true
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000..ed452f1
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,8924 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "addr2line"
+version = "0.25.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler2"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
+
+[[package]]
+name = "ahash"
+version = "0.8.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75"
+dependencies = [
+ "cfg-if",
+ "getrandom 0.3.4",
+ "once_cell",
+ "version_check",
+ "zerocopy",
+]
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "aligned-vec"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc890384c8602f339876ded803c97ad529f3842aba97f6392b3dba0dd171769b"
+dependencies = [
+ "equator",
+]
+
+[[package]]
+name = "allocator-api2"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
+
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "anes"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
+
+[[package]]
+name = "anstyle"
+version = "1.0.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78"
+
+[[package]]
+name = "anyhow"
+version = "1.0.100"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum =
"a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + +[[package]] +name = "arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" +dependencies = [ + "derive_arbitrary", +] + +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + +[[package]] +name = "archery" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e0a5f99dfebb87bb342d0f53bb92c81842e100bbb915223e38349580e5441d" +dependencies = [ + "triomphe", +] + +[[package]] +name = "ark-bls12-381" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c775f0d12169cba7aae4caeb547bb6a50781c7449a8aa53793827c9ec4abf488" +dependencies = [ + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-bn254" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a22f4561524cd949590d78d7d4c5df8f592430d221f7f3c9497bbafd8972120f" +dependencies = [ + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-bn254" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" +dependencies = [ + "ark-ec 0.5.0", + "ark-ff 0.5.0", + "ark-std 0.5.0", +] + +[[package]] +name = "ark-crypto-primitives" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3a13b34da09176a8baba701233fdffbaa7c1b1192ce031a3da4e55ce1f1a56" +dependencies = [ + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-relations", + "ark-serialize 0.4.2", + "ark-snark", + "ark-std 0.4.0", + "blake2", + "derivative", + "digest", + "sha2", +] + +[[package]] +name = "ark-ec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +dependencies = [ + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "hashbrown 0.13.2", + "itertools 0.10.5", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-poly 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.5", + "itertools 0.13.0", + "num-bigint", + "num-integer", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + 
"arrayvec", + "digest", + "educe", + "itertools 0.13.0", + "num-bigint", + "num-traits", + "paste", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.111", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "ark-groth16" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20ceafa83848c3e390f1cbf124bc3193b3e639b3f02009e0e290809a501b95fc" +dependencies = [ + "ark-crypto-primitives", + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-relations", + "ark-serialize 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-poly" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" +dependencies = [ + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "hashbrown 0.13.2", +] + +[[package]] +name = "ark-poly" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" +dependencies = [ + "ahash", + "ark-ff 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "educe", + "fnv", + "hashbrown 0.15.5", +] + +[[package]] +name = "ark-poly-commit" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a741492629ffcd228337676dc223a28551aa6792eedb8a2a22c767f00df6c89" +dependencies = [ + "ark-crypto-primitives", + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-relations", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest", +] + +[[package]] +name = "ark-relations" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00796b6efc05a3f48225e59cb6a2cda78881e7c390872d5786aaf112f31fb4f0" +dependencies = [ + "ark-ff 0.4.2", + "ark-std 0.4.0", + "tracing", + "tracing-subscriber 0.2.25", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-serialize-derive 0.4.2", + "ark-std 0.4.0", + "digest", + "num-bigint", +] + +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-serialize-derive 0.5.0", + "ark-std 0.5.0", + "arrayvec", + "digest", + 
"num-bigint", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "ark-snark" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84d3cc6833a335bb8a600241889ead68ee89a3cf8448081fb7694c0fe503da63" +dependencies = [ + "ark-ff 0.4.2", + "ark-relations", + "ark-serialize 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "asn1-rs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom 7.1.3", + "num-traits", + "rusticata-macros", + "thiserror 2.0.17", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "asn1_der" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" + +[[package]] +name = "async-channel" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-ctrlc" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "907279f6e91a51c8ec7cac24711e8308f21da7c10c7700ca2f7e125694ed2df1" +dependencies = [ + "ctrlc", +] + +[[package]] +name = "async-io" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" +dependencies = [ + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite", + "parking", + "polling", + "rustix 1.1.2", + "slab", + "windows-sys 0.61.2", +] + +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "asynchronous-codec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "attohttpc" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" +dependencies = [ + "http 0.2.12", + "log", + "url", +] + +[[package]] +name = "attohttpc" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" +dependencies = [ + "base64 0.22.1", + "http 1.4.0", + "log", + "url", +] + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.8.1", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower 0.5.2", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + 
"mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "az" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973" + +[[package]] +name = "backoff" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" +dependencies = [ + "getrandom 0.2.16", + "instant", + "rand 0.8.5", +] + +[[package]] +name = "backon" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cffb0e931875b666fc4fcb20fee52e9bbd1ef836fd9e9e04ec21555f9f85f7ef" +dependencies = [ + "fastrand", + "gloo-timers", + "tokio", +] + +[[package]] +name = "backtrace" +version = "0.3.76" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-link", +] + +[[package]] +name = "base-x" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base256emoji" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e9430d9a245a77c92176e649af6e275f20839a48389859d1661e9a128d077c" +dependencies = [ + "const-str", + "match-lookup", +] + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags 2.10.0", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.111", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array 0.14.7", +] + +[[package]] +name = "block2" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdeb9d870516001442e364c5220d3574d2da8dc765554b4a617230d33fa58ef5" +dependencies = [ + "objc2", +] + +[[package]] +name = "blst" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcdb4c7013139a150f9fc55d123186dbfaba0d912817466282c73ac49e71fb45" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + +[[package]] +name = "broadcast-service" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "derivative", + "futures", + "nomos-core", + "overwatch", + "serde", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "bs58" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "bstr" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "bytemuck" +version = "1.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +dependencies = [ + "serde", +] + +[[package]] +name = "bzip2-sys" +version = "0.1.13+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" +dependencies = [ + "cc", + "pkg-config", +] + +[[package]] +name = "cached" +version = "0.55.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0839c297f8783316fcca9d90344424e968395413f0662a5481f79c6648bbc14" +dependencies = [ + "hashbrown 0.14.5", + "once_cell", + "thiserror 2.0.17", + "web-time", +] + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.2.47" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd405d82c84ff7f35739f175f67d8b9fb7687a0e84ccdc78bd3568839827cf07" +dependencies = [ + "find-msvc-tools", + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom 7.1.3", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "cfg_eval" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45565fc9416b9896014f5732ac776f810ee53a66730c17e4020c3ec064a8f88f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "cfgsync" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "axum", + "clap", + "nomos-core", + "nomos-da-network-core", + "nomos-executor", + "nomos-libp2p", + "nomos-node", + "nomos-tracing-service", + "nomos-utils", + "rand 0.8.5", + "reqwest", + "serde", + "serde_json", + "serde_path_to_error", + "serde_with", + "serde_yaml", + "tests", + "tokio", + "tracing", +] + +[[package]] +name = "chain-common" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "nomos-core", + "serde", +] + +[[package]] +name = "chain-leader" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "chain-common", + "chain-service", + "cryptarchia-engine", + "ed25519-dalek", + "futures", + "nomos-blend-service", + "nomos-core", + "nomos-da-sampling", + "nomos-ledger", + "nomos-time", + "nomos-wallet", + "overwatch", + "serde", + "services-utils", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-futures", + "tx-service", + "zksign", +] + +[[package]] +name = "chain-network" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "chain-common", + "chain-service", + "cryptarchia-engine", + "cryptarchia-sync", + "futures", + "nomos-core", + "nomos-da-sampling", + "nomos-ledger", + "nomos-network", + "nomos-time", + "overwatch", + "rand 0.8.5", + "serde", + "serde_with", + "services-utils", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "tracing-futures", + "tx-service", +] + +[[package]] +name = "chain-service" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "broadcast-service", + "bytes", + "cryptarchia-engine", + "cryptarchia-sync", + "ed25519-dalek", + "futures", + "groth16", + "nomos-core", + "nomos-ledger", + "nomos-network", + "nomos-storage", + "nomos-utils", + "num-bigint", + "overwatch", + "serde", + "serde_with", + "services-utils", + "strum", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-futures", +] + +[[package]] 
+name = "chrono" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +dependencies = [ + "iana-time-zone", + "num-traits", + "serde", + "windows-link", +] + +[[package]] +name = "chrono-tz" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93698b29de5e97ad0ae26447b344c482a7284c737d9ddc5f9e52b74a336671bb" +dependencies = [ + "chrono", + "chrono-tz-build", + "phf", +] + +[[package]] +name = "chrono-tz-build" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c088aee841df9c3041febbb73934cfc39708749bf96dc827e3359cd39ef11b1" +dependencies = [ + "parse-zoneinfo", + "phf", + "phf_codegen", +] + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + +[[package]] +name = "circuits-prover" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "circuits-utils", + "tempfile", +] + +[[package]] +name = "circuits-utils" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "dirs", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "4.5.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_derive" +version = "4.5.49" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "clap_lex" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" + +[[package]] +name = "color-eyre" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "e5920befb47832a6d61ee3a3a846565cfa39b331331e68a3b1d1116630f2f26d" +dependencies = [ + "backtrace", + "eyre", + "indenter", + "once_cell", + "owo-colors", +] + +[[package]] +name = "common-http-client" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "broadcast-service", + "futures", + "nomos-core", + "nomos-da-messages", + "nomos-http-api-common", + "reqwest", + "serde", + "serde_json", + "thiserror 1.0.69", + "url", +] + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "const-hex" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735" +dependencies = [ + "cfg-if", + "cpufeatures", + "proptest", + "serde_core", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "const-str" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3" + +[[package]] +name = "convert_case" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baaaa0ecca5b51987b9423ccdc971514dd8b0bb7b4060b983d3664dad3f1f89f" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + +[[package]] +name = "counter" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f009fcafa949dc1fc46a762dae84d0c2687d3b550906b633c4979d58d2c6ae52" +dependencies = [ + "num-traits", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools 0.10.5", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + 
"serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools 0.10.5", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + +[[package]] +name = "cryptarchia-engine" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "cfg_eval", + "nomos-utils", + "serde", + "serde_with", + "thiserror 1.0.69", + "time", + "tokio", + "tracing", +] + +[[package]] +name = "cryptarchia-sync" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "bytes", + "cryptarchia-engine", + "futures", + "libp2p", + "libp2p-stream", + "nomos-core", + "rand 0.8.5", + "serde", + "serde_with", + "thiserror 1.0.69", + "tokio", + "tracing", +] + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array 0.14.7", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array 0.14.7", + "typenum", +] + +[[package]] +name = "ctrlc" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73736a89c4aff73035ba2ed2e565061954da00d4970fc9ac25dcc85a2a20d790" +dependencies = [ + "dispatch2", + "nix 0.30.1", + "windows-sys 0.61.2", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "rustc_version", + "serde", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.111", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "data-encoding" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + +[[package]] +name = "data-encoding-macro" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" +dependencies = [ + "data-encoding", + "syn 2.0.111", +] + +[[package]] +name = "debugid" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" +dependencies = [ + "uuid", +] + +[[package]] +name = "default-net" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4898b43aed56499fad6b294d15b3e76a51df68079bf492e5daae38ca084e003" +dependencies = [ + "dlopen2 0.4.1", + "libc", + "memalloc", + "netlink-packet-core 0.5.0", + "netlink-packet-route 0.15.0", + "netlink-sys", + "once_cell", + "system-configuration 0.5.1", + "windows 0.32.0", +] + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "zeroize", +] + +[[package]] +name = "der-parser" +version = "10.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom 7.1.3", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "deranged" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +dependencies = [ + "powerfmt", + "serde_core", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "deunicode" +version = "1.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abd57806937c9cc163efc8ea3910e00a62e2aeb0b8119f1793a978088f8f6b04" + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dirs" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.61.2", +] + +[[package]] +name = "dispatch2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec" +dependencies = [ + "bitflags 2.10.0", + "block2", + "libc", + "objc2", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "dlopen2" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b121caccfc363e4d9a4589528f3bef7c71b83c6ed01c8dc68cbeeb7fd29ec698" +dependencies = [ + "dlopen2_derive", + "libc", + "once_cell", + "winapi", +] + +[[package]] +name = "dlopen2" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b4f5f101177ff01b8ec4ecc81eead416a8aa42819a2869311b3420fa114ffa" +dependencies = [ + "libc", + "once_cell", + "winapi", +] + +[[package]] +name = "dlopen2_derive" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a09ac8bb8c16a282264c379dffba707b9c998afc7506009137f3c6136888078" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "dtoa" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6add3b8cff394282be81f3fc1a0605db594ed69890078ca6e2cab1c408bcf04" + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest", + "elliptic-curve", + "rfc6979", + "signature", + "spki", +] + +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "serde", + "signature", +] 
+ +[[package]] +name = "ed25519-dalek" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core 0.6.4", + "serde", + "sha2", + "subtle", + "zeroize", +] + +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest", + "ff", + "generic-array 0.14.7", + "group", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + +[[package]] +name = "enum-as-inner" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "enum-ordinalize" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a1091a7bb1f8f2c4b28f1fe2cef4980ca2d410a3d727d67ecc3178c9b0800f0" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "equator" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4711b213838dfee0117e3be6ac926007d7f433d7bbe33595975d4190cb07e6fc" +dependencies = [ + "equator-macro", +] + +[[package]] +name = "equator-macro" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "event-listener" +version = "5.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" +dependencies = [ + "event-listener", + "pin-project-lite", +] + +[[package]] +name = 
"executor-http-client" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "common-http-client", + "futures", + "nomos-core", + "nomos-http-api-common", + "reqwest", + "serde", +] + +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "find-msvc-tools" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" + +[[package]] +name = "findshlibs" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" +dependencies = [ + "cc", + "lazy_static", + "libc", + "winapi", +] + +[[package]] +name = "fixed" +version = "1.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "707070ccf8c4173548210893a0186e29c266901b71ed20cd9e2ca0193dfe95c3" +dependencies = [ + "az", + "bytemuck", + "half", + "serde", + "typenum", +] + +[[package]] +name = "flate2" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "fork_stream" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc54cf296aa5a82dfffcc911fc7a37b0dcba605725bbb4db486f7b24d7667f9d" +dependencies = [ + "futures", + "pin-project", +] + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = 
"forwarded-header-value" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" +dependencies = [ + "nonempty", + "thiserror 1.0.69", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-bounded" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91f328e7fb845fc832912fb6a34f40cf6d1888c92f974d1893a54e97b5ff542e" +dependencies = [ + "futures-timer", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", + "num_cpus", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-lite" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" +dependencies = [ + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "futures-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" +dependencies = [ + "futures-io", + "rustls 0.23.35", + "rustls-pki-types", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "generic-array" +version = "1.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaf57c49a95fd1fe24b90b3033bee6dc7e8f1288d51494cb44e627c295e38542" +dependencies = [ + "rustversion", + "serde_core", + "typenum", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasip2", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.32.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "globset" +version = "0.4.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52dfc19153a48bde0cbd630453615c8151bce3a5adfac7a0aebfbf0a1e1f57e3" +dependencies = [ + "aho-corasick", + "bstr", + "log", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "globwalk" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf760ebf69878d9fd8f110c89703d90ce35095324d1f1edcb595c63945ee757" +dependencies = [ + "bitflags 2.10.0", + "ignore", + "walkdir", +] + +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "governor" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" +dependencies = [ + "cfg-if", + "dashmap", + "futures", + "futures-timer", + "no-std-compat", + "nonzero_ext", + "parking_lot", + "portable-atomic", + "quanta", + "rand 0.8.5", + "smallvec", + "spinning_top", +] + +[[package]] +name = "groth16" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-bn254 0.4.0", + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-groth16", + "ark-serialize 0.4.2", + "generic-array 1.3.5", + "hex", + "num-bigint", + "serde", + "serde_json", + "thiserror 2.0.17", +] + +[[package]] +name = "group" +version = "0.13.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "h2" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.4.0", + "indexmap 2.12.1", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "half" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" +dependencies = [ + "cfg-if", + "crunchy", + "zerocopy", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "allocator-api2", +] + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex_fmt" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" + +[[package]] +name = "hickory-proto" +version = "0.25.0-alpha.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d00147af6310f4392a31680db52a3ed45a2e0f68eb18e8c3fe5537ecc96d9e2" +dependencies = [ + "async-recursion", + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.9.2", + "socket2 0.5.10", + "thiserror 2.0.17", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = 
"hickory-resolver" +version = "0.25.0-alpha.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5762f69ebdbd4ddb2e975cd24690bf21fe6b2604039189c26acddbc427f12887" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.9.2", + "resolv-conf", + "smallvec", + "thiserror 2.0.17", + "tokio", + "tracing", +] + +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "home" +version = "0.5.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi", +] + +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http 1.4.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http 1.4.0", + "http-body 1.0.1", + "pin-project-lite", +] + +[[package]] +name = "http-range-header" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "humansize" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6cb51c9a029ddc91b07a787f1d86b53ccfa49b0e86688c946ebe8d3555685dd7" +dependencies = [ + "libm", +] + +[[package]] +name = "humantime" +version = 
"2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + +[[package]] +name = "hyper" +version = "0.14.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.5.10", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http 1.4.0", + "http-body 1.0.1", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.12", + "hyper 0.14.32", + "log", + "rustls 0.21.12", + "rustls-native-certs", + "tokio", + "tokio-rustls 0.24.1", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http 1.4.0", + "hyper 1.8.1", + "hyper-util", + "rustls 0.23.35", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.4", + "tower-service", + "webpki-roots", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper 0.14.32", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper 1.8.1", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.8.1", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "hyper 1.8.1", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2 0.6.1", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + 
"iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core 0.62.2", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "if-addrs" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "if-watch" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" +dependencies = [ + "async-io", + 
"core-foundation", + "fnv", + "futures", + "if-addrs", + "ipnet", + "log", + "netlink-packet-core 0.7.0", + "netlink-packet-route 0.17.1", + "netlink-proto", + "netlink-sys", + "rtnetlink", + "system-configuration 0.6.1", + "tokio", + "windows 0.53.0", +] + +[[package]] +name = "igd-next" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76b0d7d4541def58a37bf8efc559683f21edce7c82f0d866c93ac21f7e098f93" +dependencies = [ + "async-trait", + "attohttpc 0.24.1", + "bytes", + "futures", + "http 1.4.0", + "http-body-util", + "hyper 1.8.1", + "hyper-util", + "log", + "rand 0.8.5", + "tokio", + "url", + "xmltree", +] + +[[package]] +name = "igd-next" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "516893339c97f6011282d5825ac94fc1c7aad5cad26bdc2d0cee068c0bf97f97" +dependencies = [ + "async-trait", + "attohttpc 0.30.1", + "bytes", + "futures", + "http 1.4.0", + "http-body-util", + "hyper 1.8.1", + "hyper-util", + "log", + "rand 0.9.2", + "tokio", + "url", + "xmltree", +] + +[[package]] +name = "ignore" +version = "0.4.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3d782a365a015e0f5c04902246139249abf769125006fbe7649e2ee88169b4a" +dependencies = [ + "crossbeam-deque", + "globset", + "log", + "memchr", + "regex-automata", + "same-file", + "walkdir", + "winapi-util", +] + +[[package]] +name = "indenter" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5" + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +dependencies = [ + "equivalent", + "hashbrown 0.16.1", + "serde", + "serde_core", +] + +[[package]] +name = "inferno" +version = "0.11.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" +dependencies = [ + "ahash", + "indexmap 2.12.1", + "is-terminal", + "itoa", + "log", + "num-format", + "once_cell", + "quick-xml", + "rgb", + "str_stack", +] + +[[package]] +name = "inout" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" +dependencies = [ + "generic-array 0.14.7", +] + +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "integration-configs" +version = "0.1.0" +dependencies = [ + "blst", + "chain-leader", + "chain-network", + "chain-service", + "cryptarchia-engine", + "cryptarchia-sync", + "ed25519-dalek", + "groth16", + "hex", + "key-management-system", + "nomos-api", + "nomos-blend-message", + "nomos-blend-service", + "nomos-core", + "nomos-da-dispersal", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-da-sampling", + "nomos-da-verifier", + "nomos-executor", + "nomos-ledger", + "nomos-libp2p", + "nomos-node", + "nomos-sdp", + "nomos-time", 
+ "nomos-tracing", + "nomos-tracing-service", + "nomos-utils", + "nomos-wallet", + "num-bigint", + "rand 0.8.5", + "subnetworks-assignations", + "time", + "tracing", + "zksign", +] + +[[package]] +name = "ipconfig" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +dependencies = [ + "socket2 0.5.10", + "widestring", + "windows-sys 0.48.0", + "winreg", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "is-terminal" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jf-crhf" +version = "0.1.1" +source = "git+https://github.com/EspressoSystems/jellyfish?tag=jf-crhf-v0.1.1#8f3dce0bc2bd161b4648f6ac029dcc1a23aaf4c5" +dependencies = [ + "ark-serialize 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "jf-poseidon2" +version = "0.1.0" +source = "git+https://github.com/EspressoSystems/jellyfish.git?rev=dc166cf0f803c3e5067f9dfcc21e3dade986a447#dc166cf0f803c3e5067f9dfcc21e3dade986a447" +dependencies = [ + "ark-bn254 0.4.0", + "ark-ff 0.4.2", + "ark-std 0.4.0", + "displaydoc", + "hex", + "jf-crhf", + "lazy_static", + "nimue", + "zeroize", +] + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "json-patch" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ec9ad60d674508f3ca8f380a928cfe7b096bc729c4e2dbfe3852bc45da3ab30b" +dependencies = [ + "serde", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "jsonpath-rust" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06cc127b7c3d270be504572364f9569761a180b981919dd0d87693a7f5fb7829" +dependencies = [ + "pest", + "pest_derive", + "regex", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", + "sha2", + "signature", +] + +[[package]] +name = "k8s-openapi" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edc3606fd16aca7989db2f84bb25684d0270c6d6fa1dbcd0025af7b4130523a6" +dependencies = [ + "base64 0.21.7", + "bytes", + "chrono", + "serde", + "serde-value", + "serde_json", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "key-management-system" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "bytes", + "ed25519-dalek", + "groth16", + "key-management-system-macros", + "log", + "nomos-blend-message", + "nomos-utils", + "overwatch", + "poq", + "poseidon2", + "serde", + "thiserror 2.0.17", + "tokio", + "tracing", + "zeroize", + "zksign", +] + +[[package]] +name = "key-management-system-macros" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "kube" +version = "0.87.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3499c8d60c763246c7a213f51caac1e9033f46026904cb89bc8951ae8601f26e" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", + "kube-runtime", +] + +[[package]] +name = "kube-client" +version = "0.87.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "033450dfa0762130565890dadf2f8835faedf749376ca13345bcd8ecd6b5f29f" +dependencies = [ + "base64 0.21.7", + "bytes", + "chrono", + "either", + "futures", + "home", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper-rustls 0.24.2", + "hyper-timeout 0.4.1", + "jsonpath-rust", + "k8s-openapi", + "kube-core", + "pem", + "pin-project", + "rustls 0.21.12", + "rustls-pemfile", + "secrecy", + "serde", + "serde_json", + "serde_yaml", + "thiserror 1.0.69", + "tokio", + "tokio-util", + "tower 0.4.13", + "tower-http 0.4.4", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.87.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5bba93d054786eba7994d03ce522f368ef7d48c88a1826faa28478d85fb63ae" +dependencies = [ + "chrono", + "form_urlencoded", + "http 0.2.12", + "json-patch", + "k8s-openapi", + "once_cell", + "serde", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "kube-runtime" +version = "0.87.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d8893eb18fbf6bb6c80ef6ee7dd11ec32b1dc3c034c988ac1b3a84d46a230ae" +dependencies = [ + 
"ahash", + "async-trait", + "backoff", + "derivative", + "futures", + "hashbrown 0.14.5", + "json-patch", + "k8s-openapi", + "kube-client", + "parking_lot", + "pin-project", + "serde", + "serde_json", + "smallvec", + "thiserror 1.0.69", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "kzgrs" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-bls12-381", + "ark-ec 0.4.2", + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-poly-commit", + "ark-serialize 0.4.2", + "blake2", + "blst", + "num-bigint", + "num-traits", + "rand 0.8.5", + "thiserror 1.0.69", +] + +[[package]] +name = "kzgrs-backend" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-ff 0.4.2", + "ark-poly 0.4.2", + "ark-serialize 0.4.2", + "blake2", + "itertools 0.12.1", + "kzgrs", + "nomos-core", + "rand 0.8.5", + "serde", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" + +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] + +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "libp2p" +version = "0.55.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b72dc443ddd0254cb49a794ed6b6728400ee446a0f7ab4a07d0209ee98de20e9" +dependencies = [ + "bytes", + "either", + "futures", + "futures-timer", + "getrandom 0.2.16", + "libp2p-allow-block-list", + "libp2p-autonat", + "libp2p-connection-limits", + "libp2p-core", + "libp2p-dns", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-identity", + "libp2p-kad", + "libp2p-mdns", + "libp2p-metrics", + "libp2p-quic", + "libp2p-swarm", + "libp2p-tcp", + "libp2p-upnp", + "multiaddr", + "pin-project", + "rw-stream-sink", + "thiserror 2.0.17", +] + +[[package]] +name = "libp2p-allow-block-list" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38944b7cb981cc93f2f0fb411ff82d0e983bd226fbcc8d559639a3a73236568b" +dependencies = [ + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", +] + +[[package]] +name = "libp2p-autonat" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e297bfc6cabb70c6180707f8fa05661b77ecb9cb67e8e8e1c469301358fa21d0" +dependencies = [ + "async-trait", + "asynchronous-codec", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-request-response", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "rand_core 0.6.4", + "thiserror 2.0.17", + "tracing", + "web-time", +] + +[[package]] +name = "libp2p-connection-limits" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"efe9323175a17caa8a2ed4feaf8a548eeef5e0b72d03840a0eab4bcb0210ce1c" +dependencies = [ + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", +] + +[[package]] +name = "libp2p-core" +version = "0.43.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d28e2d2def7c344170f5c6450c0dbe3dfef655610dbfde2f6ac28a527abbe36" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-identity", + "multiaddr", + "multihash", + "multistream-select", + "parking_lot", + "pin-project", + "quick-protobuf", + "rand 0.8.5", + "rw-stream-sink", + "thiserror 2.0.17", + "tracing", + "unsigned-varint 0.8.0", + "web-time", +] + +[[package]] +name = "libp2p-dns" +version = "0.43.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b780a1150214155b0ed1cdf09fbd2e1b0442604f9146a431d1b21d23eef7bd7" +dependencies = [ + "async-trait", + "futures", + "hickory-resolver", + "libp2p-core", + "libp2p-identity", + "parking_lot", + "smallvec", + "tracing", +] + +[[package]] +name = "libp2p-gossipsub" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d558548fa3b5a8e9b66392f785921e363c57c05dcadfda4db0d41ae82d313e4a" +dependencies = [ + "async-channel", + "asynchronous-codec", + "base64 0.22.1", + "byteorder", + "bytes", + "either", + "fnv", + "futures", + "futures-timer", + "getrandom 0.2.16", + "hashlink", + "hex_fmt", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "prometheus-client", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "regex", + "sha2", + "tracing", + "web-time", +] + +[[package]] +name = "libp2p-identify" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8c06862544f02d05d62780ff590cc25a75f5c2b9df38ec7a370dcae8bb873cf" +dependencies = [ + "asynchronous-codec", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "smallvec", + "thiserror 2.0.17", + "tracing", +] + +[[package]] +name = "libp2p-identity" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3104e13b51e4711ff5738caa1fb54467c8604c2e94d607e27745bcf709068774" +dependencies = [ + "asn1_der", + "bs58", + "ed25519-dalek", + "hkdf", + "k256", + "multihash", + "quick-protobuf", + "rand 0.8.5", + "serde", + "sha2", + "thiserror 2.0.17", + "tracing", + "zeroize", +] + +[[package]] +name = "libp2p-kad" +version = "0.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bab0466a27ebe955bcbc27328fae5429c5b48c915fd6174931414149802ec23" +dependencies = [ + "asynchronous-codec", + "bytes", + "either", + "fnv", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "sha2", + "smallvec", + "thiserror 2.0.17", + "tracing", + "uint", + "web-time", +] + +[[package]] +name = "libp2p-mdns" +version = "0.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d0ba095e1175d797540e16b62e7576846b883cb5046d4159086837b36846cc" +dependencies = [ + "futures", + "hickory-proto", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand 0.8.5", + "smallvec", + "socket2 0.5.10", + "tokio", + "tracing", +] + +[[package]] +name = "libp2p-metrics" +version = "0.16.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ce58c64292e87af624fcb86465e7dd8342e46a388d71e8fec0ab37ee789630a" +dependencies = [ + "futures", + "libp2p-core", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-identity", + "libp2p-kad", + "libp2p-swarm", + "pin-project", + "prometheus-client", + "web-time", +] + +[[package]] +name = "libp2p-quic" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41432a159b00424a0abaa2c80d786cddff81055ac24aa127e0cf375f7858d880" +dependencies = [ + "futures", + "futures-timer", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-tls", + "quinn", + "rand 0.8.5", + "ring", + "rustls 0.23.35", + "socket2 0.5.10", + "thiserror 2.0.17", + "tokio", + "tracing", +] + +[[package]] +name = "libp2p-request-response" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "548fe44a80ff275d400f1b26b090d441d83ef73efabbeb6415f4ce37e5aed865" +dependencies = [ + "async-trait", + "futures", + "futures-bounded", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand 0.8.5", + "smallvec", + "tracing", +] + +[[package]] +name = "libp2p-stream" +version = "0.3.0-alpha" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "826716f1ee125895f1fb44911413cba023485b552ff96c7a2159bd037ac619bb" +dependencies = [ + "futures", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand 0.8.5", + "tracing", +] + +[[package]] +name = "libp2p-swarm" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "803399b4b6f68adb85e63ab573ac568154b193e9a640f03e0f2890eabbcb37f8" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm-derive", + "lru", + "multistream-select", + "once_cell", + "rand 0.8.5", + "smallvec", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "libp2p-swarm-derive" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "libp2p-tcp" +version = "0.43.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65346fb4d36035b23fec4e7be4c320436ba53537ce9b6be1d1db1f70c905cad0" +dependencies = [ + "futures", + "futures-timer", + "if-watch", + "libc", + "libp2p-core", + "socket2 0.5.10", + "tokio", + "tracing", +] + +[[package]] +name = "libp2p-tls" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96ff65a82e35375cbc31ebb99cacbbf28cb6c4fefe26bf13756ddcf708d40080" +dependencies = [ + "futures", + "futures-rustls", + "libp2p-core", + "libp2p-identity", + "rcgen", + "ring", + "rustls 0.23.35", + "rustls-webpki 0.103.8", + "thiserror 2.0.17", + "x509-parser", + "yasna", +] + +[[package]] +name = "libp2p-upnp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d457b9ecceb66e7199f049926fad447f1f17f040e8d29d690c086b4cab8ed14a" +dependencies = [ + "futures", + "futures-timer", + "igd-next 0.15.1", + "libp2p-core", + "libp2p-swarm", + "tokio", + "tracing", +] + +[[package]] +name = "libredox" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +dependencies = [ + "bitflags 
2.10.0", + "libc", +] + +[[package]] +name = "librocksdb-sys" +version = "0.17.3+10.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cef2a00ee60fe526157c9023edab23943fae1ce2ab6f4abb2a807c1746835de9" +dependencies = [ + "bindgen", + "bzip2-sys", + "cc", + "libc", + "libz-sys", +] + +[[package]] +name = "libz-sys" +version = "1.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "light-poseidon" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47a1ccadd0bb5a32c196da536fd72c59183de24a055f6bf0513bf845fefab862" +dependencies = [ + "ark-bn254 0.5.0", + "ark-ff 0.5.0", + "num-bigint", + "thiserror 1.0.69", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" + +[[package]] +name = "loki-api" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdc38a304f59a03e6efa3876766a48c70a766a93f88341c3fff4212834b8e327" +dependencies = [ + "prost", + "prost-types", +] + +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.5", +] + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "match-lookup" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "memalloc" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "df39d232f5c40b0891c10216992c2f250c054105cb1e56f0fc9032db6203ecc1" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "memmap2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "744133e4a0e0a658e1374cf3bf8e415c4052a15a111acd372764c55b4177d490" +dependencies = [ + "libc", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mime_guess" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "mio" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "mmr" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-ff 0.4.2", + "groth16", + "poseidon2", + "rpds", + "serde", +] + +[[package]] +name = "moka" +version = "0.12.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "equivalent", + "parking_lot", + "portable-atomic", + "rustc_version", + "smallvec", + "tagptr", + "uuid", +] + +[[package]] +name = "multiaddr" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + "libp2p-identity", + "multibase", + "multihash", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.8.0", + "url", +] + +[[package]] +name = "multibase" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8694bb4835f452b0e3bb06dbebb1d6fc5385b6ca1caf2e55fd165c042390ec77" +dependencies = [ + "base-x", + "base256emoji", + "data-encoding", + "data-encoding-macro", +] + +[[package]] +name = "multihash" +version = "0.19.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" +dependencies = [ + "core2", + "unsigned-varint 0.8.0", +] + +[[package]] +name = "multistream-select" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" +dependencies = [ + 
"bytes", + "futures", + "log", + "pin-project", + "smallvec", + "unsigned-varint 0.7.2", +] + +[[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "natpmp" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77366fa8ce34e2e1322dd97da65f11a62f451bd3daae8be6993c00800f61dd07" +dependencies = [ + "async-trait", + "cc", + "netdev", + "tokio", +] + +[[package]] +name = "netdev" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f901362e84cd407be6f8cd9d3a46bccf09136b095792785401ea7d283c79b91d" +dependencies = [ + "dlopen2 0.5.0", + "ipnet", + "libc", + "netlink-packet-core 0.7.0", + "netlink-packet-route 0.17.1", + "netlink-sys", + "once_cell", + "system-configuration 0.6.1", + "windows-sys 0.52.0", +] + +[[package]] +name = "netlink-packet-core" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e5cf0b54effda4b91615c40ff0fd12d0d4c9a6e0f5116874f03941792ff535a" +dependencies = [ + "anyhow", + "byteorder", + "libc", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-core" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" +dependencies = [ + "anyhow", + "byteorder", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-route" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea993e32c77d87f01236c38f572ecb6c311d592e56a06262a007fd2a6e31253c" +dependencies = [ + "anyhow", + "bitflags 1.3.2", + "byteorder", + "libc", + "netlink-packet-core 0.5.0", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-route" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" +dependencies = [ + "anyhow", + "bitflags 1.3.2", + "byteorder", + "libc", + "netlink-packet-core 0.7.0", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-utils" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34" +dependencies = [ + "anyhow", + "byteorder", + "paste", + "thiserror 1.0.69", +] + +[[package]] +name = "netlink-proto" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72452e012c2f8d612410d89eea01e2d9b56205274abb35d53f60200b2ec41d60" +dependencies = [ + "bytes", + "futures", + "log", + "netlink-packet-core 0.7.0", + "netlink-sys", + "thiserror 2.0.17", +] + +[[package]] +name = "netlink-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" +dependencies = [ + "bytes", + "futures", + "libc", + "log", + "tokio", +] + +[[package]] +name = "nimue" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0dc7d3b2b7bd112c0cecf7d6f4f16a174ee7a98e27615b1d08256d0176588f2" +dependencies = [ + "ark-ec 0.4.2", + "ark-ff 0.4.2", + 
"ark-serialize 0.4.2", + "digest", + "generic-array 0.14.7", + "hex", + "keccak", + "log", + "rand 0.8.5", + "zeroize", +] + +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", +] + +[[package]] +name = "nix" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" +dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "no-std-compat" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nom" +version = "8.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df9761775871bdef83bee530e60050f7e54b1105350d6884eb0fb4f46c2f9405" +dependencies = [ + "memchr", +] + +[[package]] +name = "nomos-api" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "broadcast-service", + "bytes", + "chain-service", + "futures", + "kzgrs-backend", + "nomos-core", + "nomos-da-dispersal", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-da-sampling", + "nomos-da-verifier", + "nomos-libp2p", + "nomos-network", + "nomos-sdp", + "nomos-storage", + "overwatch", + "serde", + "serde_json", + "subnetworks-assignations", + "tokio", + "tokio-stream", + "tracing", + "tx-service", + "utoipa", + "utoipa-swagger-ui", +] + +[[package]] +name = "nomos-blend-message" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "blake2", + "derivative", + "ed25519-dalek", + "generic-array 1.3.5", + "groth16", + "itertools 0.14.0", + "nomos-core", + "nomos-utils", + "num-bigint", + "poq", + "serde", + "serde_with", + "thiserror 1.0.69", + "tracing", + "x25519-dalek", +] + +[[package]] +name = "nomos-blend-network" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "either", + "futures", + "futures-timer", + "libp2p", + "nomos-blend-message", + "nomos-blend-scheduling", + "nomos-core", + "nomos-libp2p", + "tracing", +] + +[[package]] +name = "nomos-blend-scheduling" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "derivative", + "fork_stream", + "futures", + "hex", + "multiaddr", + "nomos-blend-message", + "nomos-core", + "rand 0.8.5", + "serde", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "nomos-blend-service" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "broadcast-service", + "chain-service", + "cryptarchia-engine", + "fork_stream", + "futures", + "groth16", + 
"key-management-system", + "libp2p", + "libp2p-stream", + "nomos-blend-message", + "nomos-blend-network", + "nomos-blend-scheduling", + "nomos-core", + "nomos-ledger", + "nomos-libp2p", + "nomos-network", + "nomos-time", + "nomos-utils", + "overwatch", + "poq", + "rand 0.8.5", + "rs-merkle-tree", + "serde", + "serde_with", + "services-utils", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "zksign", +] + +[[package]] +name = "nomos-core" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-ff 0.4.2", + "async-trait", + "bincode", + "blake2", + "bytes", + "const-hex", + "cryptarchia-engine", + "ed25519", + "ed25519-dalek", + "futures", + "generic-array 1.3.5", + "groth16", + "hex", + "multiaddr", + "nom 8.0.0", + "num-bigint", + "pol", + "poseidon2", + "serde", + "serde_with", + "strum", + "thiserror 1.0.69", + "tracing", + "zksign", +] + +[[package]] +name = "nomos-da-dispersal" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "broadcast-service", + "ed25519", + "ed25519-dalek", + "futures", + "kzgrs-backend", + "nomos-core", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-tracing", + "nomos-utils", + "overwatch", + "serde", + "serde_with", + "services-utils", + "subnetworks-assignations", + "thiserror 2.0.17", + "tokio", + "tokio-stream", + "tracing", + "zksign", +] + +[[package]] +name = "nomos-da-messages" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "blake2", + "futures", + "kzgrs-backend", + "nomos-core", + "serde", + "tokio", +] + +[[package]] +name = "nomos-da-network-core" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "cached", + "either", + "fixed", + "futures", + "indexmap 2.12.1", + "kzgrs-backend", + "libp2p", + "libp2p-stream", + "log", + "nomos-core", + "nomos-da-messages", + "nomos-utils", + "rand 0.9.2", + "serde", + "serde_with", + "subnetworks-assignations", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "tracing-subscriber 0.3.20", +] + +[[package]] +name = "nomos-da-network-service" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "arc-swap", + "async-trait", + "bitvec", + "blake2", + "broadcast-service", + "common-http-client", + "futures", + "kzgrs-backend", + "libp2p", + "libp2p-identity", + "log", + "multiaddr", + "nomos-core", + "nomos-da-messages", + "nomos-da-network-core", + "nomos-libp2p", + "nomos-sdp", + "nomos-storage", + "nomos-tracing", + "nomos-utils", + "overwatch", + "rand 0.8.5", + "serde", + "services-utils", + "subnetworks-assignations", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "url", +] + +[[package]] +name = "nomos-da-sampling" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "bytes", + "futures", + "hex", + "kzgrs-backend", + "libp2p-identity", + "nomos-core", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-storage", + "nomos-tracing", + "overwatch", + "rand 0.8.5", + "serde", + "services-utils", + 
"subnetworks-assignations", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "tx-service", +] + +[[package]] +name = "nomos-da-verifier" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "futures", + "kzgrs-backend", + "libp2p", + "nomos-core", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-storage", + "nomos-tracing", + "nomos-utils", + "overwatch", + "serde", + "serde_with", + "services-utils", + "subnetworks-assignations", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "tx-service", +] + +[[package]] +name = "nomos-executor" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "axum", + "broadcast-service", + "clap", + "color-eyre", + "futures", + "kzgrs-backend", + "nomos-api", + "nomos-core", + "nomos-da-dispersal", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-da-sampling", + "nomos-da-verifier", + "nomos-http-api-common", + "nomos-libp2p", + "nomos-network", + "nomos-node", + "nomos-sdp", + "nomos-storage", + "nomos-time", + "overwatch", + "serde", + "serde_yaml", + "services-utils", + "subnetworks-assignations", + "tokio", + "tower 0.4.13", + "tower-http 0.5.2", + "tracing", + "tx-service", + "utoipa", + "utoipa-swagger-ui", +] + +[[package]] +name = "nomos-http-api-common" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "axum", + "governor", + "nomos-core", + "pprof", + "serde", + "serde_with", + "tokio", + "tower_governor", + "tracing", +] + +[[package]] +name = "nomos-ledger" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "cryptarchia-engine", + "ed25519", + "groth16", + "mmr", + "nomos-core", + "nomos-utils", + "num-bigint", + "rand 0.8.5", + "rpds", + "serde", + "thiserror 1.0.69", + "utxotree", + "zksign", +] + +[[package]] +name = "nomos-libp2p" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "backon", + "blake2", + "cryptarchia-sync", + "default-net", + "either", + "futures", + "hex", + "igd-next 0.16.2", + "libp2p", + "multiaddr", + "natpmp", + "netdev", + "nomos-utils", + "num_enum", + "rand 0.8.5", + "serde", + "serde_with", + "thiserror 1.0.69", + "tokio", + "tracing", + "zerocopy", +] + +[[package]] +name = "nomos-network" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "cryptarchia-sync", + "futures", + "nomos-core", + "nomos-libp2p", + "overwatch", + "rand 0.8.5", + "rand_chacha 0.3.1", + "serde", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "nomos-node" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "axum", + "broadcast-service", + "chain-leader", + "chain-network", + "chain-service", + "clap", + "color-eyre", + "derivative", + "futures", + "groth16", + "hex", + "http 1.4.0", + "key-management-system", + "kzgrs-backend", + "nomos-api", + "nomos-blend-message", + 
"nomos-blend-scheduling", + "nomos-blend-service", + "nomos-core", + "nomos-da-messages", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-da-sampling", + "nomos-da-verifier", + "nomos-http-api-common", + "nomos-libp2p", + "nomos-network", + "nomos-sdp", + "nomos-storage", + "nomos-system-sig", + "nomos-time", + "nomos-tracing", + "nomos-tracing-service", + "nomos-utils", + "nomos-wallet", + "num-bigint", + "overwatch", + "pol", + "poq", + "serde", + "serde_json", + "serde_with", + "serde_yaml", + "services-utils", + "subnetworks-assignations", + "time", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-http 0.5.2", + "tracing", + "tx-service", + "utoipa", + "utoipa-swagger-ui", + "zksign", +] + +[[package]] +name = "nomos-sdp" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "ed25519-dalek", + "futures", + "nomos-core", + "overwatch", + "serde", + "thiserror 2.0.17", + "tokio", + "tokio-stream", + "tracing", + "tx-service", + "zksign", +] + +[[package]] +name = "nomos-storage" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "bytes", + "cryptarchia-engine", + "futures", + "libp2p-identity", + "multiaddr", + "nomos-core", + "overwatch", + "rocksdb", + "serde", + "thiserror 1.0.69", + "tokio", + "tracing", +] + +[[package]] +name = "nomos-system-sig" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-ctrlc", + "async-trait", + "overwatch", + "tracing", +] + +[[package]] +name = "nomos-time" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "cfg_eval", + "cryptarchia-engine", + "futures", + "log", + "nomos-utils", + "overwatch", + "serde", + "serde_with", + "sntpc", + "thiserror 2.0.17", + "time", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "nomos-tracing" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "opentelemetry", + "opentelemetry-http", + "opentelemetry-otlp", + "opentelemetry-semantic-conventions", + "opentelemetry_sdk", + "rand 0.8.5", + "reqwest", + "serde", + "tokio", + "tracing", + "tracing-appender", + "tracing-gelf", + "tracing-loki", + "tracing-opentelemetry", + "tracing-subscriber 0.3.20", + "url", +] + +[[package]] +name = "nomos-tracing-service" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "nomos-tracing", + "overwatch", + "serde", + "tracing", + "tracing-appender", + "tracing-subscriber 0.3.20", +] + +[[package]] +name = "nomos-utils" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "blake2", + "cipher", + "const-hex", + "humantime", + "overwatch", + "rand 0.8.5", + "serde", + "serde_with", + "time", +] + +[[package]] +name = "nomos-wallet" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "bytes", + 
"chain-service", + "futures", + "groth16", + "hex", + "key-management-system", + "nomos-core", + "nomos-ledger", + "nomos-storage", + "overwatch", + "serde", + "services-utils", + "thiserror 1.0.69", + "tokio", + "tracing", + "wallet", + "zksign", +] + +[[package]] +name = "nonempty" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" + +[[package]] +name = "nonzero_ext" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-format" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" +dependencies = [ + "arrayvec", + "itoa", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "num_enum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +dependencies = [ + "num_enum_derive", + "rustversion", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "objc2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c2599ce0ec54857b29ce62166b0ed9b4f6f1a70ccc9a71165b6154caca8c05" +dependencies = [ + "objc2-encode", +] + +[[package]] +name = "objc2-encode" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33" + +[[package]] +name = "object" +version = "0.37.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" +dependencies = [ + "memchr", +] + +[[package]] +name = 
"oid-registry" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f40cff3dde1b6087cc5d5f5d4d65712f34016a03ed60e9c08dcc392736b5b7" +dependencies = [ + "asn1-rs", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + +[[package]] +name = "openssl" +version = "0.10.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = "openssl-sys" +version = "0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "opentelemetry" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "570074cc999d1a58184080966e5bd3bf3a9a4af650c3b05047c2621e7405cd17" +dependencies = [ + "futures-core", + "futures-sink", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror 1.0.69", +] + +[[package]] +name = "opentelemetry-http" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6351496aeaa49d7c267fb480678d85d1cd30c5edb20b497c48c56f62a8c14b99" +dependencies = [ + "async-trait", + "bytes", + "http 1.4.0", + "opentelemetry", + "reqwest", +] + +[[package]] +name = "opentelemetry-otlp" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29e1f9c8b032d4f635c730c0efcf731d5e2530ea13fa8bef7939ddc8420696bd" +dependencies = [ + "async-trait", + "futures-core", + "http 1.4.0", + "opentelemetry", + "opentelemetry-http", + "opentelemetry-proto", + "opentelemetry_sdk", + "prost", + "thiserror 1.0.69", + "tokio", + "tonic", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9d3968ce3aefdcca5c27e3c4ea4391b37547726a70893aab52d3de95d5f8b34" +dependencies = [ + "opentelemetry", + "opentelemetry_sdk", + "prost", + "tonic", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db945c1eaea8ac6a9677185357480d215bb6999faa9f691d0c4d4d641eab7a09" + +[[package]] +name = "opentelemetry_sdk" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2c627d9f4c9cdc1f21a29ee4bfbd6028fcb8bcf2a857b43f3abdf72c9c862f3" +dependencies = [ + "async-trait", + "futures-channel", + 
"futures-executor", + "futures-util", + "glob", + "once_cell", + "opentelemetry", + "percent-encoding", + "rand 0.8.5", + "thiserror 1.0.69", + "tokio", + "tokio-stream", +] + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "overwatch" +version = "0.1.0" +source = "git+https://github.com/logos-co/Overwatch?rev=f5a9902#f5a99022f389d65adbd55e51f1e3f9eead62432a" +dependencies = [ + "async-trait", + "futures", + "overwatch-derive", + "thiserror 2.0.17", + "tokio", + "tokio-stream", + "tokio-util", + "tracing", +] + +[[package]] +name = "overwatch-derive" +version = "0.1.0" +source = "git+https://github.com/logos-co/Overwatch?rev=f5a9902#f5a99022f389d65adbd55e51f1e3f9eead62432a" +dependencies = [ + "convert_case", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "owo-colors" +version = "4.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "parse-zoneinfo" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f2a05b18d44e2957b88f96ba460715e295bc1d7510468a2f3d3b44535d26c24" +dependencies = [ + "regex", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pem" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" +dependencies = [ + "base64 0.22.1", + "serde_core", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pest" +version = "2.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbcfd20a6d4eeba40179f05735784ad32bdaef05ce8e8af05f180d45bb3e7e22" +dependencies = [ + "memchr", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f72981ade67b1ca6adc26ec221be9f463f2b5839c7508998daa17c23d94d7f" +dependencies = [ + "pest", + "pest_generator", +] + 
+[[package]] +name = "pest_generator" +version = "2.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dee9efd8cdb50d719a80088b76f81aec7c41ed6d522ee750178f83883d271625" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "pest_meta" +version = "2.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf1d70880e76bdc13ba52eafa6239ce793d85c8e43896507e43dd8984ff05b82" +dependencies = [ + "pest", + "sha2", +] + +[[package]] +name = "phf" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_codegen" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" +dependencies = [ + "phf_generator", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +dependencies = [ + "phf_shared", + "rand 0.8.5", +] + +[[package]] +name = "phf_shared" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "pol" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "circuits-prover", + "circuits-utils", + "groth16", + "num-bigint", + "num-traits", + "serde", + "serde_json", + "thiserror 2.0.17", + "witness-generator", +] + +[[package]] +name = "polling" +version = "3.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi", + "pin-project-lite", + "rustix 1.1.2", + "windows-sys 0.61.2", +] + +[[package]] +name = "poq" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "circuits-prover", + "circuits-utils", + "groth16", + "num-bigint", + "pol", + "serde", + "serde_json", + "thiserror 2.0.17", + "witness-generator", +] + +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "poseidon2" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-bn254 0.4.0", + "ark-ff 0.4.2", + "jf-poseidon2", + "num-bigint", +] + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "pprof" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38a01da47675efa7673b032bf8efd8214f1917d89685e07e395ab125ea42b187" +dependencies = [ + "aligned-vec", + "backtrace", + "cfg-if", + "criterion", + "findshlibs", + "inferno", + "libc", + "log", + "nix 0.26.4", + "once_cell", + "protobuf", + "protobuf-codegen", + "smallvec", + "spin", + "symbolic-demangle", + "tempfile", + "thiserror 2.0.17", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "proc-macro-crate" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + 
"version_check", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "proc-macro2" +version = "1.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prometheus-client" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" +dependencies = [ + "dtoa", + "itoa", + "parking_lot", + "prometheus-client-derive-encode", +] + +[[package]] +name = "prometheus-client-derive-encode" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "prometheus-http-query" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcebfa99f03ae51220778316b37d24981e36322c82c24848f48c5bd0f64cbdb" +dependencies = [ + "enum-as-inner", + "mime", + "reqwest", + "serde", + "time", + "url", +] + +[[package]] +name = "proptest" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" +dependencies = [ + "bitflags 2.10.0", + "num-traits", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift", + "unarray", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost", +] + +[[package]] +name = "protobuf" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d65a1d4ddae7d8b5de68153b48f6aa3bba8cb002b243dbdbc55a5afbc98f99f4" +dependencies = [ + "once_cell", + "protobuf-support", + "thiserror 1.0.69", +] + +[[package]] +name = "protobuf-codegen" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d3976825c0014bbd2f3b34f0001876604fe87e0c86cd8fa54251530f1544ace" +dependencies = [ + "anyhow", + "once_cell", + "protobuf", + "protobuf-parse", + "regex", + "tempfile", + "thiserror 1.0.69", +] + +[[package]] +name = "protobuf-parse" +version = "3.7.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4aeaa1f2460f1d348eeaeed86aea999ce98c1bded6f089ff8514c9d9dbdc973" +dependencies = [ + "anyhow", + "indexmap 2.12.1", + "log", + "protobuf", + "protobuf-support", + "tempfile", + "thiserror 1.0.69", + "which", +] + +[[package]] +name = "protobuf-support" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e36c2f31e0a47f9280fb347ef5e461ffcd2c52dd520d8e216b52f93b0b0d7d6" +dependencies = [ + "thiserror 1.0.69", +] + +[[package]] +name = "quanta" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3ab5a9d756f0d97bdc89019bd2e4ea098cf9cde50ee7564dde6b81ccc8f06c7" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + +[[package]] +name = "quick-protobuf" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +dependencies = [ + "byteorder", +] + +[[package]] +name = "quick-protobuf-codec" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" +dependencies = [ + "asynchronous-codec", + "bytes", + "quick-protobuf", + "thiserror 1.0.69", + "unsigned-varint 0.8.0", +] + +[[package]] +name = "quick-xml" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd" +dependencies = [ + "memchr", +] + +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "futures-io", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls 0.23.35", + "socket2 0.6.1", + "thiserror 2.0.17", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls 0.23.35", + "rustls-pki-types", + "slab", + "thiserror 2.0.17", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.6.1", + "tracing", + "windows-sys 0.60.2", +] + +[[package]] +name = "quote" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.4", +] + +[[package]] +name = "rand_xorshift" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" +dependencies = [ + "rand_core 0.9.3", +] + +[[package]] +name = "raw-cpuid" +version = "11.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" +dependencies = [ + "bitflags 2.10.0", +] + +[[package]] +name = "rayon" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "rcgen" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" +dependencies = [ + "pem", + "ring", + "rustls-pki-types", + "time", + "yasna", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags 2.10.0", +] + +[[package]] +name = "redox_users" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 2.0.17", +] + +[[package]] +name = "regex" +version = "1.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", 
+] + +[[package]] +name = "regex-automata" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" + +[[package]] +name = "reqwest" +version = "0.12.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.8.1", + "hyper-rustls 0.27.7", + "hyper-tls", + "hyper-util", + "js-sys", + "log", + "native-tls", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls 0.23.35", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-native-tls", + "tokio-rustls 0.26.4", + "tokio-util", + "tower 0.5.2", + "tower-http 0.6.7", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", + "web-sys", + "webpki-roots", +] + +[[package]] +name = "resolv-conf" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "rgb" +version = "0.8.52" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c6a884d2998352bb4daf0183589aec883f16a6da1f4dde84d8e2e9a5409a1ce" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rocksdb" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddb7af00d2b17dbd07d82c0063e25411959748ff03e8d4f96134c2ff41fce34f" +dependencies = [ + "libc", + "librocksdb-sys", +] + +[[package]] +name = "rpds" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e75f485e819d4d3015e6c0d55d02a4fd3db47c1993d9e603e0361fba2bffb34" +dependencies = [ + "archery", + "serde", +] + +[[package]] +name = "rs-merkle-tree" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7a3ef170810c387d31b64c0b59734abb0839dac2a8d137909e271bfdec9b1e0" +dependencies = [ + "ark-bn254 0.5.0", + "ark-ff 0.5.0", + "byteorder", + "futures", + "light-poseidon", + "quote", + "rand 0.9.2", + "syn 1.0.109", + "thiserror 2.0.17", + "tiny-keccak", + "tokio", +] + +[[package]] +name = "rtnetlink" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" +dependencies = [ + "futures", + "log", + "netlink-packet-core 0.7.0", + "netlink-packet-route 0.17.1", + "netlink-packet-utils", + 
"netlink-proto", + "netlink-sys", + "nix 0.26.4", + "thiserror 1.0.69", + "tokio", +] + +[[package]] +name = "rust-embed" +version = "8.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "947d7f3fad52b283d261c4c99a084937e2fe492248cb9a68a8435a861b8798ca" +dependencies = [ + "rust-embed-impl", + "rust-embed-utils", + "walkdir", +] + +[[package]] +name = "rust-embed-impl" +version = "8.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fa2c8c9e8711e10f9c4fd2d64317ef13feaab820a4c51541f1a8c8e2e851ab2" +dependencies = [ + "proc-macro2", + "quote", + "rust-embed-utils", + "syn 2.0.111", + "walkdir", +] + +[[package]] +name = "rust-embed-utils" +version = "8.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b161f275cb337fe0a44d924a5f4df0ed69c2c39519858f931ce61c779d3475" +dependencies = [ + "sha2", + "walkdir", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom 7.1.3", +] + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags 2.10.0", + "errno", + "libc", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustix" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +dependencies = [ + "bitflags 2.10.0", + "errno", + "libc", + "linux-raw-sys 0.11.0", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + +[[package]] +name = "rustls" +version = "0.23.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +dependencies = [ + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki 0.103.8", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 
0.21.7", +] + +[[package]] +name = "rustls-pki-types" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "rw-stream-sink" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" +dependencies = [ + "futures", + "pin-project", + "static_assertions", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scc" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46e6f046b7fef48e2660c57ed794263155d713de679057f2d0c169bfc6e756cc" +dependencies = [ + "sdd", +] + +[[package]] +name = "schannel" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "sdd" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca" + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array 0.14.7", + "pkcs8", + "subtle", + "zeroize", +] + +[[package]] +name = "secrecy" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" +dependencies = [ + "serde", + "zeroize", +] + +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" 
+dependencies = [ + "bitflags 2.10.0", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "serde_json" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", + "serde_core", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" +dependencies = [ + "itoa", + "serde", + "serde_core", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10574371d41b0d9b2cff89418eda27da52bcaff2cc8741db26382a77c29131f1" +dependencies = [ + "base64 0.22.1", + "chrono", + "hex", + "serde_core", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08a72d8216842fdd57820dc78d840bef99248e35fb2554ff923319e60f2d686b" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap 2.12.1", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "serial_test" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" 
+dependencies = [ + "dashmap", + "futures", + "lazy_static", + "log", + "parking_lot", + "serial_test_derive 2.0.0", +] + +[[package]] +name = "serial_test" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +dependencies = [ + "futures", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive 3.2.0", +] + +[[package]] +name = "serial_test_derive" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "serial_test_derive" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "services-utils" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "futures", + "log", + "overwatch", + "serde", + "serde_json", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" +dependencies = [ + "libc", +] + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest", + "rand_core 0.6.4", +] + +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "slug" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "882a80f72ee45de3cc9a5afeb2da0331d58df69e4e7d8eeb5d3c7784ae67e724" +dependencies = [ + "deunicode", + "wasm-bindgen", +] + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "snap" 
+version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" + +[[package]] +name = "sntpc" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78f778a0f82b3cf5d75f858eceee38e84d5292f1d03415e88cc4ec45ca6ba8a2" +dependencies = [ + "cfg-if", + "tokio", +] + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spinning_top" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "str_stack" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "subnetworks-assignations" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "counter", + "libp2p-identity", + "nomos-core", + "nomos-utils", + "rand 0.8.5", + "serde", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "symbolic-common" +version = "12.17.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3d8046c5674ab857104bc4559d505f4809b8060d57806e45d49737c97afeb60" +dependencies = [ + "debugid", + "memmap2", + "stable_deref_trait", + "uuid", +] + +[[package]] +name = "symbolic-demangle" +version = "12.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1accb6e5c4b0f682de907623912e616b44be1c9e725775155546669dbff720ec" +dependencies = [ + "rustc-demangle", + "symbolic-common", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.10.0", + "core-foundation", + "system-configuration-sys 0.6.0", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +dependencies = [ + "fastrand", + "getrandom 0.3.4", + "once_cell", + "rustix 1.1.2", + "windows-sys 0.61.2", +] + +[[package]] +name = "tera" +version = "1.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e8004bca281f2d32df3bacd59bc67b312cb4c70cea46cbd79dbe8ac5ed206722" +dependencies = [ + "chrono", + "chrono-tz", + "globwalk", + "humansize", + "lazy_static", + "percent-encoding", + "pest", + "pest_derive", + "rand 0.8.5", + "regex", + "serde", + "serde_json", + "slug", + "unicode-segmentation", +] + +[[package]] +name = "testing-framework-core" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "broadcast-service", + "chain-service", + "common-http-client", + "futures", + "groth16", + "hex", + "integration-configs", + "key-management-system", + "kzgrs-backend", + "nomos-core", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-executor", + "nomos-http-api-common", + "nomos-libp2p", + "nomos-network", + "nomos-node", + "nomos-tracing", + "nomos-tracing-service", + "nomos-utils", + "prometheus-http-query", + "rand 0.8.5", + "reqwest", + "serde", + "serde_json", + "serde_with", + "serde_yaml", + "tempfile", + "thiserror 2.0.17", + "tokio", + "tracing", + "tx-service", +] + +[[package]] +name = "testing-framework-runner-compose" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "axum", + "cfgsync", + "groth16", + "nomos-core", + "nomos-ledger", + "nomos-tracing-service", + "reqwest", + "serde", + "tempfile", + "tera", + "testing-framework-core", + "tests", + "thiserror 2.0.17", + "tokio", + "tracing", + "url", + "uuid", + "zksign", +] + +[[package]] +name = "testing-framework-runner-k8s" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "k8s-openapi", + "kube", + "reqwest", + "serde", + "serde_yaml", + "tempfile", + "testing-framework-core", + "thiserror 2.0.17", + "tokio", + "tracing", + "url", + "uuid", +] + +[[package]] +name = "testing-framework-runner-local" +version = "0.1.0" +dependencies = [ + "async-trait", + "testing-framework-core", + "thiserror 2.0.17", +] + +[[package]] +name = "testing-framework-workflows" +version = "0.1.0" +dependencies = [ + "async-trait", + "ed25519-dalek", + "executor-http-client", + "integration-configs", + "nomos-core", + "rand 0.8.5", + "testing-framework-core", + "thiserror 2.0.17", + "tokio", + "tracing", + "zksign", +] + +[[package]] +name = "tests" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "blst", + "broadcast-service", + "chain-leader", + "chain-network", + "chain-service", + "common-http-client", + "cryptarchia-engine", + "cryptarchia-sync", + "ed25519-dalek", + "executor-http-client", + "futures", + "futures-util", + "groth16", + "hex", + "key-management-system", + "kzgrs-backend", + "nomos-api", + "nomos-blend-message", + "nomos-blend-service", + "nomos-core", + "nomos-da-dispersal", + "nomos-da-network-core", + "nomos-da-network-service", + "nomos-da-sampling", + "nomos-da-verifier", + "nomos-executor", + "nomos-http-api-common", + "nomos-ledger", + "nomos-libp2p", + "nomos-network", + "nomos-node", + "nomos-sdp", + "nomos-time", + "nomos-tracing", + "nomos-tracing-service", + "nomos-utils", + "nomos-wallet", + "num-bigint", + "rand 0.8.5", + "reqwest", + "serde_json", + "serde_yaml", + "serial_test 3.2.0", + "subnetworks-assignations", + "tempfile", + "time", + "tokio", + "tracing", + "tx-service", + "zksign", +] + +[[package]] +name = "tests-workflows" +version = "0.1.0" +dependencies = [ + "anyhow", + "serial_test 2.0.0", + "testing-framework-core", + "testing-framework-runner-compose", + "testing-framework-runner-k8s", + 
"testing-framework-runner-local", + "testing-framework-workflows", + "tokio", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +dependencies = [ + "thiserror-impl 2.0.17", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "time" +version = "0.3.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" + +[[package]] +name = "time-macros" +version = "0.2.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "tinyvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = 
"tokio" +version = "1.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2 0.6.1", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-io-timeout" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bd86198d9ee903fedd2f9a2e72014287c0d9167e4ae43b5853007205dda1b76" +dependencies = [ + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-macros" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.12", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls 0.23.35", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", + "tokio-util", +] + +[[package]] +name = "tokio-util" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "slab", + "tokio", +] + +[[package]] +name = "toml_datetime" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_edit" +version = "0.23.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +dependencies = [ + "indexmap 2.12.1", + "toml_datetime", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +dependencies = [ + "winnow", +] + +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.8.1", + "hyper-timeout 0.5.2", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "socket2 0.5.10", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-layer", 
+ "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +dependencies = [ + "base64 0.21.7", + "bitflags 2.10.0", + "bytes", + "futures-core", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "http-range-header", + "mime", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "bitflags 2.10.0", + "bytes", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf146f99d442e8e68e585f5d798ccd3cad9a7835b917e09728880a862706456" +dependencies = [ + "bitflags 2.10.0", + "bytes", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "iri-string", + "pin-project-lite", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tower_governor" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3790eac6ad3fb8d9d96c2b040ae06e2517aa24b067545d1078b96ae72f7bb9a7" +dependencies = [ + "axum", + "forwarded-header-value", + "governor", + "http 1.4.0", + "pin-project", + "thiserror 1.0.69", + "tower 0.4.13", + "tracing", +] + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-appender" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +dependencies = [ + "crossbeam-channel", + "thiserror 1.0.69", + "time", + "tracing-subscriber 0.3.20", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project", + "tracing", +] + +[[package]] +name = "tracing-gelf" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c0170f1bf67b749d4377c2da1d99d6e722600051ee53870cfb6f618611e29e" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "hostname", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tokio-util", + "tracing-core", + "tracing-futures", + "tracing-subscriber 0.3.20", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-loki" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3beec919fbdf99d719de8eda6adae3281f8a5b71ae40431f44dc7423053d34" +dependencies = [ + "loki-api", + "reqwest", + "serde", + "serde_json", + "snap", + "tokio", + "tokio-stream", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", + "tracing-subscriber 0.3.20", + "url", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc58af5d3f6c5811462cabb3289aec0093f7338e367e5a33d28c0433b3c7360b" +dependencies = [ + "js-sys", + "once_cell", + "opentelemetry", + "opentelemetry_sdk", + "smallvec", + "tracing", + "tracing-core", + "tracing-subscriber 0.3.20", + "web-time", +] + +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +dependencies = [ + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "triomphe" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd69c5aa8f924c7519d6372789a74eac5b94fb0f8fcf0d4a97eb0bfc3e785f39" + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "tx-service" +version = "0.1.0" +source = 
"git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "async-trait", + "futures", + "nomos-core", + "nomos-network", + "nomos-storage", + "overwatch", + "rand 0.8.5", + "serde", + "serde_json", + "services-utils", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tracing", + "utoipa", +] + +[[package]] +name = "typenum" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + +[[package]] +name = "uint" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + +[[package]] +name = "unsigned-varint" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" + +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utoipa" +version = "4.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5afb1a60e207dca502682537fefcfd9921e71d0b83e9576060f09abc6efab23" +dependencies = [ + "indexmap 2.12.1", + "serde", + "serde_json", + "utoipa-gen", +] + +[[package]] +name = "utoipa-gen" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"20c24e8ab68ff9ee746aad22d39b5535601e6416d1b0feeabf78be986a5c4392" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "utoipa-swagger-ui" +version = "7.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "943e0ff606c6d57d410fd5663a4d7c074ab2c5f14ab903b9514565e59fa1189e" +dependencies = [ + "axum", + "mime_guess", + "regex", + "reqwest", + "rust-embed", + "serde", + "serde_json", + "utoipa", + "zip", +] + +[[package]] +name = "utxotree" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-ff 0.4.2", + "groth16", + "num-bigint", + "poseidon2", + "rpds", + "serde", + "thiserror 1.0.69", +] + +[[package]] +name = "uuid" +version = "1.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" +dependencies = [ + "getrandom 0.3.4", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wallet" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "nomos-core", + "nomos-ledger", + "num-bigint", + "rpds", + "thiserror 2.0.17", + "tracing", + "zksign", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" 
+version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn 2.0.111", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "web-sys" +version = "0.3.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.44", +] + +[[package]] +name = "widestring" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbedf6db9096bc2364adce0ae0aa636dcd89f3c3f2cd67947062aaf0ca2a10ec" +dependencies = [ + "windows_aarch64_msvc 0.32.0", + 
"windows_i686_gnu 0.32.0", + "windows_i686_msvc 0.32.0", + "windows_x86_64_gnu 0.32.0", + "windows_x86_64_msvc 0.32.0", +] + +[[package]] +name = "windows" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efc5cf48f83140dcaab716eeaea345f9e93d0018fb81162753a3f76c3397b538" +dependencies = [ + "windows-core 0.53.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dcc5b895a6377f1ab9fa55acedab1fd5ac0db66ad1e6c7f47e28a22e446a5dd" +dependencies = [ + "windows-result 0.1.2", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result 0.4.1", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = 
"0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] 
+name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + 
+[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "winnow" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + +[[package]] +name = "witness-generator" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "circuits-utils", + "tempfile", +] + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "x25519-dalek" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" +dependencies = [ + "curve25519-dalek", + "rand_core 0.6.4", + "serde", +] + +[[package]] +name = "x509-parser" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4569f339c0c402346d4a75a9e39cf8dad310e287eef1ff56d4c68e5067f53460" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom 7.1.3", + "oid-registry", + "rusticata-macros", + "thiserror 2.0.17", + "time", +] + +[[package]] +name = "xml-rs" +version = "0.8.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae8337f8a065cfc972643663ea4279e04e7256de865aa66fe25cec5fb912d3f" + +[[package]] +name = "xmltree" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7d8a75eaf6557bb84a65ace8609883db44a29951042ada9b393151532e41fcb" +dependencies = [ + "xml-rs", +] + +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + 
"proc-macro2", + "quote", + "syn 2.0.111", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea879c944afe8a2b25fef16bb4ba234f47c694565e97383b36f3a878219065c" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf955aa904d6040f70dc8e9384444cb1030aed272ba3cb09bbc4ab9e7c1f34f5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "zip" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cc23c04387f4da0374be4533ad1208cbb091d5c11d070dfef13676ad6497164" +dependencies = [ + "arbitrary", + "crc32fast", + "crossbeam-utils", + "displaydoc", + "flate2", + "indexmap 2.12.1", + "num_enum", + "thiserror 1.0.69", +] + +[[package]] +name = "zksign" +version = "0.1.0" +source = "git+https://github.com/logos-co/nomos-node.git?branch=master#2f60a0372c228968c3526c341ebc7e58bbd178dd" +dependencies = [ + "ark-ff 0.4.2", + "circuits-prover", + "circuits-utils", + "generic-array 1.3.5", + "groth16", + "num-bigint", + "poseidon2", + "serde", + "serde_json", + "thiserror 2.0.17", + "tracing", + "witness-generator", + "zeroize", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..b5a624f --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,98 @@ +[workspace] +members = [ + "testing-framework/configs", + "testing-framework/core", + "testing-framework/runners/compose", + "testing-framework/runners/k8s", + "testing-framework/runners/local", + "testing-framework/workflows", + "tests/workflows", +] 
+resolver = "2" + +[workspace.package] +categories = [] +description = "Nomos testing framework workspace (split out from nomos-node)" +edition = "2024" +keywords = ["framework", "nomos", "testing"] +license = "MIT OR Apache-2.0" +readme = "README.md" +repository = "https://example.invalid/nomos-testing-local" +version = "0.1.0" + +[workspace.lints.rust] +unsafe_code = "allow" + +[workspace.lints.clippy] +all = "allow" + +[workspace.dependencies] +# Local testing framework crates +integration-configs = { default-features = false, path = "testing-framework/configs" } +testing-framework-core = { default-features = false, path = "testing-framework/core" } +testing-framework-runner-compose = { default-features = false, path = "testing-framework/runners/compose" } +testing-framework-runner-k8s = { default-features = false, path = "testing-framework/runners/k8s" } +testing-framework-runner-local = { default-features = false, path = "testing-framework/runners/local" } +testing-framework-workflows = { default-features = false, path = "testing-framework/workflows" } + +# Nomos git dependencies (tracking master) +broadcast-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +cfgsync = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +chain-leader = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master", features = [ + "pol-dev-mode", +] } +chain-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +chain-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +common-http-client = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +cryptarchia-engine = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +cryptarchia-sync = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +executor-http-client = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +groth16 = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +key-management-system = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +kzgrs = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +kzgrs-backend = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-api = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-blend-message = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-blend-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-cli = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-da-dispersal = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-da-network-core = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-da-network-service = { 
default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-da-sampling = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-da-verifier = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-executor = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-http-api-common = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-ledger = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-libp2p = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-network = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-node = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-sdp = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-time = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-tracing = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-tracing-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-utils = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +nomos-wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +poc = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +pol = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +subnetworks-assignations = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +tests = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +tx-service = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +wallet = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } +zksign = { default-features = false, git = "https://github.com/logos-co/nomos-node.git", branch = "master" } + +# External crates +async-trait = { default-features = false, version = "0.1" } +bytes = { default-features = false, version = "1.3" } +hex = { default-features = false, version = "0.4.3" } +libp2p = { default-features = false, version = "0.55" } +rand = { default-features = false, version = "0.8" } +reqwest = { default-features = false, version = "0.12" } +serde = { default-features = true, version = "1.0", features = ["derive"] } +serde_json = { default-features = false, version = "1.0" } +serde_with = { default-features = false, version = "3.14.0" } +serde_yaml = { default-features = false, version = "0.9.33" } +tempfile = { default-features = false, version = "3" } +thiserror = { default-features = false, version = "2.0" } +tokio = { default-features = false, version = "1" } +tracing = { default-features = false, version = "0.1" } diff --git a/README.md b/README.md new file mode 100644 index 0000000..720a4a7 --- /dev/null +++ b/README.md @@ -0,0 +1,39 @@ +# Nomos Testing (split workspace) + +This 
workspace contains only the testing framework crates pulled from the `nomos-node` repo: + +- `testing-framework/configs` +- `testing-framework/core` +- `testing-framework/workflows` +- `testing-framework/runners` (compose, k8s, local) +- `tests/workflows` (demo/integration tests) +- helper scripts (`scripts/setup-nomos-circuits.sh`, `scripts/build-rapidsnark.sh`) + +## Layout + +This workspace was split out from a sibling checkout of `nomos-node`: + +``` +IdeaProjects/ +├─ nomos-node/ # existing monorepo with all node crates +└─ nomos-testing/ # this workspace (you are here) +``` + +The local `testing-framework/*` crates are path dependencies in `Cargo.toml`; the `nomos-node` crates are git dependencies tracking the `master` branch of `https://github.com/logos-co/nomos-node.git`. + +## Usage + +```bash +cd nomos-testing +cargo test -p tests-workflows -- --ignored # or any crate you need +``` + +If you need circuits/prover assets, run the usual helpers from this workspace: + +```bash +scripts/setup-nomos-circuits.sh +scripts/build-rapidsnark.sh +``` + +All code is sourced from the local branches: +`feat/testing-framework-move`, `feat/testing-framework`, `feat/testing-framework-runners`, `feat/testing-framework-k8s-runner`. diff --git a/hooks/cargo-hack.sh b/hooks/cargo-hack.sh new file mode 100755 index 0000000..aef5276 --- /dev/null +++ b/hooks/cargo-hack.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +RUSTFLAGS="-D warnings" cargo hack --feature-powerset --no-dev-deps check diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 0000000..8373f6e --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,12 @@ +[toolchain] +# Keep this version in sync also in the following places: +# * Dockerfile +# * flake.nix +# * testnet/Dockerfile +# Also, update the version of the nightly toolchain to the latest nightly of the new version specified in the following places: +# * .github/workflows/code-check.yml (fmt job) +# * .pre-commit-config.yaml (fmt hook) +# Then, if there is any new allow-by-default rustc lint introduced/stabilized, add it to the respective entry in our `.cargo/config.toml`. +channel = "nightly-2025-09-14" +# Even if clippy should be included in the default profile, in some cases it is not installed. So we force it with an explicit declaration. +components = ["clippy", "rustfmt"] diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000..4efac2b --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,5 @@ +group_imports = "StdExternalCrate" +imports_granularity = "Crate" +reorder_imports = true +reorder_modules = true +wrap_comments = true diff --git a/scripts/build-rapidsnark.sh b/scripts/build-rapidsnark.sh new file mode 100755 index 0000000..4f5fc90 --- /dev/null +++ b/scripts/build-rapidsnark.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# +# Rebuild the rapidsnark prover for the current architecture. +# +# Usage: ./scripts/build-rapidsnark.sh <circuits-dir> + +set -euo pipefail + +if [ $# -lt 1 ]; then + echo "usage: $0 <circuits-dir>" >&2 + exit 1 +fi + +TARGET_ARCH="$(uname -m)" +CIRCUITS_DIR="$1" +RAPIDSNARK_REPO="${RAPIDSNARK_REPO:-https://github.com/iden3/rapidsnark.git}" +RAPIDSNARK_REF="${RAPIDSNARK_REF:-main}" + +if [ ! -d "$CIRCUITS_DIR" ]; then + echo "circuits directory '$CIRCUITS_DIR' does not exist" >&2 + exit 1 +fi + +case "$TARGET_ARCH" in + arm64 | aarch64) + ;; + *) + echo "rapidsnark rebuild skipped for architecture '$TARGET_ARCH'" >&2 + exit 0 + ;; +esac + +workdir="$(mktemp -d)" +trap 'rm -rf "$workdir"' EXIT + +echo "Building rapidsnark ($RAPIDSNARK_REF) for $TARGET_ARCH..."
>&2 +git clone --depth 1 --branch "$RAPIDSNARK_REF" "$RAPIDSNARK_REPO" "$workdir/rapidsnark" >&2 +cd "$workdir/rapidsnark" +git submodule update --init --recursive >&2 + +if [ "${RAPIDSNARK_BUILD_GMP:-1}" = "1" ]; then + GMP_TARGET="${RAPIDSNARK_GMP_TARGET:-aarch64}" + ./build_gmp.sh "$GMP_TARGET" >&2 +fi + +MAKE_TARGET="${RAPIDSNARK_MAKE_TARGET:-host_arm64}" +PACKAGE_DIR="${RAPIDSNARK_PACKAGE_DIR:-package_arm64}" + +make "$MAKE_TARGET" -j"$(nproc)" >&2 + +install -m 0755 "${PACKAGE_DIR}/bin/prover" "$CIRCUITS_DIR/prover" +echo "rapidsnark prover installed to $CIRCUITS_DIR/prover" >&2 diff --git a/scripts/setup-nomos-circuits.sh b/scripts/setup-nomos-circuits.sh new file mode 100755 index 0000000..8057058 --- /dev/null +++ b/scripts/setup-nomos-circuits.sh @@ -0,0 +1,216 @@ +#!/bin/bash +# +# Setup script for nomos-circuits +# +# Usage: ./setup-nomos-circuits.sh [VERSION] [INSTALL_DIR] +# +# Arguments: +# VERSION - Optional. Version to install (default: v0.3.1) +# INSTALL_DIR - Optional. Installation directory (default: $HOME/.nomos-circuits) +# +# Examples: +# ./setup-nomos-circuits.sh # Install default version to default location +# ./setup-nomos-circuits.sh v0.2.0 # Install specific version to default location +# ./setup-nomos-circuits.sh v0.2.0 /opt/circuits # Install to custom location + +set -e + +# Default values +VERSION="${1:-v0.3.1}" +DEFAULT_INSTALL_DIR="$HOME/.nomos-circuits" +INSTALL_DIR="${2:-$DEFAULT_INSTALL_DIR}" +REPO="logos-co/nomos-circuits" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +print_info() { + echo -e "${BLUE}ℹ${NC} $1" +} + +print_success() { + echo -e "${GREEN}✓${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}⚠${NC} $1" +} + +print_error() { + echo -e "${RED}✗${NC} $1" +} + +# Detect OS and architecture +detect_platform() { + local os="" + local arch="" + + # Detect OS + case "$(uname -s)" in + Linux*) os="linux";; + Darwin*) os="macos";; + MINGW*|MSYS*|CYGWIN*) os="windows";; + *) print_error "Unsupported operating system: $(uname -s)"; exit 1;; + esac + + # Detect architecture + case "$(uname -m)" in + x86_64) arch="x86_64";; + aarch64) arch="aarch64";; + arm64) arch="aarch64";; + *) print_error "Unsupported architecture: $(uname -m)"; exit 1;; + esac + + echo "${os}-${arch}" +} + +# Check if installation directory exists and get confirmation +check_existing_installation() { + if [ -d "$INSTALL_DIR" ]; then + print_warning "Installation directory already exists: $INSTALL_DIR" + + # Check if it has a VERSION file + if [ -f "$INSTALL_DIR/VERSION" ]; then + local current_version=$(cat "$INSTALL_DIR/VERSION") + print_info "Currently installed version: $current_version" + fi + + # In non-interactive environments (CI), automatically overwrite + if [ ! -t 0 ]; then + print_info "Non-interactive environment detected, automatically overwriting..." + else + # Interactive environment - ask for confirmation + echo + read -p "Do you want to overwrite it? (y/N): " -n 1 -r + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + print_info "Installation cancelled." + exit 0 + fi + fi + + print_info "Removing existing installation..." 
+ rm -rf "$INSTALL_DIR" + fi +} + +# Download and extract the release +download_release() { + local platform="$1" + local artifact="nomos-circuits-${VERSION}-${platform}.tar.gz" + local url="https://github.com/${REPO}/releases/download/${VERSION}/${artifact}" + local temp_dir=$(mktemp -d) + + print_info "Downloading nomos-circuits ${VERSION} for ${platform}..." + print_info "URL: $url" + + # Build curl command with optional authentication + local curl_cmd="curl -L" + if [ -n "$GITHUB_TOKEN" ]; then + curl_cmd="$curl_cmd --header 'authorization: Bearer ${GITHUB_TOKEN}'" + fi + curl_cmd="$curl_cmd -o ${temp_dir}/${artifact} $url" + + if ! eval "$curl_cmd"; then + print_error "Failed to download release artifact" + print_error "Please check that version ${VERSION} exists for platform ${platform}" + print_error "Available releases: https://github.com/${REPO}/releases" + rm -rf "$temp_dir" + exit 1 + fi + + print_success "Download complete" + + print_info "Extracting to ${INSTALL_DIR}..." + mkdir -p "$INSTALL_DIR" + + if ! tar -xzf "${temp_dir}/${artifact}" -C "$INSTALL_DIR" --strip-components=1; then + print_error "Failed to extract archive" + rm -rf "$temp_dir" + exit 1 + fi + + rm -rf "$temp_dir" + print_success "Extraction complete" +} + +# Handle macOS code signing/quarantine issues +handle_macos_quarantine() { + print_info "macOS detected: Removing quarantine attributes from executables..." + + # Remove quarantine attribute from all executable files + if find "$INSTALL_DIR" -type f -perm +111 -exec xattr -d com.apple.quarantine {} \; 2>/dev/null; then + print_success "Quarantine attributes removed" + else + print_warning "Could not remove quarantine attributes (they may not exist)" + fi +} + +# Main installation process +main() { + print_info "Setting up nomos-circuits ${VERSION}" + print_info "Installation directory: $INSTALL_DIR" + echo + + # Detect platform (allow override via NOMOS_CIRCUITS_PLATFORM) + local platform_override="${NOMOS_CIRCUITS_PLATFORM:-}" + local platform + if [ -n "$platform_override" ]; then + platform="$platform_override" + print_info "Using overridden platform: $platform" + else + platform=$(detect_platform) + print_info "Detected platform: $platform" + fi + + # Check existing installation + check_existing_installation + + # Download and extract + download_release "$platform" + + # Handle macOS quarantine if needed + if [[ "$platform" == macos-* ]]; then + echo + handle_macos_quarantine + fi + + if [[ "${NOMOS_CIRCUITS_REBUILD_RAPIDSNARK:-0}" == "1" || "$platform" == *"aarch64" ]]; then + echo + print_info "Rebuilding rapidsnark prover for ${platform}..." + "${SCRIPT_DIR}/build-rapidsnark.sh" "$INSTALL_DIR" + fi + + echo + print_success "Installation complete!" 
+ echo + print_info "nomos-circuits ${VERSION} is now installed at: $INSTALL_DIR" + print_info "The following circuits are available:" + + # Discover circuits by finding directories that contain a witness_generator + for dir in "$INSTALL_DIR"/*/; do + if [ -d "$dir" ]; then + local circuit_name + circuit_name=$(basename "$dir") + if [ -f "$dir/witness_generator" ]; then + echo " • $circuit_name" + fi + fi + done + + # Only show export instructions if not using the default location + if [ "$INSTALL_DIR" != "$DEFAULT_INSTALL_DIR" ]; then + echo + print_info "Since you're using a custom installation directory, set the environment variable:" + print_info " export NOMOS_CIRCUITS=$INSTALL_DIR" + echo + fi +} + +# Run main +main diff --git a/testing-framework/configs/Cargo.toml b/testing-framework/configs/Cargo.toml new file mode 100644 index 0000000..eb720bd --- /dev/null +++ b/testing-framework/configs/Cargo.toml @@ -0,0 +1,50 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +name = "integration-configs" +readme.workspace = true +repository.workspace = true +version = "0.1.0" + +[dependencies] +blst = "0.3.11" +chain-leader = { workspace = true } +chain-network = { workspace = true } +chain-service = { workspace = true } +cryptarchia-engine = { workspace = true, features = ["serde"] } +cryptarchia-sync = { workspace = true } +ed25519-dalek = { version = "2.2.0", features = ["rand_core", "serde"] } +groth16 = { workspace = true } +hex = { version = "0.4.3", default-features = false } +key-management-system = { workspace = true } +nomos-api = { workspace = true } +nomos-blend-message = { workspace = true } +nomos-blend-service = { workspace = true, features = ["libp2p"] } +nomos-core = { workspace = true } +nomos-da-dispersal = { workspace = true } +nomos-da-network-core = { workspace = true } +nomos-da-network-service = { workspace = true } +nomos-da-sampling = { workspace = true } +nomos-da-verifier = { workspace = true } +nomos-executor = { workspace = true, default-features = false, features = ["testing", "tracing"] } +nomos-ledger = { workspace = true, features = ["serde"] } +nomos-libp2p = { workspace = true } +nomos-node = { workspace = true, default-features = false, features = ["testing"] } +nomos-sdp = { workspace = true } +nomos-time = { workspace = true } +nomos-tracing = { workspace = true } +nomos-tracing-service = { workspace = true } +nomos-utils = { workspace = true } +nomos-wallet = { workspace = true } +num-bigint = { version = "0.4", default-features = false } +rand = { workspace = true } +subnetworks-assignations = { workspace = true } +time = { version = "0.3", default-features = true } +tracing = { workspace = true } +zksign = { workspace = true } + +[lints] +workspace = true diff --git a/testing-framework/configs/src/common/kms.rs b/testing-framework/configs/src/common/kms.rs new file mode 100644 index 0000000..cea5d6a --- /dev/null +++ b/testing-framework/configs/src/common/kms.rs @@ -0,0 +1,14 @@ +use groth16::fr_to_bytes; +use key_management_system::{ + backend::preload::KeyId, + keys::{Key, secured_key::SecuredKey as _}, +}; + +#[must_use] +pub fn key_id_for_preload_backend(key: &Key) -> KeyId { + let key_id_bytes = match key { + Key::Ed25519(ed25519_secret_key) => ed25519_secret_key.as_public_key().to_bytes(), + Key::Zk(zk_secret_key) => fr_to_bytes(zk_secret_key.as_public_key().as_fr()), + }; + hex::encode(key_id_bytes) +} diff --git 
a/testing-framework/configs/src/common/mod.rs b/testing-framework/configs/src/common/mod.rs new file mode 100644 index 0000000..5550fd5 --- /dev/null +++ b/testing-framework/configs/src/common/mod.rs @@ -0,0 +1 @@ +pub mod kms; diff --git a/testing-framework/configs/src/lib.rs b/testing-framework/configs/src/lib.rs new file mode 100644 index 0000000..de9fb03 --- /dev/null +++ b/testing-framework/configs/src/lib.rs @@ -0,0 +1,45 @@ +use std::{env, net::Ipv4Addr, ops::Mul as _, sync::LazyLock, time::Duration}; + +use nomos_core::sdp::ProviderId; +use nomos_libp2p::{Multiaddr, PeerId, multiaddr}; + +pub mod common; +pub mod nodes; +pub mod topology; + +static IS_SLOW_TEST_ENV: LazyLock = + LazyLock::new(|| env::var("SLOW_TEST_ENV").is_ok_and(|s| s == "true")); + +pub static IS_DEBUG_TRACING: LazyLock = LazyLock::new(|| { + env::var("NOMOS_TESTS_TRACING").is_ok_and(|val| val.eq_ignore_ascii_case("true")) +}); + +/// In slow test environments like Codecov, use 2x timeout. +#[must_use] +pub fn adjust_timeout(d: Duration) -> Duration { + if *IS_SLOW_TEST_ENV { d.mul(2) } else { d } +} + +#[must_use] +pub fn node_address_from_port(port: u16) -> Multiaddr { + multiaddr(Ipv4Addr::LOCALHOST, port) +} + +#[must_use] +pub fn secret_key_to_peer_id(node_key: nomos_libp2p::ed25519::SecretKey) -> PeerId { + PeerId::from_public_key( + &nomos_libp2p::ed25519::Keypair::from(node_key) + .public() + .into(), + ) +} + +#[must_use] +pub fn secret_key_to_provider_id(node_key: nomos_libp2p::ed25519::SecretKey) -> ProviderId { + ProviderId::try_from( + nomos_libp2p::ed25519::Keypair::from(node_key) + .public() + .to_bytes(), + ) + .unwrap() +} diff --git a/testing-framework/configs/src/nodes/executor.rs b/testing-framework/configs/src/nodes/executor.rs new file mode 100644 index 0000000..5d0e5f6 --- /dev/null +++ b/testing-framework/configs/src/nodes/executor.rs @@ -0,0 +1,328 @@ +use std::{ + collections::HashSet, + num::{NonZeroU64, NonZeroUsize}, + path::PathBuf, + time::Duration, +}; + +use chain_leader::LeaderSettings; +use chain_network::{ChainNetworkSettings, OrphanConfig, SyncConfig}; +use chain_service::{CryptarchiaSettings, StartingState}; +use cryptarchia_engine::time::SlotConfig; +use key_management_system::keys::{Key, ZkKey}; +use nomos_blend_service::{ + core::settings::{CoverTrafficSettings, MessageDelayerSettings, SchedulerSettings, ZkSettings}, + settings::TimingSettings, +}; +use nomos_da_dispersal::{ + DispersalServiceSettings, + backend::kzgrs::{DispersalKZGRSBackendSettings, EncoderSettings}, +}; +use nomos_da_network_core::protocols::sampling::SubnetsConfig; +use nomos_da_network_service::{ + NetworkConfig as DaNetworkConfig, + api::http::ApiAdapterSettings, + backends::libp2p::{ + common::DaNetworkBackendSettings, executor::DaNetworkExecutorBackendSettings, + }, +}; +use nomos_da_sampling::{ + DaSamplingServiceSettings, backend::kzgrs::KzgrsSamplingBackendSettings, + verifier::kzgrs::KzgrsDaVerifierSettings as SamplingVerifierSettings, +}; +use nomos_da_verifier::{ + DaVerifierServiceSettings, + backend::{kzgrs::KzgrsDaVerifierSettings, trigger::MempoolPublishTriggerConfig}, + storage::adapters::rocksdb::RocksAdapterSettings as VerifierStorageAdapterSettings, +}; +use nomos_executor::config::Config as ExecutorConfig; +use nomos_node::{ + RocksBackendSettings, + api::backend::AxumBackendSettings as NodeAxumBackendSettings, + config::{ + blend::{ + deployment::{self as blend_deployment}, + serde as blend_serde, + }, + deployment::{CustomDeployment, Settings as NodeDeploymentSettings}, + 
mempool::MempoolConfig, + network::deployment::Settings as NetworkDeploymentSettings, + }, +}; +use nomos_sdp::SdpSettings; +use nomos_time::{ + TimeServiceSettings, + backends::{NtpTimeBackendSettings, ntp::async_client::NTPClientSettings}, +}; +use nomos_utils::math::NonNegativeF64; +use nomos_wallet::WalletServiceSettings; + +use crate::{ + adjust_timeout, + common::kms::key_id_for_preload_backend, + topology::configs::{ + GeneralConfig, blend::GeneralBlendConfig as TopologyBlendConfig, wallet::WalletAccount, + }, +}; + +#[must_use] +#[expect(clippy::too_many_lines, reason = "TODO: Address this at some point.")] +pub fn create_executor_config(config: GeneralConfig) -> ExecutorConfig { + let (blend_user_config, deployment_settings) = build_blend_service_config(&config.blend_config); + ExecutorConfig { + network: config.network_config, + blend: blend_user_config, + deployment: deployment_settings, + cryptarchia: CryptarchiaSettings { + config: config.consensus_config.ledger_config.clone(), + starting_state: StartingState::Genesis { + genesis_tx: config.consensus_config.genesis_tx, + }, + recovery_file: PathBuf::from("./recovery/cryptarchia.json"), + bootstrap: chain_service::BootstrapConfig { + prolonged_bootstrap_period: Duration::from_secs(3), + force_bootstrap: false, + offline_grace_period: chain_service::OfflineGracePeriodConfig { + grace_period: Duration::from_secs(20 * 60), + state_recording_interval: Duration::from_secs(60), + }, + }, + }, + chain_network: ChainNetworkSettings { + config: config.consensus_config.ledger_config.clone(), + network_adapter_settings: + chain_network::network::adapters::libp2p::LibP2pAdapterSettings { + topic: String::from(nomos_node::CONSENSUS_TOPIC), + }, + bootstrap: chain_network::BootstrapConfig { + ibd: chain_network::IbdConfig { + peers: HashSet::new(), + delay_before_new_download: Duration::from_secs(10), + }, + }, + sync: SyncConfig { + orphan: OrphanConfig { + max_orphan_cache_size: NonZeroUsize::new(5) + .expect("Max orphan cache size must be non-zero"), + }, + }, + }, + cryptarchia_leader: LeaderSettings { + transaction_selector_settings: (), + config: config.consensus_config.ledger_config.clone(), + leader_config: config.consensus_config.leader_config.clone(), + blend_broadcast_settings: + nomos_blend_service::core::network::libp2p::Libp2pBroadcastSettings { + topic: String::from(nomos_node::CONSENSUS_TOPIC), + }, + }, + da_network: DaNetworkConfig { + backend: DaNetworkExecutorBackendSettings { + validator_settings: DaNetworkBackendSettings { + node_key: config.da_config.node_key, + listening_address: config.da_config.listening_address, + policy_settings: config.da_config.policy_settings, + monitor_settings: config.da_config.monitor_settings, + balancer_interval: config.da_config.balancer_interval, + redial_cooldown: config.da_config.redial_cooldown, + replication_settings: config.da_config.replication_settings, + subnets_settings: SubnetsConfig { + num_of_subnets: config.da_config.num_samples as usize, + shares_retry_limit: config.da_config.retry_shares_limit, + commitments_retry_limit: config.da_config.retry_commitments_limit, + }, + }, + num_subnets: config.da_config.num_subnets, + }, + membership: config.da_config.membership.clone(), + api_adapter_settings: ApiAdapterSettings { + api_port: config.api_config.address.port(), + is_secure: false, + }, + subnet_refresh_interval: config.da_config.subnets_refresh_interval, + subnet_threshold: config.da_config.num_samples as usize, + min_session_members: config.da_config.num_samples as usize, 
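+            // Both subnet_threshold and min_session_members above mirror the configured num_samples.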
+ }, + da_verifier: DaVerifierServiceSettings { + share_verifier_settings: KzgrsDaVerifierSettings { + global_params_path: config.da_config.global_params_path.clone(), + domain_size: config.da_config.num_subnets as usize, + }, + tx_verifier_settings: (), + network_adapter_settings: (), + storage_adapter_settings: VerifierStorageAdapterSettings { + blob_storage_directory: "./".into(), + }, + mempool_trigger_settings: MempoolPublishTriggerConfig { + publish_threshold: NonNegativeF64::try_from(0.8).unwrap(), + share_duration: Duration::from_secs(5), + prune_duration: Duration::from_secs(30), + prune_interval: Duration::from_secs(5), + }, + }, + tracing: config.tracing_config.tracing_settings, + http: nomos_api::ApiServiceSettings { + backend_settings: NodeAxumBackendSettings { + address: config.api_config.address, + rate_limit_per_second: 10000, + rate_limit_burst: 10000, + max_concurrent_requests: 1000, + ..Default::default() + }, + }, + da_sampling: DaSamplingServiceSettings { + sampling_settings: KzgrsSamplingBackendSettings { + num_samples: config.da_config.num_samples, + num_subnets: config.da_config.num_subnets, + old_blobs_check_interval: config.da_config.old_blobs_check_interval, + blobs_validity_duration: config.da_config.blobs_validity_duration, + }, + share_verifier_settings: SamplingVerifierSettings { + global_params_path: config.da_config.global_params_path.clone(), + domain_size: config.da_config.num_subnets as usize, + }, + commitments_wait_duration: Duration::from_secs(1), + sdp_blob_trigger_sampling_delay: adjust_timeout(Duration::from_secs(5)), + }, + storage: RocksBackendSettings { + db_path: "./db".into(), + read_only: false, + column_family: Some("blocks".into()), + }, + da_dispersal: DispersalServiceSettings { + backend: DispersalKZGRSBackendSettings { + encoder_settings: EncoderSettings { + num_columns: config.da_config.num_subnets as usize, + with_cache: false, + global_params_path: config.da_config.global_params_path, + }, + dispersal_timeout: Duration::from_secs(20), + retry_cooldown: Duration::from_secs(3), + retry_limit: 2, + }, + }, + time: TimeServiceSettings { + backend_settings: NtpTimeBackendSettings { + ntp_server: config.time_config.ntp_server, + ntp_client_settings: NTPClientSettings { + timeout: config.time_config.timeout, + listening_interface: config.time_config.interface, + }, + update_interval: config.time_config.update_interval, + slot_config: SlotConfig { + slot_duration: config.time_config.slot_duration, + chain_start_time: config.time_config.chain_start_time, + }, + epoch_config: config.consensus_config.ledger_config.epoch_config, + base_period_length: config.consensus_config.ledger_config.base_period_length(), + }, + }, + mempool: MempoolConfig { + pool_recovery_path: "./recovery/mempool.json".into(), + }, + sdp: SdpSettings { declaration: None }, + wallet: WalletServiceSettings { + known_keys: { + let mut keys = HashSet::from_iter([config.consensus_config.leader_config.pk]); + keys.extend( + config + .consensus_config + .wallet_accounts + .iter() + .map(WalletAccount::public_key), + ); + keys + }, + }, + key_management: config.kms_config, + + testing_http: nomos_api::ApiServiceSettings { + backend_settings: NodeAxumBackendSettings { + address: config.api_config.testing_http_address, + rate_limit_per_second: 10000, + rate_limit_burst: 10000, + max_concurrent_requests: 1000, + ..Default::default() + }, + }, + } +} + +fn build_blend_service_config( + config: &TopologyBlendConfig, +) -> (blend_serde::Config, NodeDeploymentSettings) { + let zk_key_id 
= + key_id_for_preload_backend(&Key::from(ZkKey::new(config.secret_zk_key.clone()))); + + let backend_core = &config.backend_core; + let backend_edge = &config.backend_edge; + + let user = blend_serde::Config { + common: blend_serde::common::Config { + non_ephemeral_signing_key: config.private_key.clone(), + recovery_path_prefix: PathBuf::from("./recovery/blend"), + }, + core: blend_serde::core::Config { + backend: blend_serde::core::BackendConfig { + listening_address: backend_core.listening_address.clone(), + core_peering_degree: backend_core.core_peering_degree.clone(), + edge_node_connection_timeout: backend_core.edge_node_connection_timeout, + max_edge_node_incoming_connections: backend_core.max_edge_node_incoming_connections, + max_dial_attempts_per_peer: backend_core.max_dial_attempts_per_peer, + }, + zk: ZkSettings { + secret_key_kms_id: zk_key_id, + }, + }, + edge: blend_serde::edge::Config { + backend: blend_serde::edge::BackendConfig { + max_dial_attempts_per_peer_per_message: backend_edge + .max_dial_attempts_per_peer_per_message, + replication_factor: backend_edge.replication_factor, + }, + }, + }; + + let deployment_settings = blend_deployment::Settings { + common: blend_deployment::CommonSettings { + num_blend_layers: NonZeroU64::try_from(1).unwrap(), + minimum_network_size: NonZeroU64::try_from(1).unwrap(), + timing: TimingSettings { + round_duration: Duration::from_secs(1), + rounds_per_interval: NonZeroU64::try_from(30u64).unwrap(), + rounds_per_session: NonZeroU64::try_from(648_000u64).unwrap(), + rounds_per_observation_window: NonZeroU64::try_from(30u64).unwrap(), + rounds_per_session_transition_period: NonZeroU64::try_from(30u64).unwrap(), + epoch_transition_period_in_slots: NonZeroU64::try_from(2_600).unwrap(), + }, + protocol_name: backend_core.protocol_name.clone(), + }, + core: blend_deployment::CoreSettings { + scheduler: SchedulerSettings { + cover: CoverTrafficSettings { + intervals_for_safety_buffer: 100, + message_frequency_per_round: NonNegativeF64::try_from(1f64).unwrap(), + }, + delayer: MessageDelayerSettings { + maximum_release_delay_in_rounds: NonZeroU64::try_from(3u64).unwrap(), + }, + }, + minimum_messages_coefficient: backend_core.minimum_messages_coefficient, + normalization_constant: backend_core.normalization_constant, + }, + }; + + let deployment = NodeDeploymentSettings::Custom(CustomDeployment { + blend: deployment_settings, + network: NetworkDeploymentSettings { + identify_protocol_name: nomos_libp2p::protocol_name::StreamProtocol::new( + "/integration/nomos/identify/1.0.0", + ), + kademlia_protocol_name: nomos_libp2p::protocol_name::StreamProtocol::new( + "/integration/nomos/kad/1.0.0", + ), + }, + }); + + (user, deployment) +} diff --git a/testing-framework/configs/src/nodes/mod.rs b/testing-framework/configs/src/nodes/mod.rs new file mode 100644 index 0000000..aa3e506 --- /dev/null +++ b/testing-framework/configs/src/nodes/mod.rs @@ -0,0 +1,2 @@ +pub mod executor; +pub mod validator; diff --git a/testing-framework/configs/src/nodes/validator.rs b/testing-framework/configs/src/nodes/validator.rs new file mode 100644 index 0000000..2b07aff --- /dev/null +++ b/testing-framework/configs/src/nodes/validator.rs @@ -0,0 +1,317 @@ +use std::{ + collections::HashSet, + num::{NonZeroU64, NonZeroUsize}, + path::PathBuf, + time::Duration, +}; + +use chain_leader::LeaderSettings; +use chain_network::{ChainNetworkSettings, OrphanConfig, SyncConfig}; +use chain_service::{CryptarchiaSettings, StartingState}; +use cryptarchia_engine::time::SlotConfig; +use 
key_management_system::keys::{Key, ZkKey}; +use nomos_blend_service::{ + core::settings::{CoverTrafficSettings, MessageDelayerSettings, SchedulerSettings, ZkSettings}, + settings::TimingSettings, +}; +use nomos_da_network_core::{ + protocols::sampling::SubnetsConfig, swarm::DAConnectionPolicySettings, +}; +use nomos_da_network_service::{ + NetworkConfig as DaNetworkConfig, api::http::ApiAdapterSettings, + backends::libp2p::common::DaNetworkBackendSettings, +}; +use nomos_da_sampling::{ + DaSamplingServiceSettings, backend::kzgrs::KzgrsSamplingBackendSettings, + verifier::kzgrs::KzgrsDaVerifierSettings as SamplingVerifierSettings, +}; +use nomos_da_verifier::{ + DaVerifierServiceSettings, + backend::{kzgrs::KzgrsDaVerifierSettings, trigger::MempoolPublishTriggerConfig}, + storage::adapters::rocksdb::RocksAdapterSettings as VerifierStorageAdapterSettings, +}; +use nomos_node::{ + Config as ValidatorConfig, RocksBackendSettings, + api::backend::AxumBackendSettings as NodeAxumBackendSettings, + config::{ + blend::{ + deployment::{self as blend_deployment}, + serde as blend_serde, + }, + deployment::{CustomDeployment, Settings as NodeDeploymentSettings}, + mempool::MempoolConfig, + network::deployment::Settings as NetworkDeploymentSettings, + }, +}; +use nomos_sdp::SdpSettings; +use nomos_time::{ + TimeServiceSettings, + backends::{NtpTimeBackendSettings, ntp::async_client::NTPClientSettings}, +}; +use nomos_utils::math::NonNegativeF64; +use nomos_wallet::WalletServiceSettings; + +use crate::{ + adjust_timeout, + common::kms::key_id_for_preload_backend, + topology::configs::{ + GeneralConfig, blend::GeneralBlendConfig as TopologyBlendConfig, wallet::WalletAccount, + }, +}; + +#[must_use] +#[expect( + clippy::too_many_lines, + reason = "Validator config wiring aggregates many service settings" +)] +pub fn create_validator_config(config: GeneralConfig) -> ValidatorConfig { + let da_policy_settings = config.da_config.policy_settings; + let (blend_user_config, deployment_settings) = build_blend_service_config(&config.blend_config); + ValidatorConfig { + network: config.network_config, + blend: blend_user_config, + deployment: deployment_settings, + cryptarchia: CryptarchiaSettings { + config: config.consensus_config.ledger_config.clone(), + starting_state: StartingState::Genesis { + genesis_tx: config.consensus_config.genesis_tx, + }, + recovery_file: PathBuf::from("./recovery/cryptarchia.json"), + bootstrap: chain_service::BootstrapConfig { + prolonged_bootstrap_period: config.bootstrapping_config.prolonged_bootstrap_period, + force_bootstrap: false, + offline_grace_period: chain_service::OfflineGracePeriodConfig { + grace_period: Duration::from_secs(20 * 60), + state_recording_interval: Duration::from_secs(60), + }, + }, + }, + chain_network: ChainNetworkSettings { + config: config.consensus_config.ledger_config.clone(), + network_adapter_settings: + chain_network::network::adapters::libp2p::LibP2pAdapterSettings { + topic: String::from(nomos_node::CONSENSUS_TOPIC), + }, + bootstrap: chain_network::BootstrapConfig { + ibd: chain_network::IbdConfig { + peers: HashSet::new(), + delay_before_new_download: Duration::from_secs(10), + }, + }, + sync: SyncConfig { + orphan: OrphanConfig { + max_orphan_cache_size: NonZeroUsize::new(5) + .expect("Max orphan cache size must be non-zero"), + }, + }, + }, + cryptarchia_leader: LeaderSettings { + transaction_selector_settings: (), + config: config.consensus_config.ledger_config.clone(), + leader_config: config.consensus_config.leader_config.clone(), + 
blend_broadcast_settings: + nomos_blend_service::core::network::libp2p::Libp2pBroadcastSettings { + topic: String::from(nomos_node::CONSENSUS_TOPIC), + }, + }, + da_network: DaNetworkConfig { + backend: DaNetworkBackendSettings { + node_key: config.da_config.node_key, + listening_address: config.da_config.listening_address, + policy_settings: DAConnectionPolicySettings { + min_dispersal_peers: 0, + min_replication_peers: da_policy_settings.min_replication_peers, + max_dispersal_failures: da_policy_settings.max_dispersal_failures, + max_sampling_failures: da_policy_settings.max_sampling_failures, + max_replication_failures: da_policy_settings.max_replication_failures, + malicious_threshold: da_policy_settings.malicious_threshold, + }, + monitor_settings: config.da_config.monitor_settings, + balancer_interval: config.da_config.balancer_interval, + redial_cooldown: config.da_config.redial_cooldown, + replication_settings: config.da_config.replication_settings, + subnets_settings: SubnetsConfig { + num_of_subnets: config.da_config.num_samples as usize, + shares_retry_limit: config.da_config.retry_shares_limit, + commitments_retry_limit: config.da_config.retry_commitments_limit, + }, + }, + membership: config.da_config.membership.clone(), + api_adapter_settings: ApiAdapterSettings { + api_port: config.api_config.address.port(), + is_secure: false, + }, + subnet_refresh_interval: config.da_config.subnets_refresh_interval, + subnet_threshold: config.da_config.num_samples as usize, + min_session_members: config.da_config.num_samples as usize, + }, + da_verifier: DaVerifierServiceSettings { + share_verifier_settings: KzgrsDaVerifierSettings { + global_params_path: config.da_config.global_params_path.clone(), + domain_size: config.da_config.num_subnets as usize, + }, + tx_verifier_settings: (), + network_adapter_settings: (), + storage_adapter_settings: VerifierStorageAdapterSettings { + blob_storage_directory: "./".into(), + }, + mempool_trigger_settings: MempoolPublishTriggerConfig { + publish_threshold: NonNegativeF64::try_from(0.8).unwrap(), + share_duration: Duration::from_secs(5), + prune_duration: Duration::from_secs(30), + prune_interval: Duration::from_secs(5), + }, + }, + tracing: config.tracing_config.tracing_settings, + http: nomos_api::ApiServiceSettings { + backend_settings: NodeAxumBackendSettings { + address: config.api_config.address, + rate_limit_per_second: 10000, + rate_limit_burst: 10000, + max_concurrent_requests: 1000, + ..Default::default() + }, + }, + da_sampling: DaSamplingServiceSettings { + sampling_settings: KzgrsSamplingBackendSettings { + num_samples: config.da_config.num_samples, + num_subnets: config.da_config.num_subnets, + old_blobs_check_interval: config.da_config.old_blobs_check_interval, + blobs_validity_duration: config.da_config.blobs_validity_duration, + }, + share_verifier_settings: SamplingVerifierSettings { + global_params_path: config.da_config.global_params_path, + domain_size: config.da_config.num_subnets as usize, + }, + commitments_wait_duration: Duration::from_secs(1), + sdp_blob_trigger_sampling_delay: adjust_timeout(Duration::from_secs(5)), + }, + storage: RocksBackendSettings { + db_path: "./db".into(), + read_only: false, + column_family: Some("blocks".into()), + }, + time: TimeServiceSettings { + backend_settings: NtpTimeBackendSettings { + ntp_server: config.time_config.ntp_server, + ntp_client_settings: NTPClientSettings { + timeout: config.time_config.timeout, + listening_interface: config.time_config.interface, + }, + update_interval: 
config.time_config.update_interval, + slot_config: SlotConfig { + slot_duration: config.time_config.slot_duration, + chain_start_time: config.time_config.chain_start_time, + }, + epoch_config: config.consensus_config.ledger_config.epoch_config, + base_period_length: config.consensus_config.ledger_config.base_period_length(), + }, + }, + mempool: MempoolConfig { + pool_recovery_path: "./recovery/mempool.json".into(), + }, + sdp: SdpSettings { declaration: None }, + wallet: WalletServiceSettings { + known_keys: { + let mut keys = HashSet::from_iter([config.consensus_config.leader_config.pk]); + keys.extend( + config + .consensus_config + .wallet_accounts + .iter() + .map(WalletAccount::public_key), + ); + keys + }, + }, + key_management: config.kms_config, + testing_http: nomos_api::ApiServiceSettings { + backend_settings: NodeAxumBackendSettings { + address: config.api_config.testing_http_address, + rate_limit_per_second: 10000, + rate_limit_burst: 10000, + max_concurrent_requests: 1000, + ..Default::default() + }, + }, + } +} + +fn build_blend_service_config( + config: &TopologyBlendConfig, +) -> (blend_serde::Config, NodeDeploymentSettings) { + let zk_key_id = + key_id_for_preload_backend(&Key::from(ZkKey::new(config.secret_zk_key.clone()))); + + let backend_core = &config.backend_core; + let backend_edge = &config.backend_edge; + + let user = blend_serde::Config { + common: blend_serde::common::Config { + non_ephemeral_signing_key: config.private_key.clone(), + recovery_path_prefix: PathBuf::from("./recovery/blend"), + }, + core: blend_serde::core::Config { + backend: blend_serde::core::BackendConfig { + listening_address: backend_core.listening_address.clone(), + core_peering_degree: backend_core.core_peering_degree.clone(), + edge_node_connection_timeout: backend_core.edge_node_connection_timeout, + max_edge_node_incoming_connections: backend_core.max_edge_node_incoming_connections, + max_dial_attempts_per_peer: backend_core.max_dial_attempts_per_peer, + }, + zk: ZkSettings { + secret_key_kms_id: zk_key_id, + }, + }, + edge: blend_serde::edge::Config { + backend: blend_serde::edge::BackendConfig { + max_dial_attempts_per_peer_per_message: backend_edge + .max_dial_attempts_per_peer_per_message, + replication_factor: backend_edge.replication_factor, + }, + }, + }; + + let deployment_settings = blend_deployment::Settings { + common: blend_deployment::CommonSettings { + num_blend_layers: NonZeroU64::try_from(1).unwrap(), + minimum_network_size: NonZeroU64::try_from(1).unwrap(), + timing: TimingSettings { + round_duration: Duration::from_secs(1), + rounds_per_interval: NonZeroU64::try_from(30u64).unwrap(), + rounds_per_session: NonZeroU64::try_from(648_000u64).unwrap(), + rounds_per_observation_window: NonZeroU64::try_from(30u64).unwrap(), + rounds_per_session_transition_period: NonZeroU64::try_from(30u64).unwrap(), + epoch_transition_period_in_slots: NonZeroU64::try_from(2_600).unwrap(), + }, + protocol_name: backend_core.protocol_name.clone(), + }, + core: blend_deployment::CoreSettings { + scheduler: SchedulerSettings { + cover: CoverTrafficSettings { + intervals_for_safety_buffer: 100, + message_frequency_per_round: NonNegativeF64::try_from(1f64).unwrap(), + }, + delayer: MessageDelayerSettings { + maximum_release_delay_in_rounds: NonZeroU64::try_from(3u64).unwrap(), + }, + }, + minimum_messages_coefficient: backend_core.minimum_messages_coefficient, + normalization_constant: backend_core.normalization_constant, + }, + }; + + let deployment = 
NodeDeploymentSettings::Custom(CustomDeployment { + blend: deployment_settings, + network: NetworkDeploymentSettings { + identify_protocol_name: nomos_libp2p::protocol_name::StreamProtocol::new( + "/integration/nomos/identify/1.0.0", + ), + kademlia_protocol_name: nomos_libp2p::protocol_name::StreamProtocol::new( + "/integration/nomos/kad/1.0.0", + ), + }, + }); + + (user, deployment) +} diff --git a/testing-framework/configs/src/topology/configs/api.rs b/testing-framework/configs/src/topology/configs/api.rs new file mode 100644 index 0000000..bd759c7 --- /dev/null +++ b/testing-framework/configs/src/topology/configs/api.rs @@ -0,0 +1,23 @@ +use std::net::SocketAddr; + +use nomos_utils::net::get_available_tcp_port; + +#[derive(Clone)] +pub struct GeneralApiConfig { + pub address: SocketAddr, + pub testing_http_address: SocketAddr, +} + +#[must_use] +pub fn create_api_configs(ids: &[[u8; 32]]) -> Vec { + ids.iter() + .map(|_| GeneralApiConfig { + address: format!("127.0.0.1:{}", get_available_tcp_port().unwrap()) + .parse() + .unwrap(), + testing_http_address: format!("127.0.0.1:{}", get_available_tcp_port().unwrap()) + .parse() + .unwrap(), + }) + .collect() +} diff --git a/testing-framework/configs/src/topology/configs/blend.rs b/testing-framework/configs/src/topology/configs/blend.rs new file mode 100644 index 0000000..2d1e0ea --- /dev/null +++ b/testing-framework/configs/src/topology/configs/blend.rs @@ -0,0 +1,72 @@ +use core::time::Duration; +use std::{num::NonZeroU64, str::FromStr as _}; + +use ed25519_dalek::SigningKey; +use nomos_blend_message::crypto::keys::Ed25519PrivateKey; +use nomos_blend_service::{ + core::backends::libp2p::Libp2pBlendBackendSettings as Libp2pCoreBlendBackendSettings, + edge::backends::libp2p::Libp2pBlendBackendSettings as Libp2pEdgeBlendBackendSettings, +}; +use nomos_libp2p::{Multiaddr, protocol_name::StreamProtocol}; +use num_bigint::BigUint; +use zksign::SecretKey; + +#[derive(Clone)] +pub struct GeneralBlendConfig { + pub backend_core: Libp2pCoreBlendBackendSettings, + pub backend_edge: Libp2pEdgeBlendBackendSettings, + pub private_key: Ed25519PrivateKey, + pub secret_zk_key: SecretKey, + pub signer: SigningKey, +} + +/// Builds blend configs for each node. +/// +/// # Panics +/// +/// Panics if the provided port strings cannot be parsed into valid `Multiaddr`s +/// or if any of the numeric blend parameters are zero, which would make the +/// libp2p configuration invalid. +#[must_use] +pub fn create_blend_configs(ids: &[[u8; 32]], ports: &[u16]) -> Vec { + ids.iter() + .zip(ports) + .map(|(id, port)| { + let signer = SigningKey::from_bytes(id); + + let private_key = Ed25519PrivateKey::from(*id); + // We need unique ZK secret keys, so we just derive them deterministically from + // the generated Ed25519 public keys, which are guaranteed to be unique because + // they are in turned derived from node ID. 
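+            // (BigUint::from_bytes_le reads the 32 Ed25519 public-key bytes as a little-endian integer.)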
+ let secret_zk_key = + SecretKey::from(BigUint::from_bytes_le(private_key.public_key().as_bytes())); + GeneralBlendConfig { + backend_core: Libp2pCoreBlendBackendSettings { + listening_address: Multiaddr::from_str(&format!( + "/ip4/127.0.0.1/udp/{port}/quic-v1", + )) + .unwrap(), + core_peering_degree: 1..=3, + minimum_messages_coefficient: NonZeroU64::try_from(1) + .expect("Minimum messages coefficient cannot be zero."), + normalization_constant: 1.03f64 + .try_into() + .expect("Normalization constant cannot be negative."), + edge_node_connection_timeout: Duration::from_secs(1), + max_edge_node_incoming_connections: 300, + max_dial_attempts_per_peer: NonZeroU64::try_from(3) + .expect("Max dial attempts per peer cannot be zero."), + protocol_name: StreamProtocol::new("/blend/integration-tests"), + }, + backend_edge: Libp2pEdgeBlendBackendSettings { + max_dial_attempts_per_peer_per_message: 1.try_into().unwrap(), + protocol_name: StreamProtocol::new("/blend/integration-tests"), + replication_factor: 1.try_into().unwrap(), + }, + private_key, + secret_zk_key, + signer, + } + }) + .collect() +} diff --git a/testing-framework/configs/src/topology/configs/bootstrap.rs b/testing-framework/configs/src/topology/configs/bootstrap.rs new file mode 100644 index 0000000..14e51a5 --- /dev/null +++ b/testing-framework/configs/src/topology/configs/bootstrap.rs @@ -0,0 +1,20 @@ +use std::time::Duration; + +#[derive(Clone)] +pub struct GeneralBootstrapConfig { + pub prolonged_bootstrap_period: Duration, +} + +pub const SHORT_PROLONGED_BOOTSTRAP_PERIOD: Duration = Duration::from_secs(1); + +#[must_use] +pub fn create_bootstrap_configs( + ids: &[[u8; 32]], + prolonged_bootstrap_period: Duration, +) -> Vec { + ids.iter() + .map(|_| GeneralBootstrapConfig { + prolonged_bootstrap_period, + }) + .collect() +} diff --git a/testing-framework/configs/src/topology/configs/consensus.rs b/testing-framework/configs/src/topology/configs/consensus.rs new file mode 100644 index 0000000..49c8292 --- /dev/null +++ b/testing-framework/configs/src/topology/configs/consensus.rs @@ -0,0 +1,343 @@ +use std::{num::NonZero, sync::Arc}; + +use chain_leader::LeaderConfig; +use cryptarchia_engine::EpochConfig; +use ed25519_dalek::ed25519::signature::SignerMut as _; +use groth16::CompressedGroth16Proof; +use nomos_core::{ + mantle::{ + MantleTx, Note, OpProof, Utxo, + genesis_tx::GenesisTx, + ledger::Tx as LedgerTx, + ops::{ + Op, + channel::{ChannelId, Ed25519PublicKey, MsgId, inscribe::InscriptionOp}, + }, + }, + sdp::{DeclarationMessage, Locator, ProviderId, ServiceParameters, ServiceType}, +}; +use nomos_node::{SignedMantleTx, Transaction as _}; +use num_bigint::BigUint; +use zksign::{PublicKey, SecretKey}; + +use super::wallet::{WalletAccount, WalletConfig}; + +#[derive(Clone)] +pub struct ConsensusParams { + pub n_participants: usize, + pub security_param: NonZero, + pub active_slot_coeff: f64, +} + +impl ConsensusParams { + #[must_use] + pub const fn default_for_participants(n_participants: usize) -> Self { + Self { + n_participants, + // by setting the slot coeff to 1, we also increase the probability of multiple blocks + // (forks) being produced in the same slot (epoch). Setting the security + // parameter to some value > 1 ensures nodes have some time to sync before + // deciding on the longest chain. 
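+            // Note: active_slot_coeff below is set to 0.9, so a block is expected in most slots rather than strictly one per slot.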
+ security_param: NonZero::new(10).unwrap(), + // a block should be produced (on average) every slot + active_slot_coeff: 0.9, + } + } +} + +#[derive(Clone)] +pub struct ProviderInfo { + pub service_type: ServiceType, + pub provider_sk: ed25519_dalek::SigningKey, + pub zk_sk: SecretKey, + pub locator: Locator, + pub note: ServiceNote, +} + +impl ProviderInfo { + #[must_use] + pub fn provider_id(&self) -> ProviderId { + ProviderId(self.provider_sk.verifying_key()) + } + + #[must_use] + pub fn zk_id(&self) -> PublicKey { + self.zk_sk.to_public_key() + } +} + +/// General consensus configuration for a chosen participant, that later could +/// be converted into a specific service or services configuration. +#[derive(Clone)] +pub struct GeneralConsensusConfig { + pub leader_config: LeaderConfig, + pub ledger_config: nomos_ledger::Config, + pub genesis_tx: GenesisTx, + pub utxos: Vec, + pub blend_notes: Vec, + pub da_notes: Vec, + pub wallet_accounts: Vec, +} + +#[derive(Clone)] +pub struct ServiceNote { + pub pk: PublicKey, + pub sk: SecretKey, + pub note: Note, + pub output_index: usize, +} + +fn create_genesis_tx(utxos: &[Utxo]) -> GenesisTx { + // Create a genesis inscription op (similar to config.yaml) + let inscription = InscriptionOp { + channel_id: ChannelId::from([0; 32]), + inscription: vec![103, 101, 110, 101, 115, 105, 115], // "genesis" in bytes + parent: MsgId::root(), + signer: Ed25519PublicKey::from_bytes(&[0; 32]).unwrap(), + }; + + // Create ledger transaction with the utxos as outputs + let outputs: Vec = utxos.iter().map(|u| u.note).collect(); + let ledger_tx = LedgerTx::new(vec![], outputs); + + // Create the mantle transaction + let mantle_tx = MantleTx { + ops: vec![Op::ChannelInscribe(inscription)], + ledger_tx, + execution_gas_price: 0, + storage_gas_price: 0, + }; + let signed_mantle_tx = SignedMantleTx { + mantle_tx, + ops_proofs: vec![OpProof::NoProof], + ledger_tx_proof: zksign::Signature::new(CompressedGroth16Proof::from_bytes(&[0u8; 128])), + }; + + // Wrap in GenesisTx + GenesisTx::from_tx(signed_mantle_tx).expect("Invalid genesis transaction") +} + +#[must_use] +pub fn create_consensus_configs( + ids: &[[u8; 32]], + consensus_params: &ConsensusParams, + wallet: &WalletConfig, +) -> Vec { + let mut leader_keys = Vec::new(); + let mut blend_notes = Vec::new(); + let mut da_notes = Vec::new(); + + let utxos = create_utxos_for_leader_and_services( + ids, + &mut leader_keys, + &mut blend_notes, + &mut da_notes, + ); + let utxos = append_wallet_utxos(utxos, wallet); + let genesis_tx = create_genesis_tx(&utxos); + let ledger_config = nomos_ledger::Config { + epoch_config: EpochConfig { + epoch_stake_distribution_stabilization: NonZero::new(3).unwrap(), + epoch_period_nonce_buffer: NonZero::new(3).unwrap(), + epoch_period_nonce_stabilization: NonZero::new(4).unwrap(), + }, + consensus_config: cryptarchia_engine::Config { + security_param: consensus_params.security_param, + active_slot_coeff: consensus_params.active_slot_coeff, + }, + sdp_config: nomos_ledger::mantle::sdp::Config { + service_params: Arc::new( + [ + ( + ServiceType::BlendNetwork, + ServiceParameters { + lock_period: 10, + inactivity_period: 20, + retention_period: 100, + timestamp: 0, + session_duration: 1000, + }, + ), + ( + ServiceType::DataAvailability, + ServiceParameters { + lock_period: 10, + inactivity_period: 20, + retention_period: 100, + timestamp: 0, + session_duration: 1000, + }, + ), + ] + .into(), + ), + min_stake: nomos_core::sdp::MinStake { + threshold: 1, + timestamp: 0, + }, + }, + }; + + 
leader_keys + .into_iter() + .map(|(pk, sk)| GeneralConsensusConfig { + leader_config: LeaderConfig { pk, sk }, + ledger_config: ledger_config.clone(), + genesis_tx: genesis_tx.clone(), + utxos: utxos.clone(), + da_notes: da_notes.clone(), + blend_notes: blend_notes.clone(), + wallet_accounts: wallet.accounts.clone(), + }) + .collect() +} + +fn create_utxos_for_leader_and_services( + ids: &[[u8; 32]], + leader_keys: &mut Vec<(PublicKey, SecretKey)>, + blend_notes: &mut Vec, + da_notes: &mut Vec, +) -> Vec { + let derive_key_material = |prefix: &[u8], id_bytes: &[u8]| -> [u8; 16] { + let mut sk_data = [0; 16]; + let prefix_len = prefix.len(); + + sk_data[..prefix_len].copy_from_slice(prefix); + let remaining_len = 16 - prefix_len; + sk_data[prefix_len..].copy_from_slice(&id_bytes[..remaining_len]); + + sk_data + }; + + let mut utxos = Vec::new(); + + // Assume output index which will be set by the ledger tx. + let mut output_index = 0; + + // Create notes for leader, Blend and DA declarations. + for &id in ids { + let sk_leader_data = derive_key_material(b"ld", &id); + let sk_leader = SecretKey::from(BigUint::from_bytes_le(&sk_leader_data)); + let pk_leader = sk_leader.to_public_key(); + leader_keys.push((pk_leader, sk_leader)); + utxos.push(Utxo { + note: Note::new(1_000, pk_leader), + tx_hash: BigUint::from(0u8).into(), + output_index: 0, + }); + output_index += 1; + + let sk_da_data = derive_key_material(b"da", &id); + let sk_da = SecretKey::from(BigUint::from_bytes_le(&sk_da_data)); + let pk_da = sk_da.to_public_key(); + let note_da = Note::new(1, pk_da); + da_notes.push(ServiceNote { + pk: pk_da, + sk: sk_da, + note: note_da, + output_index, + }); + utxos.push(Utxo { + note: note_da, + tx_hash: BigUint::from(0u8).into(), + output_index: 0, + }); + output_index += 1; + + let sk_blend_data = derive_key_material(b"bn", &id); + let sk_blend = SecretKey::from(BigUint::from_bytes_le(&sk_blend_data)); + let pk_blend = sk_blend.to_public_key(); + let note_blend = Note::new(1, pk_blend); + blend_notes.push(ServiceNote { + pk: pk_blend, + sk: sk_blend, + note: note_blend, + output_index, + }); + utxos.push(Utxo { + note: note_blend, + tx_hash: BigUint::from(0u8).into(), + output_index: 0, + }); + output_index += 1; + } + + utxos +} + +fn append_wallet_utxos(mut utxos: Vec, wallet: &WalletConfig) -> Vec { + for account in &wallet.accounts { + utxos.push(Utxo { + note: Note::new(account.value, account.public_key()), + tx_hash: BigUint::from(0u8).into(), + output_index: 0, + }); + } + + utxos +} + +#[must_use] +pub fn create_genesis_tx_with_declarations( + ledger_tx: LedgerTx, + providers: Vec, +) -> GenesisTx { + let inscription = InscriptionOp { + channel_id: ChannelId::from([0; 32]), + inscription: vec![103, 101, 110, 101, 115, 105, 115], // "genesis" in bytes + parent: MsgId::root(), + signer: Ed25519PublicKey::from_bytes(&[0; 32]).unwrap(), + }; + + let ledger_tx_hash = ledger_tx.hash(); + + let mut ops = vec![Op::ChannelInscribe(inscription)]; + + for provider in &providers { + let utxo = Utxo { + tx_hash: ledger_tx_hash, + output_index: provider.note.output_index, + note: provider.note.note, + }; + let declaration = DeclarationMessage { + service_type: provider.service_type, + locators: vec![provider.locator.clone()], + provider_id: provider.provider_id(), + zk_id: provider.zk_id(), + locked_note_id: utxo.id(), + }; + ops.push(Op::SDPDeclare(declaration)); + } + + let mantle_tx = MantleTx { + ops, + ledger_tx, + execution_gas_price: 0, + storage_gas_price: 0, + }; + + let mantle_tx_hash = 
mantle_tx.hash(); + let mut ops_proofs = vec![OpProof::NoProof]; + + for mut provider in providers { + let zk_sig = + SecretKey::multi_sign(&[provider.note.sk, provider.zk_sk], mantle_tx_hash.as_ref()) + .unwrap(); + let ed25519_sig = provider + .provider_sk + .sign(mantle_tx_hash.as_signing_bytes().as_ref()); + + ops_proofs.push(OpProof::ZkAndEd25519Sigs { + zk_sig, + ed25519_sig, + }); + } + + let signed_mantle_tx = SignedMantleTx { + mantle_tx, + ops_proofs, + ledger_tx_proof: zksign::Signature::new(CompressedGroth16Proof::from_bytes(&[0u8; 128])), + }; + + GenesisTx::from_tx(signed_mantle_tx).expect("Invalid genesis transaction") +} diff --git a/testing-framework/configs/src/topology/configs/da.rs b/testing-framework/configs/src/topology/configs/da.rs new file mode 100644 index 0000000..8f372ff --- /dev/null +++ b/testing-framework/configs/src/topology/configs/da.rs @@ -0,0 +1,212 @@ +use std::{ + collections::{HashMap, HashSet}, + path::PathBuf, + str::FromStr as _, + sync::LazyLock, + time::Duration, +}; + +use ed25519_dalek::SigningKey; +use nomos_core::sdp::SessionNumber; +use nomos_da_network_core::swarm::{ + DAConnectionMonitorSettings, DAConnectionPolicySettings, ReplicationConfig, +}; +use nomos_libp2p::{Multiaddr, PeerId, ed25519}; +use nomos_node::NomosDaMembership; +use num_bigint::BigUint; +use subnetworks_assignations::{MembershipCreator as _, MembershipHandler as _}; +use zksign::SecretKey; + +use crate::secret_key_to_peer_id; + +pub static GLOBAL_PARAMS_PATH: LazyLock = LazyLock::new(|| { + let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let relative_path = PathBuf::from("../../tests/kzgrs/kzgrs_test_params"); + manifest_dir + .join(relative_path) + .canonicalize() + .expect("Failed to resolve absolute path") + .to_string_lossy() + .to_string() +}); + +#[derive(Clone)] +pub struct DaParams { + pub subnetwork_size: usize, + pub dispersal_factor: usize, + pub num_samples: u16, + pub num_subnets: u16, + pub old_blobs_check_interval: Duration, + pub blobs_validity_duration: Duration, + pub global_params_path: String, + pub policy_settings: DAConnectionPolicySettings, + pub monitor_settings: DAConnectionMonitorSettings, + pub balancer_interval: Duration, + pub redial_cooldown: Duration, + pub replication_settings: ReplicationConfig, + pub subnets_refresh_interval: Duration, + pub retry_shares_limit: usize, + pub retry_commitments_limit: usize, +} + +impl Default for DaParams { + fn default() -> Self { + Self { + subnetwork_size: 2, + dispersal_factor: 1, + num_samples: 1, + num_subnets: 2, + old_blobs_check_interval: Duration::from_secs(5), + blobs_validity_duration: Duration::from_secs(60), + global_params_path: GLOBAL_PARAMS_PATH.to_string(), + policy_settings: DAConnectionPolicySettings { + min_dispersal_peers: 1, + min_replication_peers: 1, + max_dispersal_failures: 0, + max_sampling_failures: 0, + max_replication_failures: 0, + malicious_threshold: 0, + }, + monitor_settings: DAConnectionMonitorSettings { + failure_time_window: Duration::from_secs(5), + ..Default::default() + }, + balancer_interval: Duration::from_secs(1), + redial_cooldown: Duration::ZERO, + replication_settings: ReplicationConfig { + seen_message_cache_size: 1000, + seen_message_ttl: Duration::from_secs(3600), + }, + subnets_refresh_interval: Duration::from_secs(30), + retry_shares_limit: 1, + retry_commitments_limit: 1, + } + } +} + +#[derive(Debug, Clone)] +pub struct GeneralDaConfig { + pub node_key: ed25519::SecretKey, + pub signer: SigningKey, + pub peer_id: PeerId, + pub 
membership: NomosDaMembership, + pub listening_address: Multiaddr, + pub blob_storage_directory: PathBuf, + pub global_params_path: String, + pub verifier_sk: String, + pub verifier_index: HashSet, + pub num_samples: u16, + pub num_subnets: u16, + pub old_blobs_check_interval: Duration, + pub blobs_validity_duration: Duration, + pub policy_settings: DAConnectionPolicySettings, + pub monitor_settings: DAConnectionMonitorSettings, + pub balancer_interval: Duration, + pub redial_cooldown: Duration, + pub replication_settings: ReplicationConfig, + pub subnets_refresh_interval: Duration, + pub retry_shares_limit: usize, + pub retry_commitments_limit: usize, + pub secret_zk_key: SecretKey, +} + +#[must_use] +pub fn create_da_configs( + ids: &[[u8; 32]], + da_params: &DaParams, + ports: &[u16], +) -> Vec { + let mut node_keys = vec![]; + let mut peer_ids = vec![]; + let mut listening_addresses = vec![]; + + for (i, id) in ids.iter().enumerate() { + let mut node_key_bytes = *id; + let node_key = ed25519::SecretKey::try_from_bytes(&mut node_key_bytes) + .expect("Failed to generate secret key from bytes"); + node_keys.push(node_key.clone()); + + let peer_id = secret_key_to_peer_id(node_key); + peer_ids.push(peer_id); + + let listening_address = + Multiaddr::from_str(&format!("/ip4/127.0.0.1/udp/{}/quic-v1", ports[i],)) + .expect("Failed to create multiaddr"); + listening_addresses.push(listening_address); + } + + let membership = { + let template = NomosDaMembership::new( + SessionNumber::default(), + da_params.subnetwork_size, + da_params.dispersal_factor, + ); + let mut assignations: HashMap> = HashMap::new(); + if peer_ids.is_empty() { + for id in 0..da_params.subnetwork_size { + assignations.insert(u16::try_from(id).unwrap_or_default(), HashSet::new()); + } + } else { + let mut sorted_peers = peer_ids.clone(); + sorted_peers.sort_unstable(); + let dispersal = da_params.dispersal_factor.max(1); + let mut peer_cycle = sorted_peers.iter().cycle(); + for id in 0..da_params.subnetwork_size { + let mut members = HashSet::new(); + for _ in 0..dispersal { + // cycle() only yields None when the iterator is empty, which we guard against. + if let Some(peer) = peer_cycle.next() { + members.insert(*peer); + } + } + assignations.insert(u16::try_from(id).unwrap_or_default(), members); + } + } + + template.init(SessionNumber::default(), assignations) + }; + + ids.iter() + .zip(node_keys) + .enumerate() + .map(|(i, (id, node_key))| { + let blob_storage_directory = PathBuf::from(format!("/tmp/blob_storage_{i}")); + let verifier_sk = blst::min_sig::SecretKey::key_gen(id, &[]).unwrap(); + let verifier_sk_bytes = verifier_sk.to_bytes(); + let peer_id = peer_ids[i]; + let signer = SigningKey::from_bytes(id); + let subnetwork_ids = membership.membership(&peer_id); + + // We need unique ZK secret keys, so we just derive them deterministically from + // the generated Ed25519 public keys, which are guaranteed to be unique because + // they are in turned derived from node ID. 
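+            // Here the bytes come from the DA signer's Ed25519 verifying key, mirroring the derivation used for the blend configs.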
+ let secret_zk_key = + SecretKey::from(BigUint::from_bytes_le(signer.verifying_key().as_bytes())); + + GeneralDaConfig { + node_key, + signer, + peer_id, + secret_zk_key, + membership: membership.clone(), + listening_address: listening_addresses[i].clone(), + blob_storage_directory, + global_params_path: da_params.global_params_path.clone(), + verifier_sk: hex::encode(verifier_sk_bytes), + verifier_index: subnetwork_ids, + num_samples: da_params.num_samples, + num_subnets: da_params.num_subnets, + old_blobs_check_interval: da_params.old_blobs_check_interval, + blobs_validity_duration: da_params.blobs_validity_duration, + policy_settings: da_params.policy_settings.clone(), + monitor_settings: da_params.monitor_settings.clone(), + balancer_interval: da_params.balancer_interval, + redial_cooldown: da_params.redial_cooldown, + replication_settings: da_params.replication_settings, + subnets_refresh_interval: da_params.subnets_refresh_interval, + retry_shares_limit: da_params.retry_shares_limit, + retry_commitments_limit: da_params.retry_commitments_limit, + } + }) + .collect() +} diff --git a/testing-framework/configs/src/topology/configs/deployment.rs b/testing-framework/configs/src/topology/configs/deployment.rs new file mode 100644 index 0000000..6d6d3fe --- /dev/null +++ b/testing-framework/configs/src/topology/configs/deployment.rs @@ -0,0 +1,67 @@ +use core::{num::NonZeroU64, time::Duration}; + +use nomos_blend_service::{ + core::settings::{CoverTrafficSettings, MessageDelayerSettings, SchedulerSettings}, + settings::TimingSettings, +}; +use nomos_libp2p::protocol_name::StreamProtocol; +use nomos_node::config::{ + blend::deployment::{ + CommonSettings as BlendCommonSettings, CoreSettings as BlendCoreSettings, + Settings as BlendDeploymentSettings, + }, + deployment::{CustomDeployment, Settings as DeploymentSettings}, + network::deployment::Settings as NetworkDeploymentSettings, +}; +use nomos_utils::math::NonNegativeF64; + +#[must_use] +pub fn default_e2e_deployment_settings() -> DeploymentSettings { + DeploymentSettings::Custom(CustomDeployment { + blend: BlendDeploymentSettings { + common: BlendCommonSettings { + minimum_network_size: NonZeroU64::try_from(30u64) + .expect("Minimum network size cannot be zero."), + num_blend_layers: NonZeroU64::try_from(3) + .expect("Number of blend layers cannot be zero."), + timing: TimingSettings { + round_duration: Duration::from_secs(1), + rounds_per_interval: NonZeroU64::try_from(30u64) + .expect("Rounds per interval cannot be zero."), + // (21,600 blocks * 30s per block) / 1s per round = 648,000 rounds + rounds_per_session: NonZeroU64::try_from(648_000u64) + .expect("Rounds per session cannot be zero."), + rounds_per_observation_window: NonZeroU64::try_from(30u64) + .expect("Rounds per observation window cannot be zero."), + rounds_per_session_transition_period: NonZeroU64::try_from(30u64) + .expect("Rounds per session transition period cannot be zero."), + epoch_transition_period_in_slots: NonZeroU64::try_from(2_600) + .expect("Epoch transition period in slots cannot be zero."), + }, + protocol_name: StreamProtocol::new("/blend/integration-tests"), + }, + core: BlendCoreSettings { + minimum_messages_coefficient: NonZeroU64::try_from(1) + .expect("Minimum messages coefficient cannot be zero."), + normalization_constant: 1.03f64 + .try_into() + .expect("Normalization constant cannot be negative."), + scheduler: SchedulerSettings { + cover: CoverTrafficSettings { + intervals_for_safety_buffer: 100, + message_frequency_per_round: 
NonNegativeF64::try_from(1f64) + .expect("Message frequency per round cannot be negative."), + }, + delayer: MessageDelayerSettings { + maximum_release_delay_in_rounds: NonZeroU64::try_from(3u64) + .expect("Maximum release delay between rounds cannot be zero."), + }, + }, + }, + }, + network: NetworkDeploymentSettings { + identify_protocol_name: StreamProtocol::new("/integration/nomos/identify/1.0.0"), + kademlia_protocol_name: StreamProtocol::new("/integration/nomos/kad/1.0.0"), + }, + }) +} diff --git a/testing-framework/configs/src/topology/configs/mod.rs b/testing-framework/configs/src/topology/configs/mod.rs new file mode 100644 index 0000000..2a73e58 --- /dev/null +++ b/testing-framework/configs/src/topology/configs/mod.rs @@ -0,0 +1,164 @@ +pub mod api; +pub mod blend; +pub mod bootstrap; +pub mod consensus; +pub mod da; +pub mod network; +pub mod time; +pub mod tracing; +pub mod wallet; + +use blend::GeneralBlendConfig; +use consensus::{GeneralConsensusConfig, ProviderInfo, create_genesis_tx_with_declarations}; +use da::GeneralDaConfig; +use key_management_system::{ + backend::preload::PreloadKMSBackendSettings, + keys::{Ed25519Key, Key, ZkKey}, +}; +use network::GeneralNetworkConfig; +use nomos_core::{ + mantle::GenesisTx as _, + sdp::{Locator, ServiceType}, +}; +use nomos_utils::net::get_available_udp_port; +use rand::{Rng as _, thread_rng}; +use tracing::GeneralTracingConfig; +use wallet::WalletConfig; + +use crate::{ + common::kms::key_id_for_preload_backend, + topology::configs::{ + api::GeneralApiConfig, + bootstrap::{GeneralBootstrapConfig, SHORT_PROLONGED_BOOTSTRAP_PERIOD}, + consensus::ConsensusParams, + da::DaParams, + network::NetworkParams, + time::GeneralTimeConfig, + }, +}; + +#[derive(Clone)] +pub struct GeneralConfig { + pub api_config: GeneralApiConfig, + pub consensus_config: GeneralConsensusConfig, + pub bootstrapping_config: GeneralBootstrapConfig, + pub da_config: GeneralDaConfig, + pub network_config: GeneralNetworkConfig, + pub blend_config: GeneralBlendConfig, + pub tracing_config: GeneralTracingConfig, + pub time_config: GeneralTimeConfig, + pub kms_config: PreloadKMSBackendSettings, +} + +#[must_use] +pub fn create_general_configs(n_nodes: usize) -> Vec { + create_general_configs_with_network(n_nodes, &NetworkParams::default()) +} + +#[must_use] +pub fn create_general_configs_with_network( + n_nodes: usize, + network_params: &NetworkParams, +) -> Vec { + create_general_configs_with_blend_core_subset(n_nodes, n_nodes, network_params) +} + +#[must_use] +pub fn create_general_configs_with_blend_core_subset( + n_nodes: usize, + // TODO: Instead of this, define a config struct for each node. + // That would be also useful for non-even token distributions: https://github.com/logos-co/nomos/issues/1888 + n_blend_core_nodes: usize, + network_params: &NetworkParams, +) -> Vec { + assert!( + n_blend_core_nodes <= n_nodes, + "n_blend_core_nodes({n_blend_core_nodes}) must be less than or equal to n_nodes({n_nodes})", + ); + + // Blend relies on each node declaring a different ZK public key, so we need + // different IDs to generate different keys. 
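+    // The deterministic placeholder IDs created below are immediately overwritten with random bytes from thread_rng.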
+ let mut ids: Vec<_> = (0..n_nodes).map(|i| [i as u8; 32]).collect(); + let mut da_ports = vec![]; + let mut blend_ports = vec![]; + + for id in &mut ids { + thread_rng().fill(id); + da_ports.push(get_available_udp_port().unwrap()); + blend_ports.push(get_available_udp_port().unwrap()); + } + + let consensus_params = ConsensusParams::default_for_participants(n_nodes); + let mut consensus_configs = + consensus::create_consensus_configs(&ids, &consensus_params, &WalletConfig::default()); + let bootstrap_config = + bootstrap::create_bootstrap_configs(&ids, SHORT_PROLONGED_BOOTSTRAP_PERIOD); + let network_configs = network::create_network_configs(&ids, network_params); + let da_configs = da::create_da_configs(&ids, &DaParams::default(), &da_ports); + let api_configs = api::create_api_configs(&ids); + let blend_configs = blend::create_blend_configs(&ids, &blend_ports); + let tracing_configs = tracing::create_tracing_configs(&ids); + let time_config = time::default_time_config(); + + let providers: Vec<_> = blend_configs + .iter() + .enumerate() + .take(n_blend_core_nodes) + .map(|(i, blend_conf)| ProviderInfo { + service_type: ServiceType::BlendNetwork, + provider_sk: blend_conf.signer.clone(), + zk_sk: blend_conf.secret_zk_key.clone(), + locator: Locator(blend_conf.backend_core.listening_address.clone()), + note: consensus_configs[0].blend_notes[i].clone(), + }) + .collect(); + let ledger_tx = consensus_configs[0] + .genesis_tx + .mantle_tx() + .ledger_tx + .clone(); + let genesis_tx = create_genesis_tx_with_declarations(ledger_tx, providers); + for c in &mut consensus_configs { + c.genesis_tx = genesis_tx.clone(); + } + + // Set Blend and DA keys in KMS of each node config. + let kms_configs: Vec<_> = blend_configs + .iter() + .map(|blend_conf| { + let ed_key = Ed25519Key::new(blend_conf.signer.clone()); + let zk_key = ZkKey::new(blend_conf.secret_zk_key.clone()); + PreloadKMSBackendSettings { + keys: [ + ( + key_id_for_preload_backend(&Key::from(ed_key.clone())), + Key::from(ed_key), + ), + ( + key_id_for_preload_backend(&Key::from(zk_key.clone())), + Key::from(zk_key), + ), + ] + .into(), + } + }) + .collect(); + + let mut general_configs = vec![]; + + for i in 0..n_nodes { + general_configs.push(GeneralConfig { + api_config: api_configs[i].clone(), + consensus_config: consensus_configs[i].clone(), + bootstrapping_config: bootstrap_config[i].clone(), + da_config: da_configs[i].clone(), + network_config: network_configs[i].clone(), + blend_config: blend_configs[i].clone(), + tracing_config: tracing_configs[i].clone(), + time_config: time_config.clone(), + kms_config: kms_configs[i].clone(), + }); + } + + general_configs +} diff --git a/testing-framework/configs/src/topology/configs/network.rs b/testing-framework/configs/src/topology/configs/network.rs new file mode 100644 index 0000000..2e8fe2a --- /dev/null +++ b/testing-framework/configs/src/topology/configs/network.rs @@ -0,0 +1,116 @@ +use std::time::Duration; + +use nomos_libp2p::{ + IdentifySettings, KademliaSettings, Multiaddr, NatSettings, ed25519, gossipsub, +}; +use nomos_node::config::network::serde::{BackendSettings, Config, SwarmConfig}; +use nomos_utils::net::get_available_udp_port; + +use crate::node_address_from_port; + +#[derive(Default, Clone)] +pub enum Libp2pNetworkLayout { + #[default] + Star, + Chain, + Full, +} + +#[derive(Default, Clone)] +pub struct NetworkParams { + pub libp2p_network_layout: Libp2pNetworkLayout, +} + +pub type GeneralNetworkConfig = Config; + +fn default_swarm_config() -> SwarmConfig { + 
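+    // Baseline swarm settings; create_network_configs overrides node_key, port and chain_sync_config per node.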
SwarmConfig { + host: std::net::Ipv4Addr::UNSPECIFIED, + port: 60000, + node_key: ed25519::SecretKey::generate(), + gossipsub_config: gossipsub::Config::default(), + kademlia_config: KademliaSettings::default(), + identify_config: IdentifySettings::default(), + chain_sync_config: cryptarchia_sync::Config::default(), + nat_config: NatSettings::default(), + } +} + +#[must_use] +pub fn create_network_configs( + ids: &[[u8; 32]], + network_params: &NetworkParams, +) -> Vec { + let swarm_configs: Vec = ids + .iter() + .map(|id| { + let mut node_key_bytes = *id; + let node_key = ed25519::SecretKey::try_from_bytes(&mut node_key_bytes) + .expect("Failed to generate secret key from bytes"); + + SwarmConfig { + node_key, + port: get_available_udp_port().unwrap(), + chain_sync_config: cryptarchia_sync::Config { + peer_response_timeout: Duration::from_secs(60), + }, + ..default_swarm_config() + } + }) + .collect(); + + let all_initial_peers = initial_peers_by_network_layout(&swarm_configs, network_params); + + swarm_configs + .iter() + .zip(all_initial_peers) + .map(|(swarm_config, initial_peers)| GeneralNetworkConfig { + backend: BackendSettings { + initial_peers, + inner: swarm_config.to_owned(), + }, + }) + .collect() +} + +fn initial_peers_by_network_layout( + swarm_configs: &[SwarmConfig], + network_params: &NetworkParams, +) -> Vec> { + let mut all_initial_peers = vec![]; + + match network_params.libp2p_network_layout { + Libp2pNetworkLayout::Star => { + // First node is the hub - has no initial peers + all_initial_peers.push(vec![]); + let first_addr = node_address_from_port(swarm_configs[0].port); + + // All other nodes connect to the first node + for _ in 1..swarm_configs.len() { + all_initial_peers.push(vec![first_addr.clone()]); + } + } + Libp2pNetworkLayout::Chain => { + // First node has no initial peers + all_initial_peers.push(vec![]); + + // Each subsequent node connects to the previous one + for i in 1..swarm_configs.len() { + let prev_addr = node_address_from_port(swarm_configs[i - 1].port); + all_initial_peers.push(vec![prev_addr]); + } + } + Libp2pNetworkLayout::Full => { + // Each node connects to all previous nodes, unidirectional connections + for i in 0..swarm_configs.len() { + let mut peers = vec![]; + for swarm_config in swarm_configs.iter().take(i) { + peers.push(node_address_from_port(swarm_config.port)); + } + all_initial_peers.push(peers); + } + } + } + + all_initial_peers +} diff --git a/testing-framework/configs/src/topology/configs/time.rs b/testing-framework/configs/src/topology/configs/time.rs new file mode 100644 index 0000000..e6f65c7 --- /dev/null +++ b/testing-framework/configs/src/topology/configs/time.rs @@ -0,0 +1,35 @@ +use std::{ + net::{IpAddr, Ipv4Addr}, + str::FromStr as _, + time::Duration, +}; + +use time::OffsetDateTime; + +const DEFAULT_SLOT_TIME: u64 = 2; +const CONSENSUS_SLOT_TIME_VAR: &str = "CONSENSUS_SLOT_TIME"; + +#[derive(Clone, Debug)] +pub struct GeneralTimeConfig { + pub slot_duration: Duration, + pub chain_start_time: OffsetDateTime, + pub ntp_server: String, + pub timeout: Duration, + pub interface: IpAddr, + pub update_interval: Duration, +} + +#[must_use] +pub fn default_time_config() -> GeneralTimeConfig { + let slot_duration = std::env::var(CONSENSUS_SLOT_TIME_VAR) + .map(|s| ::from_str(&s).unwrap()) + .unwrap_or(DEFAULT_SLOT_TIME); + GeneralTimeConfig { + slot_duration: Duration::from_secs(slot_duration), + chain_start_time: OffsetDateTime::now_utc(), + ntp_server: String::from("pool.ntp.org"), + timeout: Duration::from_secs(5), + 
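// A quick how-to for the slot-time override read above (illustrative only):
// running the suite with the environment variable set, e.g.
//
//     CONSENSUS_SLOT_TIME=5 cargo test --workspace
//
// makes `default_time_config` return a 5-second `slot_duration`, while the
// remaining fields keep the defaults shown in this literal.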
interface: IpAddr::V4(Ipv4Addr::UNSPECIFIED), + update_interval: Duration::from_secs(16), + } +} diff --git a/testing-framework/configs/src/topology/configs/tracing.rs b/testing-framework/configs/src/topology/configs/tracing.rs new file mode 100644 index 0000000..fd8e73b --- /dev/null +++ b/testing-framework/configs/src/topology/configs/tracing.rs @@ -0,0 +1,71 @@ +use nomos_tracing::{ + logging::loki::LokiConfig, metrics::otlp::OtlpMetricsConfig, tracing::otlp::OtlpTracingConfig, +}; +use nomos_tracing_service::{ + ConsoleLayer, FilterLayer, LoggerLayer, MetricsLayer, TracingLayer, TracingSettings, +}; +use tracing::Level; + +use crate::IS_DEBUG_TRACING; + +#[derive(Clone, Default)] +pub struct GeneralTracingConfig { + pub tracing_settings: TracingSettings, +} + +impl GeneralTracingConfig { + fn local_debug_tracing(id: usize) -> Self { + let host_identifier = format!("node-{id}"); + Self { + tracing_settings: TracingSettings { + logger: LoggerLayer::Loki(LokiConfig { + endpoint: "http://localhost:3100".try_into().unwrap(), + host_identifier: host_identifier.clone(), + }), + tracing: TracingLayer::Otlp(OtlpTracingConfig { + endpoint: "http://localhost:4317".try_into().unwrap(), + sample_ratio: 0.5, + service_name: host_identifier.clone(), + }), + filter: FilterLayer::EnvFilter(nomos_tracing::filter::envfilter::EnvFilterConfig { + // Allow events only from modules that matches the regex, if it matches - use + // provided tracing level. Libp2p related crates are very log intensive in debug + // mode. + filters: std::iter::once(&("nomos", "debug")) + .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())) + .collect(), + }), + metrics: MetricsLayer::Otlp(OtlpMetricsConfig { + endpoint: "http://127.0.0.1:9090/api/v1/otlp/v1/metrics" + .try_into() + .unwrap(), + host_identifier, + }), + console: ConsoleLayer::None, + level: Level::DEBUG, + }, + } + } +} + +#[must_use] +pub fn create_tracing_configs(ids: &[[u8; 32]]) -> Vec { + if *IS_DEBUG_TRACING { + create_debug_configs(ids) + } else { + create_default_configs(ids) + } +} + +fn create_debug_configs(ids: &[[u8; 32]]) -> Vec { + ids.iter() + .enumerate() + .map(|(i, _)| GeneralTracingConfig::local_debug_tracing(i)) + .collect() +} + +fn create_default_configs(ids: &[[u8; 32]]) -> Vec { + ids.iter() + .map(|_| GeneralTracingConfig::default()) + .collect() +} diff --git a/testing-framework/configs/src/topology/configs/wallet.rs b/testing-framework/configs/src/topology/configs/wallet.rs new file mode 100644 index 0000000..33602a4 --- /dev/null +++ b/testing-framework/configs/src/topology/configs/wallet.rs @@ -0,0 +1,79 @@ +use std::num::NonZeroUsize; + +use num_bigint::BigUint; +use zksign::{PublicKey, SecretKey}; + +/// Collection of wallet accounts that should be funded at genesis. 
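// A worked example of the uniform split implemented below (illustrative only):
//
//     use std::num::NonZeroUsize;
//     let cfg = WalletConfig::uniform(10, NonZeroUsize::new(3).unwrap());
//     // base allocation 10 / 3 = 3, remainder 1, so the balances are 4, 3, 3
//
// Each account key is derived deterministically from its index, so the same call
// always yields the same wallets.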
+#[derive(Clone, Default, Debug)] +pub struct WalletConfig { + pub accounts: Vec, +} + +impl WalletConfig { + #[must_use] + pub const fn new(accounts: Vec) -> Self { + Self { accounts } + } + + #[must_use] + pub fn uniform(total_funds: u64, users: NonZeroUsize) -> Self { + let user_count = users.get() as u64; + assert!(user_count > 0, "wallet user count must be non-zero"); + assert!( + total_funds >= user_count, + "wallet funds must allocate at least 1 token per user" + ); + + let base_allocation = total_funds / user_count; + let mut remainder = total_funds % user_count; + + let accounts = (0..users.get()) + .map(|idx| { + let mut amount = base_allocation; + if remainder > 0 { + amount += 1; + remainder -= 1; + } + + WalletAccount::deterministic(idx as u64, amount) + }) + .collect(); + + Self { accounts } + } +} + +/// Wallet account that holds funds in the genesis state. +#[derive(Clone, Debug)] +pub struct WalletAccount { + pub label: String, + pub secret_key: SecretKey, + pub value: u64, +} + +impl WalletAccount { + #[must_use] + pub fn new(label: impl Into, secret_key: SecretKey, value: u64) -> Self { + assert!(value > 0, "wallet account value must be positive"); + Self { + label: label.into(), + secret_key, + value, + } + } + + #[must_use] + pub fn deterministic(index: u64, value: u64) -> Self { + let mut seed = [0u8; 32]; + seed[..2].copy_from_slice(b"wl"); + seed[2..10].copy_from_slice(&index.to_le_bytes()); + + let secret_key = SecretKey::from(BigUint::from_bytes_le(&seed)); + Self::new(format!("wallet-user-{index}"), secret_key, value) + } + + #[must_use] + pub fn public_key(&self) -> PublicKey { + self.secret_key.to_public_key() + } +} diff --git a/testing-framework/configs/src/topology/mod.rs b/testing-framework/configs/src/topology/mod.rs new file mode 100644 index 0000000..3810d5b --- /dev/null +++ b/testing-framework/configs/src/topology/mod.rs @@ -0,0 +1 @@ +pub mod configs; diff --git a/testing-framework/core/Cargo.toml b/testing-framework/core/Cargo.toml new file mode 100644 index 0000000..f4128dc --- /dev/null +++ b/testing-framework/core/Cargo.toml @@ -0,0 +1,52 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +name = "testing-framework-core" +readme.workspace = true +repository.workspace = true +version = "0.1.0" + +[lints] +workspace = true + +[features] +default = [] + +[dependencies] +anyhow = "1" +async-trait = "0.1" +broadcast-service = { workspace = true } +chain-service = { workspace = true } +common-http-client = { workspace = true } +futures = { default-features = false, version = "0.3" } +groth16 = { workspace = true } +hex = { version = "0.4.3", default-features = false } +integration-configs = { workspace = true } +key-management-system = { workspace = true } +kzgrs-backend = { workspace = true } +nomos-core = { workspace = true } +nomos-da-network-core = { workspace = true } +nomos-da-network-service = { workspace = true } +nomos-executor = { workspace = true, default-features = false, features = ["testing", "tracing"] } +nomos-http-api-common = { workspace = true } +nomos-libp2p = { workspace = true } +nomos-network = { workspace = true, features = ["libp2p"] } +nomos-node = { workspace = true, default-features = false, features = ["testing"] } +nomos-tracing = { workspace = true } +nomos-tracing-service = { workspace = true } +nomos-utils = { workspace = true } +prometheus-http-query = "0.8" +rand = { workspace = true } +reqwest = { workspace = true, 
features = ["json"] } +serde = { workspace = true } +serde_json = { workspace = true } +serde_with = { workspace = true } +serde_yaml = { workspace = true } +tempfile = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["macros", "process", "rt-multi-thread", "time"] } +tracing = { workspace = true } +tx-service = { workspace = true, features = ["libp2p", "mock"] } diff --git a/testing-framework/core/src/lib.rs b/testing-framework/core/src/lib.rs new file mode 100644 index 0000000..b36c288 --- /dev/null +++ b/testing-framework/core/src/lib.rs @@ -0,0 +1,19 @@ +pub mod nodes; +pub mod scenario; +pub mod topology; + +use std::{env, ops::Mul as _, sync::LazyLock, time::Duration}; + +pub use integration_configs::{ + IS_DEBUG_TRACING, node_address_from_port, secret_key_to_peer_id, secret_key_to_provider_id, + topology::configs::da::GLOBAL_PARAMS_PATH, +}; + +static IS_SLOW_TEST_ENV: LazyLock = + LazyLock::new(|| env::var("SLOW_TEST_ENV").is_ok_and(|s| s == "true")); + +/// In slow test environments like Codecov, use 2x timeout. +#[must_use] +pub fn adjust_timeout(d: Duration) -> Duration { + if *IS_SLOW_TEST_ENV { d.mul(2) } else { d } +} diff --git a/testing-framework/core/src/nodes/api_client.rs b/testing-framework/core/src/nodes/api_client.rs new file mode 100644 index 0000000..6b6245a --- /dev/null +++ b/testing-framework/core/src/nodes/api_client.rs @@ -0,0 +1,252 @@ +use std::net::SocketAddr; + +use chain_service::CryptarchiaInfo; +use common_http_client::CommonHttpClient; +use nomos_core::{block::Block, da::BlobId, mantle::SignedMantleTx, sdp::SessionNumber}; +use nomos_da_network_core::swarm::{BalancerStats, MonitorStats}; +use nomos_da_network_service::MembershipResponse; +use nomos_http_api_common::paths::{ + CRYPTARCHIA_INFO, DA_BALANCER_STATS, DA_BLACKLISTED_PEERS, DA_BLOCK_PEER, DA_GET_MEMBERSHIP, + DA_HISTORIC_SAMPLING, DA_MONITOR_STATS, DA_UNBLOCK_PEER, MEMPOOL_ADD_TX, NETWORK_INFO, + STORAGE_BLOCK, +}; +use nomos_network::backends::libp2p::Libp2pInfo; +use nomos_node::{HeaderId, api::testing::handlers::HistoricSamplingRequest}; +use reqwest::{Client, RequestBuilder, Response, Url}; +use serde::{Serialize, de::DeserializeOwned}; +use serde_json::Value; + +pub const DA_GET_TESTING_ENDPOINT_ERROR: &str = "Failed to connect to testing endpoint. The binary was likely built without the 'testing' \ + feature. 
Try: cargo build --workspace --all-features"; + +#[derive(Clone)] +pub struct ApiClient { + pub(crate) base_url: Url, + pub(crate) testing_url: Option, + client: Client, + pub(crate) http_client: CommonHttpClient, +} + +impl ApiClient { + #[must_use] + pub fn new(base_addr: SocketAddr, testing_addr: Option) -> Self { + let base_url = + Url::parse(&format!("http://{base_addr}")).expect("Valid base address for node"); + let testing_url = testing_addr + .map(|addr| Url::parse(&format!("http://{addr}")).expect("Valid testing address")); + Self::from_urls(base_url, testing_url) + } + + #[must_use] + pub fn from_urls(base_url: Url, testing_url: Option) -> Self { + let client = Client::new(); + Self { + base_url, + testing_url, + http_client: CommonHttpClient::new_with_client(client.clone(), None), + client, + } + } + + #[must_use] + pub fn testing_url(&self) -> Option { + self.testing_url.clone() + } + + pub fn get_builder(&self, path: &str) -> RequestBuilder { + self.client.get(self.join_base(path)) + } + + pub async fn get_response(&self, path: &str) -> reqwest::Result { + self.client.get(self.join_base(path)).send().await + } + + pub async fn get_json(&self, path: &str) -> reqwest::Result + where + T: DeserializeOwned, + { + self.get_response(path) + .await? + .error_for_status()? + .json() + .await + } + + pub async fn post_json_decode(&self, path: &str, body: &T) -> reqwest::Result + where + T: Serialize + Sync + ?Sized, + R: DeserializeOwned, + { + self.post_json_response(path, body) + .await? + .error_for_status()? + .json() + .await + } + + pub async fn post_json_response(&self, path: &str, body: &T) -> reqwest::Result + where + T: Serialize + Sync + ?Sized, + { + self.client + .post(self.join_base(path)) + .json(body) + .send() + .await + } + + pub async fn post_json_unit(&self, path: &str, body: &T) -> reqwest::Result<()> + where + T: Serialize + Sync + ?Sized, + { + self.post_json_response(path, body) + .await? + .error_for_status()?; + Ok(()) + } + + pub async fn get_testing_json(&self, path: &str) -> reqwest::Result + where + T: DeserializeOwned, + { + self.get_testing_response(path) + .await? + .error_for_status()? + .json() + .await + } + + pub async fn post_testing_json_decode(&self, path: &str, body: &T) -> reqwest::Result + where + T: Serialize + Sync + ?Sized, + R: DeserializeOwned, + { + self.post_testing_json_response(path, body) + .await? + .error_for_status()? + .json() + .await + } + + pub async fn post_testing_json_unit(&self, path: &str, body: &T) -> reqwest::Result<()> + where + T: Serialize + Sync + ?Sized, + { + self.post_testing_json_response(path, body) + .await? 
+ .error_for_status()?; + Ok(()) + } + + pub async fn post_testing_json_response( + &self, + path: &str, + body: &T, + ) -> reqwest::Result + where + T: Serialize + Sync + ?Sized, + { + let testing_url = self + .testing_url + .as_ref() + .expect(DA_GET_TESTING_ENDPOINT_ERROR); + self.client + .post(Self::join_url(testing_url, path)) + .json(body) + .send() + .await + } + + pub async fn get_testing_response(&self, path: &str) -> reqwest::Result { + let testing_url = self + .testing_url + .as_ref() + .expect(DA_GET_TESTING_ENDPOINT_ERROR); + self.client + .get(Self::join_url(testing_url, path)) + .send() + .await + } + + pub async fn block_peer(&self, peer_id: &str) -> reqwest::Result { + self.post_json_decode(DA_BLOCK_PEER, &peer_id).await + } + + pub async fn unblock_peer(&self, peer_id: &str) -> reqwest::Result { + self.post_json_decode(DA_UNBLOCK_PEER, &peer_id).await + } + + pub async fn blacklisted_peers(&self) -> reqwest::Result> { + self.get_json(DA_BLACKLISTED_PEERS).await + } + + pub async fn balancer_stats(&self) -> reqwest::Result { + self.get_json(DA_BALANCER_STATS).await + } + + pub async fn monitor_stats(&self) -> reqwest::Result { + self.get_json(DA_MONITOR_STATS).await + } + + pub async fn consensus_info(&self) -> reqwest::Result { + self.get_json(CRYPTARCHIA_INFO).await + } + + pub async fn network_info(&self) -> reqwest::Result { + self.get_json(NETWORK_INFO).await + } + + pub async fn storage_block( + &self, + id: &HeaderId, + ) -> reqwest::Result>> { + self.post_json_decode(STORAGE_BLOCK, id).await + } + + pub async fn da_get_membership( + &self, + session_id: &SessionNumber, + ) -> reqwest::Result { + self.post_testing_json_decode(DA_GET_MEMBERSHIP, session_id) + .await + } + + pub async fn da_historic_sampling( + &self, + request: &HistoricSamplingRequest, + ) -> reqwest::Result { + self.post_testing_json_decode(DA_HISTORIC_SAMPLING, request) + .await + } + + pub async fn submit_transaction(&self, tx: &SignedMantleTx) -> reqwest::Result<()> { + self.post_json_unit(MEMPOOL_ADD_TX, tx).await + } + + pub async fn get_headers_raw(&self, builder: RequestBuilder) -> reqwest::Result { + builder.send().await + } + + pub async fn mempool_metrics(&self, pool: &str) -> reqwest::Result { + self.get_json(&format!("/{pool}/metrics")).await + } + + #[must_use] + pub const fn base_url(&self) -> &Url { + &self.base_url + } + + #[must_use] + pub const fn http_client(&self) -> &CommonHttpClient { + &self.http_client + } + + fn join_base(&self, path: &str) -> Url { + Self::join_url(&self.base_url, path) + } + + fn join_url(base: &Url, path: &str) -> Url { + let trimmed = path.trim_start_matches('/'); + base.join(trimmed).expect("valid relative path") + } +} diff --git a/testing-framework/core/src/nodes/executor.rs b/testing-framework/core/src/nodes/executor.rs new file mode 100644 index 0000000..bb78466 --- /dev/null +++ b/testing-framework/core/src/nodes/executor.rs @@ -0,0 +1,282 @@ +use std::{ + collections::HashSet, + path::PathBuf, + process::{Child, Command, Stdio}, + time::Duration, +}; + +use broadcast_service::BlockInfo; +use chain_service::CryptarchiaInfo; +use futures::Stream; +pub use integration_configs::nodes::executor::create_executor_config; +use kzgrs_backend::common::share::{DaLightShare, DaShare, DaSharesCommitments}; +use nomos_core::{ + block::Block, da::BlobId, header::HeaderId, mantle::SignedMantleTx, sdp::SessionNumber, +}; +use nomos_da_network_core::swarm::{BalancerStats, MonitorStats}; +use nomos_da_network_service::MembershipResponse; +use 
nomos_executor::config::Config; +use nomos_http_api_common::paths::{DA_GET_SHARES_COMMITMENTS, MANTLE_METRICS, MEMPOOL_ADD_TX}; +use nomos_network::backends::libp2p::Libp2pInfo; +use nomos_node::api::testing::handlers::HistoricSamplingRequest; +use nomos_tracing::logging::local::FileConfig; +use nomos_tracing_service::LoggerLayer; +use reqwest::Url; +use serde_yaml::{Mapping, Number as YamlNumber, Value}; + +use super::{ApiClient, create_tempdir, persist_tempdir, should_persist_tempdir}; +use crate::{IS_DEBUG_TRACING, adjust_timeout, nodes::LOGS_PREFIX}; + +const BIN_PATH: &str = "target/debug/nomos-executor"; + +fn binary_path() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("../../") + .join(BIN_PATH) +} + +pub struct Executor { + tempdir: tempfile::TempDir, + child: Child, + config: Config, + api: ApiClient, +} + +fn inject_ibd_into_cryptarchia(yaml_value: &mut Value) { + let Some(root) = yaml_value.as_mapping_mut() else { + return; + }; + let Some(cryptarchia) = root + .get_mut(&Value::String("cryptarchia".into())) + .and_then(Value::as_mapping_mut) + else { + return; + }; + if !cryptarchia.contains_key(&Value::String("network_adapter_settings".into())) { + let mut network = Mapping::new(); + network.insert( + Value::String("topic".into()), + Value::String(nomos_node::CONSENSUS_TOPIC.into()), + ); + cryptarchia.insert(Value::String("network_adapter_settings".into()), Value::Mapping(network)); + } + if !cryptarchia.contains_key(&Value::String("sync".into())) { + let mut orphan = Mapping::new(); + orphan.insert( + Value::String("max_orphan_cache_size".into()), + Value::Number(YamlNumber::from(5)), + ); + let mut sync = Mapping::new(); + sync.insert(Value::String("orphan".into()), Value::Mapping(orphan)); + cryptarchia.insert(Value::String("sync".into()), Value::Mapping(sync)); + } + let Some(bootstrap) = cryptarchia + .get_mut(&Value::String("bootstrap".into())) + .and_then(Value::as_mapping_mut) + else { + return; + }; + + let ibd_key = Value::String("ibd".into()); + if bootstrap.contains_key(&ibd_key) { + return; + } + + let mut ibd = Mapping::new(); + ibd.insert(Value::String("peers".into()), Value::Sequence(vec![])); + + bootstrap.insert(ibd_key, Value::Mapping(ibd)); +} + +impl Drop for Executor { + fn drop(&mut self) { + if should_persist_tempdir() + && let Err(e) = persist_tempdir(&mut self.tempdir, "nomos-executor") + { + println!("failed to persist tempdir: {e}"); + } + + if let Err(e) = self.child.kill() { + println!("failed to kill the child process: {e}"); + } + } +} + +impl Executor { + pub async fn spawn(mut config: Config) -> Self { + let dir = create_tempdir().unwrap(); + let config_path = dir.path().join("executor.yaml"); + let file = std::fs::File::create(&config_path).unwrap(); + + if !*IS_DEBUG_TRACING { + // setup logging so that we can intercept it later in testing + config.tracing.logger = LoggerLayer::File(FileConfig { + directory: dir.path().to_owned(), + prefix: Some(LOGS_PREFIX.into()), + }); + } + + config.storage.db_path = dir.path().join("db"); + dir.path().clone_into( + &mut config + .da_verifier + .storage_adapter_settings + .blob_storage_directory, + ); + + let addr = config.http.backend_settings.address; + let testing_addr = config.testing_http.backend_settings.address; + + let mut yaml_value = serde_yaml::to_value(&config).unwrap(); + inject_ibd_into_cryptarchia(&mut yaml_value); + serde_yaml::to_writer(file, &yaml_value).unwrap(); + let child = Command::new(binary_path()) + .arg(&config_path) + .current_dir(dir.path()) + 
.stdout(Stdio::inherit()) + .spawn() + .unwrap(); + let node = Self { + child, + tempdir: dir, + config, + api: ApiClient::new(addr, Some(testing_addr)), + }; + tokio::time::timeout(adjust_timeout(Duration::from_secs(10)), async { + node.wait_online().await; + }) + .await + .unwrap(); + + node + } + + pub async fn block_peer(&self, peer_id: String) -> bool { + self.api.block_peer(&peer_id).await.unwrap() + } + + pub async fn unblock_peer(&self, peer_id: String) -> bool { + self.api.unblock_peer(&peer_id).await.unwrap() + } + + pub async fn blacklisted_peers(&self) -> Vec { + self.api.blacklisted_peers().await.unwrap() + } + + async fn wait_online(&self) { + loop { + let res = self.api.get_response(MANTLE_METRICS).await; + if res.is_ok() && res.unwrap().status().is_success() { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + } + + #[must_use] + pub const fn config(&self) -> &Config { + &self.config + } + + #[must_use] + pub fn url(&self) -> Url { + self.api.base_url().clone() + } + + #[must_use] + pub fn testing_url(&self) -> Option { + self.api.testing_url() + } + + pub async fn balancer_stats(&self) -> BalancerStats { + self.api.balancer_stats().await.unwrap() + } + + pub async fn monitor_stats(&self) -> MonitorStats { + self.api.monitor_stats().await.unwrap() + } + + pub async fn network_info(&self) -> Libp2pInfo { + self.api.network_info().await.unwrap() + } + + pub async fn consensus_info(&self) -> CryptarchiaInfo { + self.api.consensus_info().await.unwrap() + } + + pub async fn get_block(&self, id: HeaderId) -> Option> { + self.api.storage_block(&id).await.unwrap() + } + + pub async fn get_shares( + &self, + blob_id: BlobId, + requested_shares: HashSet<[u8; 2]>, + filter_shares: HashSet<[u8; 2]>, + return_available: bool, + ) -> Result, common_http_client::Error> { + self.api + .http_client() + .get_shares::( + self.api.base_url().clone(), + blob_id, + requested_shares, + filter_shares, + return_available, + ) + .await + } + + pub async fn get_commitments(&self, blob_id: BlobId) -> Option { + self.api + .post_json_decode(DA_GET_SHARES_COMMITMENTS, &blob_id) + .await + .unwrap() + } + + pub async fn get_storage_commitments( + &self, + blob_id: BlobId, + ) -> Result, common_http_client::Error> { + self.api + .http_client() + .get_storage_commitments::(self.api.base_url().clone(), blob_id) + .await + } + + pub async fn da_get_membership( + &self, + session_id: SessionNumber, + ) -> Result { + self.api.da_get_membership(&session_id).await + } + + pub async fn da_historic_sampling( + &self, + block_id: HeaderId, + blob_ids: I, + ) -> Result + where + I: IntoIterator, + { + let request = HistoricSamplingRequest { + block_id, + blob_ids: blob_ids.into_iter().collect(), + }; + + self.api.da_historic_sampling(&request).await + } + + pub async fn get_lib_stream( + &self, + ) -> Result, common_http_client::Error> { + self.api + .http_client() + .get_lib_stream(self.api.base_url().clone()) + .await + } + + pub async fn add_tx(&self, tx: SignedMantleTx) -> Result<(), reqwest::Error> { + self.api.post_json_unit(MEMPOOL_ADD_TX, &tx).await + } +} diff --git a/testing-framework/core/src/nodes/mod.rs b/testing-framework/core/src/nodes/mod.rs new file mode 100644 index 0000000..ee1534a --- /dev/null +++ b/testing-framework/core/src/nodes/mod.rs @@ -0,0 +1,35 @@ +mod api_client; +pub mod executor; +pub mod validator; + +use std::sync::LazyLock; + +pub use api_client::ApiClient; +use tempfile::TempDir; + +pub(crate) const LOGS_PREFIX: &str = "__logs"; +static KEEP_NODE_TEMPDIRS: 
LazyLock = + LazyLock::new(|| std::env::var("NOMOS_TESTS_KEEP_LOGS").is_ok()); + +fn create_tempdir() -> std::io::Result { + // It's easier to use the current location instead of OS-default tempfile + // location because Github Actions can easily access files in the current + // location using wildcard to upload them as artifacts. + TempDir::new_in(std::env::current_dir()?) +} + +fn persist_tempdir(tempdir: &mut TempDir, label: &str) -> std::io::Result<()> { + println!( + "{}: persisting directory at {}", + label, + tempdir.path().display() + ); + // we need ownership of the dir to persist it + let dir = std::mem::replace(tempdir, tempfile::tempdir()?); + let _ = dir.keep(); + Ok(()) +} + +pub(crate) fn should_persist_tempdir() -> bool { + std::thread::panicking() || *KEEP_NODE_TEMPDIRS +} diff --git a/testing-framework/core/src/nodes/validator.rs b/testing-framework/core/src/nodes/validator.rs new file mode 100644 index 0000000..91e89e7 --- /dev/null +++ b/testing-framework/core/src/nodes/validator.rs @@ -0,0 +1,344 @@ +use std::{ + collections::HashSet, + path::PathBuf, + process::{Child, Command, Stdio}, + time::Duration, +}; + +use broadcast_service::BlockInfo; +use chain_service::CryptarchiaInfo; +use futures::Stream; +pub use integration_configs::nodes::validator::create_validator_config; +use kzgrs_backend::common::share::{DaLightShare, DaShare, DaSharesCommitments}; +use nomos_core::{block::Block, da::BlobId, mantle::SignedMantleTx, sdp::SessionNumber}; +use nomos_da_network_core::swarm::{BalancerStats, MonitorStats}; +use nomos_da_network_service::MembershipResponse; +use nomos_http_api_common::paths::{CRYPTARCHIA_HEADERS, DA_GET_SHARES_COMMITMENTS}; +use nomos_network::backends::libp2p::Libp2pInfo; +use nomos_node::{Config, HeaderId, api::testing::handlers::HistoricSamplingRequest}; +use nomos_tracing::logging::local::FileConfig; +use nomos_tracing_service::LoggerLayer; +use reqwest::Url; +use serde_yaml::{Mapping, Number as YamlNumber, Value}; +use tokio::time::error::Elapsed; +use tx_service::MempoolMetrics; + +use super::{ApiClient, create_tempdir, persist_tempdir, should_persist_tempdir}; +use crate::{IS_DEBUG_TRACING, adjust_timeout, nodes::LOGS_PREFIX}; + +const BIN_PATH: &str = "target/debug/nomos-node"; + +fn binary_path() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("../../") + .join(BIN_PATH) +} + +pub enum Pool { + Da, + Mantle, +} + +pub struct Validator { + tempdir: tempfile::TempDir, + child: Child, + config: Config, + api: ApiClient, +} + +fn inject_ibd_into_cryptarchia(yaml_value: &mut Value) { + let Some(root) = yaml_value.as_mapping_mut() else { + return; + }; + let Some(cryptarchia) = root + .get_mut(&Value::String("cryptarchia".into())) + .and_then(Value::as_mapping_mut) + else { + return; + }; + if !cryptarchia.contains_key(&Value::String("network_adapter_settings".into())) { + let mut network = Mapping::new(); + network.insert( + Value::String("topic".into()), + Value::String(nomos_node::CONSENSUS_TOPIC.into()), + ); + cryptarchia.insert(Value::String("network_adapter_settings".into()), Value::Mapping(network)); + } + if !cryptarchia.contains_key(&Value::String("sync".into())) { + let mut orphan = Mapping::new(); + orphan.insert( + Value::String("max_orphan_cache_size".into()), + Value::Number(YamlNumber::from(5)), + ); + let mut sync = Mapping::new(); + sync.insert(Value::String("orphan".into()), Value::Mapping(orphan)); + cryptarchia.insert(Value::String("sync".into()), Value::Mapping(sync)); + } + let Some(bootstrap) = cryptarchia + 
.get_mut(&Value::String("bootstrap".into())) + .and_then(Value::as_mapping_mut) + else { + return; + }; + + let ibd_key = Value::String("ibd".into()); + if bootstrap.contains_key(&ibd_key) { + return; + } + + let mut ibd = Mapping::new(); + ibd.insert(Value::String("peers".into()), Value::Sequence(vec![])); + + bootstrap.insert(ibd_key, Value::Mapping(ibd)); +} + +impl Drop for Validator { + fn drop(&mut self) { + if should_persist_tempdir() + && let Err(e) = persist_tempdir(&mut self.tempdir, "nomos-node") + { + println!("failed to persist tempdir: {e}"); + } + + if let Err(e) = self.child.kill() { + println!("failed to kill the child process: {e}"); + } + } +} + +impl Validator { + /// Check if the validator process is still running + pub fn is_running(&mut self) -> bool { + match self.child.try_wait() { + Ok(None) => true, + Ok(Some(_)) | Err(_) => false, + } + } + + /// Wait for the validator process to exit, with a timeout + /// Returns true if the process exited within the timeout, false otherwise + pub async fn wait_for_exit(&mut self, timeout: Duration) -> bool { + tokio::time::timeout(timeout, async { + loop { + if !self.is_running() { + return; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + .is_ok() + } + + pub async fn spawn(mut config: Config) -> Result { + let dir = create_tempdir().unwrap(); + let config_path = dir.path().join("validator.yaml"); + let file = std::fs::File::create(&config_path).unwrap(); + + if !*IS_DEBUG_TRACING { + // setup logging so that we can intercept it later in testing + config.tracing.logger = LoggerLayer::File(FileConfig { + directory: dir.path().to_owned(), + prefix: Some(LOGS_PREFIX.into()), + }); + } + + config.storage.db_path = dir.path().join("db"); + dir.path().clone_into( + &mut config + .da_verifier + .storage_adapter_settings + .blob_storage_directory, + ); + + let addr = config.http.backend_settings.address; + let testing_addr = config.testing_http.backend_settings.address; + + let mut yaml_value = serde_yaml::to_value(&config).unwrap(); + inject_ibd_into_cryptarchia(&mut yaml_value); + serde_yaml::to_writer(file, &yaml_value).unwrap(); + let child = Command::new(binary_path()) + .arg(&config_path) + .current_dir(dir.path()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .spawn() + .unwrap(); + let node = Self { + child, + tempdir: dir, + config, + api: ApiClient::new(addr, Some(testing_addr)), + }; + + tokio::time::timeout(adjust_timeout(Duration::from_secs(10)), async { + node.wait_online().await; + }) + .await?; + + Ok(node) + } + + #[must_use] + pub fn url(&self) -> Url { + self.api.base_url().clone() + } + + #[must_use] + pub fn testing_url(&self) -> Option { + self.api.testing_url() + } + + async fn wait_online(&self) { + loop { + if self.api.consensus_info().await.is_ok() { + break; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + } + + pub async fn get_block(&self, id: HeaderId) -> Option> { + self.api.storage_block(&id).await.unwrap() + } + + pub async fn get_commitments(&self, blob_id: BlobId) -> Option { + self.api + .post_json_decode(DA_GET_SHARES_COMMITMENTS, &blob_id) + .await + .unwrap() + } + + pub async fn get_mempoool_metrics(&self, pool: Pool) -> MempoolMetrics { + let discr = match pool { + Pool::Mantle => "mantle", + Pool::Da => "da", + }; + let res = self.api.mempool_metrics(discr).await.unwrap(); + MempoolMetrics { + pending_items: res["pending_items"].as_u64().unwrap() as usize, + last_item_timestamp: res["last_item_timestamp"].as_u64().unwrap(), + } + } + 
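// A minimal driver sketch for this wrapper (illustrative only; `config` stands in
// for a `Config` produced by the topology helpers in this workspace):
//
//     let validator = Validator::spawn(config).await.expect("node did not come online");
//     let info = validator.consensus_info().await;
//     println!("height after startup: {}", info.height);
//
// `spawn` only returns once the HTTP API responds, so the first `consensus_info`
// call already talks to a live node.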
+ pub async fn da_historic_sampling( + &self, + block_id: HeaderId, + blob_ids: I, + ) -> Result + where + I: IntoIterator, + { + let request = HistoricSamplingRequest { + block_id, + blob_ids: blob_ids.into_iter().collect(), + }; + + self.api.da_historic_sampling(&request).await + } + + // not async so that we can use this in `Drop` + #[must_use] + pub fn get_logs_from_file(&self) -> String { + println!( + "fetching logs from dir {}...", + self.tempdir.path().display() + ); + // std::thread::sleep(std::time::Duration::from_secs(50)); + std::fs::read_dir(self.tempdir.path()) + .unwrap() + .filter_map(|entry| { + let entry = entry.unwrap(); + let path = entry.path(); + (path.is_file() && path.to_str().unwrap().contains(LOGS_PREFIX)).then_some(path) + }) + .map(|f| std::fs::read_to_string(f).unwrap()) + .collect::() + } + + #[must_use] + pub const fn config(&self) -> &Config { + &self.config + } + + pub async fn get_headers(&self, from: Option, to: Option) -> Vec { + let mut req = self.api.get_builder(CRYPTARCHIA_HEADERS); + + if let Some(from) = from { + req = req.query(&[("from", from)]); + } + + if let Some(to) = to { + req = req.query(&[("to", to)]); + } + + let res = self.api.get_headers_raw(req).await; + + println!("res: {res:?}"); + + res.unwrap().json::>().await.unwrap() + } + + pub async fn consensus_info(&self) -> CryptarchiaInfo { + let info = self.api.consensus_info().await.unwrap(); + println!("{info:?}"); + info + } + + pub async fn balancer_stats(&self) -> BalancerStats { + self.api.balancer_stats().await.unwrap() + } + + pub async fn monitor_stats(&self) -> MonitorStats { + self.api.monitor_stats().await.unwrap() + } + + pub async fn da_get_membership( + &self, + session_id: SessionNumber, + ) -> Result { + self.api.da_get_membership(&session_id).await + } + + pub async fn network_info(&self) -> Libp2pInfo { + self.api.network_info().await.unwrap() + } + + pub async fn get_shares( + &self, + blob_id: BlobId, + requested_shares: HashSet<[u8; 2]>, + filter_shares: HashSet<[u8; 2]>, + return_available: bool, + ) -> Result, common_http_client::Error> { + self.api + .http_client() + .get_shares::( + self.api.base_url().clone(), + blob_id, + requested_shares, + filter_shares, + return_available, + ) + .await + } + + pub async fn get_storage_commitments( + &self, + blob_id: BlobId, + ) -> Result, common_http_client::Error> { + self.api + .http_client() + .get_storage_commitments::(self.api.base_url().clone(), blob_id) + .await + } + + pub async fn get_lib_stream( + &self, + ) -> Result, common_http_client::Error> { + self.api + .http_client() + .get_lib_stream(self.api.base_url().clone()) + .await + } +} diff --git a/testing-framework/core/src/scenario/capabilities.rs b/testing-framework/core/src/scenario/capabilities.rs new file mode 100644 index 0000000..83a2372 --- /dev/null +++ b/testing-framework/core/src/scenario/capabilities.rs @@ -0,0 +1,28 @@ +use async_trait::async_trait; + +use super::DynError; + +/// Marker type used by scenario builders to request node control support. +#[derive(Clone, Copy, Debug, Default)] +pub struct NodeControlCapability; + +/// Trait implemented by scenario capability markers to signal whether node +/// control is required. +pub trait RequiresNodeControl { + const REQUIRED: bool; +} + +impl RequiresNodeControl for () { + const REQUIRED: bool = false; +} + +impl RequiresNodeControl for NodeControlCapability { + const REQUIRED: bool = true; +} + +/// Interface exposed by runners that can restart nodes at runtime. 
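// A small sketch of how a runner can consult the markers defined above
// (illustrative only):
//
//     fn needs_restart_support<Caps: RequiresNodeControl>() -> bool {
//         Caps::REQUIRED
//     }
//
//     assert!(needs_restart_support::<NodeControlCapability>());
//     assert!(!needs_restart_support::<()>());
//
// Only when the answer is `true` does the runner have to supply the handle
// described by the trait below.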
+#[async_trait] +pub trait NodeControlHandle: Send + Sync { + async fn restart_validator(&self, index: usize) -> Result<(), DynError>; + async fn restart_executor(&self, index: usize) -> Result<(), DynError>; +} diff --git a/testing-framework/core/src/scenario/cfgsync.rs b/testing-framework/core/src/scenario/cfgsync.rs new file mode 100644 index 0000000..157b58f --- /dev/null +++ b/testing-framework/core/src/scenario/cfgsync.rs @@ -0,0 +1,153 @@ +use std::{fs::File, num::NonZero, path::Path, time::Duration}; + +use anyhow::{Context as _, Result}; +use nomos_da_network_core::swarm::ReplicationConfig; +use nomos_tracing::metrics::otlp::OtlpMetricsConfig; +use nomos_tracing_service::{MetricsLayer, TracingSettings}; +use nomos_utils::bounded_duration::{MinimalBoundedDuration, SECOND}; +use reqwest::Url; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +use crate::topology::GeneratedTopology; + +#[serde_as] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CfgSyncConfig { + pub port: u16, + pub n_hosts: usize, + pub timeout: u64, + pub security_param: NonZero, + pub active_slot_coeff: f64, + pub subnetwork_size: usize, + pub dispersal_factor: usize, + pub num_samples: u16, + pub num_subnets: u16, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + pub old_blobs_check_interval: Duration, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + pub blobs_validity_duration: Duration, + pub global_params_path: String, + pub min_dispersal_peers: usize, + pub min_replication_peers: usize, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + pub monitor_failure_time_window: Duration, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + pub balancer_interval: Duration, + pub replication_settings: ReplicationConfig, + pub retry_shares_limit: usize, + pub retry_commitments_limit: usize, + pub tracing_settings: TracingSettings, +} + +pub fn load_cfgsync_template(path: &Path) -> Result { + let file = File::open(path) + .with_context(|| format!("opening cfgsync template at {}", path.display()))?; + serde_yaml::from_reader(file).context("parsing cfgsync template") +} + +pub fn write_cfgsync_template(path: &Path, cfg: &CfgSyncConfig) -> Result<()> { + let file = File::create(path) + .with_context(|| format!("writing cfgsync template to {}", path.display()))?; + let serializable = SerializableCfgSyncConfig::from(cfg); + serde_yaml::to_writer(file, &serializable).context("serializing cfgsync template") +} + +pub fn render_cfgsync_yaml(cfg: &CfgSyncConfig) -> Result { + let serializable = SerializableCfgSyncConfig::from(cfg); + serde_yaml::to_string(&serializable).context("rendering cfgsync yaml") +} + +pub fn apply_topology_overrides( + cfg: &mut CfgSyncConfig, + topology: &GeneratedTopology, + use_kzg_mount: bool, +) { + let hosts = topology.validators().len() + topology.executors().len(); + cfg.n_hosts = hosts; + + let consensus = &topology.config().consensus_params; + cfg.security_param = consensus.security_param; + cfg.active_slot_coeff = consensus.active_slot_coeff; + + let da = &topology.config().da_params; + cfg.subnetwork_size = da.subnetwork_size; + cfg.dispersal_factor = da.dispersal_factor; + cfg.num_samples = da.num_samples; + cfg.num_subnets = da.num_subnets; + cfg.old_blobs_check_interval = da.old_blobs_check_interval; + cfg.blobs_validity_duration = da.blobs_validity_duration; + cfg.global_params_path = if use_kzg_mount { + "/kzgrs_test_params".into() + } else { + da.global_params_path.clone() + }; + cfg.min_dispersal_peers = 
da.policy_settings.min_dispersal_peers; + cfg.min_replication_peers = da.policy_settings.min_replication_peers; + cfg.monitor_failure_time_window = da.monitor_settings.failure_time_window; + cfg.balancer_interval = da.balancer_interval; + cfg.replication_settings = da.replication_settings; + cfg.retry_shares_limit = da.retry_shares_limit; + cfg.retry_commitments_limit = da.retry_commitments_limit; + cfg.tracing_settings.metrics = MetricsLayer::Otlp(OtlpMetricsConfig { + endpoint: Url::parse("http://prometheus:9090/api/v1/otlp/v1/metrics") + .expect("valid prometheus otlp endpoint"), + host_identifier: String::new(), + }); +} + +#[serde_as] +#[derive(Serialize)] +struct SerializableCfgSyncConfig { + port: u16, + n_hosts: usize, + timeout: u64, + security_param: NonZero, + active_slot_coeff: f64, + subnetwork_size: usize, + dispersal_factor: usize, + num_samples: u16, + num_subnets: u16, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + old_blobs_check_interval: Duration, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + blobs_validity_duration: Duration, + global_params_path: String, + min_dispersal_peers: usize, + min_replication_peers: usize, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + monitor_failure_time_window: Duration, + #[serde_as(as = "MinimalBoundedDuration<0, SECOND>")] + balancer_interval: Duration, + replication_settings: ReplicationConfig, + retry_shares_limit: usize, + retry_commitments_limit: usize, + tracing_settings: TracingSettings, +} + +impl From<&CfgSyncConfig> for SerializableCfgSyncConfig { + fn from(cfg: &CfgSyncConfig) -> Self { + Self { + port: cfg.port, + n_hosts: cfg.n_hosts, + timeout: cfg.timeout, + security_param: cfg.security_param, + active_slot_coeff: cfg.active_slot_coeff, + subnetwork_size: cfg.subnetwork_size, + dispersal_factor: cfg.dispersal_factor, + num_samples: cfg.num_samples, + num_subnets: cfg.num_subnets, + old_blobs_check_interval: cfg.old_blobs_check_interval, + blobs_validity_duration: cfg.blobs_validity_duration, + global_params_path: cfg.global_params_path.clone(), + min_dispersal_peers: cfg.min_dispersal_peers, + min_replication_peers: cfg.min_replication_peers, + monitor_failure_time_window: cfg.monitor_failure_time_window, + balancer_interval: cfg.balancer_interval, + replication_settings: cfg.replication_settings, + retry_shares_limit: cfg.retry_shares_limit, + retry_commitments_limit: cfg.retry_commitments_limit, + tracing_settings: cfg.tracing_settings.clone(), + } + } +} diff --git a/testing-framework/core/src/scenario/definition.rs b/testing-framework/core/src/scenario/definition.rs new file mode 100644 index 0000000..3aeaf29 --- /dev/null +++ b/testing-framework/core/src/scenario/definition.rs @@ -0,0 +1,255 @@ +use std::{num::NonZeroUsize, sync::Arc, time::Duration}; + +use super::{ + NodeControlCapability, expectation::Expectation, runtime::context::RunMetrics, + workload::Workload, +}; +use crate::topology::{ + GeneratedTopology, TopologyBuilder, TopologyConfig, configs::wallet::WalletConfig, +}; + +const DEFAULT_FUNDS_PER_WALLET: u64 = 100; + +/// Immutable scenario definition shared between the runner, workloads, and +/// expectations. 
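// The cfgsync helpers above are intended to be chained; a rough sketch
// (illustrative only, with a made-up template path):
//
//     let mut cfg = load_cfgsync_template(Path::new("cfgsync.yaml"))?;
//     apply_topology_overrides(&mut cfg, &topology, false);
//     let yaml = render_cfgsync_yaml(&cfg)?;
//
// where `topology` is a `GeneratedTopology` and the rendered YAML is then
// available to whatever deployment consumes it.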
+pub struct Scenario { + topology: GeneratedTopology, + workloads: Vec>, + expectations: Vec>, + duration: Duration, + capabilities: Caps, +} + +impl Scenario { + fn new( + topology: GeneratedTopology, + workloads: Vec>, + expectations: Vec>, + duration: Duration, + capabilities: Caps, + ) -> Self { + Self { + topology, + workloads, + expectations, + duration, + capabilities, + } + } + + #[must_use] + pub const fn topology(&self) -> &GeneratedTopology { + &self.topology + } + + #[must_use] + pub fn workloads(&self) -> &[Arc] { + &self.workloads + } + + #[must_use] + pub fn expectations(&self) -> &[Box] { + &self.expectations + } + + #[must_use] + pub fn expectations_mut(&mut self) -> &mut [Box] { + &mut self.expectations + } + + #[must_use] + pub const fn duration(&self) -> Duration { + self.duration + } + + #[must_use] + pub const fn capabilities(&self) -> &Caps { + &self.capabilities + } +} + +/// Builder used by callers to describe the desired scenario. +pub struct Builder { + topology: TopologyBuilder, + workloads: Vec>, + expectations: Vec>, + duration: Duration, + capabilities: Caps, +} + +pub type ScenarioBuilder = Builder<()>; + +impl Builder { + #[must_use] + pub fn new(topology: TopologyBuilder) -> Self { + Self { + topology, + workloads: Vec::new(), + expectations: Vec::new(), + duration: Duration::ZERO, + capabilities: Caps::default(), + } + } + + #[must_use] + pub fn with_node_counts(validators: usize, executors: usize) -> Self { + Self::new(TopologyBuilder::new(TopologyConfig::with_node_numbers( + validators, executors, + ))) + } +} + +impl Builder { + #[must_use] + pub fn with_capabilities(self, capabilities: NewCaps) -> Builder { + let Self { + topology, + workloads, + expectations, + duration, + .. + } = self; + + Builder { + topology, + workloads, + expectations, + duration, + capabilities, + } + } + + #[must_use] + pub const fn capabilities(&self) -> &Caps { + &self.capabilities + } + + #[must_use] + pub const fn capabilities_mut(&mut self) -> &mut Caps { + &mut self.capabilities + } + + #[must_use] + pub fn with_workload(mut self, workload: W) -> Self + where + W: Workload + 'static, + { + self.expectations.extend(workload.expectations()); + self.workloads.push(Arc::new(workload)); + self + } + + #[must_use] + pub fn with_expectation(mut self, expectation: E) -> Self + where + E: Expectation + 'static, + { + self.expectations.push(Box::new(expectation)); + self + } + + #[must_use] + pub const fn with_run_duration(mut self, duration: Duration) -> Self { + self.duration = duration; + self + } + + #[must_use] + pub fn map_topology(mut self, f: impl FnOnce(TopologyBuilder) -> TopologyBuilder) -> Self { + self.topology = f(self.topology); + self + } + + #[must_use] + pub fn with_wallet_config(mut self, wallet: WalletConfig) -> Self { + self.topology = self.topology.with_wallet_config(wallet); + self + } + + #[must_use] + pub fn wallets(self, users: usize) -> Self { + let user_count = NonZeroUsize::new(users).expect("wallet user count must be non-zero"); + let total_funds = DEFAULT_FUNDS_PER_WALLET + .checked_mul(users as u64) + .expect("wallet count exceeds capacity"); + let wallet = WalletConfig::uniform(total_funds, user_count); + self.with_wallet_config(wallet) + } + + #[must_use] + pub fn build(self) -> Scenario { + let Self { + topology, + mut workloads, + mut expectations, + duration, + capabilities, + .. 
+ } = self; + + let generated = topology.build(); + let duration = enforce_min_duration(&generated, duration); + let run_metrics = RunMetrics::from_topology(&generated, duration); + initialize_components(&generated, &run_metrics, &mut workloads, &mut expectations); + + Scenario::new(generated, workloads, expectations, duration, capabilities) + } +} + +impl Builder<()> { + #[must_use] + pub fn enable_node_control(self) -> Builder { + self.with_capabilities(NodeControlCapability) + } +} + +fn initialize_components( + descriptors: &GeneratedTopology, + run_metrics: &RunMetrics, + workloads: &mut [Arc], + expectations: &mut [Box], +) { + initialize_workloads(descriptors, run_metrics, workloads); + initialize_expectations(descriptors, run_metrics, expectations); +} + +fn initialize_workloads( + descriptors: &GeneratedTopology, + run_metrics: &RunMetrics, + workloads: &mut [Arc], +) { + for workload in workloads { + let inner = + Arc::get_mut(workload).expect("workload unexpectedly cloned before initialization"); + if let Err(err) = inner.init(descriptors, run_metrics) { + panic!("workload '{}' failed to initialize: {err}", inner.name()); + } + } +} + +fn initialize_expectations( + descriptors: &GeneratedTopology, + run_metrics: &RunMetrics, + expectations: &mut [Box], +) { + for expectation in expectations { + if let Err(err) = expectation.init(descriptors, run_metrics) { + panic!( + "expectation '{}' failed to initialize: {err}", + expectation.name() + ); + } + } +} + +fn enforce_min_duration(descriptors: &GeneratedTopology, requested: Duration) -> Duration { + const MIN_BLOCKS: u32 = 2; + const FALLBACK_SECS: u64 = 10; + + let min_duration = descriptors.slot_duration().map_or_else( + || Duration::from_secs(FALLBACK_SECS), + |slot| slot * MIN_BLOCKS, + ); + + requested.max(min_duration) +} diff --git a/testing-framework/core/src/scenario/expectation.rs b/testing-framework/core/src/scenario/expectation.rs new file mode 100644 index 0000000..edf73dc --- /dev/null +++ b/testing-framework/core/src/scenario/expectation.rs @@ -0,0 +1,23 @@ +use async_trait::async_trait; + +use super::{DynError, RunContext, runtime::context::RunMetrics}; +use crate::topology::GeneratedTopology; + +#[async_trait] +pub trait Expectation: Send + Sync { + fn name(&self) -> &str; + + fn init( + &mut self, + _descriptors: &GeneratedTopology, + _run_metrics: &RunMetrics, + ) -> Result<(), DynError> { + Ok(()) + } + + async fn start_capture(&mut self, _ctx: &RunContext) -> Result<(), DynError> { + Ok(()) + } + + async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError>; +} diff --git a/testing-framework/core/src/scenario/expectations/mod.rs b/testing-framework/core/src/scenario/expectations/mod.rs new file mode 100644 index 0000000..38d5f92 --- /dev/null +++ b/testing-framework/core/src/scenario/expectations/mod.rs @@ -0,0 +1,204 @@ +use std::{fmt::Write as _, time::Duration}; + +use futures::FutureExt as _; + +use super::{ + BoxFuture, CONSENSUS_PROCESSED_BLOCKS, Expectation, ExpectationError, MetricsError, RunContext, +}; + +/// Enforces that every validator advances to the minimum block height implied +/// by the scenario duration, slot timing, and active slot coefficient (or a +/// caller-provided override). +/// +/// Polls each validator's HTTP consensus info to catch stalls even when +/// Prometheus is unavailable. 
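// A condensed builder sketch tying the pieces together (illustrative only):
//
//     let scenario = ScenarioBuilder::with_node_counts(2, 1)
//         .wallets(5)
//         .with_run_duration(Duration::from_secs(60))
//         .with_expectation(ConsensusLiveness::default())
//         .build();
//
// `build` clamps the duration to at least two slot lengths, so very short
// requests are stretched automatically.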
+#[derive(Clone, Copy, Debug)] +pub struct ConsensusLiveness { + minimum_override: Option, + tolerance: f64, +} + +pub struct PrometheusBlockProduction { + minimum: u64, +} + +impl PrometheusBlockProduction { + #[must_use] + pub const fn new(minimum: u64) -> Self { + Self { minimum } + } + + #[must_use] + pub const fn minimum(&self) -> u64 { + self.minimum + } +} + +impl Expectation for PrometheusBlockProduction { + fn name(&self) -> &'static str { + "prometheus_block_production" + } + + fn evaluate<'a>(&'a self, ctx: &'a RunContext) -> BoxFuture<'a, Result<(), ExpectationError>> { + async move { + let total = ctx + .metrics() + .consensus_processed_blocks() + .map_err(|err| into_expectation_error(&err))?; + + if total >= self.minimum() as f64 { + tracing::info!( + query = CONSENSUS_PROCESSED_BLOCKS, + observed_total = total, + minimum = self.minimum(), + "block production expectation satisfied via prometheus" + ); + Ok(()) + } else { + Err(ExpectationError::new(format!( + "prometheus query `{}` sum {total} below block target {}", + CONSENSUS_PROCESSED_BLOCKS, + self.minimum() + ))) + } + } + .boxed() + } +} + +fn into_expectation_error(err: &MetricsError) -> ExpectationError { + ExpectationError::new(err.to_string()) +} + +impl ConsensusLiveness { + #[must_use] + pub const fn with_minimum(minimum_blocks: u64) -> Self { + Self { + minimum_override: Some(minimum_blocks), + tolerance: 1.0, + } + } + + #[must_use] + pub const fn with_tolerance(tolerance: f64) -> Self { + Self { + minimum_override: None, + tolerance, + } + } +} + +impl Default for ConsensusLiveness { + fn default() -> Self { + Self::with_tolerance(0.8) + } +} + +impl Expectation for ConsensusLiveness { + fn name(&self) -> &'static str { + "consensus_liveness" + } + + fn evaluate<'a>(&'a self, ctx: &'a RunContext) -> BoxFuture<'a, Result<(), ExpectationError>> { + async move { + if ctx.validators().is_empty() { + return Err(ExpectationError::new( + "consensus liveness requires at least one validator", + )); + } + + let target = consensus_target_blocks(ctx, self.minimum_override, self.tolerance); + let mut issues = Vec::new(); + let mut heights = Vec::with_capacity(ctx.validators().len()); + + for handle in ctx.validators() { + let index = handle.descriptor().index; + match handle.client().consensus_info().await { + Ok(info) => { + heights.push(info.height); + if info.height < target { + issues.push(format!( + "validator-{index} height {} below target {}", + info.height, target + )); + } + } + Err(err) => { + issues.push(format!("validator-{index} consensus_info failed: {err}")); + } + } + } + + if issues.is_empty() { + tracing::info!( + target, + heights = ?heights, + "consensus liveness expectation satisfied" + ); + Ok(()) + } else { + let mut message = String::new(); + let _ = writeln!( + &mut message, + "consensus liveness violated (target={target}):" + ); + for issue in issues { + let _ = writeln!(&mut message, "- {issue}"); + } + Err(ExpectationError::new(message.trim_end())) + } + } + .boxed() + } +} + +fn consensus_target_blocks(ctx: &RunContext, override_minimum: Option, tolerance: f64) -> u64 { + if let Some(minimum) = override_minimum { + return minimum; + } + + if tolerance <= 0.0 { + return 0; + } + + let slot_duration = ctx + .descriptors() + .validators() + .first() + .map_or(Duration::from_secs(2), |node| { + node.general.time_config.slot_duration + }); + + if slot_duration.is_zero() { + return 0; + } + + let active_slot_coeff = ctx + .descriptors() + .config() + .consensus_params + .active_slot_coeff; + + if 
active_slot_coeff <= 0.0 { + return 0; + } + + let run_duration = ctx.run_duration(); + if run_duration.is_zero() { + return 0; + } + + let slot_duration_secs = slot_duration.as_secs_f64(); + if slot_duration_secs == 0.0 { + return 0; + } + + let slot_count = run_duration.as_secs_f64() / slot_duration_secs; + if slot_count < 1.0 { + return 0; + } + + let expected_blocks = slot_count * active_slot_coeff; + let adjusted = (expected_blocks * tolerance).floor(); + adjusted.max(1.0) as u64 +} diff --git a/testing-framework/core/src/scenario/http_probe.rs b/testing-framework/core/src/scenario/http_probe.rs new file mode 100644 index 0000000..c525437 --- /dev/null +++ b/testing-framework/core/src/scenario/http_probe.rs @@ -0,0 +1,129 @@ +use std::{fmt, time::Duration}; + +use futures::future::try_join_all; +use nomos_http_api_common::paths; +use reqwest::Client as ReqwestClient; +use thiserror::Error; +use tokio::time::{sleep, timeout}; + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum NodeRole { + Validator, + Executor, +} + +impl NodeRole { + #[must_use] + pub const fn label(self) -> &'static str { + match self { + Self::Validator => "validator", + Self::Executor => "executor", + } + } +} + +impl fmt::Display for NodeRole { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.label()) + } +} + +#[derive(Clone, Copy, Debug, Error)] +#[error("timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}")] +pub struct HttpReadinessError { + role: NodeRole, + port: u16, + timeout: Duration, +} + +impl HttpReadinessError { + #[must_use] + pub const fn new(role: NodeRole, port: u16, timeout: Duration) -> Self { + Self { + role, + port, + timeout, + } + } + + #[must_use] + pub const fn role(&self) -> NodeRole { + self.role + } + + #[must_use] + pub const fn port(&self) -> u16 { + self.port + } + + #[must_use] + pub const fn timeout(&self) -> Duration { + self.timeout + } +} + +pub async fn wait_for_http_ports( + ports: &[u16], + role: NodeRole, + timeout_duration: Duration, + poll_interval: Duration, +) -> Result<(), HttpReadinessError> { + wait_for_http_ports_with_host(ports, role, "127.0.0.1", timeout_duration, poll_interval).await +} + +pub async fn wait_for_http_ports_with_host( + ports: &[u16], + role: NodeRole, + host: &str, + timeout_duration: Duration, + poll_interval: Duration, +) -> Result<(), HttpReadinessError> { + if ports.is_empty() { + return Ok(()); + } + + let client = ReqwestClient::new(); + let probes = ports.iter().copied().map(|port| { + wait_for_single_port( + client.clone(), + port, + role, + host, + timeout_duration, + poll_interval, + ) + }); + + try_join_all(probes).await.map(|_| ()) +} + +async fn wait_for_single_port( + client: ReqwestClient, + port: u16, + role: NodeRole, + host: &str, + timeout_duration: Duration, + poll_interval: Duration, +) -> Result<(), HttpReadinessError> { + let url = format!("http://{host}:{port}{}", paths::CRYPTARCHIA_INFO); + let probe = async { + loop { + let is_ready = client + .get(&url) + .send() + .await + .map(|response| response.status().is_success()) + .unwrap_or(false); + + if is_ready { + return; + } + + sleep(poll_interval).await; + } + }; + + timeout(timeout_duration, probe) + .await + .map_err(|_| HttpReadinessError::new(role, port, timeout_duration)) +} diff --git a/testing-framework/core/src/scenario/mod.rs b/testing-framework/core/src/scenario/mod.rs new file mode 100644 index 0000000..64c31a1 --- /dev/null +++ b/testing-framework/core/src/scenario/mod.rs @@ -0,0 +1,25 @@ +//! 
Scenario orchestration primitives shared by integration tests and runners. + +mod capabilities; +pub mod cfgsync; +mod definition; +mod expectation; +pub mod http_probe; +mod runtime; +mod workload; + +pub type DynError = Box; + +pub use capabilities::{NodeControlCapability, NodeControlHandle, RequiresNodeControl}; +pub use definition::{Builder, Scenario, ScenarioBuilder}; +pub use expectation::Expectation; +pub use runtime::{ + BlockFeed, BlockFeedTask, BlockRecord, BlockStats, CleanupGuard, Deployer, NodeClients, + RunContext, RunHandle, RunMetrics, Runner, ScenarioError, + metrics::{ + CONSENSUS_PROCESSED_BLOCKS, CONSENSUS_TRANSACTIONS_TOTAL, Metrics, MetricsError, + PrometheusEndpoint, PrometheusInstantSample, + }, + spawn_block_feed, +}; +pub use workload::Workload; diff --git a/testing-framework/core/src/scenario/runtime/block_feed.rs b/testing-framework/core/src/scenario/runtime/block_feed.rs new file mode 100644 index 0000000..2ca3b1f --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/block_feed.rs @@ -0,0 +1,178 @@ +use std::{ + collections::HashSet, + sync::{ + Arc, + atomic::{AtomicU64, Ordering}, + }, + time::Duration, +}; + +use anyhow::{Context as _, Result}; +use nomos_core::{block::Block, mantle::SignedMantleTx}; +use nomos_node::HeaderId; +use tokio::{sync::broadcast, task::JoinHandle, time::sleep}; +use tracing::{debug, error}; + +use super::context::CleanupGuard; +use crate::nodes::ApiClient; + +const POLL_INTERVAL: Duration = Duration::from_secs(1); + +#[derive(Clone)] +pub struct BlockFeed { + inner: Arc, +} + +struct BlockFeedInner { + sender: broadcast::Sender>, + stats: Arc, +} + +#[derive(Clone)] +pub struct BlockRecord { + pub header: HeaderId, + pub block: Arc>, +} + +pub struct BlockFeedTask { + handle: JoinHandle<()>, +} + +impl BlockFeed { + #[must_use] + pub fn subscribe(&self) -> broadcast::Receiver> { + self.inner.sender.subscribe() + } + + #[must_use] + pub fn stats(&self) -> Arc { + Arc::clone(&self.inner.stats) + } + + fn ingest(&self, header: HeaderId, block: Block) { + self.inner.stats.record_block(&block); + let record = Arc::new(BlockRecord { + header, + block: Arc::new(block), + }); + + let _ = self.inner.sender.send(record); + } +} + +impl BlockFeedTask { + #[must_use] + pub const fn new(handle: JoinHandle<()>) -> Self { + Self { handle } + } +} + +pub async fn spawn_block_feed(client: ApiClient) -> Result<(BlockFeed, BlockFeedTask)> { + let (sender, _) = broadcast::channel(1024); + let feed = BlockFeed { + inner: Arc::new(BlockFeedInner { + sender, + stats: Arc::new(BlockStats::default()), + }), + }; + + let mut scanner = BlockScanner::new(client, feed.clone()); + scanner.catch_up().await?; + + let handle = tokio::spawn(async move { scanner.run().await }); + + Ok((feed, BlockFeedTask::new(handle))) +} + +struct BlockScanner { + client: ApiClient, + feed: BlockFeed, + seen: HashSet, +} + +impl BlockScanner { + fn new(client: ApiClient, feed: BlockFeed) -> Self { + Self { + client, + feed, + seen: HashSet::new(), + } + } + + async fn run(&mut self) { + loop { + if let Err(err) = self.catch_up().await { + error!(%err, "block feed catch up failed"); + } + sleep(POLL_INTERVAL).await; + } + } + + async fn catch_up(&mut self) -> Result<()> { + let info = self.client.consensus_info().await?; + let tip = info.tip; + let mut remaining_height = info.height; + let mut stack = Vec::new(); + let mut cursor = tip; + + loop { + if self.seen.contains(&cursor) { + break; + } + + if remaining_height == 0 { + self.seen.insert(cursor); + break; + } + + 
+            let block = self
+                .client
+                .storage_block(&cursor)
+                .await?
+                .context("missing block while catching up")?;
+
+            let parent = block.header().parent();
+            stack.push((cursor, block));
+
+            if self.seen.contains(&parent) || parent == cursor {
+                break;
+            }
+
+            cursor = parent;
+            remaining_height = remaining_height.saturating_sub(1);
+        }
+
+        let mut processed = 0usize;
+        while let Some((header, block)) = stack.pop() {
+            self.feed.ingest(header, block);
+            self.seen.insert(header);
+            processed += 1;
+        }
+
+        debug!(processed, "block feed processed catch up batch");
+        Ok(())
+    }
+}
+
+impl CleanupGuard for BlockFeedTask {
+    fn cleanup(self: Box<Self>) {
+        self.handle.abort();
+    }
+}
+
+#[derive(Default)]
+pub struct BlockStats {
+    total_transactions: AtomicU64,
+}
+
+impl BlockStats {
+    fn record_block(&self, block: &Block<SignedMantleTx>) {
+        self.total_transactions
+            .fetch_add(block.transactions().len() as u64, Ordering::Relaxed);
+    }
+
+    #[must_use]
+    pub fn total_transactions(&self) -> u64 {
+        self.total_transactions.load(Ordering::Relaxed)
+    }
+}
diff --git a/testing-framework/core/src/scenario/runtime/context.rs b/testing-framework/core/src/scenario/runtime/context.rs
new file mode 100644
index 0000000..99a4a3c
--- /dev/null
+++ b/testing-framework/core/src/scenario/runtime/context.rs
@@ -0,0 +1,215 @@
+use std::{sync::Arc, time::Duration};
+
+use super::{block_feed::BlockFeed, metrics::Metrics, node_clients::ClusterClient};
+use crate::{
+    nodes::ApiClient,
+    scenario::{NodeClients, NodeControlHandle},
+    topology::{GeneratedTopology, Topology, configs::wallet::WalletAccount},
+};
+
+pub struct RunContext {
+    descriptors: GeneratedTopology,
+    cluster: Option<Topology>,
+    node_clients: NodeClients,
+    metrics: RunMetrics,
+    telemetry: Metrics,
+    block_feed: BlockFeed,
+    node_control: Option<Arc<dyn NodeControlHandle>>,
+}
+
+impl RunContext {
+    /// Builds a run context, deriving the run metrics (expected block count
+    /// and block-interval hint) for the requested duration from the generated
+    /// topology.
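+    ///
+    /// Illustrative sketch (hypothetical workload code, using only the
+    /// accessors defined below) of how a run context is typically consumed:
+    ///
+    ///     let client = ctx.random_node_client().expect("topology has nodes");
+    ///     let mut blocks = ctx.block_feed().subscribe();
+    ///     let budget = ctx.run_duration();
+    ///     let target = ctx.expected_blocks();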
+ #[must_use] + pub fn new( + descriptors: GeneratedTopology, + cluster: Option, + node_clients: NodeClients, + run_duration: Duration, + telemetry: Metrics, + block_feed: BlockFeed, + node_control: Option>, + ) -> Self { + let metrics = RunMetrics::new(&descriptors, run_duration); + + Self { + descriptors, + cluster, + node_clients, + metrics, + telemetry, + block_feed, + node_control, + } + } + + #[must_use] + pub const fn descriptors(&self) -> &GeneratedTopology { + &self.descriptors + } + + #[must_use] + pub const fn topology(&self) -> Option<&Topology> { + self.cluster.as_ref() + } + + #[must_use] + pub const fn node_clients(&self) -> &NodeClients { + &self.node_clients + } + + #[must_use] + pub fn random_node_client(&self) -> Option<&ApiClient> { + self.node_clients.any_client() + } + + #[must_use] + pub fn block_feed(&self) -> BlockFeed { + self.block_feed.clone() + } + + #[must_use] + pub fn wallet_accounts(&self) -> &[WalletAccount] { + self.descriptors.wallet_accounts() + } + + #[must_use] + pub const fn telemetry(&self) -> &Metrics { + &self.telemetry + } + + #[must_use] + pub const fn run_duration(&self) -> Duration { + self.metrics.run_duration() + } + + #[must_use] + pub const fn expected_blocks(&self) -> u64 { + self.metrics.expected_consensus_blocks() + } + + #[must_use] + pub const fn run_metrics(&self) -> RunMetrics { + self.metrics + } + + #[must_use] + pub fn node_control(&self) -> Option> { + self.node_control.clone() + } + + #[must_use] + pub const fn cluster_client(&self) -> ClusterClient<'_> { + self.node_clients.cluster_client() + } +} + +/// Handle returned by the runner to control the lifecycle of the run. +pub struct RunHandle { + run_context: Arc, + cleanup_guard: Option>, +} + +impl Drop for RunHandle { + fn drop(&mut self) { + if let Some(guard) = self.cleanup_guard.take() { + guard.cleanup(); + } + } +} + +impl RunHandle { + #[must_use] + pub fn new(context: RunContext, cleanup_guard: Option>) -> Self { + Self { + run_context: Arc::new(context), + cleanup_guard, + } + } + + #[must_use] + pub(crate) fn from_shared( + context: Arc, + cleanup_guard: Option>, + ) -> Self { + Self { + run_context: context, + cleanup_guard, + } + } + + #[must_use] + pub fn context(&self) -> &RunContext { + &self.run_context + } +} + +#[derive(Clone, Copy)] +pub struct RunMetrics { + run_duration: Duration, + expected_blocks: u64, + block_interval_hint: Option, +} + +impl RunMetrics { + #[must_use] + pub fn new(descriptors: &GeneratedTopology, run_duration: Duration) -> Self { + Self::from_topology(descriptors, run_duration) + } + + #[must_use] + pub fn from_topology(descriptors: &GeneratedTopology, run_duration: Duration) -> Self { + let slot_duration = descriptors.slot_duration(); + + let active_slot_coeff = descriptors.config().consensus_params.active_slot_coeff; + let expected_blocks = + calculate_expected_blocks(run_duration, slot_duration, active_slot_coeff); + + let block_interval_hint = + slot_duration.map(|duration| duration.mul_f64(active_slot_coeff.clamp(0.0, 1.0))); + + Self { + run_duration, + expected_blocks, + block_interval_hint, + } + } + + #[must_use] + pub const fn run_duration(&self) -> Duration { + self.run_duration + } + + #[must_use] + pub const fn expected_consensus_blocks(&self) -> u64 { + self.expected_blocks + } + + #[must_use] + pub const fn block_interval_hint(&self) -> Option { + self.block_interval_hint + } +} + +pub trait CleanupGuard: Send { + fn cleanup(self: Box); +} + +/// Computes the minimum duration we’ll allow for a scenario run so that the 
+/// scheduler can observe a few block opportunities even if the caller +/// requested an extremely short window. +fn calculate_expected_blocks( + run_duration: Duration, + slot_duration: Option, + active_slot_coeff: f64, +) -> u64 { + let Some(slot_duration) = slot_duration else { + return 0; + }; + let slot_secs = slot_duration.as_secs_f64(); + let run_secs = run_duration.as_secs_f64(); + let expected = run_secs / slot_secs * active_slot_coeff; + + expected.ceil().clamp(0.0, u64::MAX as f64) as u64 +} diff --git a/testing-framework/core/src/scenario/runtime/deployer.rs b/testing-framework/core/src/scenario/runtime/deployer.rs new file mode 100644 index 0000000..c3c62b1 --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/deployer.rs @@ -0,0 +1,22 @@ +use async_trait::async_trait; + +use super::runner::Runner; +use crate::scenario::{DynError, Scenario}; + +/// Error returned when executing workloads or expectations. +#[derive(Debug, thiserror::Error)] +pub enum ScenarioError { + #[error("workload failure: {0}")] + Workload(#[source] DynError), + #[error("expectation capture failed: {0}")] + ExpectationCapture(#[source] DynError), + #[error("expectations failed:\n{0}")] + Expectations(#[source] DynError), +} + +#[async_trait] +pub trait Deployer: Send + Sync { + type Error; + + async fn deploy(&self, scenario: &Scenario) -> Result; +} diff --git a/testing-framework/core/src/scenario/runtime/metrics.rs b/testing-framework/core/src/scenario/runtime/metrics.rs new file mode 100644 index 0000000..2826022 --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/metrics.rs @@ -0,0 +1,201 @@ +use std::{collections::HashMap, sync::Arc}; + +use prometheus_http_query::{Client as PrometheusClient, response::Data as PrometheusData}; +use reqwest::Url; +use tracing::warn; + +pub const CONSENSUS_PROCESSED_BLOCKS: &str = "consensus_processed_blocks"; +pub const CONSENSUS_TRANSACTIONS_TOTAL: &str = "consensus_transactions_total"; +const CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY: &str = + r#"sum(consensus_transactions_total{job=~"validator-.*"})"#; + +#[derive(Clone, Default)] +pub struct Metrics { + prometheus: Option>, +} + +impl Metrics { + #[must_use] + pub const fn empty() -> Self { + Self { prometheus: None } + } + + pub fn from_prometheus(url: Url) -> Result { + let handle = Arc::new(PrometheusEndpoint::new(url)?); + Ok(Self::empty().with_prometheus_endpoint(handle)) + } + + pub fn from_prometheus_str(raw_url: &str) -> Result { + Url::parse(raw_url) + .map_err(|err| MetricsError::new(format!("invalid prometheus url: {err}"))) + .and_then(Self::from_prometheus) + } + + #[must_use] + pub fn with_prometheus_endpoint(mut self, handle: Arc) -> Self { + self.prometheus = Some(handle); + self + } + + #[must_use] + pub fn prometheus(&self) -> Option> { + self.prometheus.as_ref().map(Arc::clone) + } + + #[must_use] + pub const fn is_configured(&self) -> bool { + self.prometheus.is_some() + } + + pub fn instant_values(&self, query: &str) -> Result, MetricsError> { + let handle = self + .prometheus() + .ok_or_else(|| MetricsError::new("prometheus endpoint unavailable"))?; + handle.instant_values(query) + } + + pub fn counter_value(&self, query: &str) -> Result { + let handle = self + .prometheus() + .ok_or_else(|| MetricsError::new("prometheus endpoint unavailable"))?; + handle.counter_value(query) + } + + pub fn consensus_processed_blocks(&self) -> Result { + self.counter_value(CONSENSUS_PROCESSED_BLOCKS) + } + + pub fn consensus_transactions_total(&self) -> Result { + let handle = self + 
.prometheus() + .ok_or_else(|| MetricsError::new("prometheus endpoint unavailable"))?; + + match handle.instant_samples(CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY) { + Ok(samples) if !samples.is_empty() => { + return Ok(samples.into_iter().map(|sample| sample.value).sum()); + } + Ok(_) => { + warn!( + query = CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY, + "validator-specific consensus transaction metric returned no samples; falling back to aggregate counter" + ); + } + Err(err) => { + warn!( + query = CONSENSUS_TRANSACTIONS_VALIDATOR_QUERY, + error = %err, + "failed to query validator-specific consensus transaction metric; falling back to aggregate counter" + ); + } + } + + handle.counter_value(CONSENSUS_TRANSACTIONS_TOTAL) + } +} + +#[derive(Debug, thiserror::Error)] +pub enum MetricsError { + #[error("{0}")] + Store(String), +} + +impl MetricsError { + #[must_use] + pub fn new(message: impl Into) -> Self { + Self::Store(message.into()) + } +} + +pub struct PrometheusEndpoint { + base_url: Url, + client: PrometheusClient, +} + +#[derive(Clone, Debug)] +pub struct PrometheusInstantSample { + pub labels: HashMap, + pub timestamp: f64, + pub value: f64, +} + +impl PrometheusEndpoint { + pub fn new(base_url: Url) -> Result { + let client = PrometheusClient::try_from(base_url.as_str().to_owned()).map_err(|err| { + MetricsError::new(format!("failed to create prometheus client: {err}")) + })?; + + Ok(Self { base_url, client }) + } + + #[must_use] + pub const fn base_url(&self) -> &Url { + &self.base_url + } + + #[must_use] + pub fn port(&self) -> Option { + self.base_url.port_or_known_default() + } + + pub fn instant_samples( + &self, + query: &str, + ) -> Result, MetricsError> { + let query = query.to_owned(); + let client = self.client.clone(); + + let response = std::thread::spawn(move || -> Result<_, MetricsError> { + let runtime = tokio::runtime::Runtime::new() + .map_err(|err| MetricsError::new(format!("failed to create runtime: {err}")))?; + runtime + .block_on(async { client.query(&query).get().await }) + .map_err(|err| MetricsError::new(format!("prometheus query failed: {err}"))) + }) + .join() + .map_err(|_| MetricsError::new("prometheus query thread panicked"))??; + + let mut samples = Vec::new(); + match response.data() { + PrometheusData::Vector(vectors) => { + for vector in vectors { + samples.push(PrometheusInstantSample { + labels: vector.metric().clone(), + timestamp: vector.sample().timestamp(), + value: vector.sample().value(), + }); + } + } + PrometheusData::Matrix(ranges) => { + for range in ranges { + let labels = range.metric().clone(); + for sample in range.samples() { + samples.push(PrometheusInstantSample { + labels: labels.clone(), + timestamp: sample.timestamp(), + value: sample.value(), + }); + } + } + } + PrometheusData::Scalar(sample) => { + samples.push(PrometheusInstantSample { + labels: HashMap::new(), + timestamp: sample.timestamp(), + value: sample.value(), + }); + } + } + + Ok(samples) + } + + pub fn instant_values(&self, query: &str) -> Result, MetricsError> { + self.instant_samples(query) + .map(|samples| samples.into_iter().map(|sample| sample.value).collect()) + } + + pub fn counter_value(&self, query: &str) -> Result { + self.instant_values(query) + .map(|values| values.into_iter().sum()) + } +} diff --git a/testing-framework/core/src/scenario/runtime/mod.rs b/testing-framework/core/src/scenario/runtime/mod.rs new file mode 100644 index 0000000..82c060b --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/mod.rs @@ -0,0 +1,12 @@ +mod block_feed; +pub 
mod context; +mod deployer; +pub mod metrics; +mod node_clients; +mod runner; + +pub use block_feed::{BlockFeed, BlockFeedTask, BlockRecord, BlockStats, spawn_block_feed}; +pub use context::{CleanupGuard, RunContext, RunHandle, RunMetrics}; +pub use deployer::{Deployer, ScenarioError}; +pub use node_clients::NodeClients; +pub use runner::Runner; diff --git a/testing-framework/core/src/scenario/runtime/node_clients.rs b/testing-framework/core/src/scenario/runtime/node_clients.rs new file mode 100644 index 0000000..4322aa3 --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/node_clients.rs @@ -0,0 +1,135 @@ +use std::pin::Pin; + +use rand::{Rng as _, seq::SliceRandom as _, thread_rng}; + +use crate::{ + nodes::ApiClient, + scenario::DynError, + topology::{GeneratedTopology, Topology}, +}; + +#[derive(Clone, Default)] +pub struct NodeClients { + validators: Vec, + executors: Vec, +} + +impl NodeClients { + #[must_use] + pub const fn new(validators: Vec, executors: Vec) -> Self { + Self { + validators, + executors, + } + } + + #[must_use] + pub fn from_topology(_descriptors: &GeneratedTopology, topology: &Topology) -> Self { + let validator_clients = topology.validators().iter().map(|node| { + let testing = node.testing_url(); + ApiClient::from_urls(node.url(), testing) + }); + + let executor_clients = topology.executors().iter().map(|node| { + let testing = node.testing_url(); + ApiClient::from_urls(node.url(), testing) + }); + + Self::new(validator_clients.collect(), executor_clients.collect()) + } + + #[must_use] + pub fn validator_clients(&self) -> &[ApiClient] { + &self.validators + } + + #[must_use] + pub fn executor_clients(&self) -> &[ApiClient] { + &self.executors + } + + #[must_use] + pub fn random_validator(&self) -> Option<&ApiClient> { + if self.validators.is_empty() { + return None; + } + let mut rng = thread_rng(); + let idx = rng.gen_range(0..self.validators.len()); + self.validators.get(idx) + } + + #[must_use] + pub fn random_executor(&self) -> Option<&ApiClient> { + if self.executors.is_empty() { + return None; + } + let mut rng = thread_rng(); + let idx = rng.gen_range(0..self.executors.len()); + self.executors.get(idx) + } + + pub fn all_clients(&self) -> impl Iterator { + self.validators.iter().chain(self.executors.iter()) + } + + #[must_use] + pub fn any_client(&self) -> Option<&ApiClient> { + let validator_count = self.validators.len(); + let executor_count = self.executors.len(); + let total = validator_count + executor_count; + if total == 0 { + return None; + } + let mut rng = thread_rng(); + let choice = rng.gen_range(0..total); + if choice < validator_count { + self.validators.get(choice) + } else { + self.executors.get(choice - validator_count) + } + } + + #[must_use] + pub const fn cluster_client(&self) -> ClusterClient<'_> { + ClusterClient::new(self) + } +} + +pub struct ClusterClient<'a> { + node_clients: &'a NodeClients, +} + +impl<'a> ClusterClient<'a> { + #[must_use] + pub const fn new(node_clients: &'a NodeClients) -> Self { + Self { node_clients } + } + + pub async fn try_all_clients( + &self, + mut f: impl for<'b> FnMut( + &'b ApiClient, + ) -> Pin> + Send + 'b>> + + Send, + ) -> Result + where + E: Into, + { + let mut clients: Vec<&ApiClient> = self.node_clients.all_clients().collect(); + if clients.is_empty() { + return Err("cluster client has no api clients".into()); + } + + clients.shuffle(&mut thread_rng()); + + let mut last_err = None; + for client in clients { + match f(client).await { + Ok(value) => return Ok(value), + Err(err) => 
last_err = Some(err.into()), + } + } + + Err(last_err.unwrap_or_else(|| "cluster client exhausted all nodes".into())) + } +} diff --git a/testing-framework/core/src/scenario/runtime/runner.rs b/testing-framework/core/src/scenario/runtime/runner.rs new file mode 100644 index 0000000..4d6de22 --- /dev/null +++ b/testing-framework/core/src/scenario/runtime/runner.rs @@ -0,0 +1,251 @@ +use std::{any::Any, panic::AssertUnwindSafe, sync::Arc, time::Duration}; + +use futures::FutureExt as _; +use tokio::{ + task::JoinSet, + time::{sleep, timeout}, +}; + +use super::deployer::ScenarioError; +use crate::scenario::{ + DynError, Expectation, Scenario, + runtime::context::{CleanupGuard, RunContext, RunHandle}, +}; + +type WorkloadOutcome = Result<(), DynError>; + +/// Represents a fully prepared environment capable of executing a scenario. +pub struct Runner { + context: Arc, + cleanup_guard: Option>, +} + +impl Runner { + #[must_use] + pub fn new(context: RunContext, cleanup_guard: Option>) -> Self { + Self { + context: Arc::new(context), + cleanup_guard, + } + } + + #[must_use] + pub fn context(&self) -> Arc { + Arc::clone(&self.context) + } + + pub(crate) fn cleanup(&mut self) { + if let Some(guard) = self.cleanup_guard.take() { + guard.cleanup(); + } + } + + pub(crate) fn into_run_handle(mut self) -> RunHandle { + RunHandle::from_shared(Arc::clone(&self.context), self.cleanup_guard.take()) + } + + /// Executes the scenario by driving workloads first and then evaluating all + /// expectations. On any failure it cleans up resources and propagates the + /// error to the caller. + pub async fn run( + mut self, + scenario: &mut Scenario, + ) -> Result + where + Caps: Send + Sync, + { + let context = self.context(); + if let Err(error) = + Self::prepare_expectations(scenario.expectations_mut(), context.as_ref()).await + { + self.cleanup(); + return Err(error); + } + + if let Err(error) = Self::run_workloads(&context, scenario).await { + self.cleanup(); + return Err(error); + } + + Self::cooldown(&context).await; + + if let Err(error) = + Self::run_expectations(scenario.expectations_mut(), context.as_ref()).await + { + self.cleanup(); + return Err(error); + } + + Ok(self.into_run_handle()) + } + + async fn prepare_expectations( + expectations: &mut [Box], + context: &RunContext, + ) -> Result<(), ScenarioError> { + for expectation in expectations { + if let Err(source) = expectation.start_capture(context).await { + return Err(ScenarioError::ExpectationCapture(source)); + } + } + Ok(()) + } + + /// Spawns every workload, waits until the configured duration elapses (or a + /// workload fails), and then aborts the remaining tasks. + async fn run_workloads( + context: &Arc, + scenario: &Scenario, + ) -> Result<(), ScenarioError> + where + Caps: Send + Sync, + { + let mut workloads = Self::spawn_workloads(scenario, context); + let _ = Self::drive_until_timer(&mut workloads, scenario.duration()).await?; + Self::drain_workloads(&mut workloads).await + } + + /// Evaluates every registered expectation, aggregating failures so callers + /// can see all missing conditions in a single report. 
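+    ///
+    /// Sketch of how the aggregated report surfaces to a caller of
+    /// [`Runner::run`] (assuming a prepared `runner` and `scenario`); every
+    /// failed expectation contributes one `name: reason` line:
+    ///
+    ///     match runner.run(&mut scenario).await {
+    ///         Ok(_handle) => println!("all expectations held"),
+    ///         Err(ScenarioError::Expectations(report)) => eprintln!("{report}"),
+    ///         Err(other) => eprintln!("run aborted before evaluation: {other}"),
+    ///     }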
+ async fn run_expectations( + expectations: &mut [Box], + context: &RunContext, + ) -> Result<(), ScenarioError> { + let mut failures: Vec<(String, DynError)> = Vec::new(); + for expectation in expectations { + if let Err(source) = expectation.evaluate(context).await { + failures.push((expectation.name().to_owned(), source)); + } + } + + if failures.is_empty() { + return Ok(()); + } + + let summary = failures + .into_iter() + .map(|(name, source)| format!("{name}: {source}")) + .collect::>() + .join("\n"); + + Err(ScenarioError::Expectations(summary.into())) + } + + async fn cooldown(context: &Arc) { + let metrics = context.run_metrics(); + let needs_stabilization = context.node_control().is_some(); + + if let Some(interval) = metrics.block_interval_hint() { + if interval.is_zero() { + return; + } + let mut wait = interval.mul_f64(5.0); + if needs_stabilization { + let minimum = Duration::from_secs(30); + if wait < minimum { + wait = minimum; + } + } + if !wait.is_zero() { + sleep(wait).await; + } + } else if needs_stabilization { + sleep(Duration::from_secs(30)).await; + } + } + + /// Spawns each workload inside its own task and returns the join set for + /// cooperative management. + fn spawn_workloads( + scenario: &Scenario, + context: &Arc, + ) -> JoinSet + where + Caps: Send + Sync, + { + let mut workloads = JoinSet::new(); + for workload in scenario.workloads() { + let workload = Arc::clone(workload); + let ctx = Arc::clone(context); + + workloads.spawn(async move { + let outcome = AssertUnwindSafe(async { workload.start(ctx.as_ref()).await }) + .catch_unwind() + .await; + + outcome.unwrap_or_else(|panic| { + Err(format!("workload panicked: {}", panic_message(panic)).into()) + }) + }); + } + + workloads + } + + /// Polls workload tasks until the timeout fires or one reports an error. + async fn drive_until_timer( + workloads: &mut JoinSet, + duration: Duration, + ) -> Result { + let run_future = async { + while let Some(result) = workloads.join_next().await { + Self::map_join_result(result)?; + } + Ok(()) + }; + + timeout(duration, run_future) + .await + .map_or(Ok(true), |result| { + result?; + Ok(false) + }) + } + + /// Aborts and drains any remaining workload tasks so we do not leak work + /// across scenario runs. + async fn drain_workloads( + workloads: &mut JoinSet, + ) -> Result<(), ScenarioError> { + workloads.abort_all(); + + while let Some(result) = workloads.join_next().await { + Self::map_join_result(result)?; + } + + Ok(()) + } + + /// Converts the outcome of a workload task into the canonical scenario + /// error, tolerating cancellation when the runner aborts unfinished tasks. + fn map_join_result( + result: Result, + ) -> Result<(), ScenarioError> { + match result { + Ok(outcome) => outcome.map_err(ScenarioError::Workload), + Err(join_err) if join_err.is_cancelled() => Ok(()), + Err(join_err) => Err(ScenarioError::Workload( + format!("workload task failed: {join_err}").into(), + )), + } + } +} + +/// Attempts to turn a panic payload into a readable string for diagnostics. 
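+///
+/// For example, a workload that calls `panic!("boom")` surfaces as a
+/// `ScenarioError::Workload` whose message reads `workload panicked: boom`,
+/// since the payload downcasts to either `String` or `&'static str`:
+///
+///     assert_eq!(panic_message(Box::new("boom".to_owned())), "boom");
+///     assert_eq!(panic_message(Box::new("boom")), "boom");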
+fn panic_message(panic: Box) -> String { + panic.downcast::().map_or_else( + |panic| { + panic.downcast::<&'static str>().map_or_else( + |_| "unknown panic".to_owned(), + |message| (*message).to_owned(), + ) + }, + |message| *message, + ) +} + +impl Drop for Runner { + fn drop(&mut self) { + self.cleanup(); + } +} diff --git a/testing-framework/core/src/scenario/workload.rs b/testing-framework/core/src/scenario/workload.rs new file mode 100644 index 0000000..4547303 --- /dev/null +++ b/testing-framework/core/src/scenario/workload.rs @@ -0,0 +1,23 @@ +use async_trait::async_trait; + +use super::{DynError, Expectation, RunContext, runtime::context::RunMetrics}; +use crate::topology::GeneratedTopology; + +#[async_trait] +pub trait Workload: Send + Sync { + fn name(&self) -> &str; + + fn expectations(&self) -> Vec> { + Vec::new() + } + + fn init( + &mut self, + _descriptors: &GeneratedTopology, + _run_metrics: &RunMetrics, + ) -> Result<(), DynError> { + Ok(()) + } + + async fn start(&self, ctx: &RunContext) -> Result<(), DynError>; +} diff --git a/testing-framework/core/src/topology/mod.rs b/testing-framework/core/src/topology/mod.rs new file mode 100644 index 0000000..e2a300a --- /dev/null +++ b/testing-framework/core/src/topology/mod.rs @@ -0,0 +1,1408 @@ +pub mod configs { + pub use integration_configs::topology::configs::*; +} + +use std::{ + collections::{HashMap, HashSet}, + iter, + time::Duration, +}; + +use configs::{ + GeneralConfig, + consensus::{ProviderInfo, create_genesis_tx_with_declarations}, + da::{DaParams, create_da_configs}, + network::{Libp2pNetworkLayout, NetworkParams, create_network_configs}, + tracing::create_tracing_configs, + wallet::{WalletAccount, WalletConfig}, +}; +use futures::future::join_all; +use groth16::fr_to_bytes; +use key_management_system::{ + backend::preload::PreloadKMSBackendSettings, + keys::{Ed25519Key, Key, ZkKey}, +}; +use nomos_core::{ + mantle::GenesisTx as _, + sdp::{Locator, ServiceType, SessionNumber}, +}; +use nomos_da_network_core::swarm::{BalancerStats, DAConnectionPolicySettings}; +use nomos_da_network_service::MembershipResponse; +use nomos_http_api_common::paths; +use nomos_network::backends::libp2p::Libp2pInfo; +use nomos_utils::net::get_available_udp_port; +use rand::{Rng as _, thread_rng}; +use reqwest::{Client, Url}; +use thiserror::Error; +use tokio::time::{sleep, timeout}; +use tracing::warn; + +use crate::{ + adjust_timeout, + nodes::{ + executor::{Executor, create_executor_config}, + validator::{Validator, create_validator_config}, + }, + topology::configs::{ + api::create_api_configs, + blend::{GeneralBlendConfig, create_blend_configs}, + bootstrap::{SHORT_PROLONGED_BOOTSTRAP_PERIOD, create_bootstrap_configs}, + consensus::{ConsensusParams, create_consensus_configs}, + da::GeneralDaConfig, + time::default_time_config, + }, +}; + +#[derive(Clone)] +pub struct TopologyConfig { + pub n_validators: usize, + pub n_executors: usize, + pub consensus_params: ConsensusParams, + pub da_params: DaParams, + pub network_params: NetworkParams, + pub wallet_config: WalletConfig, +} + +impl TopologyConfig { + #[must_use] + pub fn two_validators() -> Self { + Self { + n_validators: 2, + n_executors: 0, + consensus_params: ConsensusParams::default_for_participants(2), + da_params: DaParams::default(), + network_params: NetworkParams::default(), + wallet_config: WalletConfig::default(), + } + } + + #[must_use] + pub fn validator_and_executor() -> Self { + Self { + n_validators: 1, + n_executors: 1, + consensus_params: 
ConsensusParams::default_for_participants(2), + da_params: DaParams { + dispersal_factor: 2, + subnetwork_size: 2, + num_subnets: 2, + policy_settings: DAConnectionPolicySettings { + min_dispersal_peers: 1, + min_replication_peers: 1, + max_dispersal_failures: 0, + max_sampling_failures: 0, + max_replication_failures: 0, + malicious_threshold: 0, + }, + balancer_interval: Duration::from_secs(1), + ..Default::default() + }, + network_params: NetworkParams::default(), + wallet_config: WalletConfig::default(), + } + } + + #[must_use] + pub fn with_node_numbers(validators: usize, executors: usize) -> Self { + let participants = validators + executors; + assert!(participants > 0, "topology must include at least one node"); + + let mut da_params = DaParams::default(); + let da_nodes = participants; + if da_nodes <= 1 { + da_params.subnetwork_size = 1; + da_params.num_subnets = 1; + da_params.dispersal_factor = 1; + da_params.policy_settings.min_dispersal_peers = 0; + da_params.policy_settings.min_replication_peers = 0; + } else { + let dispersal = da_nodes.min(da_params.dispersal_factor.max(2)); + da_params.dispersal_factor = dispersal; + da_params.subnetwork_size = da_params.subnetwork_size.max(dispersal); + da_params.num_subnets = da_params.subnetwork_size as u16; + let min_peers = dispersal.saturating_sub(1).max(1); + da_params.policy_settings.min_dispersal_peers = min_peers; + da_params.policy_settings.min_replication_peers = min_peers; + da_params.balancer_interval = Duration::from_secs(1); + } + + Self { + n_validators: validators, + n_executors: executors, + consensus_params: ConsensusParams::default_for_participants(participants), + da_params, + network_params: NetworkParams::default(), + wallet_config: WalletConfig::default(), + } + } + + #[must_use] + pub fn validators_and_executor( + num_validators: usize, + num_subnets: usize, + dispersal_factor: usize, + ) -> Self { + Self { + n_validators: num_validators, + n_executors: 1, + consensus_params: ConsensusParams::default_for_participants(num_validators + 1), + da_params: DaParams { + dispersal_factor, + subnetwork_size: num_subnets, + num_subnets: num_subnets as u16, + policy_settings: DAConnectionPolicySettings { + min_dispersal_peers: num_subnets, + min_replication_peers: dispersal_factor - 1, + max_dispersal_failures: 0, + max_sampling_failures: 0, + max_replication_failures: 0, + malicious_threshold: 0, + }, + balancer_interval: Duration::from_secs(5), + ..Default::default() + }, + network_params: NetworkParams::default(), + wallet_config: WalletConfig::default(), + } + } + + #[must_use] + pub const fn wallet(&self) -> &WalletConfig { + &self.wallet_config + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum NodeRole { + Validator, + Executor, +} + +#[derive(Clone)] +pub struct GeneratedNodeConfig { + pub role: NodeRole, + pub index: usize, + pub id: [u8; 32], + pub general: GeneralConfig, + pub da_port: u16, + pub blend_port: u16, +} + +impl GeneratedNodeConfig { + #[must_use] + pub const fn role(&self) -> NodeRole { + self.role + } + + #[must_use] + pub const fn index(&self) -> usize { + self.index + } + + #[must_use] + pub const fn network_port(&self) -> u16 { + self.general.network_config.backend.inner.port + } + + #[must_use] + pub const fn api_port(&self) -> u16 { + self.general.api_config.address.port() + } + + #[must_use] + pub const fn testing_http_port(&self) -> u16 { + self.general.api_config.testing_http_address.port() + } +} + +#[derive(Clone)] +pub struct GeneratedTopology { + config: TopologyConfig, + 
validators: Vec, + executors: Vec, +} + +impl GeneratedTopology { + #[must_use] + pub const fn config(&self) -> &TopologyConfig { + &self.config + } + + #[must_use] + pub fn validators(&self) -> &[GeneratedNodeConfig] { + &self.validators + } + + #[must_use] + pub fn executors(&self) -> &[GeneratedNodeConfig] { + &self.executors + } + + pub fn nodes(&self) -> impl Iterator { + self.validators.iter().chain(self.executors.iter()) + } + + #[must_use] + pub fn slot_duration(&self) -> Option { + self.validators + .first() + .map(|node| node.general.time_config.slot_duration) + } + + #[must_use] + pub fn wallet_accounts(&self) -> &[WalletAccount] { + &self.config.wallet_config.accounts + } + + pub async fn spawn_local(&self) -> Topology { + let configs = self + .nodes() + .map(|node| node.general.clone()) + .collect::>(); + + let (validators, executors) = Topology::spawn_validators_executors( + configs, + self.config.n_validators, + self.config.n_executors, + ) + .await; + + Topology { + validators, + executors, + } + } + + pub async fn wait_remote_readiness( + &self, + validator_endpoints: &[Url], + executor_endpoints: &[Url], + validator_membership_endpoints: Option<&[Url]>, + executor_membership_endpoints: Option<&[Url]>, + ) -> Result<(), ReadinessError> { + let total_nodes = self.validators.len() + self.executors.len(); + if total_nodes == 0 { + return Ok(()); + } + + assert_eq!( + self.validators.len(), + validator_endpoints.len(), + "validator endpoints must match topology" + ); + assert_eq!( + self.executors.len(), + executor_endpoints.len(), + "executor endpoints must match topology" + ); + + let mut endpoints = Vec::with_capacity(total_nodes); + endpoints.extend_from_slice(validator_endpoints); + endpoints.extend_from_slice(executor_endpoints); + + let labels = self.labels(); + let client = Client::new(); + let make_testing_base_url = |port: u16| -> Url { + Url::parse(&format!("http://127.0.0.1:{port}/")) + .expect("failed to construct local testing base url") + }; + + if endpoints.len() > 1 { + let listen_ports = self.listen_ports(); + let initial_peer_ports = self.initial_peer_ports(); + let expected_peer_counts = + find_expected_peer_counts(&listen_ports, &initial_peer_ports); + let network_check = HttpNetworkReadiness { + client: &client, + endpoints: &endpoints, + expected_peer_counts: &expected_peer_counts, + labels: &labels, + }; + + network_check.wait().await?; + } + + let mut membership_endpoints = Vec::with_capacity(total_nodes); + if let Some(urls) = validator_membership_endpoints { + assert_eq!( + self.validators.len(), + urls.len(), + "validator membership endpoints must match topology" + ); + membership_endpoints.extend_from_slice(urls); + } else { + membership_endpoints.extend( + self.validators + .iter() + .map(|node| make_testing_base_url(node.testing_http_port())), + ); + } + + if let Some(urls) = executor_membership_endpoints { + assert_eq!( + self.executors.len(), + urls.len(), + "executor membership endpoints must match topology" + ); + membership_endpoints.extend_from_slice(urls); + } else { + membership_endpoints.extend( + self.executors + .iter() + .map(|node| make_testing_base_url(node.testing_http_port())), + ); + } + + let membership_check = HttpMembershipReadiness { + client: &client, + endpoints: &membership_endpoints, + session: SessionNumber::from(0u64), + labels: &labels, + expect_non_empty: true, + }; + + membership_check.wait().await + } + + fn listen_ports(&self) -> Vec { + self.validators + .iter() + .map(|node| 
node.general.network_config.backend.inner.port) + .chain( + self.executors + .iter() + .map(|node| node.general.network_config.backend.inner.port), + ) + .collect() + } + + fn initial_peer_ports(&self) -> Vec> { + self.validators + .iter() + .map(|node| { + node.general + .network_config + .backend + .initial_peers + .iter() + .filter_map(multiaddr_port) + .collect::>() + }) + .chain(self.executors.iter().map(|node| { + node.general + .network_config + .backend + .initial_peers + .iter() + .filter_map(multiaddr_port) + .collect::>() + })) + .collect() + } + + fn labels(&self) -> Vec { + self.validators + .iter() + .enumerate() + .map(|(idx, node)| { + format!( + "validator#{idx}@{}", + node.general.network_config.backend.inner.port + ) + }) + .chain(self.executors.iter().enumerate().map(|(idx, node)| { + format!( + "executor#{idx}@{}", + node.general.network_config.backend.inner.port + ) + })) + .collect() + } +} + +#[derive(Clone)] +pub struct TopologyBuilder { + config: TopologyConfig, + ids: Option>, + da_ports: Option>, + blend_ports: Option>, +} + +impl TopologyBuilder { + #[must_use] + pub const fn new(config: TopologyConfig) -> Self { + Self { + config, + ids: None, + da_ports: None, + blend_ports: None, + } + } + + #[must_use] + pub fn with_ids(mut self, ids: Vec<[u8; 32]>) -> Self { + self.ids = Some(ids); + self + } + + #[must_use] + pub fn with_da_ports(mut self, ports: Vec) -> Self { + self.da_ports = Some(ports); + self + } + + #[must_use] + pub fn with_blend_ports(mut self, ports: Vec) -> Self { + self.blend_ports = Some(ports); + self + } + + #[must_use] + pub const fn with_validator_count(mut self, validators: usize) -> Self { + self.config.n_validators = validators; + self + } + + #[must_use] + pub const fn with_executor_count(mut self, executors: usize) -> Self { + self.config.n_executors = executors; + self + } + + #[must_use] + pub const fn with_node_counts(mut self, validators: usize, executors: usize) -> Self { + self.config.n_validators = validators; + self.config.n_executors = executors; + self + } + + #[must_use] + pub const fn with_network_layout(mut self, layout: Libp2pNetworkLayout) -> Self { + self.config.network_params.libp2p_network_layout = layout; + self + } + + #[must_use] + pub fn with_wallet_config(mut self, wallet: WalletConfig) -> Self { + self.config.wallet_config = wallet; + self + } + + #[must_use] + pub fn build(self) -> GeneratedTopology { + let Self { + config, + ids, + da_ports, + blend_ports, + } = self; + + let n_participants = config.n_validators + config.n_executors; + assert!(n_participants > 0, "topology must have at least one node"); + + let ids = resolve_ids(ids, n_participants); + let da_ports = resolve_ports(da_ports, n_participants, "DA"); + let blend_ports = resolve_ports(blend_ports, n_participants, "Blend"); + + let mut consensus_configs = + create_consensus_configs(&ids, &config.consensus_params, &config.wallet_config); + let bootstrapping_config = create_bootstrap_configs(&ids, SHORT_PROLONGED_BOOTSTRAP_PERIOD); + let da_configs = create_da_configs(&ids, &config.da_params, &da_ports); + let network_configs = create_network_configs(&ids, &config.network_params); + let blend_configs = create_blend_configs(&ids, &blend_ports); + let api_configs = create_api_configs(&ids); + let tracing_configs = create_tracing_configs(&ids); + let time_config = default_time_config(); + + let mut providers: Vec<_> = da_configs + .iter() + .enumerate() + .map(|(i, da_conf)| ProviderInfo { + service_type: ServiceType::DataAvailability, + provider_sk: 
da_conf.signer.clone(), + zk_sk: da_conf.secret_zk_key.clone(), + locator: Locator(da_conf.listening_address.clone()), + note: consensus_configs[0].da_notes[i].clone(), + }) + .collect(); + providers.extend( + blend_configs + .iter() + .enumerate() + .map(|(i, blend_conf)| ProviderInfo { + service_type: ServiceType::BlendNetwork, + provider_sk: blend_conf.signer.clone(), + zk_sk: blend_conf.secret_zk_key.clone(), + locator: Locator(blend_conf.backend_core.listening_address.clone()), + note: consensus_configs[0].blend_notes[i].clone(), + }), + ); + + let ledger_tx = consensus_configs[0] + .genesis_tx + .mantle_tx() + .ledger_tx + .clone(); + let genesis_tx = create_genesis_tx_with_declarations(ledger_tx, providers); + for c in &mut consensus_configs { + c.genesis_tx = genesis_tx.clone(); + } + + let kms_configs = + create_kms_configs(&blend_configs, &da_configs, &config.wallet_config.accounts); + + let mut validators = Vec::with_capacity(config.n_validators); + let mut executors = Vec::with_capacity(config.n_executors); + + for i in 0..n_participants { + let general = GeneralConfig { + consensus_config: consensus_configs[i].clone(), + bootstrapping_config: bootstrapping_config[i].clone(), + da_config: da_configs[i].clone(), + network_config: network_configs[i].clone(), + blend_config: blend_configs[i].clone(), + api_config: api_configs[i].clone(), + tracing_config: tracing_configs[i].clone(), + time_config: time_config.clone(), + kms_config: kms_configs[i].clone(), + }; + + let role = if i < config.n_validators { + NodeRole::Validator + } else { + NodeRole::Executor + }; + let index = match role { + NodeRole::Validator => i, + NodeRole::Executor => i - config.n_validators, + }; + + let descriptor = GeneratedNodeConfig { + role, + index, + id: ids[i], + general, + da_port: da_ports[i], + blend_port: blend_ports[i], + }; + + match role { + NodeRole::Validator => validators.push(descriptor), + NodeRole::Executor => executors.push(descriptor), + } + } + + GeneratedTopology { + config, + validators, + executors, + } + } +} + +pub struct Topology { + validators: Vec, + executors: Vec, +} + +impl Topology { + pub async fn spawn(config: TopologyConfig) -> Self { + let n_participants = config.n_validators + config.n_executors; + + // we use the same random bytes for: + // * da id + // * coin sk + // * coin nonce + // * libp2p node key + let mut ids = vec![[0; 32]; n_participants]; + let mut da_ports = vec![]; + let mut blend_ports = vec![]; + for id in &mut ids { + thread_rng().fill(id); + da_ports.push(get_available_udp_port().unwrap()); + blend_ports.push(get_available_udp_port().unwrap()); + } + + let mut consensus_configs = + create_consensus_configs(&ids, &config.consensus_params, &config.wallet_config); + let bootstrapping_config = create_bootstrap_configs(&ids, SHORT_PROLONGED_BOOTSTRAP_PERIOD); + let da_configs = create_da_configs(&ids, &config.da_params, &da_ports); + let network_configs = create_network_configs(&ids, &config.network_params); + let blend_configs = create_blend_configs(&ids, &blend_ports); + let api_configs = create_api_configs(&ids); + let tracing_configs = create_tracing_configs(&ids); + let time_config = default_time_config(); + + // Setup genesis TX with Blend and DA service declarationse + let mut providers: Vec<_> = da_configs + .iter() + .enumerate() + .map(|(i, da_conf)| ProviderInfo { + service_type: ServiceType::DataAvailability, + provider_sk: da_conf.signer.clone(), + zk_sk: da_conf.secret_zk_key.clone(), + locator: Locator(da_conf.listening_address.clone()), + 
note: consensus_configs[0].da_notes[i].clone(), + }) + .collect(); + providers.extend( + blend_configs + .iter() + .enumerate() + .map(|(i, blend_conf)| ProviderInfo { + service_type: ServiceType::BlendNetwork, + provider_sk: blend_conf.signer.clone(), + zk_sk: blend_conf.secret_zk_key.clone(), + locator: Locator(blend_conf.backend_core.listening_address.clone()), + note: consensus_configs[0].blend_notes[i].clone(), + }), + ); + + // Update genesis TX to contain Blend and DA providers. + let ledger_tx = consensus_configs[0] + .genesis_tx + .mantle_tx() + .ledger_tx + .clone(); + let genesis_tx = create_genesis_tx_with_declarations(ledger_tx, providers); + for c in &mut consensus_configs { + c.genesis_tx = genesis_tx.clone(); + } + + // Set Blend and DA keys in KMS of each node config. + let kms_configs = + create_kms_configs(&blend_configs, &da_configs, &config.wallet_config.accounts); + + let mut node_configs = vec![]; + + for i in 0..n_participants { + node_configs.push(GeneralConfig { + consensus_config: consensus_configs[i].clone(), + bootstrapping_config: bootstrapping_config[i].clone(), + da_config: da_configs[i].clone(), + network_config: network_configs[i].clone(), + blend_config: blend_configs[i].clone(), + api_config: api_configs[i].clone(), + tracing_config: tracing_configs[i].clone(), + time_config: time_config.clone(), + kms_config: kms_configs[i].clone(), + }); + } + + let (validators, executors) = + Self::spawn_validators_executors(node_configs, config.n_validators, config.n_executors) + .await; + + Self { + validators, + executors, + } + } + + pub async fn spawn_with_empty_membership( + config: TopologyConfig, + ids: &[[u8; 32]], + da_ports: &[u16], + blend_ports: &[u16], + ) -> Self { + let n_participants = config.n_validators + config.n_executors; + + let consensus_configs = + create_consensus_configs(ids, &config.consensus_params, &config.wallet_config); + let bootstrapping_config = create_bootstrap_configs(ids, SHORT_PROLONGED_BOOTSTRAP_PERIOD); + let da_configs = create_da_configs(ids, &config.da_params, da_ports); + let network_configs = create_network_configs(ids, &config.network_params); + let blend_configs = create_blend_configs(ids, blend_ports); + let api_configs = create_api_configs(ids); + // Create membership configs without DA nodes. 
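+        // Unlike `spawn`, this constructor adds no provider declarations to
+        // the genesis transaction and preloads the KMS with an empty key map,
+        // so DA membership is expected to stay empty for the initial session.
+        // A hypothetical caller would assert that along these lines:
+        //
+        //     let topology =
+        //         Topology::spawn_with_empty_membership(config, &ids, &da_ports, &blend_ports).await;
+        //     topology
+        //         .wait_membership_empty_for_session(SessionNumber::from(0u64))
+        //         .await?;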
+ let tracing_configs = create_tracing_configs(ids); + let time_config = default_time_config(); + + let kms_config = PreloadKMSBackendSettings { + keys: HashMap::new(), + }; + + let mut node_configs = vec![]; + + for i in 0..n_participants { + node_configs.push(GeneralConfig { + consensus_config: consensus_configs[i].clone(), + bootstrapping_config: bootstrapping_config[i].clone(), + da_config: da_configs[i].clone(), + network_config: network_configs[i].clone(), + blend_config: blend_configs[i].clone(), + api_config: api_configs[i].clone(), + tracing_config: tracing_configs[i].clone(), + time_config: time_config.clone(), + kms_config: kms_config.clone(), + }); + } + let (validators, executors) = + Self::spawn_validators_executors(node_configs, config.n_validators, config.n_executors) + .await; + + Self { + validators, + executors, + } + } + + async fn spawn_validators_executors( + config: Vec, + n_validators: usize, + n_executors: usize, + ) -> (Vec, Vec) { + let mut validators = Vec::new(); + for i in 0..n_validators { + let config = create_validator_config(config[i].clone()); + validators.push(Validator::spawn(config).await.unwrap()); + } + + let mut executors = Vec::new(); + for i in n_validators..(n_validators + n_executors) { + let config = create_executor_config(config[i].clone()); + executors.push(Executor::spawn(config).await); + } + + (validators, executors) + } + + #[must_use] + pub fn validators(&self) -> &[Validator] { + &self.validators + } + + #[must_use] + pub fn executors(&self) -> &[Executor] { + &self.executors + } + + pub async fn wait_network_ready(&self) -> Result<(), ReadinessError> { + let listen_ports = self.node_listen_ports(); + if listen_ports.len() <= 1 { + return Ok(()); + } + + let initial_peer_ports = self.node_initial_peer_ports(); + let expected_peer_counts = find_expected_peer_counts(&listen_ports, &initial_peer_ports); + let labels = self.node_labels(); + + let check = NetworkReadiness { + topology: self, + expected_peer_counts: &expected_peer_counts, + labels: &labels, + }; + + check.wait().await?; + Ok(()) + } + + pub async fn wait_da_balancer_ready(&self) -> Result<(), ReadinessError> { + if self.validators.is_empty() && self.executors.is_empty() { + return Ok(()); + } + + let labels = self.node_labels(); + let check = DaBalancerReadiness { + topology: self, + labels: &labels, + }; + + check.wait().await?; + Ok(()) + } + + pub async fn wait_membership_ready(&self) -> Result<(), ReadinessError> { + self.wait_membership_ready_for_session(SessionNumber::from(0u64)) + .await + } + + pub async fn wait_membership_ready_for_session( + &self, + session: SessionNumber, + ) -> Result<(), ReadinessError> { + self.wait_membership_assignations(session, true).await + } + + pub async fn wait_membership_empty_for_session( + &self, + session: SessionNumber, + ) -> Result<(), ReadinessError> { + self.wait_membership_assignations(session, false).await + } + + async fn wait_membership_assignations( + &self, + session: SessionNumber, + expect_non_empty: bool, + ) -> Result<(), ReadinessError> { + let total_nodes = self.validators.len() + self.executors.len(); + + if total_nodes == 0 { + return Ok(()); + } + + let labels = self.node_labels(); + let check = MembershipReadiness { + topology: self, + session, + labels: &labels, + expect_non_empty, + }; + + check.wait().await?; + Ok(()) + } + + fn node_listen_ports(&self) -> Vec { + self.validators + .iter() + .map(|node| node.config().network.backend.inner.port) + .chain( + self.executors + .iter() + .map(|node| 
node.config().network.backend.inner.port), + ) + .collect() + } + + fn node_initial_peer_ports(&self) -> Vec> { + self.validators + .iter() + .map(|node| { + node.config() + .network + .backend + .initial_peers + .iter() + .filter_map(multiaddr_port) + .collect::>() + }) + .chain(self.executors.iter().map(|node| { + node.config() + .network + .backend + .initial_peers + .iter() + .filter_map(multiaddr_port) + .collect::>() + })) + .collect() + } + + fn node_labels(&self) -> Vec { + self.validators + .iter() + .enumerate() + .map(|(idx, node)| { + format!( + "validator#{idx}@{}", + node.config().network.backend.inner.port + ) + }) + .chain(self.executors.iter().enumerate().map(|(idx, node)| { + format!( + "executor#{idx}@{}", + node.config().network.backend.inner.port + ) + })) + .collect() + } +} + +#[derive(Debug, Error)] +pub enum ReadinessError { + #[error("{message}")] + Timeout { message: String }, +} + +#[async_trait::async_trait] +trait ReadinessCheck<'a> { + type Data: Send; + + async fn collect(&'a self) -> Self::Data; + + fn is_ready(&self, data: &Self::Data) -> bool; + + fn timeout_message(&self, data: Self::Data) -> String; + + fn poll_interval(&self) -> Duration { + Duration::from_millis(200) + } + + async fn wait(&'a self) -> Result<(), ReadinessError> { + let timeout_duration = adjust_timeout(Duration::from_secs(60)); + let poll_interval = self.poll_interval(); + let mut data = self.collect().await; + + let wait_result = timeout(timeout_duration, async { + loop { + if self.is_ready(&data) { + return; + } + + sleep(poll_interval).await; + + data = self.collect().await; + } + }) + .await; + + if wait_result.is_err() { + let message = self.timeout_message(data); + return Err(ReadinessError::Timeout { message }); + } + + Ok(()) + } +} + +struct NetworkReadiness<'a> { + topology: &'a Topology, + expected_peer_counts: &'a [usize], + labels: &'a [String], +} + +#[async_trait::async_trait] +impl<'a> ReadinessCheck<'a> for NetworkReadiness<'a> { + type Data = Vec; + + async fn collect(&'a self) -> Self::Data { + let (validator_infos, executor_infos) = tokio::join!( + join_all(self.topology.validators.iter().map(Validator::network_info)), + join_all(self.topology.executors.iter().map(Executor::network_info)) + ); + + validator_infos.into_iter().chain(executor_infos).collect() + } + + fn is_ready(&self, data: &Self::Data) -> bool { + data.iter() + .enumerate() + .all(|(idx, info)| info.n_peers >= self.expected_peer_counts[idx]) + } + + fn timeout_message(&self, data: Self::Data) -> String { + let summary = build_timeout_summary(self.labels, data, self.expected_peer_counts); + format!("timed out waiting for network readiness: {summary}") + } +} + +struct HttpNetworkReadiness<'a> { + client: &'a Client, + endpoints: &'a [Url], + expected_peer_counts: &'a [usize], + labels: &'a [String], +} + +#[async_trait::async_trait] +impl<'a> ReadinessCheck<'a> for HttpNetworkReadiness<'a> { + type Data = Vec; + + async fn collect(&'a self) -> Self::Data { + let futures = self + .endpoints + .iter() + .map(|endpoint| fetch_network_info(self.client, endpoint)); + join_all(futures).await + } + + fn is_ready(&self, data: &Self::Data) -> bool { + data.iter() + .enumerate() + .all(|(idx, info)| info.n_peers >= self.expected_peer_counts[idx]) + } + + fn timeout_message(&self, data: Self::Data) -> String { + let summary = build_timeout_summary(self.labels, data, self.expected_peer_counts); + format!("timed out waiting for network readiness: {summary}") + } +} + +struct MembershipReadiness<'a> { + topology: 
&'a Topology, + session: SessionNumber, + labels: &'a [String], + expect_non_empty: bool, +} + +#[async_trait::async_trait] +impl<'a> ReadinessCheck<'a> for MembershipReadiness<'a> { + type Data = Vec>; + + async fn collect(&'a self) -> Self::Data { + let (validator_responses, executor_responses) = tokio::join!( + join_all( + self.topology + .validators + .iter() + .map(|node| node.da_get_membership(self.session)), + ), + join_all( + self.topology + .executors + .iter() + .map(|node| node.da_get_membership(self.session)), + ) + ); + + validator_responses + .into_iter() + .chain(executor_responses) + .collect() + } + + fn is_ready(&self, data: &Self::Data) -> bool { + self.assignation_statuses(data) + .into_iter() + .all(|ready| ready) + } + + fn timeout_message(&self, data: Self::Data) -> String { + let statuses = self.assignation_statuses(&data); + let description = if self.expect_non_empty { + "non-empty assignations" + } else { + "empty assignations" + }; + let summary = build_membership_summary(self.labels, &statuses, description); + format!("timed out waiting for DA membership readiness ({description}): {summary}") + } +} + +impl MembershipReadiness<'_> { + fn assignation_statuses( + &self, + responses: &[Result], + ) -> Vec { + responses + .iter() + .map(|res| { + res.as_ref() + .map(|resp| { + let is_non_empty = !resp.assignations.is_empty(); + if self.expect_non_empty { + is_non_empty + } else { + !is_non_empty + } + }) + .unwrap_or(false) + }) + .collect() + } +} + +struct HttpMembershipReadiness<'a> { + client: &'a Client, + endpoints: &'a [Url], + session: SessionNumber, + labels: &'a [String], + expect_non_empty: bool, +} + +#[async_trait::async_trait] +impl<'a> ReadinessCheck<'a> for HttpMembershipReadiness<'a> { + type Data = Vec>; + + async fn collect(&'a self) -> Self::Data { + let futures = self + .endpoints + .iter() + .map(|endpoint| fetch_membership(self.client, endpoint, self.session)); + join_all(futures).await + } + + fn is_ready(&self, data: &Self::Data) -> bool { + assignation_statuses(data, self.expect_non_empty) + .into_iter() + .all(|ready| ready) + } + + fn timeout_message(&self, data: Self::Data) -> String { + let statuses = assignation_statuses(&data, self.expect_non_empty); + let description = if self.expect_non_empty { + "non-empty assignations" + } else { + "empty assignations" + }; + let summary = build_membership_summary(self.labels, &statuses, description); + format!("timed out waiting for DA membership readiness ({description}): {summary}") + } +} + +struct DaBalancerReadiness<'a> { + topology: &'a Topology, + labels: &'a [String], +} + +#[async_trait::async_trait] +impl<'a> ReadinessCheck<'a> for DaBalancerReadiness<'a> { + type Data = Vec<(String, usize, BalancerStats)>; + + async fn collect(&'a self) -> Self::Data { + let mut data = Vec::new(); + for (idx, validator) in self.topology.validators.iter().enumerate() { + data.push(( + self.labels[idx].clone(), + validator.config().da_network.subnet_threshold, + validator.balancer_stats().await, + )); + } + for (offset, executor) in self.topology.executors.iter().enumerate() { + let label_index = self.topology.validators.len() + offset; + data.push(( + self.labels[label_index].clone(), + executor.config().da_network.subnet_threshold, + executor.balancer_stats().await, + )); + } + data + } + + fn is_ready(&self, data: &Self::Data) -> bool { + data.iter().all(|(_, threshold, stats)| { + if *threshold == 0 { + return true; + } + connected_subnetworks(stats) >= *threshold + }) + } + + fn 
timeout_message(&self, data: Self::Data) -> String { + let summary = data + .into_iter() + .map(|(label, threshold, stats)| { + let connected = connected_subnetworks(&stats); + format!("{label}: connected={connected}, required={threshold}") + }) + .collect::>() + .join(", "); + format!("timed out waiting for DA balancer readiness: {summary}") + } + + fn poll_interval(&self) -> Duration { + Duration::from_secs(1) + } +} + +fn connected_subnetworks(stats: &BalancerStats) -> usize { + stats + .values() + .filter(|stat| stat.inbound > 0 || stat.outbound > 0) + .count() +} + +fn build_timeout_summary( + labels: &[String], + infos: Vec, + expected_counts: &[usize], +) -> String { + infos + .into_iter() + .zip(expected_counts.iter()) + .zip(labels.iter()) + .map(|((info, expected), label)| { + format!("{}: peers={}, expected={}", label, info.n_peers, expected) + }) + .collect::>() + .join(", ") +} + +fn build_membership_summary(labels: &[String], statuses: &[bool], description: &str) -> String { + statuses + .iter() + .zip(labels.iter()) + .map(|(ready, label)| { + let status = if *ready { "ready" } else { "waiting" }; + format!("{label}: status={status}, expected {description}") + }) + .collect::>() + .join(", ") +} + +async fn fetch_network_info(client: &Client, base: &Url) -> Libp2pInfo { + let url = join_path(base, paths::NETWORK_INFO); + let response = match client.get(url).send().await { + Ok(resp) => resp, + Err(err) => { + return log_network_warning(base, err, "failed to reach network info endpoint"); + } + }; + + let response = match response.error_for_status() { + Ok(resp) => resp, + Err(err) => { + return log_network_warning(base, err, "network info endpoint returned error"); + } + }; + + match response.json::().await { + Ok(info) => info, + Err(err) => log_network_warning(base, err, "failed to decode network info response"), + } +} + +async fn fetch_membership( + client: &Client, + base: &Url, + session: SessionNumber, +) -> Result { + let url = join_path(base, paths::DA_GET_MEMBERSHIP); + client + .post(url) + .json(&session) + .send() + .await? + .error_for_status()? 
+ .json() + .await +} + +fn log_network_warning(base: &Url, err: impl std::fmt::Display, message: &str) -> Libp2pInfo { + warn!(target: "readiness", url = %base, error = %err, "{message}"); + empty_libp2p_info() +} + +fn empty_libp2p_info() -> Libp2pInfo { + Libp2pInfo { + listen_addresses: Vec::with_capacity(0), + n_peers: 0, + n_connections: 0, + n_pending_connections: 0, + } +} + +fn join_path(base: &Url, path: &str) -> Url { + base.join(path.trim_start_matches('/')) + .unwrap_or_else(|err| panic!("failed to join url {base} with path {path}: {err}")) +} + +fn assignation_statuses( + responses: &[Result], + expect_non_empty: bool, +) -> Vec { + responses + .iter() + .map(|res| { + res.as_ref() + .map(|resp| { + let is_non_empty = !resp.assignations.is_empty(); + if expect_non_empty { + is_non_empty + } else { + !is_non_empty + } + }) + .unwrap_or(false) + }) + .collect() +} + +fn multiaddr_port(addr: &nomos_libp2p::Multiaddr) -> Option { + for protocol in addr { + match protocol { + nomos_libp2p::Protocol::Udp(port) | nomos_libp2p::Protocol::Tcp(port) => { + return Some(port); + } + _ => {} + } + } + None +} + +fn find_expected_peer_counts( + listen_ports: &[u16], + initial_peer_ports: &[HashSet], +) -> Vec { + let mut expected: Vec> = vec![HashSet::new(); initial_peer_ports.len()]; + + for (idx, ports) in initial_peer_ports.iter().enumerate() { + for port in ports { + let Some(peer_idx) = listen_ports.iter().position(|p| p == port) else { + continue; + }; + if peer_idx == idx { + continue; + } + + expected[idx].insert(peer_idx); + expected[peer_idx].insert(idx); + } + } + + expected.into_iter().map(|set| set.len()).collect() +} + +#[must_use] +pub fn create_kms_configs( + blend_configs: &[GeneralBlendConfig], + da_configs: &[GeneralDaConfig], + _wallet_accounts: &[WalletAccount], +) -> Vec { + da_configs + .iter() + .zip(blend_configs.iter()) + .map(|(da_conf, blend_conf)| PreloadKMSBackendSettings { + keys: [ + ( + hex::encode(blend_conf.signer.verifying_key().as_bytes()), + Key::Ed25519(Ed25519Key::new(blend_conf.signer.clone())), + ), + ( + hex::encode(fr_to_bytes( + &blend_conf.secret_zk_key.to_public_key().into_inner(), + )), + Key::Zk(ZkKey::new(blend_conf.secret_zk_key.clone())), + ), + ( + hex::encode(da_conf.signer.verifying_key().as_bytes()), + Key::Ed25519(Ed25519Key::new(da_conf.signer.clone())), + ), + ( + hex::encode(fr_to_bytes( + &da_conf.secret_zk_key.to_public_key().into_inner(), + )), + Key::Zk(ZkKey::new(da_conf.secret_zk_key.clone())), + ), + ] + .into(), + }) + .collect() +} + +fn resolve_ids(ids: Option>, count: usize) -> Vec<[u8; 32]> { + ids.map_or_else( + || { + let mut generated = vec![[0; 32]; count]; + for id in &mut generated { + thread_rng().fill(id); + } + generated + }, + |ids| { + assert_eq!( + ids.len(), + count, + "expected {count} ids but got {}", + ids.len() + ); + ids + }, + ) +} + +fn resolve_ports(ports: Option>, count: usize, label: &str) -> Vec { + let resolved = ports.unwrap_or_else(|| { + iter::repeat_with(|| get_available_udp_port().unwrap()) + .take(count) + .collect() + }); + assert_eq!( + resolved.len(), + count, + "expected {count} {label} ports but got {}", + resolved.len() + ); + resolved +} diff --git a/testing-framework/runners/compose/Cargo.toml b/testing-framework/runners/compose/Cargo.toml new file mode 100644 index 0000000..6ea87d5 --- /dev/null +++ b/testing-framework/runners/compose/Cargo.toml @@ -0,0 +1,37 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = 
true +license.workspace = true +name = "testing-framework-runner-compose" +readme.workspace = true +repository.workspace = true +version = "0.1.0" + +[lints] +workspace = true + +[dependencies] +anyhow = "1" +async-trait = { workspace = true } +axum = { version = "0.7", default-features = false, features = ["http1", "json", "tokio"] } +cfgsync = { workspace = true } +reqwest = { workspace = true, features = ["json"] } +serde = { workspace = true, features = ["derive"] } +tempfile = { workspace = true } +tera = "1.19" +testing-framework-core = { path = "../../core" } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["macros", "net", "process", "rt-multi-thread", "sync", "time"] } +tracing = { workspace = true } +url = { version = "2" } +uuid = { version = "1", features = ["v4"] } + +[dev-dependencies] +groth16 = { workspace = true } +nomos-core = { workspace = true } +nomos-ledger = { workspace = true } +nomos-tracing-service = { workspace = true } +tests = { workspace = true } +zksign = { workspace = true } diff --git a/testing-framework/runners/compose/assets/docker-compose.yml.tera b/testing-framework/runners/compose/assets/docker-compose.yml.tera new file mode 100644 index 0000000..f0e511b --- /dev/null +++ b/testing-framework/runners/compose/assets/docker-compose.yml.tera @@ -0,0 +1,65 @@ +services: + prometheus: + image: prom/prometheus:v3.0.1 + command: + - --config.file=/etc/prometheus/prometheus.yml + - --storage.tsdb.retention.time=7d + - --web.enable-otlp-receiver + - --enable-feature=otlp-write-receiver + volumes: + - ./testnet/monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:z + ports: + - {{ prometheus.host_port }} + restart: on-failure + +{% for node in validators %} + {{ node.name }}: + image: {{ node.image }} +{% if node.platform %} platform: {{ node.platform }} +{% endif %} entrypoint: {{ node.entrypoint }} + volumes: +{% for volume in node.volumes %} + - {{ volume }} +{% endfor %} +{% if node.extra_hosts | length > 0 %} + extra_hosts: +{% for host in node.extra_hosts %} + - {{ host }} +{% endfor %} +{% endif %} + ports: +{% for port in node.ports %} + - {{ port }} +{% endfor %} + environment: +{% for env in node.environment %} + {{ env.key }}: "{{ env.value }}" +{% endfor %} + restart: on-failure + +{% endfor %}{% for node in executors %} + {{ node.name }}: + image: {{ node.image }} +{% if node.platform %} platform: {{ node.platform }} +{% endif %} entrypoint: {{ node.entrypoint }} + volumes: +{% for volume in node.volumes %} + - {{ volume }} +{% endfor %} +{% if node.extra_hosts | length > 0 %} + extra_hosts: +{% for host in node.extra_hosts %} + - {{ host }} +{% endfor %} +{% endif %} + ports: +{% for port in node.ports %} + - {{ port }} +{% endfor %} + environment: +{% for env in node.environment %} + {{ env.key }}: "{{ env.value }}" +{% endfor %} + restart: on-failure + +{% endfor %} diff --git a/testing-framework/runners/compose/src/cfgsync.rs b/testing-framework/runners/compose/src/cfgsync.rs new file mode 100644 index 0000000..5811703 --- /dev/null +++ b/testing-framework/runners/compose/src/cfgsync.rs @@ -0,0 +1,77 @@ +use std::{net::Ipv4Addr, path::Path, sync::Arc}; + +use anyhow::Context as _; +use axum::serve; +use cfgsync::{ + repo::ConfigRepo, + server::{CfgSyncConfig as ServerCfgSyncConfig, cfgsync_app}, +}; +use testing_framework_core::{ + scenario::cfgsync::{apply_topology_overrides, load_cfgsync_template, write_cfgsync_template}, + topology::GeneratedTopology, +}; +use tokio::{net::TcpListener, sync::oneshot, 
task::JoinHandle};
+
+#[derive(Debug)]
+pub struct CfgsyncServerHandle {
+    shutdown: Option<oneshot::Sender<()>>,
+    pub join: JoinHandle<()>,
+}
+
+impl CfgsyncServerHandle {
+    pub fn shutdown(&mut self) {
+        if let Some(tx) = self.shutdown.take() {
+            let _ = tx.send(());
+        }
+        self.join.abort();
+    }
+}
+
+pub fn update_cfgsync_config(
+    path: &Path,
+    topology: &GeneratedTopology,
+    use_kzg_mount: bool,
+) -> anyhow::Result<()> {
+    let mut cfg = load_cfgsync_template(path)?;
+    apply_topology_overrides(&mut cfg, topology, use_kzg_mount);
+    write_cfgsync_template(path, &cfg)?;
+    Ok(())
+}
+
+pub async fn start_cfgsync_server(
+    cfgsync_path: &Path,
+    port: u16,
+) -> anyhow::Result<CfgsyncServerHandle> {
+    let cfg_path = cfgsync_path.to_path_buf();
+    let config = ServerCfgSyncConfig::load_from_file(&cfg_path)
+        .map_err(|err| anyhow::anyhow!("loading cfgsync config: {err}"))?;
+    let repo: Arc<ConfigRepo> = config.into();
+
+    let listener = TcpListener::bind((Ipv4Addr::UNSPECIFIED, port))
+        .await
+        .context("binding cfgsync listener")?;
+
+    let cfgsync_router = cfgsync_app(repo);
+    let (shutdown_tx, shutdown_rx) = oneshot::channel();
+    let (ready_tx, ready_rx) = oneshot::channel();
+
+    let join = tokio::spawn(async move {
+        let server =
+            serve(listener, cfgsync_router.into_make_service()).with_graceful_shutdown(async {
+                let _ = shutdown_rx.await;
+            });
+        let _ = ready_tx.send(());
+        if let Err(err) = server.await {
+            eprintln!("[compose-runner] cfgsync server error: {err}");
+        }
+    });
+
+    ready_rx
+        .await
+        .context("waiting for cfgsync server to become ready")?;
+
+    Ok(CfgsyncServerHandle {
+        shutdown: Some(shutdown_tx),
+        join,
+    })
+}
diff --git a/testing-framework/runners/compose/src/cleanup.rs b/testing-framework/runners/compose/src/cleanup.rs
new file mode 100644
index 0000000..588b342
--- /dev/null
+++ b/testing-framework/runners/compose/src/cleanup.rs
@@ -0,0 +1,70 @@
+use std::{env, path::PathBuf};
+
+use testing_framework_core::scenario::CleanupGuard;
+
+use crate::{cfgsync::CfgsyncServerHandle, compose::compose_down, workspace::ComposeWorkspace};
+
+pub struct RunnerCleanup {
+    pub compose_file: PathBuf,
+    pub project_name: String,
+    pub root: PathBuf,
+    workspace: Option<ComposeWorkspace>,
+    cfgsync: Option<CfgsyncServerHandle>,
+}
+
+impl RunnerCleanup {
+    pub fn new(
+        compose_file: PathBuf,
+        project_name: String,
+        root: PathBuf,
+        workspace: ComposeWorkspace,
+        cfgsync: Option<CfgsyncServerHandle>,
+    ) -> Self {
+        debug_assert!(
+            !compose_file.as_os_str().is_empty() && !project_name.is_empty(),
+            "compose cleanup should receive valid identifiers"
+        );
+        Self {
+            compose_file,
+            project_name,
+            root,
+            workspace: Some(workspace),
+            cfgsync,
+        }
+    }
+
+    fn teardown_compose(&self) {
+        if let Err(err) = compose_down(&self.compose_file, &self.project_name, &self.root) {
+            eprintln!("[compose-runner] docker compose down failed: {err}");
+        }
+    }
+}
+
+impl CleanupGuard for RunnerCleanup {
+    fn cleanup(mut self: Box<Self>) {
+        let preserve = env::var("COMPOSE_RUNNER_PRESERVE").is_ok()
+            || env::var("TESTNET_RUNNER_PRESERVE").is_ok();
+        if preserve {
+            if let Some(mut handle) = self.cfgsync.take() {
+                handle.shutdown();
+            }
+
+            if let Some(workspace) = self.workspace.take() {
+                let keep = workspace.into_inner().keep();
+                eprintln!(
+                    "[compose-runner] preserving docker state at {}",
+                    keep.display()
+                );
+            }
+
+            eprintln!("[compose-runner] compose preserve flag set; skipping docker compose down");
+            return;
+        }
+
+        self.teardown_compose();
+
+        if let Some(mut handle) = self.cfgsync.take() {
+            handle.shutdown();
+        }
+    }
+}
diff --git a/testing-framework/runners/compose/src/compose.rs
b/testing-framework/runners/compose/src/compose.rs new file mode 100644 index 0000000..b676571 --- /dev/null +++ b/testing-framework/runners/compose/src/compose.rs @@ -0,0 +1,612 @@ +use std::{ + env, fs, io, + path::{Path, PathBuf}, + process, + time::Duration, +}; + +use anyhow::Context as _; +use serde::Serialize; +use tera::Context as TeraContext; +use testing_framework_core::{ + adjust_timeout, + topology::{GeneratedNodeConfig, GeneratedTopology}, +}; +use tokio::{process::Command, time::timeout}; + +const COMPOSE_UP_TIMEOUT: Duration = Duration::from_secs(120); +const TEMPLATE_RELATIVE_PATH: &str = + "testing-framework/runners/compose/assets/docker-compose.yml.tera"; + +#[derive(Debug, thiserror::Error)] +pub enum ComposeCommandError { + #[error("{command} exited with status {status}")] + Failed { + command: String, + status: process::ExitStatus, + }, + #[error("failed to spawn {command}: {source}")] + Spawn { + command: String, + #[source] + source: io::Error, + }, + #[error("{command} timed out after {timeout:?}")] + Timeout { command: String, timeout: Duration }, +} + +pub async fn compose_up( + compose_path: &Path, + project_name: &str, + root: &Path, +) -> Result<(), ComposeCommandError> { + let mut cmd = Command::new("docker"); + cmd.arg("compose") + .arg("-f") + .arg(compose_path) + .arg("-p") + .arg(project_name) + .arg("up") + .arg("-d") + .current_dir(root); + + run_compose_command(cmd, adjust_timeout(COMPOSE_UP_TIMEOUT), "docker compose up").await +} + +pub fn compose_down( + compose_path: &Path, + project_name: &str, + root: &Path, +) -> Result<(), ComposeCommandError> { + let description = "docker compose down".to_owned(); + let status = process::Command::new("docker") + .arg("compose") + .arg("-f") + .arg(compose_path) + .arg("-p") + .arg(project_name) + .arg("down") + .arg("--volumes") + .current_dir(root) + .status() + .map_err(|source| ComposeCommandError::Spawn { + command: description.clone(), + source, + })?; + + if status.success() { + Ok(()) + } else { + Err(ComposeCommandError::Failed { + command: description, + status, + }) + } +} + +#[derive(Debug, thiserror::Error)] +pub enum TemplateError { + #[error("failed to resolve repository root for compose template: {source}")] + RepositoryRoot { + #[source] + source: anyhow::Error, + }, + #[error("failed to read compose template at {path}: {source}")] + Read { + path: PathBuf, + #[source] + source: io::Error, + }, + #[error("failed to serialise compose descriptor for templating: {source}")] + Serialize { + #[source] + source: tera::Error, + }, + #[error("failed to render compose template at {path}: {source}")] + Render { + path: PathBuf, + #[source] + source: tera::Error, + }, + #[error("failed to write compose file at {path}: {source}")] + Write { + path: PathBuf, + #[source] + source: io::Error, + }, +} + +#[derive(Debug, thiserror::Error)] +pub enum DescriptorBuildError { + #[error("cfgsync port is not configured for compose descriptor")] + MissingCfgsyncPort, + #[error("prometheus port is not configured for compose descriptor")] + MissingPrometheusPort, +} + +#[derive(Clone, Debug, Serialize)] +pub struct ComposeDescriptor { + prometheus: PrometheusTemplate, + validators: Vec, + executors: Vec, +} + +impl ComposeDescriptor { + #[must_use] + pub const fn builder(topology: &GeneratedTopology) -> ComposeDescriptorBuilder<'_> { + ComposeDescriptorBuilder::new(topology) + } + + #[cfg(test)] + fn validators(&self) -> &[NodeDescriptor] { + &self.validators + } + + #[cfg(test)] + fn executors(&self) -> &[NodeDescriptor] 
{
+        &self.executors
+    }
+}
+
+pub struct ComposeDescriptorBuilder<'a> {
+    topology: &'a GeneratedTopology,
+    use_kzg_mount: bool,
+    cfgsync_port: Option<u16>,
+    prometheus_port: Option<u16>,
+}
+
+impl<'a> ComposeDescriptorBuilder<'a> {
+    const fn new(topology: &'a GeneratedTopology) -> Self {
+        Self {
+            topology,
+            use_kzg_mount: false,
+            cfgsync_port: None,
+            prometheus_port: None,
+        }
+    }
+
+    #[must_use]
+    pub const fn with_kzg_mount(mut self, enabled: bool) -> Self {
+        self.use_kzg_mount = enabled;
+        self
+    }
+
+    #[must_use]
+    pub const fn with_cfgsync_port(mut self, port: u16) -> Self {
+        self.cfgsync_port = Some(port);
+        self
+    }
+
+    #[must_use]
+    pub const fn with_prometheus_port(mut self, port: u16) -> Self {
+        self.prometheus_port = Some(port);
+        self
+    }
+
+    pub fn build(self) -> Result<ComposeDescriptor, DescriptorBuildError> {
+        let cfgsync_port = self
+            .cfgsync_port
+            .ok_or(DescriptorBuildError::MissingCfgsyncPort)?;
+        let prometheus_host_port = self
+            .prometheus_port
+            .ok_or(DescriptorBuildError::MissingPrometheusPort)?;
+
+        let (default_image, default_platform) = resolve_image();
+        let image = default_image;
+        let platform = default_platform;
+
+        let validators = build_nodes(
+            self.topology.validators(),
+            ComposeNodeKind::Validator,
+            &image,
+            platform.as_deref(),
+            self.use_kzg_mount,
+            cfgsync_port,
+        );
+
+        let executors = build_nodes(
+            self.topology.executors(),
+            ComposeNodeKind::Executor,
+            &image,
+            platform.as_deref(),
+            self.use_kzg_mount,
+            cfgsync_port,
+        );
+
+        Ok(ComposeDescriptor {
+            prometheus: PrometheusTemplate::new(prometheus_host_port),
+            validators,
+            executors,
+        })
+    }
+}
+
+#[derive(Clone, Debug, Serialize)]
+pub struct PrometheusTemplate {
+    host_port: String,
+}
+
+impl PrometheusTemplate {
+    fn new(port: u16) -> Self {
+        Self {
+            host_port: format!("127.0.0.1:{port}:9090"),
+        }
+    }
+}
+
+#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
+pub struct EnvEntry {
+    key: String,
+    value: String,
+}
+
+impl EnvEntry {
+    fn new(key: impl Into<String>, value: impl Into<String>) -> Self {
+        Self {
+            key: key.into(),
+            value: value.into(),
+        }
+    }
+
+    #[cfg(test)]
+    fn key(&self) -> &str {
+        &self.key
+    }
+
+    #[cfg(test)]
+    fn value(&self) -> &str {
+        &self.value
+    }
+}
+
+#[derive(Clone, Debug, Serialize)]
+pub struct NodeDescriptor {
+    name: String,
+    image: String,
+    entrypoint: String,
+    volumes: Vec<String>,
+    extra_hosts: Vec<String>,
+    ports: Vec<String>,
+    environment: Vec<EnvEntry>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    platform: Option<String>,
+}
+
+#[derive(Clone, Debug)]
+pub struct NodeHostPorts {
+    pub api: u16,
+    pub testing: u16,
+}
+
+#[derive(Clone, Debug)]
+pub struct HostPortMapping {
+    pub validators: Vec<NodeHostPorts>,
+    pub executors: Vec<NodeHostPorts>,
+}
+
+impl HostPortMapping {
+    pub fn validator_api_ports(&self) -> Vec<u16> {
+        self.validators.iter().map(|ports| ports.api).collect()
+    }
+
+    pub fn executor_api_ports(&self) -> Vec<u16> {
+        self.executors.iter().map(|ports| ports.api).collect()
+    }
+}
+
+impl NodeDescriptor {
+    fn from_node(
+        kind: ComposeNodeKind,
+        index: usize,
+        node: &GeneratedNodeConfig,
+        image: &str,
+        platform: Option<&str>,
+        use_kzg_mount: bool,
+        cfgsync_port: u16,
+    ) -> Self {
+        let mut environment = base_environment(cfgsync_port);
+        let identifier = kind.instance_name(index);
+        environment.extend([
+            EnvEntry::new(
+                "CFG_NETWORK_PORT",
+                node.general.network_config.backend.inner.port.to_string(),
+            ),
+            EnvEntry::new("CFG_DA_PORT", node.da_port.to_string()),
+            EnvEntry::new("CFG_BLEND_PORT", node.blend_port.to_string()),
+            EnvEntry::new(
+                "CFG_API_PORT",
+                node.general.api_config.address.port().to_string(),
+            ),
+
EnvEntry::new( + "CFG_TESTING_HTTP_PORT", + node.general + .api_config + .testing_http_address + .port() + .to_string(), + ), + EnvEntry::new("CFG_HOST_IDENTIFIER", identifier), + ]); + + let ports = vec![ + node.general.api_config.address.port().to_string(), + node.general + .api_config + .testing_http_address + .port() + .to_string(), + ]; + + Self { + name: kind.instance_name(index), + image: image.to_owned(), + entrypoint: kind.entrypoint().to_owned(), + volumes: base_volumes(use_kzg_mount), + extra_hosts: default_extra_hosts(), + ports, + environment, + platform: platform.map(ToOwned::to_owned), + } + } + + #[cfg(test)] + fn ports(&self) -> &[String] { + &self.ports + } + + #[cfg(test)] + fn environment(&self) -> &[EnvEntry] { + &self.environment + } +} + +pub fn write_compose_file( + descriptor: &ComposeDescriptor, + compose_path: &Path, +) -> Result<(), TemplateError> { + TemplateSource::load()?.write(descriptor, compose_path) +} + +pub async fn dump_compose_logs(compose_file: &Path, project: &str, root: &Path) { + let mut cmd = Command::new("docker"); + cmd.arg("compose") + .arg("-f") + .arg(compose_file) + .arg("-p") + .arg(project) + .arg("logs") + .arg("--no-color") + .current_dir(root); + + match cmd.output().await { + Ok(output) => { + if !output.stdout.is_empty() { + eprintln!( + "[compose-runner] docker compose logs:\n{}", + String::from_utf8_lossy(&output.stdout) + ); + } + if !output.stderr.is_empty() { + eprintln!( + "[compose-runner] docker compose errors:\n{}", + String::from_utf8_lossy(&output.stderr) + ); + } + } + Err(err) => { + eprintln!("[compose-runner] failed to collect docker compose logs: {err}"); + } + } +} + +struct TemplateSource { + path: PathBuf, + contents: String, +} + +impl TemplateSource { + fn load() -> Result { + let repo_root = + repository_root().map_err(|source| TemplateError::RepositoryRoot { source })?; + let path = repo_root.join(TEMPLATE_RELATIVE_PATH); + let contents = fs::read_to_string(&path).map_err(|source| TemplateError::Read { + path: path.clone(), + source, + })?; + + Ok(Self { path, contents }) + } + + fn render(&self, descriptor: &ComposeDescriptor) -> Result { + let context = TeraContext::from_serialize(descriptor) + .map_err(|source| TemplateError::Serialize { source })?; + + tera::Tera::one_off(&self.contents, &context, false).map_err(|source| { + TemplateError::Render { + path: self.path.clone(), + source, + } + }) + } + + fn write(&self, descriptor: &ComposeDescriptor, output: &Path) -> Result<(), TemplateError> { + let rendered = self.render(descriptor)?; + fs::write(output, rendered).map_err(|source| TemplateError::Write { + path: output.to_path_buf(), + source, + }) + } +} + +pub fn repository_root() -> anyhow::Result { + env::var("CARGO_WORKSPACE_DIR") + .map(PathBuf::from) + .or_else(|_| { + Path::new(env!("CARGO_MANIFEST_DIR")) + .parent() + .and_then(Path::parent) + .and_then(Path::parent) + .map(PathBuf::from) + .context("resolving repository root from manifest dir") + }) +} + +#[derive(Clone, Copy)] +enum ComposeNodeKind { + Validator, + Executor, +} + +impl ComposeNodeKind { + fn instance_name(self, index: usize) -> String { + match self { + Self::Validator => format!("validator-{index}"), + Self::Executor => format!("executor-{index}"), + } + } + + const fn entrypoint(self) -> &'static str { + match self { + Self::Validator => "/etc/nomos/scripts/run_nomos_node.sh", + Self::Executor => "/etc/nomos/scripts/run_nomos_executor.sh", + } + } +} + +fn build_nodes( + nodes: &[GeneratedNodeConfig], + kind: ComposeNodeKind, + 
image: &str,
+    platform: Option<&str>,
+    use_kzg_mount: bool,
+    cfgsync_port: u16,
+) -> Vec<NodeDescriptor> {
+    nodes
+        .iter()
+        .enumerate()
+        .map(|(index, node)| {
+            NodeDescriptor::from_node(
+                kind,
+                index,
+                node,
+                image,
+                platform,
+                use_kzg_mount,
+                cfgsync_port,
+            )
+        })
+        .collect()
+}
+
+fn base_environment(cfgsync_port: u16) -> Vec<EnvEntry> {
+    vec![
+        EnvEntry::new("POL_PROOF_DEV_MODE", "true"),
+        EnvEntry::new(
+            "CFG_SERVER_ADDR",
+            format!("http://host.docker.internal:{cfgsync_port}"),
+        ),
+        EnvEntry::new("OTEL_METRIC_EXPORT_INTERVAL", "5000"),
+    ]
+}
+
+fn base_volumes(use_kzg_mount: bool) -> Vec<String> {
+    let mut volumes = vec!["./testnet:/etc/nomos".into()];
+    if use_kzg_mount {
+        volumes.push("./kzgrs_test_params:/kzgrs_test_params:z".into());
+    }
+    volumes
+}
+
+fn default_extra_hosts() -> Vec<String> {
+    host_gateway_entry().into_iter().collect()
+}
+
+pub fn resolve_image() -> (String, Option<String>) {
+    let image =
+        env::var("NOMOS_TESTNET_IMAGE").unwrap_or_else(|_| String::from("nomos-testnet:local"));
+    let platform = (image == "ghcr.io/logos-co/nomos:testnet").then(|| "linux/amd64".to_owned());
+    (image, platform)
+}
+
+fn host_gateway_entry() -> Option<String> {
+    if let Ok(value) = env::var("COMPOSE_RUNNER_HOST_GATEWAY") {
+        if value.eq_ignore_ascii_case("disable") || value.is_empty() {
+            return None;
+        }
+        return Some(value);
+    }
+
+    if cfg!(any(target_os = "macos", target_os = "windows")) {
+        return Some("host.docker.internal:host-gateway".into());
+    }
+
+    env::var("DOCKER_HOST_GATEWAY")
+        .ok()
+        .filter(|value| !value.is_empty())
+        .map(|gateway| format!("host.docker.internal:{gateway}"))
+}
+
+async fn run_compose_command(
+    mut command: Command,
+    timeout_duration: Duration,
+    description: &str,
+) -> Result<(), ComposeCommandError> {
+    match timeout(timeout_duration, command.status()).await {
+        Ok(Ok(status)) if status.success() => Ok(()),
+        Ok(Ok(status)) => Err(ComposeCommandError::Failed {
+            command: description.to_owned(),
+            status,
+        }),
+        Ok(Err(err)) => Err(ComposeCommandError::Spawn {
+            command: description.to_owned(),
+            source: err,
+        }),
+        Err(_) => Err(ComposeCommandError::Timeout {
+            command: description.to_owned(),
+            timeout: timeout_duration,
+        }),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use testing_framework_core::topology::{TopologyBuilder, TopologyConfig};
+
+    use super::*;
+
+    #[test]
+    fn descriptor_matches_topology_counts() {
+        let topology = TopologyBuilder::new(TopologyConfig::with_node_numbers(2, 1)).build();
+        let descriptor = ComposeDescriptor::builder(&topology)
+            .with_cfgsync_port(4400)
+            .with_prometheus_port(9090)
+            .build()
+            .expect("descriptor");
+
+        assert_eq!(descriptor.validators().len(), topology.validators().len());
+        assert_eq!(descriptor.executors().len(), topology.executors().len());
+    }
+
+    #[test]
+    fn descriptor_includes_expected_env_and_ports() {
+        let topology = TopologyBuilder::new(TopologyConfig::with_node_numbers(1, 1)).build();
+        let cfgsync_port = 4555;
+        let descriptor = ComposeDescriptor::builder(&topology)
+            .with_cfgsync_port(cfgsync_port)
+            .with_prometheus_port(9090)
+            .build()
+            .expect("descriptor");
+
+        let validator = &descriptor.validators()[0];
+        assert!(
+            validator
+                .environment()
+                .iter()
+                .any(|entry| entry.key() == "CFG_SERVER_ADDR"
+                    && entry.value() == format!("http://host.docker.internal:{cfgsync_port}"))
+        );
+
+        let api_container = topology.validators()[0].general.api_config.address.port();
+        assert!(validator.ports().contains(&api_container.to_string()));
+    }
+}
diff --git a/testing-framework/runners/compose/src/lib.rs
b/testing-framework/runners/compose/src/lib.rs new file mode 100644 index 0000000..9abe650 --- /dev/null +++ b/testing-framework/runners/compose/src/lib.rs @@ -0,0 +1,9 @@ +mod cfgsync; +mod cleanup; +mod compose; +mod runner; +mod wait; +mod workspace; + +pub use runner::{ComposeRunner, ComposeRunnerError}; +pub use workspace::ComposeWorkspace; diff --git a/testing-framework/runners/compose/src/runner.rs b/testing-framework/runners/compose/src/runner.rs new file mode 100644 index 0000000..a98d5cc --- /dev/null +++ b/testing-framework/runners/compose/src/runner.rs @@ -0,0 +1,1218 @@ +use std::{ + env, + net::{Ipv4Addr, TcpListener as StdTcpListener}, + path::{Path, PathBuf}, + process::{Command as StdCommand, Stdio}, + sync::Arc, + time::Duration, +}; + +use anyhow::{Context as _, anyhow}; +use async_trait::async_trait; +use reqwest::Url; +use testing_framework_core::{ + nodes::ApiClient, + scenario::{ + BlockFeed, BlockFeedTask, CleanupGuard, Deployer, DynError, Metrics, MetricsError, + NodeClients, NodeControlHandle, RequiresNodeControl, RunContext, Runner, Scenario, + http_probe::{HttpReadinessError, NodeRole as HttpNodeRole}, + spawn_block_feed, + }, + topology::{GeneratedTopology, NodeRole as TopologyNodeRole, ReadinessError}, +}; +use tokio::{ + process::Command, + time::{sleep, timeout}, +}; +use tracing::{error, info, warn}; +use url::ParseError; +use uuid::Uuid; + +use crate::{ + cfgsync::{CfgsyncServerHandle, start_cfgsync_server, update_cfgsync_config}, + cleanup::RunnerCleanup, + compose::{ + ComposeCommandError, ComposeDescriptor, DescriptorBuildError, HostPortMapping, + NodeHostPorts, TemplateError, compose_up, dump_compose_logs, repository_root, + resolve_image, write_compose_file, + }, + wait::{wait_for_executors, wait_for_validators}, + workspace::ComposeWorkspace, +}; + +pub struct ComposeRunner { + readiness_checks: bool, +} + +impl Default for ComposeRunner { + fn default() -> Self { + Self::new() + } +} + +impl ComposeRunner { + #[must_use] + pub const fn new() -> Self { + Self { + readiness_checks: true, + } + } + + #[must_use] + pub const fn with_readiness(mut self, enabled: bool) -> Self { + self.readiness_checks = enabled; + self + } +} + +const PROMETHEUS_PORT_ENV: &str = "TEST_FRAMEWORK_PROMETHEUS_PORT"; +const DEFAULT_PROMETHEUS_PORT: u16 = 9090; +const IMAGE_BUILD_TIMEOUT: Duration = Duration::from_secs(600); +const BLOCK_FEED_MAX_ATTEMPTS: usize = 5; +const BLOCK_FEED_RETRY_DELAY: Duration = Duration::from_secs(1); + +#[derive(Debug, thiserror::Error)] +pub enum ComposeRunnerError { + #[error( + "compose runner requires at least one validator (validators={validators}, executors={executors})" + )] + MissingValidator { validators: usize, executors: usize }, + #[error("docker does not appear to be available on this host")] + DockerUnavailable, + #[error("failed to resolve host port for {service} container port {container_port}: {source}")] + PortDiscovery { + service: String, + container_port: u16, + #[source] + source: anyhow::Error, + }, + #[error(transparent)] + Workspace(#[from] WorkspaceError), + #[error(transparent)] + Config(#[from] ConfigError), + #[error(transparent)] + Compose(#[from] ComposeCommandError), + #[error(transparent)] + Readiness(#[from] StackReadinessError), + #[error(transparent)] + NodeClients(#[from] NodeClientError), + #[error(transparent)] + Telemetry(#[from] MetricsError), + #[error("block feed requires at least one validator client")] + BlockFeedMissing, + #[error("failed to start block feed: {source}")] + BlockFeed { + #[source] + 
source: anyhow::Error, + }, + #[error( + "docker image '{image}' is not available; set NOMOS_TESTNET_IMAGE or build the image manually" + )] + MissingImage { image: String }, + #[error("failed to prepare docker image: {source}")] + ImageBuild { + #[source] + source: anyhow::Error, + }, +} + +#[derive(Debug, thiserror::Error)] +#[error("failed to prepare compose workspace: {source}")] +pub struct WorkspaceError { + #[source] + source: anyhow::Error, +} + +impl WorkspaceError { + const fn new(source: anyhow::Error) -> Self { + Self { source } + } +} + +#[derive(Debug, thiserror::Error)] +pub enum ConfigError { + #[error("failed to update cfgsync configuration at {path}: {source}")] + Cfgsync { + path: PathBuf, + #[source] + source: anyhow::Error, + }, + #[error("failed to allocate cfgsync port: {source}")] + Port { + #[source] + source: anyhow::Error, + }, + #[error("failed to start cfgsync server on port {port}: {source}")] + CfgsyncStart { + port: u16, + #[source] + source: anyhow::Error, + }, + #[error("failed to build compose descriptor: {source}")] + Descriptor { + #[source] + source: DescriptorBuildError, + }, + #[error("failed to render compose template: {source}")] + Template { + #[source] + source: TemplateError, + }, +} + +#[derive(Debug, thiserror::Error)] +pub enum StackReadinessError { + #[error(transparent)] + Http(#[from] HttpReadinessError), + #[error("failed to build readiness URL for {role} port {port}: {source}")] + Endpoint { + role: HttpNodeRole, + port: u16, + #[source] + source: ParseError, + }, + #[error("remote readiness probe failed: {source}")] + Remote { + #[source] + source: ReadinessError, + }, +} + +#[derive(Debug, thiserror::Error)] +pub enum NodeClientError { + #[error("failed to build {endpoint} client URL for {role} port {port}: {source}")] + Endpoint { + role: HttpNodeRole, + endpoint: &'static str, + port: u16, + #[source] + source: ParseError, + }, +} + +#[async_trait] +impl Deployer for ComposeRunner +where + Caps: RequiresNodeControl + Send + Sync, +{ + type Error = ComposeRunnerError; + + async fn deploy(&self, scenario: &Scenario) -> Result { + ensure_docker_available()?; + let descriptors = scenario.topology().clone(); + ensure_supported_topology(&descriptors)?; + + info!( + validators = descriptors.validators().len(), + executors = descriptors.executors().len(), + "starting compose deployment" + ); + + let prometheus_port = desired_prometheus_port(); + let mut environment = prepare_environment(&descriptors, prometheus_port).await?; + + let host_ports = match discover_host_ports(&environment, &descriptors).await { + Ok(mapping) => mapping, + Err(err) => { + environment + .fail("failed to determine container host ports") + .await; + return Err(err); + } + }; + + if self.readiness_checks { + info!("waiting for validator HTTP endpoints"); + if let Err(err) = + ensure_validators_ready_with_ports(&host_ports.validator_api_ports()).await + { + environment.fail("validator readiness failed").await; + return Err(err.into()); + } + + info!("waiting for executor HTTP endpoints"); + if let Err(err) = + ensure_executors_ready_with_ports(&host_ports.executor_api_ports()).await + { + environment.fail("executor readiness failed").await; + return Err(err.into()); + } + + info!("waiting for remote service readiness"); + if let Err(err) = ensure_remote_readiness_with_ports(&descriptors, &host_ports).await { + environment.fail("remote readiness probe failed").await; + return Err(err.into()); + } + } else { + info!("readiness checks disabled; giving the stack a short 
grace period");
+            sleep(Duration::from_secs(5)).await;
+        }
+
+        info!("compose stack ready; building node clients");
+        let node_clients = match build_node_clients_with_ports(&descriptors, &host_ports) {
+            Ok(clients) => clients,
+            Err(err) => {
+                environment
+                    .fail("failed to construct node api clients")
+                    .await;
+                return Err(err.into());
+            }
+        };
+        let telemetry = metrics_handle_from_port(prometheus_port)?;
+        let node_control = Caps::REQUIRED.then(|| {
+            Arc::new(ComposeNodeControl {
+                compose_file: environment.compose_path().to_path_buf(),
+                project_name: environment.project_name().to_owned(),
+            }) as Arc<dyn NodeControlHandle>
+        });
+        let (block_feed, block_feed_guard) = match spawn_block_feed_with_retry(&node_clients).await
+        {
+            Ok(pair) => pair,
+            Err(err) => {
+                environment.fail("failed to initialize block feed").await;
+                return Err(err);
+            }
+        };
+        let cleanup_guard: Box<dyn CleanupGuard> = Box::new(ComposeCleanupGuard::new(
+            environment.into_cleanup(),
+            block_feed_guard,
+        ));
+        let context = RunContext::new(
+            descriptors,
+            None,
+            node_clients,
+            scenario.duration(),
+            telemetry,
+            block_feed,
+            node_control,
+        );
+
+        Ok(Runner::new(context, Some(cleanup_guard)))
+    }
+}
+
+fn desired_prometheus_port() -> u16 {
+    env::var(PROMETHEUS_PORT_ENV)
+        .ok()
+        .and_then(|raw| raw.parse::<u16>().ok())
+        .unwrap_or_else(|| allocate_prometheus_port().unwrap_or(DEFAULT_PROMETHEUS_PORT))
+}
+
+fn allocate_prometheus_port() -> Option<u16> {
+    let try_bind = |port| StdTcpListener::bind((Ipv4Addr::LOCALHOST, port));
+    let listener = try_bind(DEFAULT_PROMETHEUS_PORT)
+        .or_else(|_| try_bind(0))
+        .ok()?;
+    listener.local_addr().ok().map(|addr| addr.port())
+}
+
+fn build_node_clients_with_ports(
+    descriptors: &GeneratedTopology,
+    mapping: &HostPortMapping,
+) -> Result<NodeClients, NodeClientError> {
+    let validators = descriptors
+        .validators()
+        .iter()
+        .zip(mapping.validators.iter())
+        .map(|(node, ports)| api_client_from_host_ports(to_http_role(node.role()), ports))
+        .collect::<Result<Vec<_>, _>>()?;
+    let executors = descriptors
+        .executors()
+        .iter()
+        .zip(mapping.executors.iter())
+        .map(|(node, ports)| api_client_from_host_ports(to_http_role(node.role()), ports))
+        .collect::<Result<Vec<_>, _>>()?;
+
+    Ok(NodeClients::new(validators, executors))
+}
+
+fn api_client_from_host_ports(
+    role: HttpNodeRole,
+    ports: &NodeHostPorts,
+) -> Result<ApiClient, NodeClientError> {
+    let base_url = localhost_url(ports.api).map_err(|source| NodeClientError::Endpoint {
+        role,
+        endpoint: "api",
+        port: ports.api,
+        source,
+    })?;
+
+    let testing_url =
+        Some(
+            localhost_url(ports.testing).map_err(|source| NodeClientError::Endpoint {
+                role,
+                endpoint: "testing",
+                port: ports.testing,
+                source,
+            })?,
+        );
+
+    Ok(ApiClient::from_urls(base_url, testing_url))
+}
+
+const fn to_http_role(role: TopologyNodeRole) -> HttpNodeRole {
+    match role {
+        TopologyNodeRole::Validator => HttpNodeRole::Validator,
+        TopologyNodeRole::Executor => HttpNodeRole::Executor,
+    }
+}
+
+async fn spawn_block_feed_with(
+    node_clients: &NodeClients,
+) -> Result<(BlockFeed, BlockFeedTask), ComposeRunnerError> {
+    let block_source_client = node_clients
+        .random_validator()
+        .cloned()
+        .ok_or(ComposeRunnerError::BlockFeedMissing)?;
+
+    spawn_block_feed(block_source_client)
+        .await
+        .map_err(|source| ComposeRunnerError::BlockFeed { source })
+}
+
+async fn spawn_block_feed_with_retry(
+    node_clients: &NodeClients,
+) -> Result<(BlockFeed, BlockFeedTask), ComposeRunnerError> {
+    let mut last_err = None;
+    for attempt in 1..=BLOCK_FEED_MAX_ATTEMPTS {
+        match spawn_block_feed_with(node_clients).await {
+            Ok(result) => return Ok(result),
+            Err(err) => {
+                last_err = Some(err);
+                if attempt < BLOCK_FEED_MAX_ATTEMPTS {
+                    warn!(attempt, "block feed initialization failed; retrying");
+                    sleep(BLOCK_FEED_RETRY_DELAY).await;
+                }
+            }
+        }
+    }
+
+    Err(last_err.expect("block feed retry should capture an error"))
+}
+
+async fn restart_compose_service(
+    compose_file: &Path,
+    project_name: &str,
+    service: &str,
+) -> Result<(), ComposeRunnerError> {
+    let mut command = Command::new("docker");
+    command
+        .arg("compose")
+        .arg("-f")
+        .arg(compose_file)
+        .arg("-p")
+        .arg(project_name)
+        .arg("restart")
+        .arg(service);
+
+    let description = "docker compose restart";
+    run_docker_command(command, description, Duration::from_secs(120)).await
+}
+
+struct ComposeNodeControl {
+    compose_file: PathBuf,
+    project_name: String,
+}
+
+#[async_trait]
+impl NodeControlHandle for ComposeNodeControl {
+    async fn restart_validator(&self, index: usize) -> Result<(), DynError> {
+        restart_compose_service(
+            &self.compose_file,
+            &self.project_name,
+            &format!("validator-{index}"),
+        )
+        .await
+        .map_err(|err| format!("validator restart failed: {err}").into())
+    }
+
+    async fn restart_executor(&self, index: usize) -> Result<(), DynError> {
+        restart_compose_service(
+            &self.compose_file,
+            &self.project_name,
+            &format!("executor-{index}"),
+        )
+        .await
+        .map_err(|err| format!("executor restart failed: {err}").into())
+    }
+}
+
+fn localhost_url(port: u16) -> Result<Url, ParseError> {
+    Url::parse(&format!("http://127.0.0.1:{port}/"))
+}
+
+async fn discover_host_ports(
+    environment: &StackEnvironment,
+    descriptors: &GeneratedTopology,
+) -> Result<HostPortMapping, ComposeRunnerError> {
+    let mut validators = Vec::new();
+    for node in descriptors.validators() {
+        let service = node_identifier(TopologyNodeRole::Validator, node.index());
+        let api = resolve_service_port(environment, &service, node.api_port()).await?;
+        let testing = resolve_service_port(environment, &service, node.testing_http_port()).await?;
+        validators.push(NodeHostPorts { api, testing });
+    }
+
+    let mut executors = Vec::new();
+    for node in descriptors.executors() {
+        let service = node_identifier(TopologyNodeRole::Executor, node.index());
+        let api = resolve_service_port(environment, &service, node.api_port()).await?;
+        let testing = resolve_service_port(environment, &service, node.testing_http_port()).await?;
+        executors.push(NodeHostPorts { api, testing });
+    }
+
+    Ok(HostPortMapping {
+        validators,
+        executors,
+    })
+}
+
+async fn resolve_service_port(
+    environment: &StackEnvironment,
+    service: &str,
+    container_port: u16,
+) -> Result<u16, ComposeRunnerError> {
+    let mut cmd = Command::new("docker");
+    cmd.arg("compose")
+        .arg("-f")
+        .arg(environment.compose_path())
+        .arg("-p")
+        .arg(environment.project_name())
+        .arg("port")
+        .arg(service)
+        .arg(container_port.to_string())
+        .current_dir(environment.root());
+
+    let output = cmd
+        .output()
+        .await
+        .with_context(|| format!("running docker compose port {service} {container_port}"))
+        .map_err(|source| ComposeRunnerError::PortDiscovery {
+            service: service.to_owned(),
+            container_port,
+            source,
+        })?;
+
+    if !output.status.success() {
+        return Err(ComposeRunnerError::PortDiscovery {
+            service: service.to_owned(),
+            container_port,
+            source: anyhow!("docker compose port exited with {}", output.status),
+        });
+    }
+
+    let stdout = String::from_utf8_lossy(&output.stdout);
+    for line in stdout.lines() {
+        let line = line.trim();
+        if line.is_empty() {
+            continue;
+        }
+        if let Some(port_str) = line.rsplit(':').next()
+            && let Ok(port) = port_str.trim().parse::<u16>()
+        {
+            return Ok(port);
+        }
+    }
+
+    Err(ComposeRunnerError::PortDiscovery {
+        service: service.to_owned(),
+        container_port,
+        source: anyhow!("unable to parse docker compose port output: {stdout}"),
+    })
+}
+
+fn ensure_docker_available() -> Result<(), ComposeRunnerError> {
+    let available = StdCommand::new("docker")
+        .arg("info")
+        .stdout(Stdio::null())
+        .stderr(Stdio::null())
+        .status()
+        .map(|status| status.success())
+        .unwrap_or(false);
+    if available {
+        Ok(())
+    } else {
+        Err(ComposeRunnerError::DockerUnavailable)
+    }
+}
+
+fn metrics_handle_from_port(port: u16) -> Result<Metrics, MetricsError> {
+    let url = localhost_url(port)
+        .map_err(|err| MetricsError::new(format!("invalid prometheus url: {err}")))?;
+    Metrics::from_prometheus(url)
+}
+
+async fn ensure_validators_ready_with_ports(ports: &[u16]) -> Result<(), StackReadinessError> {
+    if ports.is_empty() {
+        return Ok(());
+    }
+
+    wait_for_validators(ports).await.map_err(Into::into)
+}
+
+async fn ensure_executors_ready_with_ports(ports: &[u16]) -> Result<(), StackReadinessError> {
+    if ports.is_empty() {
+        return Ok(());
+    }
+
+    wait_for_executors(ports).await.map_err(Into::into)
+}
+
+async fn ensure_remote_readiness_with_ports(
+    descriptors: &GeneratedTopology,
+    mapping: &HostPortMapping,
+) -> Result<(), StackReadinessError> {
+    let validator_urls = mapping
+        .validators
+        .iter()
+        .map(|ports| readiness_url(HttpNodeRole::Validator, ports.api))
+        .collect::<Result<Vec<_>, _>>()?;
+    let executor_urls = mapping
+        .executors
+        .iter()
+        .map(|ports| readiness_url(HttpNodeRole::Executor, ports.api))
+        .collect::<Result<Vec<_>, _>>()?;
+
+    descriptors
+        .wait_remote_readiness(&validator_urls, &executor_urls, None, None)
+        .await
+        .map_err(|source| StackReadinessError::Remote { source })
+}
+
+fn readiness_url(role: HttpNodeRole, port: u16) -> Result<Url, StackReadinessError> {
+    localhost_url(port).map_err(|source| StackReadinessError::Endpoint { role, port, source })
+}
+
+fn node_identifier(role: TopologyNodeRole, index: usize) -> String {
+    match role {
+        TopologyNodeRole::Validator => format!("validator-{index}"),
+        TopologyNodeRole::Executor => format!("executor-{index}"),
+    }
+}
+
+struct WorkspaceState {
+    workspace: ComposeWorkspace,
+    root: PathBuf,
+    cfgsync_path: PathBuf,
+    use_kzg: bool,
+}
+
+fn ensure_supported_topology(descriptors: &GeneratedTopology) -> Result<(), ComposeRunnerError> {
+    let validators = descriptors.validators().len();
+    if validators == 0 {
+        return Err(ComposeRunnerError::MissingValidator {
+            validators,
+            executors: descriptors.executors().len(),
+        });
+    }
+    Ok(())
+}
+
+async fn prepare_environment(
+    descriptors: &GeneratedTopology,
+    prometheus_port: u16,
+) -> Result<StackEnvironment, ComposeRunnerError> {
+    let workspace = prepare_workspace_logged()?;
+    update_cfgsync_logged(&workspace, descriptors)?;
+    ensure_compose_image().await?;
+
+    let (cfgsync_port, mut cfgsync_handle) = start_cfgsync_stage(&workspace).await?;
+    let compose_path =
+        render_compose_logged(&workspace, descriptors, cfgsync_port, prometheus_port)?;
+
+    let project_name = format!("nomos-compose-{}", Uuid::new_v4());
+    bring_up_stack_logged(
+        &compose_path,
+        &project_name,
+        &workspace.root,
+        &mut cfgsync_handle,
+    )
+    .await?;
+
+    Ok(StackEnvironment::from_workspace(
+        workspace,
+        compose_path,
+        project_name,
+        Some(cfgsync_handle),
+    ))
+}
+
+fn prepare_workspace_state() -> Result<WorkspaceState, WorkspaceError> {
+    let workspace = ComposeWorkspace::create().map_err(WorkspaceError::new)?;
+    let root = workspace.root_path().to_path_buf();
+    let cfgsync_path = workspace.testnet_dir().join("cfgsync.yaml");
+    let use_kzg = workspace.root_path().join("kzgrs_test_params").exists();
+
+    Ok(WorkspaceState {
+        workspace,
+        root,
+        cfgsync_path,
+        use_kzg,
+    })
+}
+
+fn prepare_workspace_logged() -> Result<WorkspaceState, ComposeRunnerError> {
+    info!("preparing compose workspace");
+    prepare_workspace_state().map_err(Into::into)
+}
+
+fn update_cfgsync_logged(
+    workspace: &WorkspaceState,
+    descriptors: &GeneratedTopology,
+) -> Result<(), ComposeRunnerError> {
+    info!("updating cfgsync configuration");
+    configure_cfgsync(workspace, descriptors).map_err(Into::into)
+}
+
+async fn start_cfgsync_stage(
+    workspace: &WorkspaceState,
+) -> Result<(u16, CfgsyncServerHandle), ComposeRunnerError> {
+    let cfgsync_port = allocate_cfgsync_port()?;
+    info!(cfgsync_port = cfgsync_port, "launching cfgsync server");
+    let handle = launch_cfgsync(&workspace.cfgsync_path, cfgsync_port).await?;
+    Ok((cfgsync_port, handle))
+}
+
+fn configure_cfgsync(
+    workspace: &WorkspaceState,
+    descriptors: &GeneratedTopology,
+) -> Result<(), ConfigError> {
+    update_cfgsync_config(&workspace.cfgsync_path, descriptors, workspace.use_kzg).map_err(
+        |source| ConfigError::Cfgsync {
+            path: workspace.cfgsync_path.clone(),
+            source,
+        },
+    )
+}
+
+fn allocate_cfgsync_port() -> Result<u16, ConfigError> {
+    let listener = StdTcpListener::bind((Ipv4Addr::LOCALHOST, 0))
+        .context("allocating cfgsync port")
+        .map_err(|source| ConfigError::Port { source })?;
+
+    let port = listener
+        .local_addr()
+        .context("reading cfgsync port")
+        .map_err(|source| ConfigError::Port { source })?
+        .port();
+    Ok(port)
+}
+
+async fn launch_cfgsync(
+    cfgsync_path: &Path,
+    port: u16,
+) -> Result<CfgsyncServerHandle, ConfigError> {
+    start_cfgsync_server(cfgsync_path, port)
+        .await
+        .map_err(|source| ConfigError::CfgsyncStart { port, source })
+}
+
+fn write_compose_artifacts(
+    workspace: &WorkspaceState,
+    descriptors: &GeneratedTopology,
+    cfgsync_port: u16,
+    prometheus_port: u16,
+) -> Result<PathBuf, ConfigError> {
+    let descriptor = ComposeDescriptor::builder(descriptors)
+        .with_kzg_mount(workspace.use_kzg)
+        .with_cfgsync_port(cfgsync_port)
+        .with_prometheus_port(prometheus_port)
+        .build()
+        .map_err(|source| ConfigError::Descriptor { source })?;
+
+    let compose_path = workspace.root.join("compose.generated.yml");
+    write_compose_file(&descriptor, &compose_path)
+        .map_err(|source| ConfigError::Template { source })?;
+    Ok(compose_path)
+}
+
+fn render_compose_logged(
+    workspace: &WorkspaceState,
+    descriptors: &GeneratedTopology,
+    cfgsync_port: u16,
+    prometheus_port: u16,
+) -> Result<PathBuf, ComposeRunnerError> {
+    info!("rendering compose file");
+    write_compose_artifacts(workspace, descriptors, cfgsync_port, prometheus_port)
+        .map_err(Into::into)
+}
+
+async fn bring_up_stack(
+    compose_path: &Path,
+    project_name: &str,
+    workspace_root: &Path,
+    cfgsync_handle: &mut CfgsyncServerHandle,
+) -> Result<(), ComposeRunnerError> {
+    if let Err(err) = compose_up(compose_path, project_name, workspace_root).await {
+        cfgsync_handle.shutdown();
+        return Err(ComposeRunnerError::Compose(err));
+    }
+    Ok(())
+}
+
+async fn ensure_compose_image() -> Result<(), ComposeRunnerError> {
+    let (image, platform) = resolve_image();
+    ensure_image_present(&image, platform.as_deref()).await
+}
+
+async fn ensure_image_present(
+    image: &str,
+    platform: Option<&str>,
+) -> Result<(), ComposeRunnerError> {
+    if docker_image_exists(image).await? {
+        return Ok(());
+    }
+
+    if image != "nomos-testnet:local" {
+        return Err(ComposeRunnerError::MissingImage {
+            image: image.to_owned(),
+        });
+    }
+
+    build_local_image(image, platform).await
+}
+
+async fn docker_image_exists(image: &str) -> Result<bool, ComposeRunnerError> {
+    let mut cmd = Command::new("docker");
+    cmd.arg("image")
+        .arg("inspect")
+        .arg(image)
+        .stdout(Stdio::null())
+        .stderr(Stdio::null());
+
+    match cmd.status().await {
+        Ok(status) => Ok(status.success()),
+        Err(source) => Err(ComposeRunnerError::Compose(ComposeCommandError::Spawn {
+            command: format!("docker image inspect {image}"),
+            source,
+        })),
+    }
+}
+
+async fn build_local_image(image: &str, platform: Option<&str>) -> Result<(), ComposeRunnerError> {
+    let repo_root =
+        repository_root().map_err(|source| ComposeRunnerError::ImageBuild { source })?;
+    let dockerfile = repo_root.join("testing-framework/runners/docker/runner.Dockerfile");
+
+    info!(image, "building compose runner docker image");
+
+    let mut cmd = Command::new("docker");
+    cmd.arg("build");
+
+    if let Some(build_platform) = select_build_platform(platform)? {
+        cmd.arg("--platform").arg(&build_platform);
+    }
+
+    let circuits_platform = env::var("COMPOSE_CIRCUITS_PLATFORM")
+        .ok()
+        .filter(|value| !value.is_empty())
+        .unwrap_or_else(|| String::from("linux-x86_64"));
+
+    cmd.arg("--build-arg")
+        .arg(format!("NOMOS_CIRCUITS_PLATFORM={circuits_platform}"));
+
+    if let Some(value) = env::var("CIRCUITS_OVERRIDE")
+        .ok()
+        .filter(|val| !val.is_empty())
+    {
+        cmd.arg("--build-arg")
+            .arg(format!("CIRCUITS_OVERRIDE={value}"));
+    }
+
+    cmd.arg("-t")
+        .arg(image)
+        .arg("-f")
+        .arg(&dockerfile)
+        .arg(&repo_root);
+
+    run_docker_command(cmd, "docker build compose image", IMAGE_BUILD_TIMEOUT).await
+}
+
+async fn run_docker_command(
+    mut command: Command,
+    description: &str,
+    timeout_duration: Duration,
+) -> Result<(), ComposeRunnerError> {
+    match timeout(timeout_duration, command.status()).await {
+        Ok(Ok(status)) if status.success() => Ok(()),
+        Ok(Ok(status)) => Err(ComposeRunnerError::Compose(ComposeCommandError::Failed {
+            command: description.to_owned(),
+            status,
+        })),
+        Ok(Err(source)) => Err(ComposeRunnerError::Compose(ComposeCommandError::Spawn {
+            command: description.to_owned(),
+            source,
+        })),
+        Err(_) => Err(ComposeRunnerError::Compose(ComposeCommandError::Timeout {
+            command: description.to_owned(),
+            timeout: timeout_duration,
+        })),
+    }
+}
+
+fn detect_docker_platform() -> Result<Option<String>, ComposeRunnerError> {
+    let output = StdCommand::new("docker")
+        .arg("info")
+        .arg("-f")
+        .arg("{{.Architecture}}")
+        .output()
+        .map_err(|source| ComposeRunnerError::ImageBuild {
+            source: source.into(),
+        })?;
+
+    if !output.status.success() {
+        return Ok(None);
+    }
+
+    let arch = String::from_utf8_lossy(&output.stdout).trim().to_owned();
+    if arch.is_empty() {
+        return Ok(None);
+    }
+
+    Ok(Some(format!("linux/{arch}")))
+}
+
+fn select_build_platform(requested: Option<&str>) -> Result<Option<String>, ComposeRunnerError> {
+    if let Some(value) = requested {
+        return Ok(Some(value.to_owned()));
+    }
+
+    detect_docker_platform()?.map_or_else(
+        || {
+            warn!("docker host architecture unavailable; letting docker choose default platform");
+            Ok(None)
+        },
+        |host_platform| Ok(Some(host_platform)),
+    )
+}
+
+async fn bring_up_stack_logged(
+    compose_path: &Path,
+    project_name: &str,
+    workspace_root: &Path,
+    cfgsync_handle: &mut CfgsyncServerHandle,
+) -> Result<(), ComposeRunnerError> {
+    info!(project = %project_name, "bringing up docker compose stack");
+
bring_up_stack(compose_path, project_name, workspace_root, cfgsync_handle).await +} + +struct StackEnvironment { + compose_path: PathBuf, + project_name: String, + root: PathBuf, + workspace: Option, + cfgsync_handle: Option, +} + +impl StackEnvironment { + fn from_workspace( + state: WorkspaceState, + compose_path: PathBuf, + project_name: String, + cfgsync_handle: Option, + ) -> Self { + let WorkspaceState { + workspace, root, .. + } = state; + + Self { + compose_path, + project_name, + root, + workspace: Some(workspace), + cfgsync_handle, + } + } + + fn compose_path(&self) -> &Path { + &self.compose_path + } + + fn project_name(&self) -> &str { + &self.project_name + } + + fn root(&self) -> &Path { + &self.root + } + + fn take_cleanup(&mut self) -> RunnerCleanup { + RunnerCleanup::new( + self.compose_path.clone(), + self.project_name.clone(), + self.root.clone(), + self.workspace + .take() + .expect("workspace must be available while cleaning up"), + self.cfgsync_handle.take(), + ) + } + + fn into_cleanup(self) -> RunnerCleanup { + RunnerCleanup::new( + self.compose_path, + self.project_name, + self.root, + self.workspace + .expect("workspace must be available while cleaning up"), + self.cfgsync_handle, + ) + } + + async fn fail(&mut self, reason: &str) { + error!( + reason = reason, + "compose stack failure; dumping docker logs" + ); + dump_compose_logs(self.compose_path(), self.project_name(), self.root()).await; + Box::new(self.take_cleanup()).cleanup(); + } +} + +struct ComposeCleanupGuard { + environment: RunnerCleanup, + block_feed: Option, +} + +impl ComposeCleanupGuard { + const fn new(environment: RunnerCleanup, block_feed: BlockFeedTask) -> Self { + Self { + environment, + block_feed: Some(block_feed), + } + } +} + +impl CleanupGuard for ComposeCleanupGuard { + fn cleanup(mut self: Box) { + if let Some(block_feed) = self.block_feed.take() { + CleanupGuard::cleanup(Box::new(block_feed)); + } + CleanupGuard::cleanup(Box::new(self.environment)); + } +} + +#[cfg(test)] +mod tests { + use std::{collections::HashMap, net::Ipv4Addr}; + + use cfgsync::config::{Host, create_node_configs}; + use groth16::Fr; + use nomos_core::{ + mantle::{GenesisTx as GenesisTxTrait, ledger::NoteId}, + sdp::{ProviderId, ServiceType}, + }; + use nomos_ledger::LedgerState; + use nomos_tracing_service::TracingSettings; + use testing_framework_core::{ + scenario::ScenarioBuilder, + topology::{ + GeneratedNodeConfig, GeneratedTopology, NodeRole as TopologyNodeRole, + configs::{consensus, da}, + }, + }; + use zksign::PublicKey; + + #[test] + fn cfgsync_prebuilt_configs_preserve_genesis() { + let scenario = ScenarioBuilder::with_node_counts(1, 1).build(); + let topology = scenario.topology().clone(); + let hosts = hosts_from_topology(&topology); + let tracing_settings = tracing_settings(&topology); + + let configs = create_node_configs( + &to_tests_consensus(&topology.config().consensus_params), + &to_tests_da(&topology.config().da_params), + &tracing_settings, + hosts, + ); + let configs_by_identifier: HashMap<_, _> = configs + .into_iter() + .map(|(host, config)| (host.identifier, config)) + .collect(); + + for node in topology.nodes() { + let identifier = identifier_for(node.role(), node.index()); + let cfgsync_config = configs_by_identifier + .get(&identifier) + .unwrap_or_else(|| panic!("missing cfgsync config for {identifier}")); + let expected_genesis = &node.general.consensus_config.genesis_tx; + let actual_genesis = &cfgsync_config.consensus_config.genesis_tx; + if 
std::env::var("PRINT_GENESIS").is_ok() { + println!( + "[fingerprint {identifier}] expected={:?}", + declaration_fingerprint(expected_genesis) + ); + println!( + "[fingerprint {identifier}] actual={:?}", + declaration_fingerprint(actual_genesis) + ); + } + assert_eq!( + expected_genesis.mantle_tx().ledger_tx, + actual_genesis.mantle_tx().ledger_tx, + "ledger tx mismatch for {identifier}" + ); + assert_eq!( + declaration_fingerprint(expected_genesis), + declaration_fingerprint(actual_genesis), + "declaration entries mismatch for {identifier}" + ); + } + } + + #[test] + fn cfgsync_genesis_proofs_verify_against_ledger() { + let scenario = ScenarioBuilder::with_node_counts(1, 1).build(); + let topology = scenario.topology().clone(); + let hosts = hosts_from_topology(&topology); + let tracing_settings = tracing_settings(&topology); + + let configs = create_node_configs( + &to_tests_consensus(&topology.config().consensus_params), + &to_tests_da(&topology.config().da_params), + &tracing_settings, + hosts, + ); + let configs_by_identifier: HashMap<_, _> = configs + .into_iter() + .map(|(host, config)| (host.identifier, config)) + .collect(); + + for node in topology.nodes() { + let identifier = identifier_for(node.role(), node.index()); + let cfgsync_config = configs_by_identifier + .get(&identifier) + .unwrap_or_else(|| panic!("missing cfgsync config for {identifier}")); + LedgerState::from_genesis_tx::<()>( + cfgsync_config.consensus_config.genesis_tx.clone(), + &cfgsync_config.consensus_config.ledger_config, + Fr::from(0u64), + ) + .unwrap_or_else(|err| panic!("ledger rejected genesis for {identifier}: {err:?}")); + } + } + + #[test] + fn cfgsync_docker_overrides_produce_valid_genesis() { + let scenario = ScenarioBuilder::with_node_counts(1, 1).build(); + let topology = scenario.topology().clone(); + let tracing_settings = tracing_settings(&topology); + let hosts = docker_style_hosts(&topology); + + let configs = create_node_configs( + &to_tests_consensus(&topology.config().consensus_params), + &to_tests_da(&topology.config().da_params), + &tracing_settings, + hosts, + ); + + for (host, config) in configs { + let genesis = &config.consensus_config.genesis_tx; + LedgerState::from_genesis_tx::<()>( + genesis.clone(), + &config.consensus_config.ledger_config, + Fr::from(0u64), + ) + .unwrap_or_else(|err| { + panic!("ledger rejected genesis for {}: {err:?}", host.identifier) + }); + } + } + + fn hosts_from_topology(topology: &GeneratedTopology) -> Vec { + topology.nodes().map(host_from_node).collect() + } + + fn docker_style_hosts(topology: &GeneratedTopology) -> Vec { + topology + .nodes() + .map(|node| docker_host(node, 10 + node.index() as u8)) + .collect() + } + + fn host_from_node(node: &GeneratedNodeConfig) -> Host { + let identifier = identifier_for(node.role(), node.index()); + let ip = Ipv4Addr::LOCALHOST; + let mut host = match node.role() { + TopologyNodeRole::Validator => Host::default_validator_from_ip(ip, identifier), + TopologyNodeRole::Executor => Host::default_executor_from_ip(ip, identifier), + }; + host.network_port = node.network_port(); + host.da_network_port = node.da_port; + host.blend_port = node.blend_port; + host + } + + fn docker_host(node: &GeneratedNodeConfig, octet: u8) -> Host { + let identifier = identifier_for(node.role(), node.index()); + let ip = Ipv4Addr::new(172, 23, 0, octet); + let mut host = match node.role() { + TopologyNodeRole::Validator => Host::default_validator_from_ip(ip, identifier), + TopologyNodeRole::Executor => Host::default_executor_from_ip(ip, 
identifier), + }; + host.network_port = node.network_port() + 1000; + host.da_network_port = node.da_port + 1000; + host.blend_port = node.blend_port + 1000; + host + } + + fn tracing_settings(topology: &GeneratedTopology) -> TracingSettings { + topology + .validators() + .first() + .or_else(|| topology.executors().first()) + .expect("topology must contain at least one node") + .general + .tracing_config + .tracing_settings + .clone() + } + + fn identifier_for(role: TopologyNodeRole, index: usize) -> String { + match role { + TopologyNodeRole::Validator => format!("validator-{index}"), + TopologyNodeRole::Executor => format!("executor-{index}"), + } + } + + fn to_tests_consensus( + params: &consensus::ConsensusParams, + ) -> tests::topology::configs::consensus::ConsensusParams { + tests::topology::configs::consensus::ConsensusParams { + n_participants: params.n_participants, + security_param: params.security_param, + active_slot_coeff: params.active_slot_coeff, + } + } + + fn to_tests_da(params: &da::DaParams) -> tests::topology::configs::da::DaParams { + tests::topology::configs::da::DaParams { + subnetwork_size: params.subnetwork_size, + dispersal_factor: params.dispersal_factor, + num_samples: params.num_samples, + num_subnets: params.num_subnets, + old_blobs_check_interval: params.old_blobs_check_interval, + blobs_validity_duration: params.blobs_validity_duration, + global_params_path: params.global_params_path.clone(), + policy_settings: params.policy_settings.clone(), + monitor_settings: params.monitor_settings.clone(), + balancer_interval: params.balancer_interval, + redial_cooldown: params.redial_cooldown, + replication_settings: params.replication_settings, + subnets_refresh_interval: params.subnets_refresh_interval, + retry_shares_limit: params.retry_shares_limit, + retry_commitments_limit: params.retry_commitments_limit, + } + } + + fn declaration_fingerprint(genesis: &G) -> Vec<(ServiceType, ProviderId, NoteId, PublicKey)> + where + G: GenesisTxTrait, + { + genesis + .sdp_declarations() + .map(|(op, _)| (op.service_type, op.provider_id, op.locked_note_id, op.zk_id)) + .collect() + } +} diff --git a/testing-framework/runners/compose/src/wait.rs b/testing-framework/runners/compose/src/wait.rs new file mode 100644 index 0000000..404c3af --- /dev/null +++ b/testing-framework/runners/compose/src/wait.rs @@ -0,0 +1,21 @@ +use std::time::Duration; + +use testing_framework_core::{ + adjust_timeout, + scenario::http_probe::{self, HttpReadinessError, NodeRole}, +}; + +const DEFAULT_WAIT: Duration = Duration::from_secs(90); +const POLL_INTERVAL: Duration = Duration::from_millis(250); + +pub async fn wait_for_validators(ports: &[u16]) -> Result<(), HttpReadinessError> { + wait_for_ports(ports, NodeRole::Validator).await +} + +pub async fn wait_for_executors(ports: &[u16]) -> Result<(), HttpReadinessError> { + wait_for_ports(ports, NodeRole::Executor).await +} + +async fn wait_for_ports(ports: &[u16], role: NodeRole) -> Result<(), HttpReadinessError> { + http_probe::wait_for_http_ports(ports, role, adjust_timeout(DEFAULT_WAIT), POLL_INTERVAL).await +} diff --git a/testing-framework/runners/compose/src/workspace.rs b/testing-framework/runners/compose/src/workspace.rs new file mode 100644 index 0000000..4a9d31a --- /dev/null +++ b/testing-framework/runners/compose/src/workspace.rs @@ -0,0 +1,89 @@ +use std::{ + env, fs, + path::{Path, PathBuf}, +}; + +use anyhow::{Context as _, Result}; +use tempfile::TempDir; + +/// Copy the repository `testnet/` directory into a scenario-specific temp dir. 
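+///
+/// A minimal usage sketch of how the compose runner is expected to drive this
+/// type (illustrative only, not a complete example):
+///
+/// ```ignore
+/// let workspace = ComposeWorkspace::create()?;                      // copies testnet/ assets
+/// let cfgsync_path = workspace.testnet_dir().join("cfgsync.yaml");  // template to patch
+/// let root = workspace.root_path();                                 // docker compose working dir
+/// ```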
+#[derive(Debug)]
+pub struct ComposeWorkspace {
+    root: TempDir,
+}
+
+impl ComposeWorkspace {
+    /// Clone the testnet assets into a temporary directory.
+    pub fn create() -> Result<Self> {
+        let repo_root = env::var("CARGO_WORKSPACE_DIR")
+            .map(PathBuf::from)
+            .or_else(|_| {
+                Path::new(env!("CARGO_MANIFEST_DIR"))
+                    .parent()
+                    .and_then(Path::parent)
+                    .and_then(Path::parent)
+                    .map(Path::to_path_buf)
+                    .context("resolving workspace root from manifest dir")
+            })
+            .context("locating repository root")?;
+        let temp = tempfile::Builder::new()
+            .prefix("nomos-testnet-")
+            .tempdir()
+            .context("creating testnet temp dir")?;
+        let testnet_source = repo_root.join("testnet");
+        if !testnet_source.exists() {
+            anyhow::bail!(
+                "testnet directory not found at {}",
+                testnet_source.display()
+            );
+        }
+        copy_dir_recursive(&testnet_source, &temp.path().join("testnet"))?;
+
+        let kzg_source = repo_root.join("tests/kzgrs/kzgrs_test_params");
+        if kzg_source.exists() {
+            let target = temp.path().join("kzgrs_test_params");
+            if kzg_source.is_dir() {
+                copy_dir_recursive(&kzg_source, &target)?;
+            } else {
+                fs::copy(&kzg_source, &target).with_context(|| {
+                    format!("copying {} -> {}", kzg_source.display(), target.display())
+                })?;
+            }
+        }
+
+        Ok(Self { root: temp })
+    }
+
+    #[must_use]
+    pub fn root_path(&self) -> &Path {
+        self.root.path()
+    }
+
+    #[must_use]
+    pub fn testnet_dir(&self) -> PathBuf {
+        self.root.path().join("testnet")
+    }
+
+    #[must_use]
+    pub fn into_inner(self) -> TempDir {
+        self.root
+    }
+}
+
+fn copy_dir_recursive(source: &Path, target: &Path) -> Result<()> {
+    fs::create_dir_all(target)
+        .with_context(|| format!("creating target dir {}", target.display()))?;
+    for entry in fs::read_dir(source).with_context(|| format!("reading {}", source.display()))? {
+        let entry = entry?;
+        let file_type = entry.file_type()?;
+        let dest = target.join(entry.file_name());
+        if file_type.is_dir() {
+            copy_dir_recursive(&entry.path(), &dest)?;
+        } else if !file_type.is_dir() {
+            fs::copy(entry.path(), &dest).with_context(|| {
+                format!("copying {} -> {}", entry.path().display(), dest.display())
+            })?;
+        }
+    }
+    Ok(())
+}
diff --git a/testing-framework/runners/docker/runner.Dockerfile b/testing-framework/runners/docker/runner.Dockerfile
new file mode 100644
index 0000000..9746375
--- /dev/null
+++ b/testing-framework/runners/docker/runner.Dockerfile
@@ -0,0 +1,74 @@
+# syntax=docker/dockerfile:1
+
+ARG VERSION=v0.3.1
+ARG NOMOS_CIRCUITS_PLATFORM=linux-x86_64
+
+# ===========================
+# BUILD IMAGE
+# ===========================
+
+FROM rust:1.91.0-slim-bookworm AS builder
+
+ARG VERSION
+ARG NOMOS_CIRCUITS_PLATFORM
+ARG TARGETARCH
+
+LABEL maintainer="logos devs" \
+    source="https://github.com/logos-co/nomos-node" \
+    description="Nomos testing framework build image"
+
+WORKDIR /nomos
+COPY . .
+
+RUN apt-get update && apt-get install -yq \
+    git gcc g++ clang libssl-dev pkg-config ca-certificates curl wget \
+    build-essential cmake libgmp-dev libsodium-dev nasm m4 && \
+    rm -rf /var/lib/apt/lists/*
+
+ENV NOMOS_CIRCUITS_PLATFORM=${NOMOS_CIRCUITS_PLATFORM}
+
+RUN chmod +x scripts/setup-nomos-circuits.sh && \
+    scripts/setup-nomos-circuits.sh "$VERSION" "/opt/circuits"
+
+RUN if [ "${TARGETARCH:-amd64}" = "arm64" ]; then \
+    chmod +x scripts/build-rapidsnark.sh && \
+    scripts/build-rapidsnark.sh "/opt/circuits"; \
+    fi
+
+ENV NOMOS_CIRCUITS=/opt/circuits
+
+# Use debug builds to keep the linker memory footprint low; we only need
+# binaries for integration testing, not optimized releases.
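+# The workspace build below produces the debug binaries that the runtime stage
+# copies out of target/debug (nomos-node, nomos-executor, nomos-cli and the
+# cfgsync-server/cfgsync-client helpers).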
+RUN cargo build --all-features + +# =========================== +# NODE IMAGE +# =========================== + +FROM debian:bookworm-slim + +ARG VERSION + +LABEL maintainer="logos devs" \ + source="https://github.com/logos-co/nomos-node" \ + description="Nomos testing framework runtime image" + +RUN apt-get update && apt-get install -yq \ + libstdc++6 \ + libssl3 \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /opt/circuits /opt/circuits + +COPY --from=builder /nomos/target/debug/nomos-node /usr/bin/nomos-node +COPY --from=builder /nomos/target/debug/nomos-executor /usr/bin/nomos-executor +COPY --from=builder /nomos/target/debug/nomos-cli /usr/bin/nomos-cli +COPY --from=builder /nomos/target/debug/cfgsync-server /usr/bin/cfgsync-server +COPY --from=builder /nomos/target/debug/cfgsync-client /usr/bin/cfgsync-client + +ENV NOMOS_CIRCUITS=/opt/circuits + +EXPOSE 3000 8080 9000 60000 + +ENTRYPOINT ["/usr/bin/nomos-node"] diff --git a/testing-framework/runners/k8s/Cargo.toml b/testing-framework/runners/k8s/Cargo.toml new file mode 100644 index 0000000..985bb64 --- /dev/null +++ b/testing-framework/runners/k8s/Cargo.toml @@ -0,0 +1,29 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +name = "testing-framework-runner-k8s" +readme.workspace = true +repository.workspace = true +version = "0.1.0" + +[lints] +workspace = true + +[dependencies] +anyhow = "1" +async-trait = { workspace = true } +k8s-openapi = { version = "0.20", features = ["latest"] } +kube = { version = "0.87", default-features = false, features = ["client", "runtime", "rustls-tls"] } +reqwest = { workspace = true, features = ["json"] } +serde = { version = "1", features = ["derive"] } +serde_yaml = { workspace = true } +tempfile = { workspace = true } +testing-framework-core = { path = "../../core" } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["macros", "net", "process", "rt-multi-thread", "sync", "time"] } +tracing = { workspace = true } +url = { version = "2" } +uuid = { version = "1", features = ["v4"] } diff --git a/testing-framework/runners/k8s/helm/nomos-runner/Chart.yaml b/testing-framework/runners/k8s/helm/nomos-runner/Chart.yaml new file mode 100644 index 0000000..1785e7e --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: nomos-runner +description: Helm chart for Nomos integration test runner assets +type: application +version: 0.1.0 +appVersion: "0.1.0" diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/_helpers.tpl b/testing-framework/runners/k8s/helm/nomos-runner/templates/_helpers.tpl new file mode 100644 index 0000000..1665098 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/_helpers.tpl @@ -0,0 +1,41 @@ +{{- define "nomos-runner.chart" -}} +{{- .Chart.Name -}} +{{- end -}} + +{{- define "nomos-runner.fullname" -}} +{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "nomos-runner.labels" -}} +app.kubernetes.io/name: {{ include "nomos-runner.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{- define "nomos-runner.selectorLabels" -}} +app.kubernetes.io/name: {{ include "nomos-runner.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{- define "nomos-runner.validatorLabels" -}} +{{- $root := index . "root" -}} +{{- $index := index . 
"index" -}} +app.kubernetes.io/name: {{ include "nomos-runner.chart" $root }} +app.kubernetes.io/instance: {{ $root.Release.Name }} +nomos/logical-role: validator +nomos/validator-index: "{{ $index }}" +{{- end -}} + +{{- define "nomos-runner.executorLabels" -}} +{{- $root := index . "root" -}} +{{- $index := index . "index" -}} +app.kubernetes.io/name: {{ include "nomos-runner.chart" $root }} +app.kubernetes.io/instance: {{ $root.Release.Name }} +nomos/logical-role: executor +nomos/executor-index: "{{ $index }}" +{{- end -}} + +{{- define "nomos-runner.prometheusLabels" -}} +app.kubernetes.io/name: {{ include "nomos-runner.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +nomos/logical-role: prometheus +{{- end -}} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/cfgsync-deployment.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/cfgsync-deployment.yaml new file mode 100644 index 0000000..bc497dd --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/cfgsync-deployment.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "nomos-runner.fullname" . }}-cfgsync + labels: + {{- include "nomos-runner.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "nomos-runner.selectorLabels" . | nindent 6 }} + nomos/component: cfgsync + template: + metadata: + labels: + {{- include "nomos-runner.selectorLabels" . | nindent 8 }} + nomos/component: cfgsync + spec: + containers: + - name: cfgsync + image: {{ .Values.image }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + command: ["/etc/nomos/scripts/run_cfgsync.sh"] + ports: + - name: http + containerPort: {{ .Values.cfgsync.port }} + env: + - name: RUST_LOG + value: debug + volumeMounts: + - name: assets + mountPath: /etc/nomos + readOnly: true + volumes: + - name: assets + configMap: + name: {{ include "nomos-runner.fullname" . }}-assets + defaultMode: 0755 + items: + - key: cfgsync.yaml + path: cfgsync.yaml + - key: run_cfgsync.sh + path: scripts/run_cfgsync.sh diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/cfgsync-service.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/cfgsync-service.yaml new file mode 100644 index 0000000..db09c16 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/cfgsync-service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "nomos-runner.fullname" . }}-cfgsync + labels: + {{- include "nomos-runner.labels" . | nindent 4 }} +spec: + type: ClusterIP + selector: + {{- include "nomos-runner.selectorLabels" . | nindent 4 }} + nomos/component: cfgsync + ports: + - name: http + port: {{ .Values.cfgsync.port }} + targetPort: http diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/configmap.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/configmap.yaml new file mode 100644 index 0000000..1803e4e --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/configmap.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "nomos-runner.fullname" . }}-assets + labels: + {{- include "nomos-runner.labels" . 
| nindent 4 }} +data: + cfgsync.yaml: | +{{- if .Values.cfgsync.config }} +{{ .Values.cfgsync.config | indent 4 }} +{{- else }} +{{ "" | indent 4 }} +{{- end }} + run_cfgsync.sh: | +{{- if .Values.scripts.runCfgsyncSh }} +{{ .Values.scripts.runCfgsyncSh | indent 4 }} +{{- else }} +{{ "" | indent 4 }} +{{- end }} + run_nomos_node.sh: | +{{- if .Values.scripts.runNomosNodeSh }} +{{ .Values.scripts.runNomosNodeSh | indent 4 }} +{{- else }} +{{ "" | indent 4 }} +{{- end }} + run_nomos_executor.sh: | +{{- if .Values.scripts.runNomosExecutorSh }} +{{ .Values.scripts.runNomosExecutorSh | indent 4 }} +{{- else }} +{{ "" | indent 4 }} +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/executor-deployments.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/executor-deployments.yaml new file mode 100644 index 0000000..0aa1f42 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/executor-deployments.yaml @@ -0,0 +1,63 @@ +{{- $root := . -}} +{{- $nodes := default (list) .Values.executors.nodes }} +{{- range $i, $node := $nodes }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "nomos-runner.fullname" $root }}-executor-{{ $i }} + labels: + {{- include "nomos-runner.executorLabels" (dict "root" $root "index" $i) | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "nomos-runner.executorLabels" (dict "root" $root "index" $i) | nindent 6 }} + template: + metadata: + labels: + {{- include "nomos-runner.executorLabels" (dict "root" $root "index" $i) | nindent 8 }} + spec: + containers: + - name: executor + image: {{ $root.Values.image }} + imagePullPolicy: {{ $root.Values.imagePullPolicy }} + command: ["/etc/nomos/scripts/run_nomos_executor.sh"] + ports: + - name: http + containerPort: {{ default 18080 $node.apiPort }} + - name: testing-http + containerPort: {{ default 18081 $node.testingHttpPort }} + env: + - name: CFG_SERVER_ADDR + value: http://{{ include "nomos-runner.fullname" $root }}-cfgsync:{{ $root.Values.cfgsync.port }} + {{- range $key, $value := $node.env }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + volumeMounts: + - name: assets + mountPath: /etc/nomos + readOnly: true + - name: kzg-params + mountPath: /kzgrs_test_params + readOnly: true + volumes: + - name: assets + configMap: + name: {{ include "nomos-runner.fullname" $root }}-assets + defaultMode: 0755 + items: + - key: cfgsync.yaml + path: cfgsync.yaml + - key: run_cfgsync.sh + path: scripts/run_cfgsync.sh + - key: run_nomos_executor.sh + path: scripts/run_nomos_executor.sh + - key: run_nomos_node.sh + path: scripts/run_nomos_node.sh + - name: kzg-params + persistentVolumeClaim: + claimName: {{ include "nomos-runner.fullname" $root }}-kzg + readOnly: true +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/executor-services.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/executor-services.yaml new file mode 100644 index 0000000..279a976 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/executor-services.yaml @@ -0,0 +1,22 @@ +{{- $root := . 
-}} +{{- $nodes := default (list) .Values.executors.nodes }} +{{- range $i, $node := $nodes }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "nomos-runner.fullname" $root }}-executor-{{ $i }} + labels: + {{- include "nomos-runner.executorLabels" (dict "root" $root "index" $i) | nindent 4 }} +spec: + type: NodePort + selector: + {{- include "nomos-runner.executorLabels" (dict "root" $root "index" $i) | nindent 4 }} + ports: + - name: http + port: {{ default 18080 $node.apiPort }} + targetPort: http + - name: testing-http + port: {{ default 18081 $node.testingHttpPort }} + targetPort: testing-http +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-configmap.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-configmap.yaml new file mode 100644 index 0000000..7eaa16a --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.prometheus.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus + labels: + {{- include "nomos-runner.prometheusLabels" . | nindent 4 }} +data: + prometheus.yml: | +{{- if .Values.prometheus.config }} +{{ .Values.prometheus.config | indent 4 }} +{{- else }} +{{ "" | indent 4 }} +{{- end }} +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-deployment.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-deployment.yaml new file mode 100644 index 0000000..4cda1c1 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-deployment.yaml @@ -0,0 +1,38 @@ +{{- if .Values.prometheus.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: prometheus + labels: + {{- include "nomos-runner.prometheusLabels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "nomos-runner.prometheusLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "nomos-runner.prometheusLabels" . | nindent 8 }} + spec: + containers: + - name: prometheus + image: {{ .Values.prometheus.image }} + imagePullPolicy: {{ .Values.prometheus.imagePullPolicy | default "IfNotPresent" }} + args: + - --config.file=/etc/prometheus/prometheus.yml + - --storage.tsdb.retention.time={{ .Values.prometheus.retention }} + - --web.enable-otlp-receiver + - --enable-feature=otlp-write-receiver + ports: + - containerPort: 9090 + name: http + volumeMounts: + - name: prometheus-config + mountPath: /etc/prometheus + volumes: + - name: prometheus-config + configMap: + name: prometheus +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-service.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-service.yaml new file mode 100644 index 0000000..c0d90e2 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/prometheus-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.prometheus.enabled }} +--- +apiVersion: v1 +kind: Service +metadata: + name: prometheus + labels: + {{- include "nomos-runner.prometheusLabels" . | nindent 4 }} +spec: + type: {{ .Values.prometheus.service.type | default "NodePort" }} + selector: + {{- include "nomos-runner.prometheusLabels" . 
| nindent 4 }} + ports: + - name: http + port: 9090 + targetPort: http + {{- if and (eq (default "NodePort" .Values.prometheus.service.type) "NodePort") .Values.prometheus.service.nodePort }} + nodePort: {{ .Values.prometheus.service.nodePort }} + {{- end }} +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/pv.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/pv.yaml new file mode 100644 index 0000000..3af3b2e --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/pv.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ include "nomos-runner.fullname" . }}-kzg + labels: + {{- include "nomos-runner.labels" . | nindent 4 }} +spec: + capacity: + storage: {{ .Values.kzg.storageSize }} + accessModes: + - ReadOnlyMany + persistentVolumeReclaimPolicy: Delete + storageClassName: manual + hostPath: + path: {{ .Values.kzg.hostPath }} + type: {{ .Values.kzg.hostPathType }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/pvc.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/pvc.yaml new file mode 100644 index 0000000..52248fe --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/pvc.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "nomos-runner.fullname" . }}-kzg + labels: + {{- include "nomos-runner.labels" . | nindent 4 }} +spec: + accessModes: + - ReadOnlyMany + storageClassName: manual + volumeName: {{ include "nomos-runner.fullname" . }}-kzg + resources: + requests: + storage: {{ .Values.kzg.storageSize }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/validator-deployments.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/validator-deployments.yaml new file mode 100644 index 0000000..d807365 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/validator-deployments.yaml @@ -0,0 +1,61 @@ +{{- $root := . 
-}} +{{- $nodes := default (list) .Values.validators.nodes }} +{{- range $i, $node := $nodes }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "nomos-runner.fullname" $root }}-validator-{{ $i }} + labels: + {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 6 }} + template: + metadata: + labels: + {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 8 }} + spec: + containers: + - name: validator + image: {{ $root.Values.image }} + imagePullPolicy: {{ $root.Values.imagePullPolicy }} + command: ["/etc/nomos/scripts/run_nomos_node.sh"] + ports: + - name: http + containerPort: {{ default 18080 $node.apiPort }} + - name: testing-http + containerPort: {{ default 18081 $node.testingHttpPort }} + env: + - name: CFG_SERVER_ADDR + value: http://{{ include "nomos-runner.fullname" $root }}-cfgsync:{{ $root.Values.cfgsync.port }} + {{- range $key, $value := $node.env }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + volumeMounts: + - name: assets + mountPath: /etc/nomos + readOnly: true + - name: kzg-params + mountPath: /kzgrs_test_params + readOnly: true + volumes: + - name: assets + configMap: + name: {{ include "nomos-runner.fullname" $root }}-assets + defaultMode: 0755 + items: + - key: cfgsync.yaml + path: cfgsync.yaml + - key: run_cfgsync.sh + path: scripts/run_cfgsync.sh + - key: run_nomos_node.sh + path: scripts/run_nomos_node.sh + - name: kzg-params + persistentVolumeClaim: + claimName: {{ include "nomos-runner.fullname" $root }}-kzg + readOnly: true +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/templates/validator-services.yaml b/testing-framework/runners/k8s/helm/nomos-runner/templates/validator-services.yaml new file mode 100644 index 0000000..ff94e2e --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/templates/validator-services.yaml @@ -0,0 +1,22 @@ +{{- $root := . 
-}} +{{- $nodes := default (list) .Values.validators.nodes }} +{{- range $i, $node := $nodes }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "nomos-runner.fullname" $root }}-validator-{{ $i }} + labels: + {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 4 }} +spec: + type: NodePort + selector: + {{- include "nomos-runner.validatorLabels" (dict "root" $root "index" $i) | nindent 4 }} + ports: + - name: http + port: {{ default 18080 $node.apiPort }} + targetPort: http + - name: testing-http + port: {{ default 18081 $node.testingHttpPort }} + targetPort: testing-http +{{- end }} diff --git a/testing-framework/runners/k8s/helm/nomos-runner/values.yaml b/testing-framework/runners/k8s/helm/nomos-runner/values.yaml new file mode 100644 index 0000000..bc72438 --- /dev/null +++ b/testing-framework/runners/k8s/helm/nomos-runner/values.yaml @@ -0,0 +1,38 @@ +image: "nomos-testnet:local" +imagePullPolicy: IfNotPresent + +cfgsync: + port: 4400 + config: "" + +scripts: + runCfgsyncSh: "" + runNomosNodeSh: "" + runNomosExecutorSh: "" + +validators: + count: 1 + nodes: [] + +executors: + count: 1 + nodes: [] + +kzg: + hostPath: "/var/lib/nomos/kzgrs_test_params" + hostPathType: "Directory" + storageSize: "1Gi" + +prometheus: + enabled: true + image: "prom/prometheus:v3.0.1" + imagePullPolicy: IfNotPresent + retention: "7d" + service: + type: NodePort + nodePort: null + config: | + global: + evaluation_interval: 15s + external_labels: + monitor: "NomosRunner" diff --git a/testing-framework/runners/k8s/src/assets.rs b/testing-framework/runners/k8s/src/assets.rs new file mode 100644 index 0000000..7117a8c --- /dev/null +++ b/testing-framework/runners/k8s/src/assets.rs @@ -0,0 +1,280 @@ +use std::{ + collections::BTreeMap, + env, fs, io, + path::{Path, PathBuf}, +}; + +use anyhow::{Context as _, Result as AnyResult}; +use serde::Serialize; +use tempfile::TempDir; +use testing_framework_core::{ + scenario::cfgsync::{apply_topology_overrides, load_cfgsync_template, render_cfgsync_yaml}, + topology::GeneratedTopology, +}; +use thiserror::Error; + +pub struct RunnerAssets { + pub image: String, + pub kzg_path: PathBuf, + pub chart_path: PathBuf, + pub cfgsync_file: PathBuf, + pub run_cfgsync_script: PathBuf, + pub run_nomos_node_script: PathBuf, + pub run_nomos_executor_script: PathBuf, + pub values_file: PathBuf, + _tempdir: TempDir, +} + +pub const CFGSYNC_PORT: u16 = 4400; + +#[derive(Debug, Error)] +pub enum AssetsError { + #[error("failed to locate workspace root: {source}")] + WorkspaceRoot { + #[source] + source: anyhow::Error, + }, + #[error("failed to render cfgsync configuration: {source}")] + Cfgsync { + #[source] + source: anyhow::Error, + }, + #[error("missing required script at {path}")] + MissingScript { path: PathBuf }, + #[error("missing KZG parameters at {path}; build them with `make kzgrs_test_params`")] + MissingKzg { path: PathBuf }, + #[error("missing Helm chart at {path}; ensure the repository is up-to-date")] + MissingChart { path: PathBuf }, + #[error("failed to create temporary directory for rendered assets: {source}")] + TempDir { + #[source] + source: io::Error, + }, + #[error("failed to write asset at {path}: {source}")] + Io { + path: PathBuf, + #[source] + source: io::Error, + }, + #[error("failed to render Helm values: {source}")] + Values { + #[source] + source: serde_yaml::Error, + }, +} + +pub fn prepare_assets(topology: &GeneratedTopology) -> Result { + let root = workspace_root().map_err(|source| 
AssetsError::WorkspaceRoot { source })?; + let cfgsync_yaml = render_cfgsync_config(&root, topology)?; + + let tempdir = tempfile::Builder::new() + .prefix("nomos-helm-") + .tempdir() + .map_err(|source| AssetsError::TempDir { source })?; + + let cfgsync_file = write_temp_file(tempdir.path(), "cfgsync.yaml", cfgsync_yaml)?; + let scripts = validate_scripts(&root)?; + let kzg_path = validate_kzg_params(&root)?; + let chart_path = helm_chart_path()?; + let values_yaml = render_values_yaml(topology)?; + let values_file = write_temp_file(tempdir.path(), "values.yaml", values_yaml)?; + let image = + env::var("NOMOS_TESTNET_IMAGE").unwrap_or_else(|_| String::from("nomos-testnet:local")); + + Ok(RunnerAssets { + image, + kzg_path, + chart_path, + cfgsync_file, + run_cfgsync_script: scripts.run_cfgsync, + run_nomos_node_script: scripts.run_node, + run_nomos_executor_script: scripts.run_executor, + values_file, + _tempdir: tempdir, + }) +} + +const CFGSYNC_K8S_TIMEOUT_SECS: u64 = 300; + +fn render_cfgsync_config(root: &Path, topology: &GeneratedTopology) -> Result { + let cfgsync_template_path = root.join("testnet/cfgsync.yaml"); + let mut cfg = load_cfgsync_template(&cfgsync_template_path) + .map_err(|source| AssetsError::Cfgsync { source })?; + apply_topology_overrides(&mut cfg, topology, true); + cfg.timeout = cfg.timeout.max(CFGSYNC_K8S_TIMEOUT_SECS); + render_cfgsync_yaml(&cfg).map_err(|source| AssetsError::Cfgsync { source }) +} + +struct ScriptPaths { + run_cfgsync: PathBuf, + run_node: PathBuf, + run_executor: PathBuf, +} + +fn validate_scripts(root: &Path) -> Result { + let scripts_dir = root.join("testnet/scripts"); + let run_cfgsync = scripts_dir.join("run_cfgsync.sh"); + let run_node = scripts_dir.join("run_nomos_node.sh"); + let run_executor = scripts_dir.join("run_nomos_executor.sh"); + + for path in [&run_cfgsync, &run_node, &run_executor] { + if !path.exists() { + return Err(AssetsError::MissingScript { path: path.clone() }); + } + } + + Ok(ScriptPaths { + run_cfgsync, + run_node, + run_executor, + }) +} + +fn validate_kzg_params(root: &Path) -> Result { + let path = root.join("tests/kzgrs/kzgrs_test_params"); + if path.exists() { + Ok(path) + } else { + Err(AssetsError::MissingKzg { path }) + } +} + +fn helm_chart_path() -> Result { + let path = Path::new(env!("CARGO_MANIFEST_DIR")).join("helm/nomos-runner"); + if path.exists() { + Ok(path) + } else { + Err(AssetsError::MissingChart { path }) + } +} + +fn render_values_yaml(topology: &GeneratedTopology) -> Result { + let values = build_values(topology); + serde_yaml::to_string(&values).map_err(|source| AssetsError::Values { source }) +} + +fn write_temp_file( + dir: &Path, + name: &str, + contents: impl AsRef<[u8]>, +) -> Result { + let path = dir.join(name); + fs::write(&path, contents).map_err(|source| AssetsError::Io { + path: path.clone(), + source, + })?; + Ok(path) +} + +pub fn workspace_root() -> AnyResult { + if let Ok(var) = env::var("CARGO_WORKSPACE_DIR") { + return Ok(PathBuf::from(var)); + } + let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR")); + manifest_dir + .parent() + .and_then(Path::parent) + .and_then(Path::parent) + .map(Path::to_path_buf) + .context("resolving workspace root from manifest dir") +} + +#[derive(Serialize)] +struct HelmValues { + validators: NodeGroup, + executors: NodeGroup, +} + +#[derive(Serialize)] +struct NodeGroup { + count: usize, + nodes: Vec, +} + +#[derive(Serialize)] +struct NodeValues { + #[serde(rename = "apiPort")] + api_port: u16, + #[serde(rename = "testingHttpPort")] + 
testing_http_port: u16, + env: BTreeMap, +} + +fn build_values(topology: &GeneratedTopology) -> HelmValues { + let validators = topology + .validators() + .iter() + .map(|validator| { + let mut env = BTreeMap::new(); + env.insert( + "CFG_NETWORK_PORT".into(), + validator.network_port().to_string(), + ); + env.insert("CFG_DA_PORT".into(), validator.da_port.to_string()); + env.insert("CFG_BLEND_PORT".into(), validator.blend_port.to_string()); + env.insert( + "CFG_API_PORT".into(), + validator.general.api_config.address.port().to_string(), + ); + env.insert( + "CFG_TESTING_HTTP_PORT".into(), + validator + .general + .api_config + .testing_http_address + .port() + .to_string(), + ); + + NodeValues { + api_port: validator.general.api_config.address.port(), + testing_http_port: validator.general.api_config.testing_http_address.port(), + env, + } + }) + .collect(); + + let executors = topology + .executors() + .iter() + .map(|executor| { + let mut env = BTreeMap::new(); + env.insert( + "CFG_NETWORK_PORT".into(), + executor.network_port().to_string(), + ); + env.insert("CFG_DA_PORT".into(), executor.da_port.to_string()); + env.insert("CFG_BLEND_PORT".into(), executor.blend_port.to_string()); + env.insert( + "CFG_API_PORT".into(), + executor.general.api_config.address.port().to_string(), + ); + env.insert( + "CFG_TESTING_HTTP_PORT".into(), + executor + .general + .api_config + .testing_http_address + .port() + .to_string(), + ); + + NodeValues { + api_port: executor.general.api_config.address.port(), + testing_http_port: executor.general.api_config.testing_http_address.port(), + env, + } + }) + .collect(); + + HelmValues { + validators: NodeGroup { + count: topology.validators().len(), + nodes: validators, + }, + executors: NodeGroup { + count: topology.executors().len(), + nodes: executors, + }, + } +} diff --git a/testing-framework/runners/k8s/src/cleanup.rs b/testing-framework/runners/k8s/src/cleanup.rs new file mode 100644 index 0000000..1906b37 --- /dev/null +++ b/testing-framework/runners/k8s/src/cleanup.rs @@ -0,0 +1,219 @@ +use std::thread; + +use k8s_openapi::api::core::v1::Namespace; +use kube::{Api, Client, api::DeleteParams}; +use testing_framework_core::scenario::CleanupGuard; +use tokio::{ + process::Command, + time::{Duration, sleep}, +}; +use tracing::warn; + +use crate::helm::uninstall_release; + +pub struct RunnerCleanup { + client: Client, + namespace: String, + release: String, + preserve: bool, +} + +impl RunnerCleanup { + pub fn new(client: Client, namespace: String, release: String, preserve: bool) -> Self { + debug_assert!( + !namespace.is_empty() && !release.is_empty(), + "k8s cleanup requires namespace and release" + ); + Self { + client, + namespace, + release, + preserve, + } + } + + async fn cleanup_async(&self) { + if self.preserve { + println!( + "[k8s-runner] preserving Helm release `{}` in namespace `{}`", + self.release, self.namespace + ); + + return; + } + + if let Err(err) = uninstall_release(&self.release, &self.namespace).await { + println!("[k8s-runner] helm uninstall {} failed: {err}", self.release); + } + + println!( + "[k8s-runner] deleting namespace `{}` via k8s API", + self.namespace + ); + delete_namespace(&self.client, &self.namespace).await; + println!( + "[k8s-runner] delete request for namespace `{}` finished", + self.namespace + ); + } + + fn blocking_cleanup_success(&self) -> bool { + match tokio::runtime::Runtime::new() { + Ok(rt) => match rt.block_on(async { + tokio::time::timeout(Duration::from_secs(120), self.cleanup_async()).await + }) { + 
Ok(()) => true, + Err(err) => { + warn!( + "[k8s-runner] cleanup timed out after 120s: {err}; falling back to background thread" + ); + false + } + }, + Err(err) => { + warn!( + "[k8s-runner] unable to create cleanup runtime: {err}; falling back to background thread" + ); + false + } + } + } + + fn spawn_cleanup_thread(self: Box) { + match thread::Builder::new() + .name("k8s-runner-cleanup".into()) + .spawn(move || match tokio::runtime::Runtime::new() { + Ok(rt) => { + if let Err(err) = rt.block_on(async { + tokio::time::timeout(Duration::from_secs(120), self.cleanup_async()).await + }) { + warn!("[k8s-runner] background cleanup timed out: {err}"); + } + } + Err(err) => warn!("[k8s-runner] unable to create cleanup runtime: {err}"), + }) { + Ok(handle) => { + if let Err(err) = handle.join() { + warn!("[k8s-runner] cleanup thread panicked: {err:?}"); + } + } + Err(err) => warn!("[k8s-runner] failed to spawn cleanup thread: {err}"), + } + } +} + +async fn delete_namespace(client: &Client, namespace: &str) { + let namespaces: Api = Api::all(client.clone()); + + if delete_namespace_via_api(&namespaces, namespace).await { + wait_for_namespace_termination(&namespaces, namespace).await; + return; + } + + if delete_namespace_via_cli(namespace).await { + wait_for_namespace_termination(&namespaces, namespace).await; + } else { + warn!("[k8s-runner] unable to delete namespace `{namespace}` using kubectl fallback"); + } +} + +async fn delete_namespace_via_api(namespaces: &Api, namespace: &str) -> bool { + println!("[k8s-runner] invoking kubernetes API to delete namespace `{namespace}`"); + match tokio::time::timeout( + Duration::from_secs(10), + namespaces.delete(namespace, &DeleteParams::default()), + ) + .await + { + Ok(Ok(_)) => { + println!( + "[k8s-runner] delete request accepted for namespace `{namespace}`; waiting for termination" + ); + true + } + Ok(Err(err)) => { + println!("[k8s-runner] failed to delete namespace `{namespace}` via API: {err}"); + warn!("[k8s-runner] api delete failed for namespace {namespace}: {err}"); + false + } + Err(_) => { + println!( + "[k8s-runner] kubernetes API timed out deleting namespace `{namespace}`; falling back to kubectl" + ); + false + } + } +} + +async fn delete_namespace_via_cli(namespace: &str) -> bool { + println!("[k8s-runner] invoking `kubectl delete namespace {namespace}` fallback"); + let output = Command::new("kubectl") + .arg("delete") + .arg("namespace") + .arg(namespace) + .arg("--wait=true") + .output() + .await; + + match output { + Ok(result) if result.status.success() => { + println!("[k8s-runner] `kubectl delete namespace {namespace}` completed successfully"); + true + } + Ok(result) => { + println!( + "[k8s-runner] `kubectl delete namespace {namespace}` failed: {}\n{}", + String::from_utf8_lossy(&result.stderr), + String::from_utf8_lossy(&result.stdout) + ); + false + } + Err(err) => { + println!("[k8s-runner] failed to spawn kubectl for namespace `{namespace}`: {err}"); + false + } + } +} + +async fn wait_for_namespace_termination(namespaces: &Api, namespace: &str) { + for attempt in 0..60 { + match namespaces.get_opt(namespace).await { + Ok(Some(ns)) => { + if attempt == 0 { + println!( + "[k8s-runner] waiting for namespace `{}` to terminate (phase={:?})", + namespace, + ns.status + .as_ref() + .and_then(|status| status.phase.clone()) + .unwrap_or_else(|| "Unknown".into()) + ); + } + } + Ok(None) => { + println!("[k8s-runner] namespace `{namespace}` deleted"); + return; + } + Err(err) => { + warn!("[k8s-runner] namespace `{namespace}` 
poll failed: {err}"); + break; + } + } + + sleep(Duration::from_secs(1)).await; + } + + warn!( + "[k8s-runner] namespace `{}` still present after waiting for deletion", + namespace + ); +} + +impl CleanupGuard for RunnerCleanup { + fn cleanup(self: Box) { + if tokio::runtime::Handle::try_current().is_err() && self.blocking_cleanup_success() { + return; + } + self.spawn_cleanup_thread(); + } +} diff --git a/testing-framework/runners/k8s/src/helm.rs b/testing-framework/runners/k8s/src/helm.rs new file mode 100644 index 0000000..a1a23a7 --- /dev/null +++ b/testing-framework/runners/k8s/src/helm.rs @@ -0,0 +1,141 @@ +use std::{io, process::Stdio}; + +use thiserror::Error; +use tokio::process::Command; + +use crate::assets::{CFGSYNC_PORT, RunnerAssets, workspace_root}; + +#[derive(Debug, Error)] +pub enum HelmError { + #[error("failed to spawn {command}: {source}")] + Spawn { + command: String, + #[source] + source: io::Error, + }, + #[error("{command} exited with status {status:?}\nstderr:\n{stderr}\nstdout:\n{stdout}")] + Failed { + command: String, + status: Option, + stdout: String, + stderr: String, + }, +} + +pub async fn install_release( + assets: &RunnerAssets, + release: &str, + namespace: &str, + validators: usize, + executors: usize, +) -> Result<(), HelmError> { + let host_path_type = if assets.kzg_path.is_dir() { + "Directory" + } else { + "File" + }; + + let mut cmd = Command::new("helm"); + cmd.arg("install") + .arg(release) + .arg(&assets.chart_path) + .arg("--namespace") + .arg(namespace) + .arg("--create-namespace") + .arg("--wait") + .arg("--timeout") + .arg("5m") + .arg("--set") + .arg(format!("image={}", assets.image)) + .arg("--set") + .arg(format!("validators.count={validators}")) + .arg("--set") + .arg(format!("executors.count={executors}")) + .arg("--set") + .arg(format!("cfgsync.port={CFGSYNC_PORT}")) + .arg("--set") + .arg(format!("kzg.hostPath={}", assets.kzg_path.display())) + .arg("--set") + .arg(format!("kzg.hostPathType={host_path_type}")) + .arg("-f") + .arg(&assets.values_file) + .arg("--set-file") + .arg(format!("cfgsync.config={}", assets.cfgsync_file.display())) + .arg("--set-file") + .arg(format!( + "scripts.runCfgsyncSh={}", + assets.run_cfgsync_script.display() + )) + .arg("--set-file") + .arg(format!( + "scripts.runNomosNodeSh={}", + assets.run_nomos_node_script.display() + )) + .arg("--set-file") + .arg(format!( + "scripts.runNomosExecutorSh={}", + assets.run_nomos_executor_script.display() + )) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + + if let Ok(root) = workspace_root() { + cmd.current_dir(root); + } + + let command = format!("helm install {release}"); + let output = run_helm_command(cmd, &command).await?; + + if std::env::var("K8S_RUNNER_DEBUG").is_ok() { + println!( + "[k8s-runner] {command} stdout:\n{}", + String::from_utf8_lossy(&output.stdout) + ); + println!( + "[k8s-runner] {command} stderr:\n{}", + String::from_utf8_lossy(&output.stderr) + ); + } + + Ok(()) +} + +pub async fn uninstall_release(release: &str, namespace: &str) -> Result<(), HelmError> { + let mut cmd = Command::new("helm"); + cmd.arg("uninstall") + .arg(release) + .arg("--namespace") + .arg(namespace) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + + println!("[k8s-runner] issuing `helm uninstall {release}` in namespace `{namespace}`"); + + run_helm_command(cmd, &format!("helm uninstall {release}")).await?; + println!( + "[k8s-runner] helm uninstall {release} completed successfully (namespace `{namespace}`)" + ); + Ok(()) +} + +async fn run_helm_command( + 
mut cmd: Command, + command: &str, +) -> Result { + cmd.stdout(Stdio::piped()).stderr(Stdio::piped()); + let output = cmd.output().await.map_err(|source| HelmError::Spawn { + command: command.to_owned(), + source, + })?; + + if output.status.success() { + Ok(output) + } else { + Err(HelmError::Failed { + command: command.to_owned(), + status: output.status.code(), + stdout: String::from_utf8_lossy(&output.stdout).into_owned(), + stderr: String::from_utf8_lossy(&output.stderr).into_owned(), + }) + } +} diff --git a/testing-framework/runners/k8s/src/host.rs b/testing-framework/runners/k8s/src/host.rs new file mode 100644 index 0000000..6bafd52 --- /dev/null +++ b/testing-framework/runners/k8s/src/host.rs @@ -0,0 +1,20 @@ +use std::env; + +const NODE_HOST_ENV: &str = "K8S_RUNNER_NODE_HOST"; +const KUBE_SERVICE_HOST_ENV: &str = "KUBERNETES_SERVICE_HOST"; + +/// Returns the hostname or IP used to reach `NodePorts` exposed by the cluster. +/// Prefers `K8S_RUNNER_NODE_HOST`, then the standard `KUBERNETES_SERVICE_HOST` +/// (e.g. `kubernetes.docker.internal` on Docker Desktop), and finally falls +/// back to `127.0.0.1`. +pub fn node_host() -> String { + if let Ok(host) = env::var(NODE_HOST_ENV) { + return host; + } + if let Ok(host) = env::var(KUBE_SERVICE_HOST_ENV) + && !host.is_empty() + { + return host; + } + "127.0.0.1".to_owned() +} diff --git a/testing-framework/runners/k8s/src/lib.rs b/testing-framework/runners/k8s/src/lib.rs new file mode 100644 index 0000000..a1da317 --- /dev/null +++ b/testing-framework/runners/k8s/src/lib.rs @@ -0,0 +1,9 @@ +mod assets; +mod cleanup; +mod helm; +mod host; +mod logs; +mod runner; +mod wait; + +pub use runner::{K8sRunner, K8sRunnerError}; diff --git a/testing-framework/runners/k8s/src/logs.rs b/testing-framework/runners/k8s/src/logs.rs new file mode 100644 index 0000000..eea910a --- /dev/null +++ b/testing-framework/runners/k8s/src/logs.rs @@ -0,0 +1,44 @@ +use k8s_openapi::api::core::v1::Pod; +use kube::{ + Api, Client, + api::{ListParams, LogParams}, +}; +use tracing::{info, warn}; + +pub async fn dump_namespace_logs(client: &Client, namespace: &str) { + let pod_names = match list_pod_names(client, namespace).await { + Ok(names) => names, + Err(err) => { + warn!("[k8s-runner] failed to list pods in namespace {namespace}: {err}"); + return; + } + }; + + for pod_name in pod_names { + stream_pod_logs(client, namespace, &pod_name).await; + } +} + +async fn list_pod_names(client: &Client, namespace: &str) -> Result, kube::Error> { + let list = Api::::namespaced(client.clone(), namespace) + .list(&ListParams::default()) + .await?; + Ok(list + .into_iter() + .filter_map(|pod| pod.metadata.name) + .collect()) +} + +async fn stream_pod_logs(client: &Client, namespace: &str, pod_name: &str) { + let pods: Api = Api::namespaced(client.clone(), namespace); + let params = LogParams { + follow: false, + tail_lines: Some(500), + ..Default::default() + }; + + match pods.logs(pod_name, ¶ms).await { + Ok(log) => info!("[k8s-runner] pod {pod_name} logs:\n{log}"), + Err(err) => warn!("[k8s-runner] failed to fetch logs for pod {pod_name}: {err}"), + } +} diff --git a/testing-framework/runners/k8s/src/runner.rs b/testing-framework/runners/k8s/src/runner.rs new file mode 100644 index 0000000..c917d7f --- /dev/null +++ b/testing-framework/runners/k8s/src/runner.rs @@ -0,0 +1,519 @@ +use std::env; + +use anyhow::Error; +use async_trait::async_trait; +use kube::Client; +use reqwest::Url; +use testing_framework_core::{ + nodes::ApiClient, + scenario::{ + BlockFeed, 
BlockFeedTask, CleanupGuard, Deployer, Metrics, MetricsError, NodeClients, + RunContext, Runner, Scenario, http_probe::NodeRole, spawn_block_feed, + }, + topology::{GeneratedTopology, ReadinessError}, +}; +use tracing::{error, info}; +use url::ParseError; +use uuid::Uuid; + +use crate::{ + assets::{AssetsError, RunnerAssets, prepare_assets}, + cleanup::RunnerCleanup, + helm::{HelmError, install_release}, + host::node_host, + logs::dump_namespace_logs, + wait::{ClusterPorts, ClusterWaitError, NodeConfigPorts, wait_for_cluster_ready}, +}; + +pub struct K8sRunner { + readiness_checks: bool, +} + +impl Default for K8sRunner { + fn default() -> Self { + Self::new() + } +} + +impl K8sRunner { + #[must_use] + pub const fn new() -> Self { + Self { + readiness_checks: true, + } + } + + #[must_use] + pub const fn with_readiness(mut self, enabled: bool) -> Self { + self.readiness_checks = enabled; + self + } +} + +#[derive(Default)] +struct PortSpecs { + validators: Vec, + executors: Vec, +} + +struct ClusterEnvironment { + client: Client, + namespace: String, + release: String, + cleanup: Option, + validator_api_ports: Vec, + validator_testing_ports: Vec, + executor_api_ports: Vec, + executor_testing_ports: Vec, + prometheus_port: u16, +} + +impl ClusterEnvironment { + fn new( + client: Client, + namespace: String, + release: String, + cleanup: RunnerCleanup, + ports: &ClusterPorts, + ) -> Self { + Self { + client, + namespace, + release, + cleanup: Some(cleanup), + validator_api_ports: ports.validators.iter().map(|ports| ports.api).collect(), + validator_testing_ports: ports.validators.iter().map(|ports| ports.testing).collect(), + executor_api_ports: ports.executors.iter().map(|ports| ports.api).collect(), + executor_testing_ports: ports.executors.iter().map(|ports| ports.testing).collect(), + prometheus_port: ports.prometheus, + } + } + + async fn fail(&mut self, reason: &str) { + error!( + reason = reason, + namespace = %self.namespace, + release = %self.release, + "k8s stack failure; collecting diagnostics" + ); + dump_namespace_logs(&self.client, &self.namespace).await; + if let Some(guard) = self.cleanup.take() { + Box::new(guard).cleanup(); + } + } + + fn into_cleanup(mut self) -> RunnerCleanup { + self.cleanup + .take() + .expect("cleanup guard should be available") + } +} + +#[derive(Debug, thiserror::Error)] +pub enum NodeClientError { + #[error("failed to build {endpoint} client URL for {role} port {port}: {source}")] + Endpoint { + role: NodeRole, + endpoint: &'static str, + port: u16, + #[source] + source: ParseError, + }, +} + +#[derive(Debug, thiserror::Error)] +pub enum RemoteReadinessError { + #[error("failed to build readiness URL for {role} port {port}: {source}")] + Endpoint { + role: NodeRole, + port: u16, + #[source] + source: ParseError, + }, + #[error("remote readiness probe failed: {source}")] + Remote { + #[source] + source: ReadinessError, + }, +} + +fn readiness_urls(ports: &[u16], role: NodeRole) -> Result, RemoteReadinessError> { + ports + .iter() + .copied() + .map(|port| readiness_url(role, port)) + .collect() +} + +fn readiness_url(role: NodeRole, port: u16) -> Result { + cluster_host_url(port).map_err(|source| RemoteReadinessError::Endpoint { role, port, source }) +} + +fn cluster_host_url(port: u16) -> Result { + Url::parse(&format!("http://{}:{port}/", node_host())) +} + +fn metrics_handle_from_port(port: u16) -> Result { + let url = cluster_host_url(port) + .map_err(|err| MetricsError::new(format!("invalid prometheus url: {err}")))?; + 
Metrics::from_prometheus(url) +} + +async fn spawn_block_feed_with( + node_clients: &NodeClients, +) -> Result<(BlockFeed, BlockFeedTask), K8sRunnerError> { + let block_source_client = node_clients + .any_client() + .cloned() + .ok_or(K8sRunnerError::BlockFeedMissing)?; + + spawn_block_feed(block_source_client) + .await + .map_err(|source| K8sRunnerError::BlockFeed { source }) +} + +#[derive(Debug, thiserror::Error)] +pub enum K8sRunnerError { + #[error( + "kubernetes runner requires at least one validator and one executor (validators={validators}, executors={executors})" + )] + UnsupportedTopology { validators: usize, executors: usize }, + #[error("failed to initialise kubernetes client: {source}")] + ClientInit { + #[source] + source: kube::Error, + }, + #[error(transparent)] + Assets(#[from] AssetsError), + #[error(transparent)] + Helm(#[from] HelmError), + #[error(transparent)] + Cluster(#[from] Box), + #[error(transparent)] + Readiness(#[from] RemoteReadinessError), + #[error(transparent)] + NodeClients(#[from] NodeClientError), + #[error(transparent)] + Telemetry(#[from] MetricsError), + #[error("k8s runner requires at least one node client to follow blocks")] + BlockFeedMissing, + #[error("failed to initialize block feed: {source}")] + BlockFeed { + #[source] + source: Error, + }, +} + +#[async_trait] +impl Deployer for K8sRunner { + type Error = K8sRunnerError; + + async fn deploy(&self, scenario: &Scenario) -> Result { + let descriptors = scenario.topology().clone(); + ensure_supported_topology(&descriptors)?; + + let client = Client::try_default() + .await + .map_err(|source| K8sRunnerError::ClientInit { source })?; + info!( + validators = descriptors.validators().len(), + executors = descriptors.executors().len(), + "starting k8s deployment" + ); + + let port_specs = collect_port_specs(&descriptors); + let mut cluster = + Some(setup_cluster(&client, &port_specs, &descriptors, self.readiness_checks).await?); + + info!("building node clients"); + let node_clients = match build_node_clients( + cluster + .as_ref() + .expect("cluster must be available while building clients"), + ) { + Ok(clients) => clients, + Err(err) => { + if let Some(env) = cluster.as_mut() { + env.fail("failed to construct node api clients").await; + } + return Err(err.into()); + } + }; + + let telemetry = match metrics_handle_from_port( + cluster + .as_ref() + .expect("cluster must be available for telemetry") + .prometheus_port, + ) { + Ok(handle) => handle, + Err(err) => { + if let Some(env) = cluster.as_mut() { + env.fail("failed to configure prometheus metrics handle") + .await; + } + return Err(err.into()); + } + }; + let (block_feed, block_feed_guard) = match spawn_block_feed_with(&node_clients).await { + Ok(pair) => pair, + Err(err) => { + if let Some(env) = cluster.as_mut() { + env.fail("failed to initialize block feed").await; + } + return Err(err); + } + }; + let cleanup = cluster + .take() + .expect("cluster should still be available") + .into_cleanup(); + let cleanup_guard: Box = + Box::new(K8sCleanupGuard::new(cleanup, block_feed_guard)); + let context = RunContext::new( + descriptors, + None, + node_clients, + scenario.duration(), + telemetry, + block_feed, + None, + ); + Ok(Runner::new(context, Some(cleanup_guard))) + } +} + +impl From for K8sRunnerError { + fn from(value: ClusterWaitError) -> Self { + Self::Cluster(Box::new(value)) + } +} + +fn ensure_supported_topology(descriptors: &GeneratedTopology) -> Result<(), K8sRunnerError> { + let validators = descriptors.validators().len(); + let 
executors = descriptors.executors().len(); + if validators == 0 || executors == 0 { + return Err(K8sRunnerError::UnsupportedTopology { + validators, + executors, + }); + } + Ok(()) +} + +fn collect_port_specs(descriptors: &GeneratedTopology) -> PortSpecs { + let validators = descriptors + .validators() + .iter() + .map(|node| NodeConfigPorts { + api: node.general.api_config.address.port(), + testing: node.general.api_config.testing_http_address.port(), + }) + .collect(); + let executors = descriptors + .executors() + .iter() + .map(|node| NodeConfigPorts { + api: node.general.api_config.address.port(), + testing: node.general.api_config.testing_http_address.port(), + }) + .collect(); + + PortSpecs { + validators, + executors, + } +} + +fn build_node_clients(cluster: &ClusterEnvironment) -> Result { + let validators = cluster + .validator_api_ports + .iter() + .copied() + .zip(cluster.validator_testing_ports.iter().copied()) + .map(|(api_port, testing_port)| { + api_client_from_ports(NodeRole::Validator, api_port, testing_port) + }) + .collect::, _>>()?; + let executors = cluster + .executor_api_ports + .iter() + .copied() + .zip(cluster.executor_testing_ports.iter().copied()) + .map(|(api_port, testing_port)| { + api_client_from_ports(NodeRole::Executor, api_port, testing_port) + }) + .collect::, _>>()?; + + Ok(NodeClients::new(validators, executors)) +} + +fn api_client_from_ports( + role: NodeRole, + api_port: u16, + testing_port: u16, +) -> Result { + let base_endpoint = cluster_host_url(api_port).map_err(|source| NodeClientError::Endpoint { + role, + endpoint: "api", + port: api_port, + source, + })?; + let testing_endpoint = + Some( + cluster_host_url(testing_port).map_err(|source| NodeClientError::Endpoint { + role, + endpoint: "testing", + port: testing_port, + source, + })?, + ); + Ok(ApiClient::from_urls(base_endpoint, testing_endpoint)) +} + +async fn setup_cluster( + client: &Client, + specs: &PortSpecs, + descriptors: &GeneratedTopology, + readiness_checks: bool, +) -> Result { + let assets = prepare_assets(descriptors)?; + let validators = descriptors.validators().len(); + let executors = descriptors.executors().len(); + + let (namespace, release) = cluster_identifiers(); + + let mut cleanup_guard = + Some(install_stack(client, &assets, &namespace, &release, validators, executors).await?); + + let cluster_ports = + wait_for_ports_or_cleanup(client, &namespace, &release, specs, &mut cleanup_guard).await?; + + info!( + prometheus_port = cluster_ports.prometheus, + "discovered prometheus endpoint" + ); + + let environment = ClusterEnvironment::new( + client.clone(), + namespace, + release, + cleanup_guard + .take() + .expect("cleanup guard must exist after successful cluster startup"), + &cluster_ports, + ); + + if readiness_checks { + ensure_cluster_readiness(descriptors, &environment).await?; + } + + Ok(environment) +} + +fn cluster_identifiers() -> (String, String) { + let run_id = Uuid::new_v4().simple().to_string(); + let namespace = format!("nomos-k8s-{run_id}"); + (namespace.clone(), namespace) +} + +async fn install_stack( + client: &Client, + assets: &RunnerAssets, + namespace: &str, + release: &str, + validators: usize, + executors: usize, +) -> Result { + info!( + release = %release, + namespace = %namespace, + "installing helm release" + ); + install_release(assets, release, namespace, validators, executors).await?; + info!(release = %release, "helm install succeeded"); + + let preserve = env::var("K8S_RUNNER_PRESERVE").is_ok(); + Ok(RunnerCleanup::new( + 
client.clone(), + namespace.to_owned(), + release.to_owned(), + preserve, + )) +} + +async fn wait_for_ports_or_cleanup( + client: &Client, + namespace: &str, + release: &str, + specs: &PortSpecs, + cleanup_guard: &mut Option, +) -> Result { + match wait_for_cluster_ready( + client, + namespace, + release, + &specs.validators, + &specs.executors, + ) + .await + { + Ok(ports) => Ok(ports), + Err(err) => { + cleanup_pending(client, namespace, cleanup_guard).await; + Err(err.into()) + } + } +} + +async fn cleanup_pending(client: &Client, namespace: &str, guard: &mut Option) { + dump_namespace_logs(client, namespace).await; + if let Some(guard) = guard.take() { + Box::new(guard).cleanup(); + } +} + +async fn ensure_cluster_readiness( + descriptors: &GeneratedTopology, + cluster: &ClusterEnvironment, +) -> Result<(), RemoteReadinessError> { + let validator_urls = readiness_urls(&cluster.validator_api_ports, NodeRole::Validator)?; + let executor_urls = readiness_urls(&cluster.executor_api_ports, NodeRole::Executor)?; + let validator_membership_urls = + readiness_urls(&cluster.validator_testing_ports, NodeRole::Validator)?; + let executor_membership_urls = + readiness_urls(&cluster.executor_testing_ports, NodeRole::Executor)?; + + descriptors + .wait_remote_readiness( + &validator_urls, + &executor_urls, + Some(&validator_membership_urls), + Some(&executor_membership_urls), + ) + .await + .map_err(|source| RemoteReadinessError::Remote { source }) +} + +struct K8sCleanupGuard { + cleanup: RunnerCleanup, + block_feed: Option, +} + +impl K8sCleanupGuard { + const fn new(cleanup: RunnerCleanup, block_feed: BlockFeedTask) -> Self { + Self { + cleanup, + block_feed: Some(block_feed), + } + } +} + +impl CleanupGuard for K8sCleanupGuard { + fn cleanup(mut self: Box) { + if let Some(block_feed) = self.block_feed.take() { + CleanupGuard::cleanup(Box::new(block_feed)); + } + CleanupGuard::cleanup(Box::new(self.cleanup)); + } +} diff --git a/testing-framework/runners/k8s/src/wait.rs b/testing-framework/runners/k8s/src/wait.rs new file mode 100644 index 0000000..3453198 --- /dev/null +++ b/testing-framework/runners/k8s/src/wait.rs @@ -0,0 +1,255 @@ +use std::time::Duration; + +use k8s_openapi::api::{apps::v1::Deployment, core::v1::Service}; +use kube::{Api, Client, Error as KubeError}; +use testing_framework_core::scenario::http_probe::{self, HttpReadinessError, NodeRole}; +use thiserror::Error; +use tokio::time::sleep; + +use crate::host::node_host; + +const DEPLOYMENT_TIMEOUT: Duration = Duration::from_secs(180); +const PROMETHEUS_HTTP_PORT: u16 = 9090; +const PROMETHEUS_SERVICE_NAME: &str = "prometheus"; + +#[derive(Clone, Copy)] +pub struct NodeConfigPorts { + pub api: u16, + pub testing: u16, +} + +#[derive(Clone, Copy)] +pub struct NodePortAllocation { + pub api: u16, + pub testing: u16, +} + +pub struct ClusterPorts { + pub validators: Vec, + pub executors: Vec, + pub prometheus: u16, +} + +#[derive(Debug, Error)] +pub enum ClusterWaitError { + #[error("deployment {name} in namespace {namespace} did not become ready within {timeout:?}")] + DeploymentTimeout { + name: String, + namespace: String, + timeout: Duration, + }, + #[error("failed to fetch deployment {name}: {source}")] + DeploymentFetch { + name: String, + #[source] + source: KubeError, + }, + #[error("failed to fetch service {service}: {source}")] + ServiceFetch { + service: String, + #[source] + source: KubeError, + }, + #[error("service {service} did not allocate a node port for {port}")] + NodePortUnavailable { service: String, port: u16 
}, + #[error("cluster must have at least one validator")] + MissingValidator, + #[error("timeout waiting for {role} HTTP endpoint on port {port} after {timeout:?}")] + NodeHttpTimeout { + role: NodeRole, + port: u16, + timeout: Duration, + }, + #[error("timeout waiting for prometheus readiness on NodePort {port}")] + PrometheusTimeout { port: u16 }, +} + +pub async fn wait_for_deployment_ready( + client: &Client, + namespace: &str, + name: &str, + timeout: Duration, +) -> Result<(), ClusterWaitError> { + let mut elapsed = Duration::ZERO; + let interval = Duration::from_secs(2); + + while elapsed <= timeout { + match Api::::namespaced(client.clone(), namespace) + .get(name) + .await + { + Ok(deployment) => { + let desired = deployment + .spec + .as_ref() + .and_then(|spec| spec.replicas) + .unwrap_or(1); + let ready = deployment + .status + .as_ref() + .and_then(|status| status.ready_replicas) + .unwrap_or(0); + if ready >= desired { + return Ok(()); + } + } + Err(err) => { + return Err(ClusterWaitError::DeploymentFetch { + name: name.to_owned(), + source: err, + }); + } + } + + sleep(interval).await; + elapsed += interval; + } + + Err(ClusterWaitError::DeploymentTimeout { + name: name.to_owned(), + namespace: namespace.to_owned(), + timeout, + }) +} + +pub async fn find_node_port( + client: &Client, + namespace: &str, + service_name: &str, + service_port: u16, +) -> Result { + let interval = Duration::from_secs(1); + for _ in 0..120 { + match Api::::namespaced(client.clone(), namespace) + .get(service_name) + .await + { + Ok(service) => { + if let Some(spec) = service.spec.clone() + && let Some(ports) = spec.ports + { + for port in ports { + if port.port == i32::from(service_port) + && let Some(node_port) = port.node_port + { + return Ok(node_port as u16); + } + } + } + } + Err(err) => { + return Err(ClusterWaitError::ServiceFetch { + service: service_name.to_owned(), + source: err, + }); + } + } + sleep(interval).await; + } + + Err(ClusterWaitError::NodePortUnavailable { + service: service_name.to_owned(), + port: service_port, + }) +} + +pub async fn wait_for_cluster_ready( + client: &Client, + namespace: &str, + release: &str, + validator_ports: &[NodeConfigPorts], + executor_ports: &[NodeConfigPorts], +) -> Result { + if validator_ports.is_empty() { + return Err(ClusterWaitError::MissingValidator); + } + + let mut validator_allocations = Vec::with_capacity(validator_ports.len()); + + for (index, ports) in validator_ports.iter().enumerate() { + let name = format!("{release}-validator-{index}"); + wait_for_deployment_ready(client, namespace, &name, DEPLOYMENT_TIMEOUT).await?; + let api_port = find_node_port(client, namespace, &name, ports.api).await?; + let testing_port = find_node_port(client, namespace, &name, ports.testing).await?; + validator_allocations.push(NodePortAllocation { + api: api_port, + testing: testing_port, + }); + } + + let validator_api_ports: Vec = validator_allocations + .iter() + .map(|ports| ports.api) + .collect(); + wait_for_node_http(&validator_api_ports, NodeRole::Validator).await?; + + let mut executor_allocations = Vec::with_capacity(executor_ports.len()); + for (index, ports) in executor_ports.iter().enumerate() { + let name = format!("{release}-executor-{index}"); + wait_for_deployment_ready(client, namespace, &name, DEPLOYMENT_TIMEOUT).await?; + let api_port = find_node_port(client, namespace, &name, ports.api).await?; + let testing_port = find_node_port(client, namespace, &name, ports.testing).await?; + executor_allocations.push(NodePortAllocation { + 
api: api_port, + testing: testing_port, + }); + } + + if !executor_allocations.is_empty() { + let executor_api_ports: Vec = + executor_allocations.iter().map(|ports| ports.api).collect(); + wait_for_node_http(&executor_api_ports, NodeRole::Executor).await?; + } + + let prometheus_port = find_node_port( + client, + namespace, + PROMETHEUS_SERVICE_NAME, + PROMETHEUS_HTTP_PORT, + ) + .await?; + wait_for_prometheus_http(prometheus_port).await?; + + Ok(ClusterPorts { + validators: validator_allocations, + executors: executor_allocations, + prometheus: prometheus_port, + }) +} + +async fn wait_for_node_http(ports: &[u16], role: NodeRole) -> Result<(), ClusterWaitError> { + let host = node_host(); + http_probe::wait_for_http_ports_with_host( + ports, + role, + &host, + Duration::from_secs(240), + Duration::from_secs(1), + ) + .await + .map_err(map_http_error) +} + +const fn map_http_error(error: HttpReadinessError) -> ClusterWaitError { + ClusterWaitError::NodeHttpTimeout { + role: error.role(), + port: error.port(), + timeout: error.timeout(), + } +} + +pub async fn wait_for_prometheus_http(port: u16) -> Result<(), ClusterWaitError> { + let client = reqwest::Client::new(); + let url = format!("http://{}:{port}/-/ready", node_host()); + + for _ in 0..240 { + if let Ok(resp) = client.get(&url).send().await + && resp.status().is_success() + { + return Ok(()); + } + sleep(Duration::from_secs(1)).await; + } + + Err(ClusterWaitError::PrometheusTimeout { port }) +} diff --git a/testing-framework/runners/local/Cargo.toml b/testing-framework/runners/local/Cargo.toml new file mode 100644 index 0000000..808b240 --- /dev/null +++ b/testing-framework/runners/local/Cargo.toml @@ -0,0 +1,18 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +name = "testing-framework-runner-local" +readme.workspace = true +repository.workspace = true +version = "0.1.0" + +[lints] +workspace = true + +[dependencies] +async-trait = "0.1" +testing-framework-core = { path = "../../core" } +thiserror = { workspace = true } diff --git a/testing-framework/runners/local/src/lib.rs b/testing-framework/runners/local/src/lib.rs new file mode 100644 index 0000000..c304ecb --- /dev/null +++ b/testing-framework/runners/local/src/lib.rs @@ -0,0 +1,3 @@ +mod runner; + +pub use runner::{LocalDeployer, LocalDeployerError}; diff --git a/testing-framework/runners/local/src/runner.rs b/testing-framework/runners/local/src/runner.rs new file mode 100644 index 0000000..70a07cb --- /dev/null +++ b/testing-framework/runners/local/src/runner.rs @@ -0,0 +1,133 @@ +use async_trait::async_trait; +use testing_framework_core::{ + scenario::{ + BlockFeed, BlockFeedTask, Deployer, DynError, Metrics, NodeClients, RunContext, Runner, + Scenario, ScenarioError, spawn_block_feed, + }, + topology::{ReadinessError, Topology}, +}; +use thiserror::Error; + +/// Spawns validators and executors as local processes, reusing the existing +/// integration harness. 
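+///
+/// A minimal driving sketch (assumes a `Scenario<()>` value named `scenario` has been
+/// built elsewhere; error handling elided; uses only the constructors and the
+/// `Deployer::deploy` implementation shown below):
+///
+/// ```ignore
+/// let deployer = LocalDeployer::new().with_membership_check(false);
+/// let runner = deployer.deploy(&scenario).await?;
+/// ```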
+#[derive(Clone)]
+pub struct LocalDeployer {
+    membership_check: bool,
+}
+
+#[derive(Debug, Error)]
+pub enum LocalDeployerError {
+    #[error("readiness probe failed: {source}")]
+    ReadinessFailed {
+        #[source]
+        source: ReadinessError,
+    },
+    #[error("workload failed: {source}")]
+    WorkloadFailed {
+        #[source]
+        source: DynError,
+    },
+    #[error("expectations failed: {source}")]
+    ExpectationsFailed {
+        #[source]
+        source: DynError,
+    },
+}
+
+impl From<ScenarioError> for LocalDeployerError {
+    fn from(value: ScenarioError) -> Self {
+        match value {
+            ScenarioError::Workload(source) => Self::WorkloadFailed { source },
+            ScenarioError::ExpectationCapture(source) | ScenarioError::Expectations(source) => {
+                Self::ExpectationsFailed { source }
+            }
+        }
+    }
+}
+
+#[async_trait]
+impl Deployer<()> for LocalDeployer {
+    type Error = LocalDeployerError;
+
+    async fn deploy(&self, scenario: &Scenario<()>) -> Result<Runner, Self::Error> {
+        let topology = Self::prepare_topology(scenario, self.membership_check).await?;
+        let node_clients = NodeClients::from_topology(scenario.topology(), &topology);
+
+        let (block_feed, block_feed_guard) = spawn_block_feed_with(&node_clients).await?;
+
+        let context = RunContext::new(
+            scenario.topology().clone(),
+            Some(topology),
+            node_clients,
+            scenario.duration(),
+            Metrics::empty(),
+            block_feed,
+            None,
+        );
+
+        Ok(Runner::new(context, Some(Box::new(block_feed_guard))))
+    }
+}
+
+impl LocalDeployer {
+    #[must_use]
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    #[must_use]
+    pub const fn with_membership_check(mut self, enabled: bool) -> Self {
+        self.membership_check = enabled;
+        self
+    }
+
+    async fn prepare_topology(
+        scenario: &Scenario<()>,
+        membership_check: bool,
+    ) -> Result<Topology, LocalDeployerError> {
+        let descriptors = scenario.topology();
+        let topology = descriptors.clone().spawn_local().await;
+
+        let skip_membership = !membership_check;
+        if let Err(source) = wait_for_readiness(&topology, skip_membership).await {
+            return Err(LocalDeployerError::ReadinessFailed { source });
+        }
+
+        Ok(topology)
+    }
+}
+
+impl Default for LocalDeployer {
+    fn default() -> Self {
+        Self {
+            membership_check: true,
+        }
+    }
+}
+
+async fn wait_for_readiness(
+    topology: &Topology,
+    skip_membership: bool,
+) -> Result<(), ReadinessError> {
+    topology.wait_network_ready().await?;
+    if !skip_membership {
+        topology.wait_membership_ready().await?;
+    }
+    topology.wait_da_balancer_ready().await
+}
+
+async fn spawn_block_feed_with(
+    node_clients: &NodeClients,
+) -> Result<(BlockFeed, BlockFeedTask), LocalDeployerError> {
+    let block_source_client = node_clients.random_validator().cloned().ok_or_else(|| {
+        LocalDeployerError::WorkloadFailed {
+            source: "block feed requires at least one validator".into(),
+        }
+    })?;
+
+    spawn_block_feed(block_source_client)
+        .await
+        .map_err(|source| LocalDeployerError::WorkloadFailed {
+            source: source.into(),
+        })
+}
diff --git a/testing-framework/workflows/Cargo.toml b/testing-framework/workflows/Cargo.toml
new file mode 100644
index 0000000..14b13b8
--- /dev/null
+++ b/testing-framework/workflows/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+categories.workspace = true
+description.workspace = true
+edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+name = "testing-framework-workflows"
+readme.workspace = true
+repository.workspace = true
+version = "0.1.0"
+
+[lints]
+workspace = true
+
+[dependencies]
+async-trait = "0.1"
+ed25519-dalek = { version = "2.2.0", features = ["rand_core", "serde"] }
+executor-http-client = { workspace = true }
+integration-configs = 
{ workspace = true } +nomos-core = { workspace = true } +rand = { workspace = true } +testing-framework-core = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["macros", "net", "rt-multi-thread", "time"] } +tracing = { workspace = true } +zksign = { workspace = true } diff --git a/testing-framework/workflows/src/builder/mod.rs b/testing-framework/workflows/src/builder/mod.rs new file mode 100644 index 0000000..ebb212d --- /dev/null +++ b/testing-framework/workflows/src/builder/mod.rs @@ -0,0 +1,298 @@ +use std::{ + num::{NonZeroU64, NonZeroUsize}, + time::Duration, +}; + +use integration_configs::topology::configs::network::Libp2pNetworkLayout; +use testing_framework_core::{ + scenario::{Builder as CoreScenarioBuilder, NodeControlCapability}, + topology::configs::wallet::WalletConfig, +}; + +use crate::{ + expectations::ConsensusLiveness, + workloads::{chaos::RandomRestartWorkload, da, transaction}, +}; + +macro_rules! non_zero_rate_fn { + ($name:ident, $message:literal) => { + const fn $name(rate: u64) -> NonZeroU64 { + match NonZeroU64::new(rate) { + Some(value) => value, + None => panic!($message), + } + } + }; +} + +non_zero_rate_fn!( + transaction_rate_checked, + "transaction rate must be non-zero" +); +non_zero_rate_fn!(channel_rate_checked, "channel rate must be non-zero"); +non_zero_rate_fn!(blob_rate_checked, "blob rate must be non-zero"); + +pub trait ScenarioBuilderExt: Sized { + fn topology(self) -> TopologyConfigurator; + fn transactions(self) -> TransactionFlowBuilder; + fn da(self) -> DataAvailabilityFlowBuilder; + #[must_use] + fn expect_consensus_liveness(self) -> Self; + #[must_use] + fn initialize_wallet(self, total_funds: u64, users: usize) -> Self; +} + +impl ScenarioBuilderExt for CoreScenarioBuilder { + fn topology(self) -> TopologyConfigurator { + TopologyConfigurator { builder: self } + } + + fn transactions(self) -> TransactionFlowBuilder { + TransactionFlowBuilder::new(self) + } + + fn da(self) -> DataAvailabilityFlowBuilder { + DataAvailabilityFlowBuilder::new(self) + } + + fn expect_consensus_liveness(self) -> Self { + self.with_expectation(ConsensusLiveness::default()) + } + + fn initialize_wallet(self, total_funds: u64, users: usize) -> Self { + let user_count = NonZeroUsize::new(users).expect("wallet user count must be non-zero"); + let wallet = WalletConfig::uniform(total_funds, user_count); + self.with_wallet_config(wallet) + } +} + +pub struct TopologyConfigurator { + builder: CoreScenarioBuilder, +} + +impl TopologyConfigurator { + #[must_use] + pub fn validators(mut self, count: usize) -> Self { + self.builder = self + .builder + .map_topology(|topology| topology.with_validator_count(count)); + self + } + + #[must_use] + pub fn executors(mut self, count: usize) -> Self { + self.builder = self + .builder + .map_topology(|topology| topology.with_executor_count(count)); + self + } + + #[must_use] + pub fn network_star(mut self) -> Self { + self.builder = self + .builder + .map_topology(|topology| topology.with_network_layout(Libp2pNetworkLayout::Star)); + self + } + + #[must_use] + pub fn apply(self) -> CoreScenarioBuilder { + self.builder + } +} + +pub struct TransactionFlowBuilder { + builder: CoreScenarioBuilder, + rate: NonZeroU64, + users: Option, +} + +impl TransactionFlowBuilder { + const fn default_rate() -> NonZeroU64 { + transaction_rate_checked(1) + } + + const fn new(builder: CoreScenarioBuilder) -> Self { + Self { + builder, + rate: Self::default_rate(), + users: None, + } + } + + #[must_use] + 
pub const fn rate(mut self, rate: u64) -> Self { + self.rate = transaction_rate_checked(rate); + self + } + + #[must_use] + pub const fn rate_per_block(mut self, rate: NonZeroU64) -> Self { + self.rate = rate; + self + } + + #[must_use] + pub const fn users(mut self, users: usize) -> Self { + match NonZeroUsize::new(users) { + Some(value) => self.users = Some(value), + None => panic!("transaction user count must be non-zero"), + } + self + } + + #[must_use] + pub fn apply(mut self) -> CoreScenarioBuilder { + let workload = transaction::Workload::with_rate(self.rate.get()) + .expect("transaction rate must be non-zero") + .with_user_limit(self.users); + self.builder = self.builder.with_workload(workload); + self.builder + } +} + +pub struct DataAvailabilityFlowBuilder { + builder: CoreScenarioBuilder, + channel_rate: NonZeroU64, + blob_rate: NonZeroU64, +} + +impl DataAvailabilityFlowBuilder { + const fn default_channel_rate() -> NonZeroU64 { + channel_rate_checked(1) + } + + const fn default_blob_rate() -> NonZeroU64 { + blob_rate_checked(1) + } + + const fn new(builder: CoreScenarioBuilder) -> Self { + Self { + builder, + channel_rate: Self::default_channel_rate(), + blob_rate: Self::default_blob_rate(), + } + } + + #[must_use] + pub const fn channel_rate(mut self, rate: u64) -> Self { + self.channel_rate = channel_rate_checked(rate); + self + } + + #[must_use] + pub const fn channel_rate_per_block(mut self, rate: NonZeroU64) -> Self { + self.channel_rate = rate; + self + } + + #[must_use] + pub const fn blob_rate(mut self, rate: u64) -> Self { + self.blob_rate = blob_rate_checked(rate); + self + } + + #[must_use] + pub const fn blob_rate_per_block(mut self, rate: NonZeroU64) -> Self { + self.blob_rate = rate; + self + } + + #[must_use] + pub fn apply(mut self) -> CoreScenarioBuilder { + let count = (self.channel_rate.get() * self.blob_rate.get()) as usize; + let workload = da::Workload::with_channel_count(count.max(1)); + self.builder = self.builder.with_workload(workload); + self.builder + } +} + +pub trait ChaosBuilderExt: Sized { + fn chaos_random_restart(self) -> ChaosRestartBuilder; +} + +impl ChaosBuilderExt for CoreScenarioBuilder { + fn chaos_random_restart(self) -> ChaosRestartBuilder { + ChaosRestartBuilder { + builder: self, + min_delay: Duration::from_secs(10), + max_delay: Duration::from_secs(30), + target_cooldown: Duration::from_secs(60), + include_validators: true, + include_executors: true, + } + } +} + +pub struct ChaosRestartBuilder { + builder: CoreScenarioBuilder, + min_delay: Duration, + max_delay: Duration, + target_cooldown: Duration, + include_validators: bool, + include_executors: bool, +} + +impl ChaosRestartBuilder { + #[must_use] + pub fn min_delay(mut self, delay: Duration) -> Self { + assert!(!delay.is_zero(), "chaos restart min delay must be non-zero"); + self.min_delay = delay; + self + } + + #[must_use] + pub fn max_delay(mut self, delay: Duration) -> Self { + assert!(!delay.is_zero(), "chaos restart max delay must be non-zero"); + self.max_delay = delay; + self + } + + #[must_use] + pub fn target_cooldown(mut self, cooldown: Duration) -> Self { + assert!( + !cooldown.is_zero(), + "chaos restart target cooldown must be non-zero" + ); + self.target_cooldown = cooldown; + self + } + + #[must_use] + pub const fn include_validators(mut self, enabled: bool) -> Self { + self.include_validators = enabled; + self + } + + #[must_use] + pub const fn include_executors(mut self, enabled: bool) -> Self { + self.include_executors = enabled; + self + } + + #[must_use] + 
pub fn apply(mut self) -> CoreScenarioBuilder { + assert!( + self.min_delay <= self.max_delay, + "chaos restart min delay must not exceed max delay" + ); + assert!( + self.target_cooldown >= self.min_delay, + "chaos restart target cooldown must be >= min delay" + ); + assert!( + self.include_validators || self.include_executors, + "chaos restart requires at least one node group" + ); + + let workload = RandomRestartWorkload::new( + self.min_delay, + self.max_delay, + self.target_cooldown, + self.include_validators, + self.include_executors, + ); + self.builder = self.builder.with_workload(workload); + self.builder + } +} diff --git a/testing-framework/workflows/src/expectations/consensus_liveness.rs b/testing-framework/workflows/src/expectations/consensus_liveness.rs new file mode 100644 index 0000000..cef48c1 --- /dev/null +++ b/testing-framework/workflows/src/expectations/consensus_liveness.rs @@ -0,0 +1,220 @@ +use std::time::Duration; + +use async_trait::async_trait; +use testing_framework_core::scenario::{DynError, Expectation, RunContext}; +use thiserror::Error; +use tokio::time::sleep; + +#[derive(Clone, Copy, Debug)] +pub struct ConsensusLiveness { + lag_allowance: u64, +} + +impl Default for ConsensusLiveness { + fn default() -> Self { + Self { + lag_allowance: LAG_ALLOWANCE, + } + } +} + +const LAG_ALLOWANCE: u64 = 2; +const MIN_PROGRESS_BLOCKS: u64 = 5; +const REQUEST_RETRIES: usize = 5; +const REQUEST_RETRY_DELAY: Duration = Duration::from_secs(2); + +#[async_trait] +impl Expectation for ConsensusLiveness { + fn name(&self) -> &'static str { + "consensus_liveness" + } + + async fn evaluate(&mut self, ctx: &RunContext) -> Result<(), DynError> { + Self::ensure_participants(ctx)?; + let target_hint = Self::target_blocks(ctx); + let check = Self::collect_results(ctx).await; + (*self).report(target_hint, check) + } +} + +const fn consensus_target_blocks(ctx: &RunContext) -> u64 { + ctx.expected_blocks() +} + +#[derive(Debug, Error)] +enum ConsensusLivenessIssue { + #[error("{node} height {height} below target {target}")] + HeightBelowTarget { + node: String, + height: u64, + target: u64, + }, + #[error("{node} consensus_info failed: {source}")] + RequestFailed { + node: String, + #[source] + source: DynError, + }, +} + +#[derive(Debug, Error)] +enum ConsensusLivenessError { + #[error("consensus liveness requires at least one validator or executor")] + MissingParticipants, + #[error("consensus liveness violated (target={target}):\n{details}")] + Violations { + target: u64, + #[source] + details: ViolationIssues, + }, +} + +#[derive(Debug, Error)] +#[error("{message}")] +struct ViolationIssues { + issues: Vec, + message: String, +} + +impl ConsensusLiveness { + const fn target_blocks(ctx: &RunContext) -> u64 { + consensus_target_blocks(ctx) + } + + fn ensure_participants(ctx: &RunContext) -> Result<(), DynError> { + if ctx.node_clients().all_clients().count() == 0 { + Err(Box::new(ConsensusLivenessError::MissingParticipants)) + } else { + Ok(()) + } + } + + async fn collect_results(ctx: &RunContext) -> LivenessCheck { + let participant_count = ctx.node_clients().all_clients().count().max(1); + let max_attempts = participant_count * REQUEST_RETRIES; + let mut samples = Vec::with_capacity(participant_count); + let mut issues = Vec::new(); + + for attempt in 0..max_attempts { + match Self::fetch_cluster_height(ctx).await { + Ok(height) => { + samples.push(NodeSample { + label: format!("sample-{attempt}"), + height, + }); + if samples.len() >= participant_count { + break; + } + } + 
Err(err) => issues.push(ConsensusLivenessIssue::RequestFailed {
+                    node: format!("sample-{attempt}"),
+                    source: err,
+                }),
+            }
+
+            if samples.len() < participant_count {
+                sleep(REQUEST_RETRY_DELAY).await;
+            }
+        }
+
+        LivenessCheck { samples, issues }
+    }
+
+    async fn fetch_cluster_height(ctx: &RunContext) -> Result<u64, DynError> {
+        ctx.cluster_client()
+            .try_all_clients(|client| {
+                Box::pin(async move {
+                    client
+                        .consensus_info()
+                        .await
+                        .map(|info| info.height)
+                        .map_err(|err| -> DynError { err.into() })
+                })
+            })
+            .await
+    }
+
+    #[must_use]
+    pub const fn with_lag_allowance(mut self, lag_allowance: u64) -> Self {
+        self.lag_allowance = lag_allowance;
+        self
+    }
+
+    fn report(self, target_hint: u64, mut check: LivenessCheck) -> Result<(), DynError> {
+        if check.samples.is_empty() {
+            return Err(Box::new(ConsensusLivenessError::MissingParticipants));
+        }
+
+        let max_height = check
+            .samples
+            .iter()
+            .map(|sample| sample.height)
+            .max()
+            .unwrap_or(0);
+
+        let mut target = target_hint;
+        if target == 0 || target > max_height {
+            target = max_height;
+        }
+
+        if max_height < MIN_PROGRESS_BLOCKS {
+            check
+                .issues
+                .push(ConsensusLivenessIssue::HeightBelowTarget {
+                    node: "network".to_owned(),
+                    height: max_height,
+                    target: MIN_PROGRESS_BLOCKS,
+                });
+        }
+
+        for sample in &check.samples {
+            if sample.height + self.lag_allowance < target {
+                check
+                    .issues
+                    .push(ConsensusLivenessIssue::HeightBelowTarget {
+                        node: sample.label.clone(),
+                        height: sample.height,
+                        target,
+                    });
+            }
+        }
+
+        if check.issues.is_empty() {
+            tracing::info!(
+                target,
+                heights = ?check.samples.iter().map(|s| s.height).collect::<Vec<_>>(),
+                "consensus liveness expectation satisfied"
+            );
+            Ok(())
+        } else {
+            Err(Box::new(ConsensusLivenessError::Violations {
+                target,
+                details: check.issues.into(),
+            }))
+        }
+    }
+}
+
+struct NodeSample {
+    label: String,
+    height: u64,
+}
+
+struct LivenessCheck {
+    samples: Vec<NodeSample>,
+    issues: Vec<ConsensusLivenessIssue>,
+}
+
+impl From<Vec<ConsensusLivenessIssue>> for ViolationIssues {
+    fn from(issues: Vec<ConsensusLivenessIssue>) -> Self {
+        let mut message = String::new();
+        for issue in &issues {
+            if !message.is_empty() {
+                message.push('\n');
+            }
+            message.push_str("- ");
+            message.push_str(&issue.to_string());
+        }
+        Self { issues, message }
+    }
+}
diff --git a/testing-framework/workflows/src/expectations/mod.rs b/testing-framework/workflows/src/expectations/mod.rs
new file mode 100644
index 0000000..e17d8ca
--- /dev/null
+++ b/testing-framework/workflows/src/expectations/mod.rs
@@ -0,0 +1,3 @@
+mod consensus_liveness;
+
+pub use consensus_liveness::ConsensusLiveness;
diff --git a/testing-framework/workflows/src/lib.rs b/testing-framework/workflows/src/lib.rs
new file mode 100644
index 0000000..40fc535
--- /dev/null
+++ b/testing-framework/workflows/src/lib.rs
@@ -0,0 +1,8 @@
+pub mod builder;
+pub mod expectations;
+pub mod util;
+pub mod workloads;
+
+pub use builder::{ChaosBuilderExt, ScenarioBuilderExt};
+pub use expectations::ConsensusLiveness;
+pub use workloads::transaction::TxInclusionExpectation;
diff --git a/testing-framework/workflows/src/util/mod.rs b/testing-framework/workflows/src/util/mod.rs
new file mode 100644
index 0000000..d7c3294
--- /dev/null
+++ b/testing-framework/workflows/src/util/mod.rs
@@ -0,0 +1 @@
+pub mod tx;
diff --git a/testing-framework/workflows/src/util/tx.rs b/testing-framework/workflows/src/util/tx.rs
new file mode 100644
index 0000000..648ba57
--- /dev/null
+++ b/testing-framework/workflows/src/util/tx.rs
@@ -0,0 +1,37 @@
+use ed25519_dalek::{Signer as _, SigningKey};
+use nomos_core::mantle::{
+    MantleTx, Op, 
OpProof, SignedMantleTx, Transaction as _, + ledger::Tx as LedgerTx, + ops::channel::{ChannelId, MsgId, inscribe::InscriptionOp}, +}; +use zksign::SecretKey; + +#[must_use] +pub fn create_inscription_transaction_with_id(id: ChannelId) -> SignedMantleTx { + let signing_key = SigningKey::from_bytes(&[0u8; 32]); + let signer = signing_key.verifying_key(); + + let inscription_op = InscriptionOp { + channel_id: id, + inscription: format!("Test channel inscription {id:?}").into_bytes(), + parent: MsgId::root(), + signer, + }; + + let mantle_tx = MantleTx { + ops: vec![Op::ChannelInscribe(inscription_op)], + ledger_tx: LedgerTx::new(vec![], vec![]), + storage_gas_price: 0, + execution_gas_price: 0, + }; + + let tx_hash = mantle_tx.hash(); + let signature = signing_key.sign(&tx_hash.as_signing_bytes()); + + SignedMantleTx::new( + mantle_tx, + vec![OpProof::Ed25519Sig(signature)], + SecretKey::multi_sign(&[], tx_hash.as_ref()).expect("zk signature generation"), + ) + .expect("valid transaction") +} diff --git a/testing-framework/workflows/src/workloads/chaos.rs b/testing-framework/workflows/src/workloads/chaos.rs new file mode 100644 index 0000000..72c3195 --- /dev/null +++ b/testing-framework/workflows/src/workloads/chaos.rs @@ -0,0 +1,160 @@ +use std::{collections::HashMap, time::Duration}; + +use async_trait::async_trait; +use rand::{Rng as _, seq::SliceRandom as _, thread_rng}; +use testing_framework_core::scenario::{DynError, RunContext, Workload}; +use tokio::time::{Instant, sleep}; +use tracing::info; + +pub struct RandomRestartWorkload { + min_delay: Duration, + max_delay: Duration, + target_cooldown: Duration, + include_validators: bool, + include_executors: bool, +} + +impl RandomRestartWorkload { + #[must_use] + pub const fn new( + min_delay: Duration, + max_delay: Duration, + target_cooldown: Duration, + include_validators: bool, + include_executors: bool, + ) -> Self { + Self { + min_delay, + max_delay, + target_cooldown, + include_validators, + include_executors, + } + } + + fn targets(&self, ctx: &RunContext) -> Vec { + let mut targets = Vec::new(); + let validator_count = ctx.descriptors().validators().len(); + if self.include_validators { + if validator_count > 1 { + for index in 0..validator_count { + targets.push(Target::Validator(index)); + } + } else if validator_count == 1 { + info!("chaos restart skipping validators: only one validator configured"); + } + } + if self.include_executors { + for index in 0..ctx.descriptors().executors().len() { + targets.push(Target::Executor(index)); + } + } + targets + } + + fn random_delay(&self) -> Duration { + if self.max_delay <= self.min_delay { + return self.min_delay; + } + let spread = self + .max_delay + .checked_sub(self.min_delay) + .unwrap_or_else(|| Duration::from_millis(1)) + .as_secs_f64(); + let offset = thread_rng().gen_range(0.0..=spread); + self.min_delay + .checked_add(Duration::from_secs_f64(offset)) + .unwrap_or(self.max_delay) + } + + fn initialize_cooldowns(&self, targets: &[Target]) -> HashMap { + let now = Instant::now(); + let ready = now.checked_sub(self.target_cooldown).unwrap_or(now); + targets + .iter() + .copied() + .map(|target| (target, ready)) + .collect() + } + + async fn pick_target( + &self, + targets: &[Target], + cooldowns: &HashMap, + ) -> Target { + loop { + let now = Instant::now(); + if let Some(next_ready) = cooldowns + .values() + .copied() + .filter(|ready| *ready > now) + .min() + { + let wait = next_ready.saturating_duration_since(now); + if !wait.is_zero() { + sleep(wait).await; + continue; + } 
+            }
+
+            let available: Vec<Target> = targets
+                .iter()
+                .copied()
+                .filter(|target| cooldowns.get(target).is_none_or(|ready| *ready <= now))
+                .collect();
+
+            if let Some(choice) = available.choose(&mut thread_rng()).copied() {
+                return choice;
+            }
+
+            return targets
+                .choose(&mut thread_rng())
+                .copied()
+                .expect("chaos restart workload has targets");
+        }
+    }
+}
+
+#[async_trait]
+impl Workload for RandomRestartWorkload {
+    fn name(&self) -> &'static str {
+        "chaos_random_restart"
+    }
+
+    async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
+        let handle = ctx
+            .node_control()
+            .ok_or_else(|| "chaos restart workload requires node control".to_owned())?;
+
+        let targets = self.targets(ctx);
+        if targets.is_empty() {
+            return Err("chaos restart workload has no eligible targets".into());
+        }
+
+        let mut cooldowns = self.initialize_cooldowns(&targets);
+
+        loop {
+            sleep(self.random_delay()).await;
+            let target = self.pick_target(&targets, &cooldowns).await;
+
+            match target {
+                Target::Validator(index) => handle
+                    .restart_validator(index)
+                    .await
+                    .map_err(|err| format!("validator restart failed: {err}"))?,
+                Target::Executor(index) => handle
+                    .restart_executor(index)
+                    .await
+                    .map_err(|err| format!("executor restart failed: {err}"))?,
+            }
+
+            cooldowns.insert(target, Instant::now() + self.target_cooldown);
+        }
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+enum Target {
+    Validator(usize),
+    Executor(usize),
+}
diff --git a/testing-framework/workflows/src/workloads/da/expectation.rs b/testing-framework/workflows/src/workloads/da/expectation.rs
new file mode 100644
index 0000000..91b04f7
--- /dev/null
+++ b/testing-framework/workflows/src/workloads/da/expectation.rs
@@ -0,0 +1,177 @@
+use std::{
+    collections::HashSet,
+    sync::{Arc, Mutex},
+};
+
+use async_trait::async_trait;
+use nomos_core::mantle::{
+    AuthenticatedMantleTx as _,
+    ops::{Op, channel::ChannelId},
+};
+use testing_framework_core::scenario::{BlockRecord, DynError, Expectation, RunContext};
+use thiserror::Error;
+use tokio::sync::broadcast;
+
+#[derive(Debug)]
+pub struct DaWorkloadExpectation {
+    planned_channels: Vec<ChannelId>,
+    capture_state: Option<CaptureState>,
+}
+
+#[derive(Debug)]
+struct CaptureState {
+    planned: Arc<HashSet<ChannelId>>,
+    inscriptions: Arc<Mutex<HashSet<ChannelId>>>,
+    blobs: Arc<Mutex<HashSet<ChannelId>>>,
+}
+
+const MIN_INCLUSION_RATIO: f64 = 0.8;
+
+#[derive(Debug, Error)]
+enum DaExpectationError {
+    #[error("da workload expectation not started")]
+    NotCaptured,
+    #[error("missing inscriptions for {missing:?}")]
+    MissingInscriptions { missing: Vec<ChannelId> },
+    #[error("missing blobs for {missing:?}")]
+    MissingBlobs { missing: Vec<ChannelId> },
+}
+
+impl DaWorkloadExpectation {
+    pub const fn new(planned_channels: Vec<ChannelId>) -> Self {
+        Self {
+            planned_channels,
+            capture_state: None,
+        }
+    }
+}
+
+#[async_trait]
+impl Expectation for DaWorkloadExpectation {
+    fn name(&self) -> &'static str {
+        "da_workload_inclusions"
+    }
+
+    async fn start_capture(&mut self, ctx: &RunContext) -> Result<(), DynError> {
+        if self.capture_state.is_some() {
+            return Ok(());
+        }
+
+        let planned = Arc::new(
+            self.planned_channels
+                .iter()
+                .copied()
+                .collect::<HashSet<_>>(),
+        );
+        let inscriptions = Arc::new(Mutex::new(HashSet::new()));
+        let blobs = Arc::new(Mutex::new(HashSet::new()));
+
+        let mut receiver = ctx.block_feed().subscribe();
+        let planned_for_task = Arc::clone(&planned);
+        let inscriptions_for_task = Arc::clone(&inscriptions);
+        let blobs_for_task = Arc::clone(&blobs);
+
+        tokio::spawn(async move {
+            loop {
+                match receiver.recv().await {
+                    Ok(record) => capture_block(
+                        record.as_ref(),
+                        &planned_for_task,
+                        &inscriptions_for_task,
+                        &blobs_for_task,
+                    ),
+                    Err(broadcast::error::RecvError::Lagged(_)) => {}
+                    Err(broadcast::error::RecvError::Closed) => break,
+                }
+            }
+        });
+
+        self.capture_state = Some(CaptureState {
+            planned,
+            inscriptions,
+            blobs,
+        });
+
+        Ok(())
+    }
+
+    async fn evaluate(&mut self, _ctx: &RunContext) -> Result<(), DynError> {
+        let state = self
+            .capture_state
+            .as_ref()
+            .ok_or(DaExpectationError::NotCaptured)
+            .map_err(DynError::from)?;
+
+        let planned_total = state.planned.len();
+        let missing_inscriptions = {
+            let inscriptions = state
+                .inscriptions
+                .lock()
+                .expect("inscription lock poisoned");
+            missing_channels(&state.planned, &inscriptions)
+        };
+        let required_inscriptions = minimum_required(planned_total, MIN_INCLUSION_RATIO);
+        if planned_total.saturating_sub(missing_inscriptions.len()) < required_inscriptions {
+            return Err(DaExpectationError::MissingInscriptions {
+                missing: missing_inscriptions,
+            }
+            .into());
+        }
+
+        let missing_blobs = {
+            let blobs = state.blobs.lock().expect("blob lock poisoned");
+            missing_channels(&state.planned, &blobs)
+        };
+        let required_blobs = minimum_required(planned_total, MIN_INCLUSION_RATIO);
+        if planned_total.saturating_sub(missing_blobs.len()) < required_blobs {
+            return Err(DaExpectationError::MissingBlobs {
+                missing: missing_blobs,
+            }
+            .into());
+        }
+
+        Ok(())
+    }
+}
+
+fn capture_block(
+    block: &BlockRecord,
+    planned: &HashSet<ChannelId>,
+    inscriptions: &Arc<Mutex<HashSet<ChannelId>>>,
+    blobs: &Arc<Mutex<HashSet<ChannelId>>>,
+) {
+    let mut new_inscriptions = Vec::new();
+    let mut new_blobs = Vec::new();
+
+    for tx in block.block.transactions() {
+        for op in &tx.mantle_tx().ops {
+            match op {
+                Op::ChannelInscribe(inscribe) if planned.contains(&inscribe.channel_id) => {
+                    new_inscriptions.push(inscribe.channel_id);
+                }
+                Op::ChannelBlob(blob) if planned.contains(&blob.channel) => {
+                    new_blobs.push(blob.channel);
+                }
+                _ => {}
+            }
+        }
+    }
+
+    if !new_inscriptions.is_empty() {
+        let mut guard = inscriptions.lock().expect("inscription lock poisoned");
+        guard.extend(new_inscriptions);
+    }
+
+    if !new_blobs.is_empty() {
+        let mut guard = blobs.lock().expect("blob lock poisoned");
+        guard.extend(new_blobs);
+    }
+}
+
+fn missing_channels(planned: &HashSet<ChannelId>, observed: &HashSet<ChannelId>) -> Vec<ChannelId> {
+    planned.difference(observed).copied().collect()
+}
+
+fn minimum_required(total: usize, ratio: f64) -> usize {
+    ((total as f64) * ratio).ceil() as usize
+}
diff --git a/testing-framework/workflows/src/workloads/da/mod.rs b/testing-framework/workflows/src/workloads/da/mod.rs
new file mode 100644
index 0000000..69ae438
--- /dev/null
+++ b/testing-framework/workflows/src/workloads/da/mod.rs
@@ -0,0 +1,4 @@
+mod expectation;
+mod workload;
+
+pub use workload::Workload;
diff --git a/testing-framework/workflows/src/workloads/da/workload.rs b/testing-framework/workflows/src/workloads/da/workload.rs
new file mode 100644
index 0000000..93da188
--- /dev/null
+++ b/testing-framework/workflows/src/workloads/da/workload.rs
@@ -0,0 +1,206 @@
+use std::{sync::Arc, time::Duration};
+
+use async_trait::async_trait;
+use ed25519_dalek::SigningKey;
+use executor_http_client::ExecutorHttpClient;
+use nomos_core::{
+    da::BlobId,
+    mantle::ops::{
+        Op,
+        channel::{ChannelId, MsgId},
+    },
+};
+use rand::{Rng as _, RngCore as _, seq::SliceRandom as _, thread_rng};
+use testing_framework_core::{
+    nodes::ApiClient,
+    scenario::{BlockRecord, DynError, Expectation, RunContext, Workload as ScenarioWorkload},
+};
+use tokio::{sync::broadcast, time::sleep};
+
+use super::expectation::DaWorkloadExpectation;
+use crate::{
+    util::tx,
+    workloads::util::{find_channel_op, submit_transaction_via_cluster},
+};
+
+const TEST_KEY_BYTES: [u8; 32] = [0u8; 32];
+const DEFAULT_CHANNELS: usize = 1;
+const MIN_BLOB_CHUNKS: usize = 1;
+const MAX_BLOB_CHUNKS: usize = 8;
+const PUBLISH_RETRIES: usize = 5;
+const PUBLISH_RETRY_DELAY: Duration = Duration::from_secs(2);
+
+#[derive(Clone)]
+pub struct Workload {
+    planned_channels: Arc<[ChannelId]>,
+}
+
+impl Default for Workload {
+    fn default() -> Self {
+        Self::with_channel_count(DEFAULT_CHANNELS)
+    }
+}
+
+impl Workload {
+    #[must_use]
+    pub fn with_channel_count(count: usize) -> Self {
+        assert!(count > 0, "da workload requires positive count");
+        Self {
+            planned_channels: Arc::from(planned_channel_ids(count)),
+        }
+    }
+
+    fn plan(&self) -> Arc<[ChannelId]> {
+        Arc::clone(&self.planned_channels)
+    }
+}
+
+#[async_trait]
+impl ScenarioWorkload for Workload {
+    fn name(&self) -> &'static str {
+        "channel_workload"
+    }
+
+    fn expectations(&self) -> Vec<Box<dyn Expectation>> {
+        let planned = self.plan().to_vec();
+        vec![Box::new(DaWorkloadExpectation::new(planned))]
+    }
+
+    async fn start(&self, ctx: &RunContext) -> Result<(), DynError> {
+        let mut receiver = ctx.block_feed().subscribe();
+
+        for channel_id in self.plan().iter().copied() {
+            run_channel_flow(ctx, &mut receiver, channel_id).await?;
+        }
+
+        Ok(())
+    }
+}
+
+async fn run_channel_flow(
+    ctx: &RunContext,
+    receiver: &mut broadcast::Receiver<Arc<BlockRecord>>,
+    channel_id: ChannelId,
+) -> Result<(), DynError> {
+    let tx = Arc::new(tx::create_inscription_transaction_with_id(channel_id));
+    submit_transaction_via_cluster(ctx, Arc::clone(&tx)).await?;
+
+    let inscription_id = wait_for_inscription(receiver, channel_id).await?;
+    let blob_id = publish_blob(ctx, channel_id, inscription_id).await?;
+    wait_for_blob(receiver, channel_id, blob_id).await?;
+    Ok(())
+}
+
+async fn wait_for_inscription(
+    receiver: &mut broadcast::Receiver<Arc<BlockRecord>>,
+    channel_id: ChannelId,
+) -> Result<MsgId, DynError> {
+    wait_for_channel_op(receiver, move |op| {
+        if let Op::ChannelInscribe(inscribe) = op
+            && inscribe.channel_id == channel_id
+        {
+            Some(inscribe.id())
+        } else {
+            None
+        }
+    })
+    .await
+}
+
+async fn wait_for_blob(
+    receiver: &mut broadcast::Receiver<Arc<BlockRecord>>,
+    channel_id: ChannelId,
+    blob_id: BlobId,
+) -> Result<MsgId, DynError> {
+    wait_for_channel_op(receiver, move |op| {
+        if let Op::ChannelBlob(blob_op) = op
+            && blob_op.channel == channel_id
+            && blob_op.blob == blob_id
+        {
+            Some(blob_op.id())
+        } else {
+            None
+        }
+    })
+    .await
+}
+
+async fn wait_for_channel_op<F>(
+    receiver: &mut broadcast::Receiver<Arc<BlockRecord>>,
+    mut matcher: F,
+) -> Result<MsgId, DynError>
+where
+    F: FnMut(&Op) -> Option<MsgId>,
+{
+    loop {
+        match receiver.recv().await {
+            Ok(record) => {
+                if let Some(msg_id) = find_channel_op(record.block.as_ref(), &mut matcher) {
+                    return Ok(msg_id);
+                }
+            }
+            Err(broadcast::error::RecvError::Lagged(_)) => {}
+            Err(broadcast::error::RecvError::Closed) => {
+                return Err("block feed closed while waiting for channel operations".into());
+            }
+        }
+    }
+}
+
+async fn publish_blob(
+    ctx: &RunContext,
+    channel_id: ChannelId,
+    parent_msg: MsgId,
+) -> Result<BlobId, DynError> {
+    let executors = ctx.node_clients().executor_clients();
+    if executors.is_empty() {
+        return Err("da workload requires at least one executor".into());
+    }
+
+    let signer = SigningKey::from_bytes(&TEST_KEY_BYTES).verifying_key();
+    let data = random_blob_payload();
+    let client = ExecutorHttpClient::new(None);
+
+    let mut candidates: Vec<&ApiClient> = executors.iter().collect();
+    let mut last_err = None;
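+
+    // Retry across all executors in random order; only the last error is kept
+    // so the final failure message points at a concrete cause.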
+    for attempt in 1..=PUBLISH_RETRIES {
+        candidates.shuffle(&mut thread_rng());
+        for executor in &candidates {
+            let executor_url = executor.base_url().clone();
+            match client
+                .publish_blob(executor_url, channel_id, parent_msg, signer, data.clone())
+                .await
+            {
+                Ok(blob_id) => return Ok(blob_id),
+                Err(err) => last_err = Some(err.into()),
+            }
+        }
+
+        if attempt < PUBLISH_RETRIES {
+            sleep(PUBLISH_RETRY_DELAY).await;
+        }
+    }
+
+    Err(last_err.unwrap_or_else(|| "da workload could not publish blob".into()))
+}
+
+fn random_blob_payload() -> Vec<u8> {
+    let mut rng = thread_rng();
+    let chunks = rng.gen_range(MIN_BLOB_CHUNKS..=MAX_BLOB_CHUNKS);
+    let mut data = vec![0u8; 31 * chunks];
+    rng.fill_bytes(&mut data);
+    data
+}
+
+fn planned_channel_ids(total: usize) -> Vec<ChannelId> {
+    (0..total as u64)
+        .map(deterministic_channel_id)
+        .collect::<Vec<_>>()
+}
+
+fn deterministic_channel_id(index: u64) -> ChannelId {
+    let mut bytes = [0u8; 32];
+    bytes[..8].copy_from_slice(b"chn_wrkd");
+    bytes[24..].copy_from_slice(&index.to_be_bytes());
+    ChannelId::from(bytes)
+}
diff --git a/testing-framework/workflows/src/workloads/mod.rs b/testing-framework/workflows/src/workloads/mod.rs
new file mode 100644
index 0000000..5dce733
--- /dev/null
+++ b/testing-framework/workflows/src/workloads/mod.rs
@@ -0,0 +1,6 @@
+pub mod chaos;
+pub mod da;
+pub mod transaction;
+pub mod util;
+
+pub use transaction::TxInclusionExpectation;
diff --git a/testing-framework/workflows/src/workloads/transaction/expectation.rs b/testing-framework/workflows/src/workloads/transaction/expectation.rs
new file mode 100644
index 0000000..6efde59
--- /dev/null
+++ b/testing-framework/workflows/src/workloads/transaction/expectation.rs
@@ -0,0 +1,141 @@
+use std::{
+    collections::HashSet,
+    num::{NonZeroU64, NonZeroUsize},
+    sync::{
+        Arc,
+        atomic::{AtomicU64, Ordering},
+    },
+};
+
+use async_trait::async_trait;
+use nomos_core::{header::HeaderId, mantle::AuthenticatedMantleTx as _};
+use testing_framework_core::scenario::{DynError, Expectation, RunContext};
+use thiserror::Error;
+use tokio::sync::broadcast;
+use zksign::PublicKey;
+
+use super::workload::{limited_user_count, submission_plan};
+
+const MIN_INCLUSION_RATIO: f64 = 0.5;
+
+#[derive(Clone)]
+pub struct TxInclusionExpectation {
+    txs_per_block: NonZeroU64,
+    user_limit: Option<NonZeroUsize>,
+    capture_state: Option<CaptureState>,
+}
+
+#[derive(Clone)]
+struct CaptureState {
+    observed: Arc<AtomicU64>,
+    expected: u64,
+}
+
+#[derive(Debug, Error)]
+enum TxExpectationError {
+    #[error("transaction workload requires seeded accounts")]
+    MissingAccounts,
+    #[error("transaction workload planned zero transactions")]
+    NoPlannedTransactions,
+    #[error("transaction inclusion expectation not captured")]
+    NotCaptured,
+    #[error("transaction inclusion observed {observed} below required {required}")]
+    InsufficientInclusions { observed: u64, required: u64 },
+}
+
+impl TxInclusionExpectation {
+    pub const NAME: &'static str = "tx_inclusion_expectation";
+
+    #[must_use]
+    pub const fn new(txs_per_block: NonZeroU64, user_limit: Option<NonZeroUsize>) -> Self {
+        Self {
+            txs_per_block,
+            user_limit,
+            capture_state: None,
+        }
+    }
+}
+
+#[async_trait]
+impl Expectation for TxInclusionExpectation {
+    fn name(&self) -> &'static str {
+        Self::NAME
+    }
+
+    async fn start_capture(&mut self, ctx: &RunContext) -> Result<(), DynError> {
+        if self.capture_state.is_some() {
+            return Ok(());
+        }
+
+        let wallet_accounts = ctx.descriptors().config().wallet().accounts.clone();
+        if wallet_accounts.is_empty() {
+            return Err(TxExpectationError::MissingAccounts.into());
+ } + + let available = limited_user_count(self.user_limit, wallet_accounts.len()); + let (planned, _) = submission_plan(self.txs_per_block, ctx, available)?; + if planned == 0 { + return Err(TxExpectationError::NoPlannedTransactions.into()); + } + + let wallet_pks = wallet_accounts + .into_iter() + .take(planned) + .map(|account| account.secret_key.to_public_key()) + .collect::>(); + + let observed = Arc::new(AtomicU64::new(0)); + let receiver = ctx.block_feed().subscribe(); + let tracked_accounts = Arc::new(wallet_pks); + let spawn_accounts = Arc::clone(&tracked_accounts); + let spawn_observed = Arc::clone(&observed); + + tokio::spawn(async move { + let mut receiver = receiver; + let genesis_parent = HeaderId::from([0; 32]); + loop { + match receiver.recv().await { + Ok(record) => { + if record.block.header().parent_block() == genesis_parent { + continue; + } + + for tx in record.block.transactions() { + for note in &tx.mantle_tx().ledger_tx.outputs { + if spawn_accounts.contains(¬e.pk) { + spawn_observed.fetch_add(1, Ordering::Relaxed); + break; + } + } + } + } + Err(broadcast::error::RecvError::Lagged(_)) => {} + Err(broadcast::error::RecvError::Closed) => break, + } + } + }); + + self.capture_state = Some(CaptureState { + observed, + expected: planned as u64, + }); + + Ok(()) + } + + async fn evaluate(&mut self, _ctx: &RunContext) -> Result<(), DynError> { + let state = self + .capture_state + .as_ref() + .ok_or(TxExpectationError::NotCaptured)?; + + let observed = state.observed.load(Ordering::Relaxed); + let required = ((state.expected as f64) * MIN_INCLUSION_RATIO).ceil() as u64; + + if observed >= required { + Ok(()) + } else { + Err(TxExpectationError::InsufficientInclusions { observed, required }.into()) + } + } +} diff --git a/testing-framework/workflows/src/workloads/transaction/mod.rs b/testing-framework/workflows/src/workloads/transaction/mod.rs new file mode 100644 index 0000000..df5c612 --- /dev/null +++ b/testing-framework/workflows/src/workloads/transaction/mod.rs @@ -0,0 +1,5 @@ +mod expectation; +mod workload; + +pub use expectation::TxInclusionExpectation; +pub use workload::Workload; diff --git a/testing-framework/workflows/src/workloads/transaction/workload.rs b/testing-framework/workflows/src/workloads/transaction/workload.rs new file mode 100644 index 0000000..af1d991 --- /dev/null +++ b/testing-framework/workflows/src/workloads/transaction/workload.rs @@ -0,0 +1,244 @@ +use std::{ + collections::{HashMap, VecDeque}, + num::{NonZeroU64, NonZeroUsize}, + sync::Arc, + time::Duration, +}; + +use async_trait::async_trait; +use integration_configs::topology::configs::wallet::WalletAccount; +use nomos_core::mantle::{ + GenesisTx as _, Note, SignedMantleTx, Transaction as _, Utxo, tx_builder::MantleTxBuilder, +}; +use testing_framework_core::{ + scenario::{DynError, Expectation, RunContext, RunMetrics, Workload as ScenarioWorkload}, + topology::{GeneratedNodeConfig, GeneratedTopology}, +}; +use tokio::time::sleep; +use zksign::{PublicKey, SecretKey}; + +use super::expectation::TxInclusionExpectation; +use crate::workloads::util::submit_transaction_via_cluster; + +#[derive(Clone)] +pub struct Workload { + txs_per_block: NonZeroU64, + user_limit: Option, + accounts: Vec, +} + +#[derive(Clone)] +struct WalletInput { + account: WalletAccount, + utxo: Utxo, +} + +#[async_trait] +impl ScenarioWorkload for Workload { + fn name(&self) -> &'static str { + "tx_workload" + } + + fn expectations(&self) -> Vec> { + vec![Box::new(TxInclusionExpectation::new( + self.txs_per_block, + 
self.user_limit, + ))] + } + + fn init( + &mut self, + descriptors: &GeneratedTopology, + _run_metrics: &RunMetrics, + ) -> Result<(), DynError> { + let wallet_accounts = descriptors.config().wallet().accounts.clone(); + if wallet_accounts.is_empty() { + return Err("transaction workload requires seeded accounts".into()); + } + + let reference_node = descriptors + .validators() + .first() + .or_else(|| descriptors.executors().first()) + .ok_or("transaction workload requires at least one node in the topology")?; + + let utxo_map = wallet_utxo_map(reference_node); + let mut accounts = wallet_accounts + .into_iter() + .filter_map(|account| { + utxo_map + .get(&account.public_key()) + .copied() + .map(|utxo| WalletInput { account, utxo }) + }) + .collect::>(); + + apply_user_limit(&mut accounts, self.user_limit); + + if accounts.is_empty() { + return Err( + "transaction workload could not match any accounts to genesis UTXOs".into(), + ); + } + + self.accounts = accounts; + Ok(()) + } + + async fn start(&self, ctx: &RunContext) -> Result<(), DynError> { + Submission::new(self, ctx)?.execute().await + } +} + +impl Workload { + #[must_use] + pub const fn new(txs_per_block: NonZeroU64) -> Self { + Self { + txs_per_block, + user_limit: None, + accounts: Vec::new(), + } + } + + #[must_use] + pub fn with_rate(txs_per_block: u64) -> Option { + NonZeroU64::new(txs_per_block).map(Self::new) + } + + #[must_use] + pub const fn txs_per_block(&self) -> NonZeroU64 { + self.txs_per_block + } + + #[must_use] + pub const fn with_user_limit(mut self, user_limit: Option) -> Self { + self.user_limit = user_limit; + self + } +} + +impl Default for Workload { + fn default() -> Self { + Self::new(NonZeroU64::new(1).expect("non-zero")) + } +} + +struct Submission<'a> { + plan: VecDeque, + ctx: &'a RunContext, + interval: Duration, +} + +impl<'a> Submission<'a> { + fn new(workload: &Workload, ctx: &'a RunContext) -> Result { + if workload.accounts.is_empty() { + return Err("transaction workload has no available accounts".into()); + } + + let (planned, interval) = + submission_plan(workload.txs_per_block, ctx, workload.accounts.len())?; + + let plan = workload + .accounts + .iter() + .take(planned) + .cloned() + .collect::>(); + + Ok(Self { + plan, + ctx, + interval, + }) + } + + async fn execute(mut self) -> Result<(), DynError> { + while let Some(input) = self.plan.pop_front() { + submit_wallet_transaction(self.ctx, &input).await?; + + if !self.interval.is_zero() { + sleep(self.interval).await; + } + } + + Ok(()) + } +} + +async fn submit_wallet_transaction(ctx: &RunContext, input: &WalletInput) -> Result<(), DynError> { + let signed_tx = Arc::new(build_wallet_transaction(input)?); + submit_transaction_via_cluster(ctx, signed_tx).await +} + +fn build_wallet_transaction(input: &WalletInput) -> Result { + let builder = MantleTxBuilder::new() + .add_ledger_input(input.utxo) + .add_ledger_output(Note::new(input.utxo.note.value, input.account.public_key())); + + let mantle_tx = builder.build(); + let tx_hash = mantle_tx.hash(); + + let signature = SecretKey::multi_sign( + std::slice::from_ref(&input.account.secret_key), + tx_hash.as_ref(), + ) + .map_err(|err| format!("transaction workload could not sign transaction: {err}"))?; + + SignedMantleTx::new(mantle_tx, Vec::new(), signature).map_err(|err| { + format!("transaction workload constructed invalid transaction: {err}").into() + }) +} + +fn wallet_utxo_map(node: &GeneratedNodeConfig) -> HashMap { + let genesis_tx = node.general.consensus_config.genesis_tx.clone(); + let 
ledger_tx = genesis_tx.mantle_tx().ledger_tx.clone(); + let tx_hash = ledger_tx.hash(); + + ledger_tx + .outputs + .iter() + .enumerate() + .map(|(idx, note)| (note.pk, Utxo::new(tx_hash, idx, *note))) + .collect() +} + +fn apply_user_limit(items: &mut Vec, user_limit: Option) { + if let Some(limit) = user_limit { + let allowed = limit.get().min(items.len()); + items.truncate(allowed); + } +} + +pub(super) fn limited_user_count(user_limit: Option, available: usize) -> usize { + user_limit.map_or(available, |limit| limit.get().min(available)) +} + +pub(super) fn submission_plan( + txs_per_block: NonZeroU64, + ctx: &RunContext, + available_accounts: usize, +) -> Result<(usize, Duration), DynError> { + if available_accounts == 0 { + return Err("transaction workload scheduled zero transactions".into()); + } + + let run_secs = ctx.run_duration().as_secs_f64(); + let block_secs = ctx + .run_metrics() + .block_interval_hint() + .unwrap_or_else(|| ctx.run_duration()) + .as_secs_f64(); + + let expected_blocks = run_secs / block_secs; + let requested = (expected_blocks * txs_per_block.get() as f64) + .floor() + .clamp(0.0, u64::MAX as f64) as u64; + + let planned = requested.min(available_accounts as u64) as usize; + if planned == 0 { + return Err("transaction workload scheduled zero transactions".into()); + } + + let interval = Duration::from_secs_f64(run_secs / planned as f64); + Ok((planned, interval)) +} diff --git a/testing-framework/workflows/src/workloads/util.rs b/testing-framework/workflows/src/workloads/util.rs new file mode 100644 index 0000000..8229c16 --- /dev/null +++ b/testing-framework/workflows/src/workloads/util.rs @@ -0,0 +1,44 @@ +use std::sync::Arc; + +use nomos_core::{ + block::Block, + mantle::{ + AuthenticatedMantleTx as _, SignedMantleTx, + ops::{Op, channel::MsgId}, + }, +}; +use testing_framework_core::scenario::{DynError, RunContext}; + +/// Scans a block and invokes the matcher for every operation until it returns +/// `Some(...)`. Returns `None` when no matching operation is found. 
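+///
+/// Sketch of a typical matcher (illustrative, not a compiled doc test; `target`
+/// stands in for a `ChannelId` captured by the caller):
+///
+/// ```ignore
+/// let inscription = find_channel_op(block, &mut |op| match op {
+///     Op::ChannelInscribe(inscribe) if inscribe.channel_id == target => Some(inscribe.id()),
+///     _ => None,
+/// });
+/// ```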
+pub fn find_channel_op(block: &Block, matcher: &mut F) -> Option +where + F: FnMut(&Op) -> Option, +{ + for tx in block.transactions() { + for op in &tx.mantle_tx().ops { + if let Some(msg_id) = matcher(op) { + return Some(msg_id); + } + } + } + + None +} + +pub async fn submit_transaction_via_cluster( + ctx: &RunContext, + tx: Arc, +) -> Result<(), DynError> { + ctx.cluster_client() + .try_all_clients(|client| { + let tx = Arc::clone(&tx); + Box::pin(async move { + client + .submit_transaction(&tx) + .await + .map_err(|err| -> DynError { err.into() }) + }) + }) + .await +} diff --git a/tests/kzgrs/kzgrs_test_params b/tests/kzgrs/kzgrs_test_params new file mode 100644 index 0000000..0389d61 Binary files /dev/null and b/tests/kzgrs/kzgrs_test_params differ diff --git a/tests/workflows/Cargo.toml b/tests/workflows/Cargo.toml new file mode 100644 index 0000000..31a356f --- /dev/null +++ b/tests/workflows/Cargo.toml @@ -0,0 +1,23 @@ +[package] +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +name = "tests-workflows" +readme.workspace = true +repository.workspace = true +version = "0.1.0" + +[dependencies] +anyhow = "1.0.100" +serial_test = "2" +testing-framework-core = { workspace = true } +testing-framework-runner-compose = { workspace = true } +testing-framework-runner-k8s = { workspace = true } +testing-framework-runner-local = { workspace = true } +testing-framework-workflows = { workspace = true } +tokio = { workspace = true, features = ["macros", "net", "rt-multi-thread", "time"] } + +[lints] +workspace = true diff --git a/tests/workflows/src/bin/compose_runner_ci.rs b/tests/workflows/src/bin/compose_runner_ci.rs new file mode 100644 index 0000000..d755bbb --- /dev/null +++ b/tests/workflows/src/bin/compose_runner_ci.rs @@ -0,0 +1,68 @@ +use std::time::Duration; + +use anyhow::{Context as _, Result}; +use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder}; +use testing_framework_runner_compose::{ComposeRunner, ComposeRunnerError}; +use tests_workflows::{ + ChaosBuilderExt as _, ScenarioBuilderExt as _, expectations::ConsensusLiveness, +}; + +const RUN_DURATION: Duration = Duration::from_secs(120); +const VALIDATORS: usize = 2; +const EXECUTORS: usize = 2; +const MIXED_TXS_PER_BLOCK: u64 = 5; +const TOTAL_WALLETS: usize = 64; +const TRANSACTION_WALLETS: usize = 8; + +#[tokio::main] +async fn main() -> Result<()> { + let topology = ScenarioBuilder::with_node_counts(VALIDATORS, EXECUTORS) + .enable_node_control() + .chaos_random_restart() + .min_delay(Duration::from_secs(45)) + .max_delay(Duration::from_secs(75)) + .target_cooldown(Duration::from_secs(120)) + .apply() + .topology() + .validators(VALIDATORS) + .executors(EXECUTORS) + .network_star() + .apply(); + + let workloads = topology + .wallets(TOTAL_WALLETS) + .transactions() + .rate(MIXED_TXS_PER_BLOCK) + .users(TRANSACTION_WALLETS) + .apply() + .da() + .channel_rate(1) + .blob_rate(1) + .apply(); + + let lag_allowance = 2 + (VALIDATORS + EXECUTORS) as u64; + let mut plan = workloads + .with_expectation(ConsensusLiveness::default().with_lag_allowance(lag_allowance)) + .with_run_duration(RUN_DURATION) + .build(); + + let deployer = ComposeRunner::new().with_readiness(false); + let runner: Runner = match deployer.deploy(&plan).await { + Ok(runner) => runner, + Err(ComposeRunnerError::DockerUnavailable) => { + anyhow::bail!("Docker is required for compose runner CI binary"); + } + Err(err) => return Err(err.into()), + }; + 
+ if !runner.context().telemetry().is_configured() { + anyhow::bail!("compose runner should expose prometheus metrics"); + } + + runner + .run(&mut plan) + .await + .context("compose scenario execution failed")?; + + Ok(()) +} diff --git a/tests/workflows/src/lib.rs b/tests/workflows/src/lib.rs new file mode 100644 index 0000000..8738278 --- /dev/null +++ b/tests/workflows/src/lib.rs @@ -0,0 +1,11 @@ +use testing_framework_core::scenario::Metrics; +pub use testing_framework_workflows::{ + builder::{ChaosBuilderExt, ScenarioBuilderExt}, + expectations, util, workloads, +}; + +/// Metrics are currently disabled in this branch; return a stub handle. +#[must_use] +pub const fn configure_prometheus_metrics() -> Metrics { + Metrics::empty() +} diff --git a/tests/workflows/tests/compose_runner.rs b/tests/workflows/tests/compose_runner.rs new file mode 100644 index 0000000..ddf22a1 --- /dev/null +++ b/tests/workflows/tests/compose_runner.rs @@ -0,0 +1,104 @@ +use std::{env, time::Duration}; + +use serial_test::serial; +use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder}; +use testing_framework_runner_compose::{ComposeRunner, ComposeRunnerError}; +use tests_workflows::{ + ChaosBuilderExt as _, ScenarioBuilderExt as _, expectations::ConsensusLiveness, +}; + +const RUN_DURATION: Duration = Duration::from_secs(120); +const MIXED_TXS_PER_BLOCK: u64 = 5; +const TOTAL_WALLETS: usize = 64; +const TRANSACTION_WALLETS: usize = 8; +const MAX_NODE_PAIR: usize = 6; + +#[tokio::test] +#[serial] +async fn compose_runner_mixed_workloads() { + for (validators, executors) in selected_node_pairs() { + run_compose_case(validators, executors).await; + } +} + +fn selected_node_pairs() -> Vec<(usize, usize)> { + if let Ok(raw) = env::var("COMPOSE_NODE_PAIRS") { + return raw + .split(',') + .filter(|entry| !entry.trim().is_empty()) + .map(|entry| { + let parts: Vec<_> = entry + .split(['x', 'X']) + .map(str::trim) + .filter(|part| !part.is_empty()) + .collect(); + assert!( + parts.len() == 2, + "invalid COMPOSE_NODE_PAIRS entry '{entry}'; expected format 'x'", + ); + let validators = parts[0] + .parse::() + .unwrap_or_else(|_| panic!("invalid validator count '{}'", parts[0])); + let executors = parts[1] + .parse::() + .unwrap_or_else(|_| panic!("invalid executor count '{}'", parts[1])); + (validators, executors) + }) + .collect(); + } + + (1..=MAX_NODE_PAIR).map(|n| (n, n)).collect() +} + +async fn run_compose_case(validators: usize, executors: usize) { + println!( + "running compose chaos test with {validators} validator(s) and {executors} executor(s)" + ); + + let topology = ScenarioBuilder::with_node_counts(validators, executors) + .enable_node_control() + .chaos_random_restart() + .min_delay(Duration::from_secs(45)) + .max_delay(Duration::from_secs(75)) + .target_cooldown(Duration::from_secs(120)) + .apply() + .topology() + .validators(validators) + .executors(executors) + .network_star() + .apply(); + + let workloads = topology + .wallets(TOTAL_WALLETS) + .transactions() + .rate(MIXED_TXS_PER_BLOCK) + .users(TRANSACTION_WALLETS) + .apply() + .da() + .channel_rate(1) + .blob_rate(1) + .apply(); + + let lag_allowance = 2 + (validators + executors) as u64; + let mut plan = workloads + .with_expectation(ConsensusLiveness::default().with_lag_allowance(lag_allowance)) + .with_run_duration(RUN_DURATION) + .build(); + + let deployer = ComposeRunner::new().with_readiness(false); + let runner: Runner = match deployer.deploy(&plan).await { + Ok(runner) => runner, + 
Err(ComposeRunnerError::DockerUnavailable) => { + eprintln!("Skipping compose_runner_mixed_workloads: Docker is unavailable"); + return; + } + Err(err) => panic!("scenario deployment: {err}"), + }; + let context = runner.context(); + assert!( + context.telemetry().is_configured(), + "compose runner should expose prometheus metrics" + ); + + let _handle = runner.run(&mut plan).await.expect("scenario executed"); +} diff --git a/tests/workflows/tests/framework_demo.rs b/tests/workflows/tests/framework_demo.rs new file mode 100644 index 0000000..da64552 --- /dev/null +++ b/tests/workflows/tests/framework_demo.rs @@ -0,0 +1,235 @@ +//! # Test Framework Demo Topology +//! +//! The demo showcases how the testing framework composes deployments: +//! +//! ```text +//! ┌────────────────────────────────────────────────────────┐ +//! │ ScenarioBuilder │ +//! │ ├─ plan() ───────────▶ Runner::new(plan) │ +//! │ ├─ enable_node_control → chaos workloads │ +//! │ ├─ topology() → network layout → validators/executors│ +//! │ └─ workloads (transactions + DA) │ +//! └────────────────────────────────────────────────────────┘ +//! +//! ┌─────────────────────────────┐ +//! │ Deployers │ +//! │ ├─ LocalDeployer │ +//! │ ├─ ComposeRunner │ +//! │ └─ K8sRunner │ +//! │ │ +//! │ Runner │ +//! │ ├─ execute plan │ +//! │ ├─ telemetry │ +//! │ └─ control handles │ +//! └─────────────────────────────┘ +//! ``` +//! +//! Component responsibilities: +//! +//! ┌─────────────────────────────────────────────────────────────┐ +//! │ Component │ Role │ +//! │--------------│----------------------------------------------│ +//! │ Workloads │ drive traffic (tx, DA blobs, chaos restarts) │ +//! │ Expectations │ assert cluster health │ +//! │ Deployers │ provision env (host, Docker, k8s) │ +//! │ Runner │ drives workloads/expectations, telemetry │ +//! └─────────────────────────────────────────────────────────────┘ +//! +//! Execution flow: +//! +//! ```text +//! ┌──────┐ ┌───────────────┐ ┌──────────────┐ ┌────────┐ +//! │ 1. │ ─▶ │ 2. Workloads/ │ ─▶ │ 3. Deployers │ ─▶ │ Runner │ +//! │ Plan │ │ Expectations │ │ Environment │ │ │ +//! └──────┘ └───────────────┘ └──────────────┘ └────────┘ +//! ├─ orchestrate +//! ├─ telemetry +//! └─ control +//! ``` +//! +//! Cluster interaction: +//! +//! ```text +//! ┌───────────────┐ +//! │ Deployers │ provision VMs/containers +//! └──────┬────────┘ +//! │ +//! ┌─────────▼─────────┐ +//! │ Cluster Nodes │ (validators, executors) +//! └───────┬───────────┘ +//! │ +//! ┌─────────▼──────────┐ +//! │ Runner │ command/control + telemetry +//! └──────┬────────┬────┘ +//! │ │ +//! workloads expectations +//! ``` +//! +//! Each runner consumes the same scenario plan; only the deployment backend +//! changes. `full_plan` shows the high-level builder-style DSL, while +//! `explicit_workload_plan` wires the same components explicitly through +//! `with_workload` calls. 
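+//!
+//! Every test below follows the same three-step shape (sketch; error handling
+//! elided):
+//!
+//! ```ignore
+//! let mut plan = demo_plan().build();          // 1. describe the scenario
+//! let runner = deployer.deploy(&plan).await?;  // 2. provision an environment
+//! runner.run(&mut plan).await?;                // 3. drive workloads, check expectations
+//! ```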
+ +use std::{num::NonZeroUsize, time::Duration}; + +use testing_framework_core::scenario::{ + Deployer as _, NodeControlCapability, Runner, ScenarioBuilder, +}; +use testing_framework_runner_compose::ComposeRunner; +use testing_framework_runner_k8s::K8sRunner; +use testing_framework_runner_local::LocalDeployer; +use testing_framework_workflows::ConsensusLiveness; +use tests_workflows::{ChaosBuilderExt as _, ScenarioBuilderExt as _}; + +const RUN_DURATION: Duration = Duration::from_secs(60); +const VALIDATORS: usize = 1; +const EXECUTORS: usize = 1; +const MIXED_TXS_PER_BLOCK: u64 = 5; +const TOTAL_WALLETS: usize = 64; +const TRANSACTION_WALLETS: usize = 8; + +#[rustfmt::skip] +fn explicit_workload_plan() -> testing_framework_core::scenario::Builder { + use testing_framework_workflows::workloads::{chaos::RandomRestartWorkload, da, transaction}; + + let builder = ScenarioBuilder::with_node_counts(VALIDATORS, EXECUTORS).enable_node_control(); + + let topology = builder + .topology() + .network_star() + .validators(VALIDATORS) + .executors(EXECUTORS) + .apply(); + + let chaos = RandomRestartWorkload::new( + Duration::from_secs(45), + Duration::from_secs(75), + Duration::from_secs(120), + true, + true, + ); + let tx = transaction::Workload::with_rate(MIXED_TXS_PER_BLOCK) + .expect("transaction rate must be non-zero") + .with_user_limit(Some(NonZeroUsize::new(TRANSACTION_WALLETS).unwrap())); + + let da_workload = da::Workload::with_channel_count(1); + + topology + .with_workload(chaos) + .with_workload(tx) + .with_workload(da_workload) + .with_run_duration(RUN_DURATION) + .expect_consensus_liveness() + .with_expectation(ConsensusLiveness::default()) +} + +#[rustfmt::skip] +fn full_plan() -> testing_framework_core::scenario::Builder { + ScenarioBuilder:: + with_node_counts(VALIDATORS, EXECUTORS) + .enable_node_control() + // configure random restarts and schedule + .chaos_random_restart() + // earliest interval between restarts + .min_delay(Duration::from_secs(45)) + // latest interval between restarts + .max_delay(Duration::from_secs(75)) + // avoid restarting same node too soon + .target_cooldown(Duration::from_secs(120)) + .apply() + // shape the network layout + .topology() + // star network layout for libp2p topology + .network_star() + // validator count in the plan + .validators(VALIDATORS) + // executor count in the plan + .executors(EXECUTORS) + .apply() + // seed wallet accounts + .wallets(TOTAL_WALLETS) + // transaction workload configuration + .transactions() + // submissions per block + .rate(MIXED_TXS_PER_BLOCK) + // number of unique wallet actors + .users(TRANSACTION_WALLETS) + .apply() + // data-availability workload configuration + .da() + // channel operations per block + .channel_rate(1) + // number of blobs per channel + .blob_rate(1) + .apply() + // run window and expectation + .with_run_duration(RUN_DURATION) + // assert consensus keeps up with workload + .expect_consensus_liveness() +} + +#[rustfmt::skip] +fn demo_plan() -> ScenarioBuilder { + ScenarioBuilder:: + with_node_counts(VALIDATORS, EXECUTORS) + .topology() + .network_star() + .validators(VALIDATORS) + .executors(EXECUTORS) + .apply() + .wallets(TOTAL_WALLETS) + .transactions() + .rate(MIXED_TXS_PER_BLOCK) + .users(TRANSACTION_WALLETS) + .apply() + .da() + .channel_rate(1) + .blob_rate(1) + .apply() + .with_run_duration(RUN_DURATION) + .expect_consensus_liveness() +} + +#[tokio::test] +async fn demo_local_runner_mixed_workloads() { + let mut plan = demo_plan().build(); + + let deployer = LocalDeployer::default(); + 
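+    // LocalDeployer provisions the topology as locally spawned node processes
+    // (see `testing-framework-runner-local`), so no external services are needed.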
+    let runner: Runner = deployer.deploy(&plan).await.expect("scenario deployment");
+
+    let _handle = runner
+        .run(&mut plan)
+        .await
+        .expect("scenario should execute");
+}
+
+#[tokio::test]
+async fn demo_compose_runner_tx_workload() {
+    // Keep the explicit wiring example compiled and linted.
+    let _ = explicit_workload_plan();
+
+    let mut plan = full_plan().build();
+
+    let deployer = ComposeRunner::default();
+
+    let runner: Runner = deployer.deploy(&plan).await.expect("scenario deployment");
+
+    let _handle = runner
+        .run(&mut plan)
+        .await
+        .expect("compose scenario should execute");
+}
+
+#[tokio::test]
+async fn demo_k8s_runner_tx_workload() {
+    let mut plan = demo_plan().build();
+
+    let deployer = K8sRunner::default();
+    let runner: Runner = deployer.deploy(&plan).await.expect("scenario deployment");
+
+    let _handle = runner
+        .run(&mut plan)
+        .await
+        .expect("k8s scenario should execute");
+}
diff --git a/tests/workflows/tests/k8s_runner.rs b/tests/workflows/tests/k8s_runner.rs
new file mode 100644
index 0000000..2cf25f3
--- /dev/null
+++ b/tests/workflows/tests/k8s_runner.rs
@@ -0,0 +1,70 @@
+use std::time::Duration;
+
+use serial_test::serial;
+use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder};
+use testing_framework_runner_k8s::{K8sRunner, K8sRunnerError};
+use tests_workflows::ScenarioBuilderExt as _;
+
+const RUN_DURATION: Duration = Duration::from_secs(60);
+const VALIDATORS: usize = 1;
+const EXECUTORS: usize = 1;
+// Kubernetes has less throughput headroom than the local runner, so we use a
+// lighter per-block rate while keeping the same mixed workload shape.
+const MIXED_TXS_PER_BLOCK: u64 = 2;
+
+#[tokio::test]
+#[ignore = "requires access to a Kubernetes cluster"]
+#[serial]
+async fn k8s_runner_tx_workload() {
+    let topology = ScenarioBuilder::with_node_counts(VALIDATORS, EXECUTORS)
+        .topology()
+        .validators(VALIDATORS)
+        .executors(EXECUTORS)
+        .network_star()
+        .apply();
+
+    let workloads = topology
+        .transactions()
+        .rate(MIXED_TXS_PER_BLOCK)
+        .apply()
+        .da()
+        .channel_rate(1)
+        .blob_rate(1)
+        .apply();
+
+    let mut plan = workloads.with_run_duration(RUN_DURATION).build();
+
+    let deployer = K8sRunner::new();
+    let runner: Runner = match deployer.deploy(&plan).await {
+        Ok(runner) => runner,
+        Err(K8sRunnerError::ClientInit { source }) => {
+            eprintln!("Skipping k8s_runner_tx_workload: Kubernetes cluster unavailable ({source})");
+            return;
+        }
+        Err(err) => panic!("scenario deployment failed: {err}"),
+    };
+
+    let context = runner.context();
+    assert!(
+        context.telemetry().is_configured(),
+        "k8s runner should expose prometheus metrics"
+    );
+    let validator_clients = context.node_clients().validator_clients().to_vec();
+
+    let _handle = runner
+        .run(&mut plan)
+        .await
+        .expect("k8s scenario should execute");
+
+    for (idx, client) in validator_clients.iter().enumerate() {
+        let info = client
+            .consensus_info()
+            .await
+            .unwrap_or_else(|err| panic!("validator {idx} consensus_info failed: {err}"));
+        assert!(
+            info.height >= 5,
+            "validator {idx} height {} should reach at least 5 blocks",
+            info.height
+        );
+    }
+}
diff --git a/tests/workflows/tests/local_runner.rs b/tests/workflows/tests/local_runner.rs
new file mode 100644
index 0000000..2fa0530
--- /dev/null
+++ b/tests/workflows/tests/local_runner.rs
@@ -0,0 +1,46 @@
+use std::time::Duration;
+
+use serial_test::serial;
+use testing_framework_core::scenario::{Deployer as _, Runner, ScenarioBuilder};
+use testing_framework_runner_local::LocalDeployer;
+use tests_workflows::ScenarioBuilderExt as _;
+
+const RUN_DURATION: Duration = Duration::from_secs(60);
+const VALIDATORS: usize = 1;
+const EXECUTORS: usize = 1;
+const MIXED_TXS_PER_BLOCK: u64 = 5;
+const TOTAL_WALLETS: usize = 64;
+const TRANSACTION_WALLETS: usize = 8;
+
+/// Drives both workloads concurrently to mimic a user mixing transaction flow
+/// with blob publishing on the same topology.
+#[tokio::test]
+#[serial]
+async fn local_runner_mixed_workloads() {
+    let topology = ScenarioBuilder::with_node_counts(VALIDATORS, EXECUTORS)
+        .topology()
+        .validators(VALIDATORS)
+        .executors(EXECUTORS)
+        .network_star()
+        .apply();
+
+    let workloads = topology
+        .wallets(TOTAL_WALLETS)
+        .transactions()
+        .rate(MIXED_TXS_PER_BLOCK)
+        .users(TRANSACTION_WALLETS)
+        .apply()
+        .da()
+        .channel_rate(1)
+        .blob_rate(1)
+        .apply();
+
+    let mut plan = workloads
+        .expect_consensus_liveness()
+        .with_run_duration(RUN_DURATION)
+        .build();
+
+    let deployer = LocalDeployer::default();
+    let runner: Runner = deployer.deploy(&plan).await.expect("scenario deployment");
+    let _handle = runner.run(&mut plan).await.expect("scenario executed");
+}